Remove some obsolete TODO comments, fix indentation.
Test: Rely on TreeHugger.
Change-Id: I4e3c0ba13d576ef62121d47ebc4965f6667b624f
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 1fea30a..404a427 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -627,8 +627,8 @@
       locations->SetOut(calling_convention.GetReturnLocation(field_type));
     }
   } else {
-      size_t set_index = is_instance ? 1 : 0;
-      if (DataType::IsFloatingPointType(field_type)) {
+    size_t set_index = is_instance ? 1 : 0;
+    if (DataType::IsFloatingPointType(field_type)) {
       // The set value comes from a float location while the calling convention
       // expects it in a regular register location. Allocate a temp for it and
       // make the transfer at codegen.
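For readers outside the codegen, the comment above is describing a bit-preserving move: the runtime entrypoint for an unresolved field set takes every value in core registers, so a floating-point value has to cross over without numeric conversion. A minimal standalone sketch of that kind of transfer, assuming nothing from ART (the helper name is hypothetical):

    #include <cstdint>
    #include <cstring>

    // Hypothetical helper: reinterpret a float's bits as a uint32_t so the
    // value can travel in a core (general-purpose) register. std::memcpy
    // preserves the bit pattern; no numeric conversion takes place.
    uint32_t FloatBitsForCoreRegister(float value) {
      uint32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return bits;
    }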
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a4e4f46..2f08858 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -5714,7 +5714,6 @@
       break;
   }

-  // TODO: Re-add the compiler code to do string dex cache lookup again.
   InvokeRuntimeCallingConvention calling_convention;
   DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(), out.GetCode());
   __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 597ac8a..957f85a 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -116,7 +116,7 @@
     vixl::aarch64::CPURegister::kRegister,
     vixl::aarch64::kXRegSize,
     (kReserveMarkingRegister ? vixl::aarch64::x21.GetCode() : vixl::aarch64::x20.GetCode()),
-                                                            vixl::aarch64::x30.GetCode());
+    vixl::aarch64::x30.GetCode());
 const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kVRegister,
                                                           vixl::aarch64::kDRegSize,
                                                           vixl::aarch64::d8.GetCode(),
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 09f40c2..8e121f4 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -7963,7 +7963,6 @@
       break;
   }

-  // TODO: Re-add the compiler code to do string dex cache lookup again.
   DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kRuntimeCall);
   InvokeRuntimeCallingConventionARMVIXL calling_convention;
   __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 190361a..811b33e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3006,10 +3006,10 @@
                                                  constant_area));
     __ xorps(out.AsFpuRegister<XmmRegister>(), mask);
   } else {
-      __ movsd(mask, codegen_->LiteralInt64Address(INT64_C(0x8000000000000000),
-                                                   neg->GetBaseMethodAddress(),
-                                                   constant_area));
-      __ xorpd(out.AsFpuRegister<XmmRegister>(), mask);
+    __ movsd(mask, codegen_->LiteralInt64Address(INT64_C(0x8000000000000000),
+                                                 neg->GetBaseMethodAddress(),
+                                                 constant_area));
+    __ xorpd(out.AsFpuRegister<XmmRegister>(), mask);
   }
 }

@@ -7522,7 +7522,6 @@
       break;
   }

-  // TODO: Re-add the compiler code to do string dex cache lookup again.
   InvokeRuntimeCallingConvention calling_convention;
   DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
   __ movl(calling_convention.GetRegisterAt(0), Immediate(load->GetStringIndex().index_));
@@ -9077,7 +9076,7 @@
       reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
   using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
   reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
-    dchecked_integral_cast<uint32_t>(address);
+      dchecked_integral_cast<uint32_t>(address);
 }

 void CodeGeneratorX86::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
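The hunk above relies on a pattern worth spelling out: JIT code is byte-addressed, so the 4-byte slot being patched can sit at any offset, and a store through a plain uint32_t* would be undefined behavior when misaligned. The __aligned__(1) typedef makes the compiler emit an unaligned-safe store. A minimal sketch of the same technique, with illustrative names rather than ART's:

    #include <cstddef>
    #include <cstdint>

    // Telling the compiler this uint32_t may sit at any byte offset makes
    // the store below safe even when code + offset is not 4-byte aligned.
    using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;

    // Hypothetical helper: patch a 32-bit word into a code buffer at an
    // arbitrary (possibly misaligned) byte offset.
    void PatchWord(uint8_t* code, size_t offset, uint32_t value) {
      reinterpret_cast<unaligned_uint32_t*>(code + offset)[0] = value;
    }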
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index df83089..03c5602 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -6783,7 +6783,6 @@
       break;
   }

-  // TODO: Re-add the compiler code to do string dex cache lookup again.
   // Custom calling convention: RAX serves as both input and output.
   __ movl(CpuRegister(RAX), Immediate(load->GetStringIndex().index_));
   codegen_->InvokeRuntime(kQuickResolveString,
@@ -8268,7 +8267,7 @@
       reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
   using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
   reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
-    dchecked_integral_cast<uint32_t>(address);
+      dchecked_integral_cast<uint32_t>(address);
 }

 void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 99da844..842af6b 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -222,34 +222,34 @@
     double kPositiveInfinity = std::numeric_limits<double>::infinity();
     double kNegativeInfinity = -1 * kPositiveInfinity;

-     __ xorq(output, output);
-     __ comisd(input, codegen->LiteralDoubleAddress(kPositiveInfinity));
-     __ j(kNotEqual, &done1);
-     __ j(kParityEven, &done2);
-     __ movq(output, Immediate(1));
-     __ jmp(&done2);
-     __ Bind(&done1);
-     __ comisd(input, codegen->LiteralDoubleAddress(kNegativeInfinity));
-     __ j(kNotEqual, &done2);
-     __ j(kParityEven, &done2);
-     __ movq(output, Immediate(1));
-     __ Bind(&done2);
+    __ xorq(output, output);
+    __ comisd(input, codegen->LiteralDoubleAddress(kPositiveInfinity));
+    __ j(kNotEqual, &done1);
+    __ j(kParityEven, &done2);
+    __ movq(output, Immediate(1));
+    __ jmp(&done2);
+    __ Bind(&done1);
+    __ comisd(input, codegen->LiteralDoubleAddress(kNegativeInfinity));
+    __ j(kNotEqual, &done2);
+    __ j(kParityEven, &done2);
+    __ movq(output, Immediate(1));
+    __ Bind(&done2);
   } else {
     float kPositiveInfinity = std::numeric_limits<float>::infinity();
     float kNegativeInfinity = -1 * kPositiveInfinity;

-     __ xorl(output, output);
-     __ comiss(input, codegen->LiteralFloatAddress(kPositiveInfinity));
-     __ j(kNotEqual, &done1);
-     __ j(kParityEven, &done2);
-     __ movl(output, Immediate(1));
-     __ jmp(&done2);
-     __ Bind(&done1);
-     __ comiss(input, codegen->LiteralFloatAddress(kNegativeInfinity));
-     __ j(kNotEqual, &done2);
-     __ j(kParityEven, &done2);
-     __ movl(output, Immediate(1));
-     __ Bind(&done2);
+    __ xorl(output, output);
+    __ comiss(input, codegen->LiteralFloatAddress(kPositiveInfinity));
+    __ j(kNotEqual, &done1);
+    __ j(kParityEven, &done2);
+    __ movl(output, Immediate(1));
+    __ jmp(&done2);
+    __ Bind(&done1);
+    __ comiss(input, codegen->LiteralFloatAddress(kNegativeInfinity));
+    __ j(kNotEqual, &done2);
+    __ j(kParityEven, &done2);
+    __ movl(output, Immediate(1));
+    __ Bind(&done2);
   }
 }

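The re-indented block above implements the isInfinite intrinsic: comisd/comiss set the parity flag on an unordered comparison, so each kParityEven jump filters out NaN before the equality with positive or negative infinity is trusted. A standalone sketch of the same control flow in plain C++ (a model of the logic, not the generated code itself):

    #include <limits>

    // Mirrors the assembly: output starts at 0; NaN never compares equal
    // (the unordered, parity-even path), so it falls through and keeps 0;
    // equality with +inf or -inf sets output to 1.
    int IsInfinite(double input) {
      const double kPositiveInfinity = std::numeric_limits<double>::infinity();
      const double kNegativeInfinity = -1 * kPositiveInfinity;
      int output = 0;
      if (input == kPositiveInfinity || input == kNegativeInfinity) {
        output = 1;
      }
      return output;
    }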
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index 5933d3d..6163624 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -259,7 +259,7 @@
     case HInstruction::InstructionKind::kVecReplicateScalar:
       return 2;
     case HInstruction::InstructionKind::kVecExtractScalar:
-     return 1;
+      return 1;
     case HInstruction::InstructionKind::kVecReduce:
       return 4;
     case HInstruction::InstructionKind::kVecNeg:
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 0f8c95f..f6d69ca 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -418,7 +418,7 @@
       ++it;
       if (true_succ->Dominates(user_block)) {
         user->ReplaceInput(graph->GetIntConstant(1), index);
-       } else if (false_succ->Dominates(user_block)) {
+      } else if (false_succ->Dominates(user_block)) {
         user->ReplaceInput(graph->GetIntConstant(0), index);
       }
     }
@@ -1609,12 +1609,12 @@
                                             0u);

   HInstruction* cond =
-    new (global_allocator_) HVecPredToBoolean(global_allocator_,
-                                              pred_while,
-                                              HVecPredToBoolean::PCondKind::kNFirst,
-                                              DataType::Type::kInt32,
-                                              vector_length_,
-                                              0u);
+      new (global_allocator_) HVecPredToBoolean(global_allocator_,
+                                                pred_while,
+                                                HVecPredToBoolean::PCondKind::kNFirst,
+                                                DataType::Type::kInt32,
+                                                vector_length_,
+                                                0u);

   vector_header_->AddInstruction(pred_while);
   vector_header_->AddInstruction(cond);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index b173764..1ad11d8 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2073,12 +2073,12 @@
                              ArtMethod* method,
                              uint32_t dex_pc,
                              HInstruction* holder)
-    : vregs_(number_of_vregs, allocator->Adapter(kArenaAllocEnvironmentVRegs)),
-      locations_(allocator->Adapter(kArenaAllocEnvironmentLocations)),
-      parent_(nullptr),
-      method_(method),
-      dex_pc_(dex_pc),
-      holder_(holder) {
+      : vregs_(number_of_vregs, allocator->Adapter(kArenaAllocEnvironmentVRegs)),
+        locations_(allocator->Adapter(kArenaAllocEnvironmentLocations)),
+        parent_(nullptr),
+        method_(method),
+        dex_pc_(dex_pc),
+        holder_(holder) {
   }

   ALWAYS_INLINE HEnvironment(ArenaAllocator* allocator,
@@ -2740,7 +2740,7 @@

  private:
   using InstructionKindField =
-    BitField<InstructionKind, kFieldInstructionKind, kFieldInstructionKindSize>;
+      BitField<InstructionKind, kFieldInstructionKind, kFieldInstructionKindSize>;

   void FixUpUserRecordsAfterUseInsertion(HUseList<HInstruction*>::iterator fixup_end) {
     auto before_use_node = uses_.before_begin();
@@ -6529,12 +6529,12 @@
             HInstruction* index,
             DataType::Type type,
             uint32_t dex_pc)
-    : HArrayGet(array,
-                index,
-                type,
-                SideEffects::ArrayReadOfType(type),
-                dex_pc,
-                /* is_string_char_at= */ false) {
+      : HArrayGet(array,
+                  index,
+                  type,
+                  SideEffects::ArrayReadOfType(type),
+                  dex_pc,
+                  /* is_string_char_at= */ false) {
   }

   HArrayGet(HInstruction* array,
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 12e9a10..4f20b55 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -313,8 +313,8 @@
       opt = new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
       break;
     case OptimizationPass::kInstructionSimplifierX86:
-     opt = new (allocator) x86::InstructionSimplifierX86(graph, codegen, stats);
-     break;
+      opt = new (allocator) x86::InstructionSimplifierX86(graph, codegen, stats);
+      break;
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
     case OptimizationPass::kInstructionSimplifierX86_64: