author     2023-09-08 15:01:25 +0200
committer  2023-09-08 15:33:03 +0000
commit     5899d7919d5ad8e0c437e72c4aa04d4024e32431 (patch)
tree       02d3f577012faa5078a564ff4b611446cdf9bbd7
parent     dd0bb03c7baf0b0b964f5cec432dee1f9a633e0b (diff)
Remove some obsolete TODO comments, fix indentation.
Test: Rely on TreeHugger.
Change-Id: I4e3c0ba13d576ef62121d47ebc4965f6667b624f
-rw-r--r--  compiler/optimizing/code_generator.cc           |  4
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     |  1
-rw-r--r--  compiler/optimizing/code_generator_arm64.h      |  2
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  |  1
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       | 11
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    |  3
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc        | 48
-rw-r--r--  compiler/optimizing/loop_analysis.cc            |  2
-rw-r--r--  compiler/optimizing/loop_optimization.cc        | 14
-rw-r--r--  compiler/optimizing/nodes.h                     | 26
-rw-r--r--  compiler/optimizing/optimization.cc             |  4

11 files changed, 56 insertions, 60 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 1fea30a359..404a42771f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -627,8 +627,8 @@ void CodeGenerator::CreateUnresolvedFieldLocationSummary(
       locations->SetOut(calling_convention.GetReturnLocation(field_type));
     }
   } else {
-     size_t set_index = is_instance ? 1 : 0;
-     if (DataType::IsFloatingPointType(field_type)) {
+    size_t set_index = is_instance ? 1 : 0;
+    if (DataType::IsFloatingPointType(field_type)) {
       // The set value comes from a float location while the calling convention
       // expects it in a regular register location. Allocate a temp for it and
       // make the transfer at codegen.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a4e4f46129..2f08858e98 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -5714,7 +5714,6 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
       break;
     }
 
-  // TODO: Re-add the compiler code to do string dex cache lookup again.
   InvokeRuntimeCallingConvention calling_convention;
   DCHECK_EQ(calling_convention.GetRegisterAt(0).GetCode(), out.GetCode());
   __ Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_);
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 597ac8a679..957f85aa21 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -116,7 +116,7 @@ const vixl::aarch64::CPURegList callee_saved_core_registers(
     vixl::aarch64::CPURegister::kRegister,
     vixl::aarch64::kXRegSize,
     (kReserveMarkingRegister ? vixl::aarch64::x21.GetCode() : vixl::aarch64::x20.GetCode()),
-     vixl::aarch64::x30.GetCode());
+    vixl::aarch64::x30.GetCode());
 const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kVRegister,
                                                           vixl::aarch64::kDRegSize,
                                                           vixl::aarch64::d8.GetCode(),
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 09f40c2996..8e121f42fc 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -7963,7 +7963,6 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
       break;
     }
 
-  // TODO: Re-add the compiler code to do string dex cache lookup again.
   DCHECK_EQ(load->GetLoadKind(), HLoadString::LoadKind::kRuntimeCall);
   InvokeRuntimeCallingConventionARMVIXL calling_convention;
   __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 190361a5ce..811b33ec37 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3006,10 +3006,10 @@ void InstructionCodeGeneratorX86::VisitX86FPNeg(HX86FPNeg* neg) {
                                                  constant_area));
     __ xorps(out.AsFpuRegister<XmmRegister>(), mask);
   } else {
-     __ movsd(mask, codegen_->LiteralInt64Address(INT64_C(0x8000000000000000),
-                                                  neg->GetBaseMethodAddress(),
-                                                  constant_area));
-     __ xorpd(out.AsFpuRegister<XmmRegister>(), mask);
+    __ movsd(mask, codegen_->LiteralInt64Address(INT64_C(0x8000000000000000),
+                                                 neg->GetBaseMethodAddress(),
+                                                 constant_area));
+    __ xorpd(out.AsFpuRegister<XmmRegister>(), mask);
   }
 }
 
@@ -7522,7 +7522,6 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S
       break;
     }
 
-  // TODO: Re-add the compiler code to do string dex cache lookup again.
   InvokeRuntimeCallingConvention calling_convention;
   DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
   __ movl(calling_convention.GetRegisterAt(0), Immediate(load->GetStringIndex().index_));
@@ -9077,7 +9076,7 @@ void CodeGeneratorX86::PatchJitRootUse(uint8_t* code,
       reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
   using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
   reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
-       dchecked_integral_cast<uint32_t>(address);
+      dchecked_integral_cast<uint32_t>(address);
 }
 
 void CodeGeneratorX86::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index df83089917..03c56023c1 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -6783,7 +6783,6 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
       break;
     }
 
-  // TODO: Re-add the compiler code to do string dex cache lookup again.
   // Custom calling convention: RAX serves as both input and output.
   __ movl(CpuRegister(RAX), Immediate(load->GetStringIndex().index_));
   codegen_->InvokeRuntime(kQuickResolveString,
@@ -8268,7 +8267,7 @@ void CodeGeneratorX86_64::PatchJitRootUse(uint8_t* code,
       reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
   using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;
   reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
-       dchecked_integral_cast<uint32_t>(address);
+      dchecked_integral_cast<uint32_t>(address);
 }
 
 void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 99da84408a..842af6b73f 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -222,34 +222,34 @@ static void GenIsInfinite(LocationSummary* locations,
     double kPositiveInfinity = std::numeric_limits<double>::infinity();
     double kNegativeInfinity = -1 * kPositiveInfinity;
 
-     __ xorq(output, output);
-     __ comisd(input, codegen->LiteralDoubleAddress(kPositiveInfinity));
-     __ j(kNotEqual, &done1);
-     __ j(kParityEven, &done2);
-     __ movq(output, Immediate(1));
-     __ jmp(&done2);
-     __ Bind(&done1);
-     __ comisd(input, codegen->LiteralDoubleAddress(kNegativeInfinity));
-     __ j(kNotEqual, &done2);
-     __ j(kParityEven, &done2);
-     __ movq(output, Immediate(1));
-     __ Bind(&done2);
+    __ xorq(output, output);
+    __ comisd(input, codegen->LiteralDoubleAddress(kPositiveInfinity));
+    __ j(kNotEqual, &done1);
+    __ j(kParityEven, &done2);
+    __ movq(output, Immediate(1));
+    __ jmp(&done2);
+    __ Bind(&done1);
+    __ comisd(input, codegen->LiteralDoubleAddress(kNegativeInfinity));
+    __ j(kNotEqual, &done2);
+    __ j(kParityEven, &done2);
+    __ movq(output, Immediate(1));
+    __ Bind(&done2);
   } else {
     float kPositiveInfinity = std::numeric_limits<float>::infinity();
     float kNegativeInfinity = -1 * kPositiveInfinity;
 
-     __ xorl(output, output);
-     __ comiss(input, codegen->LiteralFloatAddress(kPositiveInfinity));
-     __ j(kNotEqual, &done1);
-     __ j(kParityEven, &done2);
-     __ movl(output, Immediate(1));
-     __ jmp(&done2);
-     __ Bind(&done1);
-     __ comiss(input, codegen->LiteralFloatAddress(kNegativeInfinity));
-     __ j(kNotEqual, &done2);
-     __ j(kParityEven, &done2);
-     __ movl(output, Immediate(1));
-     __ Bind(&done2);
+    __ xorl(output, output);
+    __ comiss(input, codegen->LiteralFloatAddress(kPositiveInfinity));
+    __ j(kNotEqual, &done1);
+    __ j(kParityEven, &done2);
+    __ movl(output, Immediate(1));
+    __ jmp(&done2);
+    __ Bind(&done1);
+    __ comiss(input, codegen->LiteralFloatAddress(kNegativeInfinity));
+    __ j(kNotEqual, &done2);
+    __ j(kParityEven, &done2);
+    __ movl(output, Immediate(1));
+    __ Bind(&done2);
   }
 }
 
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index 5933d3d306..6163624a97 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -259,7 +259,7 @@ class X86_64LoopHelper : public ArchDefaultLoopHelper {
     case HInstruction::InstructionKind::kVecReplicateScalar:
       return 2;
     case HInstruction::InstructionKind::kVecExtractScalar:
-       return 1;
+      return 1;
     case HInstruction::InstructionKind::kVecReduce:
       return 4;
     case HInstruction::InstructionKind::kVecNeg:
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 0f8c95f703..f6d69ca789 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -418,7 +418,7 @@ static void TryToEvaluateIfCondition(HIf* instruction, HGraph* graph) {
       ++it;
       if (true_succ->Dominates(user_block)) {
         user->ReplaceInput(graph->GetIntConstant(1), index);
-       } else if (false_succ->Dominates(user_block)) {
+      } else if (false_succ->Dominates(user_block)) {
         user->ReplaceInput(graph->GetIntConstant(0), index);
       }
     }
@@ -1609,12 +1609,12 @@ void HLoopOptimization::GenerateNewLoopPredicated(LoopNode* node,
                                                              0u);
 
   HInstruction* cond =
-       new (global_allocator_) HVecPredToBoolean(global_allocator_,
-                                                 pred_while,
-                                                 HVecPredToBoolean::PCondKind::kNFirst,
-                                                 DataType::Type::kInt32,
-                                                 vector_length_,
-                                                 0u);
+      new (global_allocator_) HVecPredToBoolean(global_allocator_,
+                                                pred_while,
+                                                HVecPredToBoolean::PCondKind::kNFirst,
+                                                DataType::Type::kInt32,
+                                                vector_length_,
+                                                0u);
   vector_header_->AddInstruction(pred_while);
   vector_header_->AddInstruction(cond);
 
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index b173764f2f..1ad11d80a5 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2073,12 +2073,12 @@ class HEnvironment : public ArenaObject<kArenaAllocEnvironment> {
                ArtMethod* method,
                uint32_t dex_pc,
                HInstruction* holder)
-       : vregs_(number_of_vregs, allocator->Adapter(kArenaAllocEnvironmentVRegs)),
-         locations_(allocator->Adapter(kArenaAllocEnvironmentLocations)),
-         parent_(nullptr),
-         method_(method),
-         dex_pc_(dex_pc),
-         holder_(holder) {
+      : vregs_(number_of_vregs, allocator->Adapter(kArenaAllocEnvironmentVRegs)),
+        locations_(allocator->Adapter(kArenaAllocEnvironmentLocations)),
+        parent_(nullptr),
+        method_(method),
+        dex_pc_(dex_pc),
+        holder_(holder) {
   }
 
   ALWAYS_INLINE HEnvironment(ArenaAllocator* allocator,
@@ -2740,7 +2740,7 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
 
  private:
   using InstructionKindField =
-       BitField<InstructionKind, kFieldInstructionKind, kFieldInstructionKindSize>;
+      BitField<InstructionKind, kFieldInstructionKind, kFieldInstructionKindSize>;
 
   void FixUpUserRecordsAfterUseInsertion(HUseList<HInstruction*>::iterator fixup_end) {
     auto before_use_node = uses_.before_begin();
@@ -6529,12 +6529,12 @@ class HArrayGet final : public HExpression<2> {
             HInstruction* index,
             DataType::Type type,
             uint32_t dex_pc)
-       : HArrayGet(array,
-                   index,
-                   type,
-                   SideEffects::ArrayReadOfType(type),
-                   dex_pc,
-                   /* is_string_char_at= */ false) {
+      : HArrayGet(array,
+                  index,
+                  type,
+                  SideEffects::ArrayReadOfType(type),
+                  dex_pc,
+                  /* is_string_char_at= */ false) {
   }
 
   HArrayGet(HInstruction* array,
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 12e9a1046d..4f20b55c7e 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -313,8 +313,8 @@ ArenaVector<HOptimization*> ConstructOptimizations(
      opt = new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
      break;
    case OptimizationPass::kInstructionSimplifierX86:
-      opt = new (allocator) x86::InstructionSimplifierX86(graph, codegen, stats);
-      break;
+     opt = new (allocator) x86::InstructionSimplifierX86(graph, codegen, stats);
+     break;
 #endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
    case OptimizationPass::kInstructionSimplifierX86_64:
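
The re-indented VisitX86FPNeg hunk is a reminder of why the 0x8000000000000000 literal sits in the constant area: x86 negates a double by XOR-ing only the IEEE-754 sign bit (xorpd). A minimal stand-alone C++ sketch of the same bit trick (the helper name is ours, not ART's):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Flip only the sign bit, exactly what xorpd does against the
    // 0x8000000000000000 mask in VisitX86FPNeg. memcpy is the portable
    // way to type-pun between double and uint64_t.
    double NegateViaSignBit(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= UINT64_C(0x8000000000000000);
      std::memcpy(&value, &bits, sizeof(value));
      return value;
    }

    int main() {
      assert(NegateViaSignBit(1.5) == -1.5);
      assert(std::signbit(NegateViaSignBit(0.0)));  // 0.0 becomes -0.0
      return 0;
    }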
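Both PatchJitRootUse hunks only re-indent the dchecked_integral_cast line, but the surrounding idiom is worth noting: the aligned(1) typedef makes a 32-bit store legal at any byte offset inside the code buffer. A self-contained sketch of the same pattern (GCC/Clang extension; PatchU32 is a hypothetical helper, not the ART function):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // An alignment-1 alias of uint32_t: stores through it may land on any
    // byte boundary, unlike a plain uint32_t* write, which would be
    // undefined behavior when misaligned.
    using unaligned_uint32_t __attribute__((__aligned__(1))) = uint32_t;

    void PatchU32(uint8_t* code, size_t offset, uint32_t value) {
      reinterpret_cast<unaligned_uint32_t*>(code + offset)[0] = value;
    }

    int main() {
      uint8_t buffer[16] = {};
      PatchU32(buffer, 3, 0xDEADBEEF);  // deliberately misaligned offset
      assert(buffer[3] == 0xEF && buffer[6] == 0xDE);  // little-endian bytes
      return 0;
    }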
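The re-indented GenIsInfinite block encodes a three-way outcome: comisd/comiss sets the parity flag on an unordered compare, so the j(kParityEven, &done2) branches make NaN fall through with the output still zero. In scalar C++ the control flow reduces to roughly this (a sketch of the logic, not ART code):

    #include <cassert>
    #include <cmath>
    #include <limits>

    // Scalar rendering of GenIsInfinite's branch structure: NaN inputs
    // take the parity-even exits and leave the result 0; only ordered,
    // equal compares against +inf or -inf set it to 1.
    bool IsInfinite(double input) {
      const double kPositiveInfinity = std::numeric_limits<double>::infinity();
      if (std::isnan(input)) {
        return false;  // the j(kParityEven, &done2) path
      }
      return input == kPositiveInfinity || input == -kPositiveInfinity;
    }

    int main() {
      assert(IsInfinite(std::numeric_limits<double>::infinity()));
      assert(IsInfinite(-std::numeric_limits<double>::infinity()));
      assert(!IsInfinite(0.0) && !IsInfinite(1e308));
      assert(!IsInfinite(std::nan("")));
      return 0;
    }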
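The loop_optimization.cc hunk touches TryToEvaluateIfCondition, which folds uses of an if-condition to a constant wherever one successor of the HIf dominates the use. The miniature below illustrates only that dominance rule; Block and its idom chain are invented for the sketch and are not ART's HBasicBlock API:

    #include <cassert>

    // Toy dominator check: walk the immediate-dominator chain upward.
    struct Block {
      Block* idom = nullptr;
      bool Dominates(const Block* other) const {
        for (const Block* b = other; b != nullptr; b = b->idom) {
          if (b == this) return true;
        }
        return false;
      }
    };

    // A use dominated by the true successor can only be reached when the
    // condition was 1; dominated by the false successor, when it was 0.
    int FoldConditionUse(const Block& true_succ,
                         const Block& false_succ,
                         const Block& user_block,
                         int original) {
      if (true_succ.Dominates(&user_block)) return 1;
      if (false_succ.Dominates(&user_block)) return 0;
      return original;  // neither side dominates: leave the use unchanged
    }

    int main() {
      Block entry, then_block, else_block, inside_then;
      then_block.idom = &entry;
      else_block.idom = &entry;
      inside_then.idom = &then_block;
      assert(FoldConditionUse(then_block, else_block, inside_then, -1) == 1);
      assert(FoldConditionUse(then_block, else_block, entry, -1) == -1);
      return 0;
    }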
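Finally, the nodes.h hunk re-indents the InstructionKindField alias, an instance of ART's BitField<T, position, size> packing. A stripped-down reimplementation shows the encode/decode arithmetic (illustrative only; the field position and the enum values below are made up):

    #include <cassert>
    #include <cstdint>

    // Minimal BitField: a typed view of `size` bits starting at `position`
    // inside a uint32_t word, so several fields can share one flags word.
    template <typename T, int kPosition, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = (1u << kSize) - 1u;
      static uint32_t Update(T value, uint32_t old_word) {
        return (old_word & ~(kMask << kPosition)) |
               ((static_cast<uint32_t>(value) & kMask) << kPosition);
      }
      static T Decode(uint32_t word) {
        return static_cast<T>((word >> kPosition) & kMask);
      }
    };

    enum class InstructionKind : uint32_t { kAdd = 1, kVecReduce = 42 };

    int main() {
      using KindField = BitField<InstructionKind, 8, 7>;  // bits 8..14
      uint32_t packed = KindField::Update(InstructionKind::kVecReduce, 0xFFu);
      assert(KindField::Decode(packed) == InstructionKind::kVecReduce);
      assert((packed & 0xFFu) == 0xFFu);  // neighbouring bits untouched
      return 0;
    }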