author     2018-12-26 15:12:03 -0800
committer  2018-12-27 12:56:39 -0800
commit     3db70689e3e1c92344d436a8ea4265046bdef449 (patch)
tree       3db08743e968062ed5bdc143233cdb3c4564696b /compiler/optimizing
parent     1650dafad62578a1766bd617d78458a4cf1e2a9a (diff)
ART: Refactor for bugprone-argument-comment
Handles compiler.
Bug: 116054210
Test: WITH_TIDY=1 mmma art
Change-Id: I5cdfe73c31ac39144838a2736146b71de037425e
Diffstat (limited to 'compiler/optimizing')
54 files changed, 901 insertions, 900 deletions
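The convention applied throughout the diff below comes from clang-tidy's bugprone-argument-comment check, which verifies that an inline argument comment names the actual parameter; the check only recognizes comments written in the /* parameter_name= */ form, which is why the refactor adds the trailing '='. A minimal sketch of the before/after style, assuming a hypothetical helper whose function and parameter names are made up purely to mirror the pattern in this commit:

  // Hypothetical helper; only the argument-comment style mirrors the ART change.
  void InsertDeopt(bool is_null_check, int dex_pc);

  void Example() {
    // Before: the comment is not in a form the check verifies, so a stale or
    // mismatched name (e.g. after a parameter is renamed) goes unnoticed.
    InsertDeopt(/* is_null_check */ true, /* dex_pc */ 0);

    // After: the trailing '=' makes the comment verifiable; bugprone-argument-comment
    // reports a mismatch if "is_null_check" or "dex_pc" stops matching the declaration.
    InsertDeopt(/* is_null_check= */ true, /* dex_pc= */ 0);
  }

The commit's own test line exercises the check through the Android build (WITH_TIDY=1 mmma art); outside that build it can be run in isolation with something like clang-tidy -checks='-*,bugprone-argument-comment' file.cc, assuming a compilation database is available.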
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc index d9df23fd47..d1ccbee5f5 100644 --- a/compiler/optimizing/block_builder.cc +++ b/compiler/optimizing/block_builder.cc @@ -415,7 +415,7 @@ void HBasicBlockBuilder::BuildIntrinsic() { // Create blocks. HBasicBlock* entry_block = new (allocator_) HBasicBlock(graph_, kNoDexPc); HBasicBlock* exit_block = new (allocator_) HBasicBlock(graph_, kNoDexPc); - HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc */ kNoDexPc, /* store_dex_pc */ 0u); + HBasicBlock* body = MaybeCreateBlockAt(/* semantic_dex_pc= */ kNoDexPc, /* store_dex_pc= */ 0u); // Add blocks to the graph. graph_->AddBlock(entry_block); diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc index 1c3660c0a7..54a1ae9f9e 100644 --- a/compiler/optimizing/bounds_check_elimination.cc +++ b/compiler/optimizing/bounds_check_elimination.cc @@ -1634,7 +1634,7 @@ class BCEVisitor : public HGraphVisitor { HBasicBlock* block = GetPreHeader(loop, check); HInstruction* cond = new (GetGraph()->GetAllocator()) HEqual(array, GetGraph()->GetNullConstant()); - InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true); + InsertDeoptInLoop(loop, block, cond, /* is_null_check= */ true); ReplaceInstruction(check, array); return true; } diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc index e15161e093..5927d681b2 100644 --- a/compiler/optimizing/bounds_check_elimination_test.cc +++ b/compiler/optimizing/bounds_check_elimination_test.cc @@ -43,7 +43,7 @@ class BoundsCheckEliminationTest : public OptimizingUnitTest { void RunBCE() { graph_->BuildDominatorTree(); - InstructionSimplifier(graph_, /* codegen */ nullptr).Run(); + InstructionSimplifier(graph_, /* codegen= */ nullptr).Run(); SideEffectsAnalysis side_effects(graph_); side_effects.Run(); diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 2184f99d76..04e0cc4bdd 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -414,7 +414,7 @@ void CodeGenerator::Compile(CodeAllocator* allocator) { // This ensures that we have correct native line mapping for all native instructions. // It is necessary to make stepping over a statement work. Otherwise, any initial // instructions (e.g. moves) would be assumed to be the start of next statement. - MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc()); + MaybeRecordNativeDebugInfo(/* instruction= */ nullptr, block->GetDexPc()); for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { HInstruction* current = it.Current(); if (current->HasEnvironment()) { @@ -1085,7 +1085,7 @@ void CodeGenerator::RecordPcInfo(HInstruction* instruction, // call). Therefore register_mask contains both callee-save and caller-save // registers that hold objects. We must remove the spilled caller-save from the // mask, since they will be overwritten by the callee. - uint32_t spills = GetSlowPathSpills(locations, /* core_registers */ true); + uint32_t spills = GetSlowPathSpills(locations, /* core_registers= */ true); register_mask &= ~spills; } else { // The register mask must be a subset of callee-save registers. @@ -1164,7 +1164,7 @@ void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction, // Ensure that we do not collide with the stack map of the previous instruction. 
GenerateNop(); } - RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info */ true); + RecordPcInfo(instruction, dex_pc, slow_path, /* native_debug_info= */ true); } } @@ -1182,8 +1182,8 @@ void CodeGenerator::RecordCatchBlockInfo() { stack_map_stream->BeginStackMapEntry(dex_pc, native_pc, - /* register_mask */ 0, - /* stack_mask */ nullptr, + /* register_mask= */ 0, + /* sp_mask= */ nullptr, StackMap::Kind::Catch); HInstruction* current_phi = block->GetFirstPhi(); @@ -1555,7 +1555,7 @@ void CodeGenerator::ValidateInvokeRuntimeWithoutRecordingPcInfo(HInstruction* in void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); - const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true); + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); for (uint32_t i : LowToHighBits(core_spills)) { // If the register holds an object, update the stack mask. if (locations->RegisterContainsObject(i)) { @@ -1567,7 +1567,7 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo stack_offset += codegen->SaveCoreRegister(stack_offset, i); } - const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); + const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); for (uint32_t i : LowToHighBits(fp_spills)) { DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); @@ -1579,14 +1579,14 @@ void SlowPathCode::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* lo void SlowPathCode::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); - const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true); + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); for (uint32_t i : LowToHighBits(core_spills)) { DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); stack_offset += codegen->RestoreCoreRegister(stack_offset, i); } - const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); + const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); for (uint32_t i : LowToHighBits(fp_spills)) { DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 9e2fd9ef84..ff99a3eff2 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -168,8 +168,8 @@ static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen, LocationSummary* locations, int64_t spill_offset, bool is_save) { - const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true); - const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); + const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills, 
codegen->GetNumberOfCoreRegisters(), fp_spills, @@ -212,7 +212,7 @@ static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen, void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); - const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true); + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); for (uint32_t i : LowToHighBits(core_spills)) { // If the register holds an object, update the stack mask. if (locations->RegisterContainsObject(i)) { @@ -224,7 +224,7 @@ void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummar stack_offset += kXRegSizeInBytes; } - const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); + const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); for (uint32_t i : LowToHighBits(fp_spills)) { DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); @@ -234,13 +234,13 @@ void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummar SaveRestoreLiveRegistersHelper(codegen, locations, - codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */); + codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ true); } void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) { SaveRestoreLiveRegistersHelper(codegen, locations, - codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */); + codegen->GetFirstRegisterSlotInSlowPath(), /* is_save= */ false); } class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 { @@ -926,7 +926,7 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) { uint32_t encoded_data = entry.first; vixl::aarch64::Label* slow_path_entry = &entry.second.label; __ Bind(slow_path_entry); - CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr); + CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr); } // Ensure we emit the literal pool. @@ -1118,7 +1118,7 @@ void CodeGeneratorARM64::GenerateFrameEntry() { } } - MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void CodeGeneratorARM64::GenerateFrameExit() { @@ -1888,7 +1888,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction, base, offset, maybe_temp, - /* needs_null_check */ true, + /* needs_null_check= */ true, field_info.IsVolatile()); } else { // General case. @@ -1897,7 +1897,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction, // CodeGeneratorARM64::LoadAcquire call. // NB: LoadAcquire will record the pc info if needed. codegen_->LoadAcquire( - instruction, OutputCPURegister(instruction), field, /* needs_null_check */ true); + instruction, OutputCPURegister(instruction), field, /* needs_null_check= */ true); } else { // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted. 
EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); @@ -1952,7 +1952,7 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction, if (field_info.IsVolatile()) { codegen_->StoreRelease( - instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check */ true); + instruction, field_type, source, HeapOperand(obj, offset), /* needs_null_check= */ true); } else { // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted. EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes); @@ -2376,11 +2376,11 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { obj.W(), offset, maybe_temp, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); } else { codegen_->GenerateArrayLoadWithBakerReadBarrier( - instruction, out, obj.W(), offset, index, /* needs_null_check */ false); + instruction, out, obj.W(), offset, index, /* needs_null_check= */ false); } } else { // General case. @@ -2925,7 +2925,7 @@ void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperati int64_t magic; int shift; CalculateMagicAndShiftForDivRem( - imm, type == DataType::Type::kInt64 /* is_long */, &magic, &shift); + imm, /* is_long= */ type == DataType::Type::kInt64, &magic, &shift); UseScratchRegisterScope temps(GetVIXLAssembler()); Register temp = temps.AcquireSameSizeAs(out); @@ -3116,7 +3116,7 @@ void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* s } if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) { GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } if (!codegen_->GoesToNextBlock(block, successor)) { __ B(codegen_->GetLabelOf(successor)); @@ -3266,7 +3266,7 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) { if (codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor)) { false_target = nullptr; } - GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target); + GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target); } void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) { @@ -3285,9 +3285,9 @@ void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) { SlowPathCodeARM64* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM64>(deoptimize); GenerateTestAndBranch(deoptimize, - /* condition_input_index */ 0, + /* condition_input_index= */ 0, slow_path->GetEntryLabel(), - /* false_target */ nullptr); + /* false_target= */ nullptr); } void LocationsBuilderARM64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { @@ -3627,7 +3627,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { __ Cmp(out, cls); DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ B(ne, slow_path->GetEntryLabel()); __ Mov(out, 1); @@ -3659,7 +3659,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) { // This should also be beneficial for the other cases above. 
DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); if (zero.IsLinked()) { @@ -3952,7 +3952,7 @@ void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) { void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) { codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) { @@ -4022,7 +4022,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invok codegen_->RecordPcInfo(invoke, invoke->GetDexPc()); } - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { @@ -4201,7 +4201,7 @@ void LocationsBuilderARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) { void InstructionCodeGeneratorARM64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) { codegen_->GenerateInvokePolymorphicCall(invoke); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) { @@ -4210,21 +4210,21 @@ void LocationsBuilderARM64::VisitInvokeCustom(HInvokeCustom* invoke) { void InstructionCodeGeneratorARM64::VisitInvokeCustom(HInvokeCustom* invoke) { codegen_->GenerateInvokeCustomCall(invoke); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageIntrinsicPatch( uint32_t intrinsic_data, vixl::aarch64::Label* adrp_label) { return NewPcRelativePatch( - /* dex_file */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_); + /* dex_file= */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_); } vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch( uint32_t boot_image_offset, vixl::aarch64::Label* adrp_label) { return NewPcRelativePatch( - /* dex_file */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_); + /* dex_file= */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_); } vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageMethodPatch( @@ -4308,7 +4308,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLitera ReserveJitStringRoot(StringReference(&dex_file, string_index), handle); return jit_string_patches_.GetOrCreate( StringReference(&dex_file, string_index), - [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); }); + [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); }); } vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral( @@ -4316,7 +4316,7 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle); return jit_class_patches_.GetOrCreate( TypeReference(&dex_file, type_index), - [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); }); + [this]() { return __ 
CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); }); } void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, @@ -4513,7 +4513,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir DCHECK(!invoke->IsStaticWithExplicitClinitCheck()); if (TryGenerateIntrinsicCode(invoke, codegen_)) { - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); return; } @@ -4526,12 +4526,12 @@ void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDir invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation()); } - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { if (TryGenerateIntrinsicCode(invoke, codegen_)) { - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); return; } @@ -4543,7 +4543,7 @@ void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) { DCHECK(!codegen_->IsLeafMethod()); } - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind( @@ -4611,7 +4611,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA HLoadClass::LoadKind load_kind = cls->GetLoadKind(); if (load_kind == HLoadClass::LoadKind::kRuntimeCall) { codegen_->GenerateLoadClassRuntimeCall(cls); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); return; } DCHECK(!cls->NeedsAccessCheck()); @@ -4633,7 +4633,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value(), - /* fixup_label */ nullptr, + /* fixup_label= */ nullptr, read_barrier_option); break; } @@ -4696,8 +4696,8 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA codegen_->GenerateGcRootFieldLoad(cls, out_loc, out.X(), - /* offset */ 0, - /* fixup_label */ nullptr, + /* offset= */ 0, + /* fixup_label= */ nullptr, read_barrier_option); break; } @@ -4721,7 +4721,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA } else { __ Bind(slow_path->GetExitLabel()); } - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } } @@ -4859,7 +4859,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD codegen_->AddSlowPath(slow_path); __ Cbz(out.X(), slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); return; } case HLoadString::LoadKind::kJitBootImageAddress: { @@ -4875,8 +4875,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD codegen_->GenerateGcRootFieldLoad(load, out_loc, out.X(), - /* offset */ 0, - /* fixup_label */ nullptr, + /* offset= */ 0, + /* fixup_label= */ nullptr, kCompilerReadBarrierOption); return; } @@ -4890,7 +4890,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD __ 
Mov(calling_convention.GetRegisterAt(0).W(), load->GetStringIndex().index_); codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc()); CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>(); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) { @@ -4918,7 +4918,7 @@ void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* ins } else { CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>(); } - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void LocationsBuilderARM64::VisitMul(HMul* mul) { @@ -5013,7 +5013,7 @@ void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) { QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction); codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>(); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { @@ -5027,7 +5027,7 @@ void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) { void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void LocationsBuilderARM64::VisitNot(HNot* instruction) { @@ -5502,7 +5502,7 @@ void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction return; } GenerateSuspendCheck(instruction, nullptr); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void LocationsBuilderARM64::VisitThrow(HThrow* instruction) { @@ -5715,8 +5715,8 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister( out_reg, offset, maybe_temp, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); } else { // Load with slow path based read barrier. // Save the value of `out` into `maybe_temp` before overwriting it @@ -5756,8 +5756,8 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters( obj_reg, offset, maybe_temp, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); } else { // Load with slow path based read barrier. // /* HeapReference<Object> */ out = *(obj + offset) @@ -5842,7 +5842,7 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad( // Note that GC roots are not affected by heap poisoning, thus we // do not have to unpoison `root_reg` here. 
} - MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__); + MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__); } void CodeGeneratorARM64::GenerateUnsafeCasOldValueMovWithBakerReadBarrier( @@ -5931,7 +5931,7 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins } __ bind(&return_address); } - MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1)); + MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1)); } void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, @@ -6039,7 +6039,7 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instru } __ bind(&return_address); } - MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1)); + MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__, /* temp_loc= */ LocationFrom(ip1)); } void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) { diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index dad18134de..8204f1eecb 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -319,7 +319,7 @@ void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen, LocationSumm size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); size_t orig_offset = stack_offset; - const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true); + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); for (uint32_t i : LowToHighBits(core_spills)) { // If the register holds an object, update the stack mask. if (locations->RegisterContainsObject(i)) { @@ -334,7 +334,7 @@ void SlowPathCodeARMVIXL::SaveLiveRegisters(CodeGenerator* codegen, LocationSumm CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen); arm_codegen->GetAssembler()->StoreRegisterList(core_spills, orig_offset); - uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); + uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); orig_offset = stack_offset; for (uint32_t i : LowToHighBits(fp_spills)) { DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); @@ -357,7 +357,7 @@ void SlowPathCodeARMVIXL::RestoreLiveRegisters(CodeGenerator* codegen, LocationS size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath(); size_t orig_offset = stack_offset; - const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true); + const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ true); for (uint32_t i : LowToHighBits(core_spills)) { DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize()); DCHECK_LT(i, kMaximumNumberOfExpectedRegisters); @@ -368,7 +368,7 @@ void SlowPathCodeARMVIXL::RestoreLiveRegisters(CodeGenerator* codegen, LocationS CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen); arm_codegen->GetAssembler()->LoadRegisterList(core_spills, orig_offset); - uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); + uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false); while (fp_spills != 0u) { uint32_t begin = CTZ(fp_spills); uint32_t tmp = fp_spills + (1u << begin); @@ -1539,7 +1539,7 @@ static void 
GenerateConditionGeneric(HCondition* cond, CodeGeneratorARMVIXL* cod vixl32::Label done_label; vixl32::Label* const final_label = codegen->GetFinalLabel(cond, &done_label); - __ B(condition.second, final_label, /* far_target */ false); + __ B(condition.second, final_label, /* is_far_target= */ false); __ Mov(out, 1); if (done_label.IsReferenced()) { @@ -1934,7 +1934,7 @@ void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) { uint32_t encoded_data = entry.first; vixl::aarch32::Label* slow_path_entry = &entry.second.label; __ Bind(slow_path_entry); - CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr); + CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name= */ nullptr); } GetAssembler()->FinalizeCode(); @@ -2159,7 +2159,7 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() { GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag()); } - MaybeGenerateMarkingRegisterCheck(/* code */ 1); + MaybeGenerateMarkingRegisterCheck(/* code= */ 1); } void CodeGeneratorARMVIXL::GenerateFrameExit() { @@ -2427,7 +2427,7 @@ void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock* } if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) { GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 2); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 2); } if (!codegen_->GoesToNextBlock(block, successor)) { __ B(codegen_->GetLabelOf(successor)); @@ -2606,7 +2606,7 @@ void InstructionCodeGeneratorARMVIXL::VisitIf(HIf* if_instr) { nullptr : codegen_->GetLabelOf(true_successor); vixl32::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ? 
nullptr : codegen_->GetLabelOf(false_successor); - GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target); + GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target); } void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) { @@ -2625,9 +2625,9 @@ void InstructionCodeGeneratorARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) { SlowPathCodeARMVIXL* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARMVIXL>(deoptimize); GenerateTestAndBranch(deoptimize, - /* condition_input_index */ 0, + /* condition_input_index= */ 0, slow_path->GetEntryLabel(), - /* false_target */ nullptr); + /* false_target= */ nullptr); } void LocationsBuilderARMVIXL::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { @@ -2793,7 +2793,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSelect(HSelect* select) { } } - GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target */ false); + GenerateTestAndBranch(select, 2, true_target, false_target, /* far_target= */ false); codegen_->MoveLocation(out, src, type); if (output_overlaps_with_condition_inputs) { __ B(target); @@ -3135,7 +3135,7 @@ void LocationsBuilderARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) { void InstructionCodeGeneratorARMVIXL::VisitInvokeUnresolved(HInvokeUnresolved* invoke) { codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 3); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 3); } void LocationsBuilderARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { @@ -3166,7 +3166,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrD DCHECK(!invoke->IsStaticWithExplicitClinitCheck()); if (TryGenerateIntrinsicCode(invoke, codegen_)) { - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 4); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 4); return; } @@ -3174,7 +3174,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeStaticOrDirect(HInvokeStaticOrD codegen_->GenerateStaticOrDirectCall( invoke, locations->HasTemps() ? 
locations->GetTemp(0) : Location::NoLocation()); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 5); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 5); } void LocationsBuilderARMVIXL::HandleInvoke(HInvoke* invoke) { @@ -3193,14 +3193,14 @@ void LocationsBuilderARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) { void InstructionCodeGeneratorARMVIXL::VisitInvokeVirtual(HInvokeVirtual* invoke) { if (TryGenerateIntrinsicCode(invoke, codegen_)) { - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 6); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 6); return; } codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0)); DCHECK(!codegen_->IsLeafMethod()); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 7); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 7); } void LocationsBuilderARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) { @@ -3278,7 +3278,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* inv DCHECK(!codegen_->IsLeafMethod()); } - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 8); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 8); } void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) { @@ -3287,7 +3287,7 @@ void LocationsBuilderARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) void InstructionCodeGeneratorARMVIXL::VisitInvokePolymorphic(HInvokePolymorphic* invoke) { codegen_->GenerateInvokePolymorphicCall(invoke); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 9); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 9); } void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) { @@ -3296,7 +3296,7 @@ void LocationsBuilderARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) { void InstructionCodeGeneratorARMVIXL::VisitInvokeCustom(HInvokeCustom* invoke) { codegen_->GenerateInvokeCustomCall(invoke); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 10); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 10); } void LocationsBuilderARMVIXL::VisitNeg(HNeg* neg) { @@ -4013,7 +4013,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateDivRemWithAnyConstant(HBinaryOpera int64_t magic; int shift; - CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift); + CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift); // TODO(VIXL): Change the static cast to Operand::From() after VIXL is fixed. __ Mov(temp1, static_cast<int32_t>(magic)); @@ -4421,7 +4421,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxFloat(HInstruction* minmax, __ Vcmp(op1, op2); __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR); - __ B(vs, &nan, /* far_target */ false); // if un-ordered, go to NaN handling. + __ B(vs, &nan, /* is_far_target= */ false); // if un-ordered, go to NaN handling. // op1 <> op2 vixl32::ConditionType cond = is_min ? gt : lt; @@ -4433,7 +4433,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxFloat(HInstruction* minmax, __ vmov(cond, F32, out, op2); } // for <>(not equal), we've done min/max calculation. - __ B(ne, final_label, /* far_target */ false); + __ B(ne, final_label, /* is_far_target= */ false); // handle op1 == op2, max(+0.0,-0.0), min(+0.0,-0.0). __ Vmov(temp1, op1); @@ -4478,7 +4478,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxDouble(HInstruction* minmax, __ Vcmp(op1, op2); __ Vmrs(RegisterOrAPSR_nzcv(kPcCode), FPSCR); - __ B(vs, &handle_nan_eq, /* far_target */ false); // if un-ordered, go to NaN handling. 
+ __ B(vs, &handle_nan_eq, /* is_far_target= */ false); // if un-ordered, go to NaN handling. // op1 <> op2 vixl32::ConditionType cond = is_min ? gt : lt; @@ -4490,7 +4490,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateMinMaxDouble(HInstruction* minmax, __ vmov(cond, F64, out, op2); } // for <>(not equal), we've done min/max calculation. - __ B(ne, final_label, /* far_target */ false); + __ B(ne, final_label, /* is_far_target= */ false); // handle op1 == op2, max(+0.0,-0.0). if (!is_min) { @@ -4714,7 +4714,7 @@ void InstructionCodeGeneratorARMVIXL::HandleLongRotate(HRor* ror) { __ And(shift_right, RegisterFrom(rhs), 0x1F); __ Lsrs(shift_left, RegisterFrom(rhs), 6); __ Rsb(LeaveFlags, shift_left, shift_right, Operand::From(kArmBitsPerWord)); - __ B(cc, &shift_by_32_plus_shift_right, /* far_target */ false); + __ B(cc, &shift_by_32_plus_shift_right, /* is_far_target= */ false); // out_reg_hi = (reg_hi << shift_left) | (reg_lo >> shift_right). // out_reg_lo = (reg_lo << shift_left) | (reg_hi >> shift_right). @@ -5030,7 +5030,7 @@ void LocationsBuilderARMVIXL::VisitNewInstance(HNewInstance* instruction) { void InstructionCodeGeneratorARMVIXL::VisitNewInstance(HNewInstance* instruction) { codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 11); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 11); } void LocationsBuilderARMVIXL::VisitNewArray(HNewArray* instruction) { @@ -5048,7 +5048,7 @@ void InstructionCodeGeneratorARMVIXL::VisitNewArray(HNewArray* instruction) { codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc()); CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>(); DCHECK(!codegen_->IsLeafMethod()); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 12); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 12); } void LocationsBuilderARMVIXL::VisitParameterValue(HParameterValue* instruction) { @@ -5170,8 +5170,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) { } case DataType::Type::kInt64: { __ Cmp(HighRegisterFrom(left), HighRegisterFrom(right)); // Signed compare. - __ B(lt, &less, /* far_target */ false); - __ B(gt, &greater, /* far_target */ false); + __ B(lt, &less, /* is_far_target= */ false); + __ B(gt, &greater, /* is_far_target= */ false); // Emit move to `out` before the last `Cmp`, as `Mov` might affect the status flags. __ Mov(out, 0); __ Cmp(LowRegisterFrom(left), LowRegisterFrom(right)); // Unsigned compare. @@ -5192,8 +5192,8 @@ void InstructionCodeGeneratorARMVIXL::VisitCompare(HCompare* compare) { UNREACHABLE(); } - __ B(eq, final_label, /* far_target */ false); - __ B(less_cond, &less, /* far_target */ false); + __ B(eq, final_label, /* is_far_target= */ false); + __ B(less_cond, &less, /* is_far_target= */ false); __ Bind(&greater); __ Mov(out, 1); @@ -5608,7 +5608,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction, // Note that a potential implicit null check is handled in this // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call. 
codegen_->GenerateFieldLoadWithBakerReadBarrier( - instruction, out, base, offset, maybe_temp, /* needs_null_check */ true); + instruction, out, base, offset, maybe_temp, /* needs_null_check= */ true); if (is_volatile) { codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); } @@ -5964,7 +5964,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { __ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not. static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u, "Expecting 0=compressed, 1=uncompressed"); - __ B(cs, &uncompressed_load, /* far_target */ false); + __ B(cs, &uncompressed_load, /* is_far_target= */ false); GetAssembler()->LoadFromOffset(kLoadUnsignedByte, RegisterFrom(out_loc), obj, @@ -6006,7 +6006,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { __ Lsrs(length, length, 1u); // LSRS has a 16-bit encoding, TST (immediate) does not. static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u, "Expecting 0=compressed, 1=uncompressed"); - __ B(cs, &uncompressed_load, /* far_target */ false); + __ B(cs, &uncompressed_load, /* is_far_target= */ false); __ Ldrb(RegisterFrom(out_loc), MemOperand(temp, RegisterFrom(index), vixl32::LSL, 0)); __ B(final_label); __ Bind(&uncompressed_load); @@ -6046,11 +6046,11 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { obj, data_offset, maybe_temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); } else { Location temp = locations->GetTemp(0); codegen_->GenerateArrayLoadWithBakerReadBarrier( - out_loc, obj, data_offset, index, temp, /* needs_null_check */ false); + out_loc, obj, data_offset, index, temp, /* needs_null_check= */ false); } } else { vixl32::Register out = OutputRegister(instruction); @@ -6325,7 +6325,7 @@ void InstructionCodeGeneratorARMVIXL::VisitArraySet(HArraySet* instruction) { if (instruction->StaticTypeOfArrayIsObjectArray()) { vixl32::Label do_put; - __ B(eq, &do_put, /* far_target */ false); + __ B(eq, &do_put, /* is_far_target= */ false); // If heap poisoning is enabled, the `temp1` reference has // not been unpoisoned yet; unpoison it now. 
GetAssembler()->MaybeUnpoisonHeapReference(temp1); @@ -6627,7 +6627,7 @@ void InstructionCodeGeneratorARMVIXL::VisitSuspendCheck(HSuspendCheck* instructi return; } GenerateSuspendCheck(instruction, nullptr); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 13); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 13); } void InstructionCodeGeneratorARMVIXL::GenerateSuspendCheck(HSuspendCheck* instruction, @@ -6975,7 +6975,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ HLoadClass::LoadKind load_kind = cls->GetLoadKind(); if (load_kind == HLoadClass::LoadKind::kRuntimeCall) { codegen_->GenerateLoadClassRuntimeCall(cls); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 14); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 14); return; } DCHECK(!cls->NeedsAccessCheck()); @@ -7014,14 +7014,14 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(cls)); codegen_->EmitMovwMovtPlaceholder(labels, out); - __ Ldr(out, MemOperand(out, /* offset */ 0)); + __ Ldr(out, MemOperand(out, /* offset= */ 0)); break; } case HLoadClass::LoadKind::kBssEntry: { CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex()); codegen_->EmitMovwMovtPlaceholder(labels, out); - codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option); + codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option); generate_null_check = true; break; } @@ -7037,7 +7037,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ cls->GetTypeIndex(), cls->GetClass())); // /* GcRoot<mirror::Class> */ out = *out - codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option); + codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option); break; } case HLoadClass::LoadKind::kRuntimeCall: @@ -7059,7 +7059,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ } else { __ Bind(slow_path->GetExitLabel()); } - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 15); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 15); } } @@ -7240,7 +7240,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = codegen_->NewBootImageRelRoPatch(codegen_->GetBootImageOffset(load)); codegen_->EmitMovwMovtPlaceholder(labels, out); - __ Ldr(out, MemOperand(out, /* offset */ 0)); + __ Ldr(out, MemOperand(out, /* offset= */ 0)); return; } case HLoadString::LoadKind::kBssEntry: { @@ -7249,13 +7249,13 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex()); codegen_->EmitMovwMovtPlaceholder(labels, out); codegen_->GenerateGcRootFieldLoad( - load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption); + load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption); LoadStringSlowPathARMVIXL* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load); codegen_->AddSlowPath(slow_path); __ CompareAndBranchIfZero(out, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 16); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 16); 
return; } case HLoadString::LoadKind::kJitBootImageAddress: { @@ -7270,7 +7270,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE load->GetString())); // /* GcRoot<mirror::String> */ out = *out codegen_->GenerateGcRootFieldLoad( - load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption); + load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption); return; } default: @@ -7283,7 +7283,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE __ Mov(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_); codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc()); CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>(); - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 17); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 17); } static int32_t GetExceptionTlsOffset() { @@ -7415,7 +7415,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) if (instruction->MustDoNullCheck()) { DCHECK(!out.Is(obj)); __ Mov(out, 0); - __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false); + __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false); } switch (type_check_kind) { @@ -7447,7 +7447,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) __ it(eq); __ mov(eq, out, 1); } else { - __ B(ne, final_label, /* far_target */ false); + __ B(ne, final_label, /* is_far_target= */ false); __ Mov(out, 1); } @@ -7475,9 +7475,9 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) maybe_temp_loc, read_barrier_option); // If `out` is null, we use it for the result, and jump to the final label. - __ CompareAndBranchIfZero(out, final_label, /* far_target */ false); + __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false); __ Cmp(out, cls); - __ B(ne, &loop, /* far_target */ false); + __ B(ne, &loop, /* is_far_target= */ false); __ Mov(out, 1); break; } @@ -7496,7 +7496,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) vixl32::Label loop, success; __ Bind(&loop); __ Cmp(out, cls); - __ B(eq, &success, /* far_target */ false); + __ B(eq, &success, /* is_far_target= */ false); // /* HeapReference<Class> */ out = out->super_class_ GenerateReferenceLoadOneRegister(instruction, out_loc, @@ -7506,7 +7506,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) // This is essentially a null check, but it sets the condition flags to the // proper value for the code that follows the loop, i.e. not `eq`. __ Cmp(out, 1); - __ B(hs, &loop, /* far_target */ false); + __ B(hs, &loop, /* is_far_target= */ false); // Since IT blocks longer than a 16-bit instruction are deprecated by ARMv8, // we check that the output is in a low register, so that a 16-bit MOV @@ -7551,7 +7551,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) // Do an exact check. vixl32::Label exact_check; __ Cmp(out, cls); - __ B(eq, &exact_check, /* far_target */ false); + __ B(eq, &exact_check, /* is_far_target= */ false); // Otherwise, we need to check that the object's class is a non-primitive array. // /* HeapReference<Class> */ out = out->component_type_ GenerateReferenceLoadOneRegister(instruction, @@ -7560,7 +7560,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) maybe_temp_loc, read_barrier_option); // If `out` is null, we use it for the result, and jump to the final label. 
- __ CompareAndBranchIfZero(out, final_label, /* far_target */ false); + __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false); GetAssembler()->LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset); static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot"); __ Cmp(out, 0); @@ -7582,7 +7582,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) __ it(eq); __ mov(eq, out, 1); } else { - __ B(ne, final_label, /* far_target */ false); + __ B(ne, final_label, /* is_far_target= */ false); __ Bind(&exact_check); __ Mov(out, 1); } @@ -7602,7 +7602,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) __ Cmp(out, cls); DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ B(ne, slow_path->GetEntryLabel()); __ Mov(out, 1); @@ -7631,7 +7631,7 @@ void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) // This should also be beneficial for the other cases above. DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); break; @@ -7716,7 +7716,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) { vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done); // Avoid null check if we know obj is not null. if (instruction->MustDoNullCheck()) { - __ CompareAndBranchIfZero(obj, final_label, /* far_target */ false); + __ CompareAndBranchIfZero(obj, final_label, /* is_far_target= */ false); } switch (type_check_kind) { @@ -7763,7 +7763,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) { // Otherwise, compare the classes. __ Cmp(temp, cls); - __ B(ne, &loop, /* far_target */ false); + __ B(ne, &loop, /* is_far_target= */ false); break; } @@ -7780,7 +7780,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) { vixl32::Label loop; __ Bind(&loop); __ Cmp(temp, cls); - __ B(eq, final_label, /* far_target */ false); + __ B(eq, final_label, /* is_far_target= */ false); // /* HeapReference<Class> */ temp = temp->super_class_ GenerateReferenceLoadOneRegister(instruction, @@ -7808,7 +7808,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) { // Do an exact check. __ Cmp(temp, cls); - __ B(eq, final_label, /* far_target */ false); + __ B(eq, final_label, /* is_far_target= */ false); // Otherwise, we need to check that the object's class is a non-primitive array. // /* HeapReference<Class> */ temp = temp->component_type_ @@ -7872,7 +7872,7 @@ void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) { __ Sub(RegisterFrom(maybe_temp2_loc), RegisterFrom(maybe_temp2_loc), 2); // Compare the classes and continue the loop if they do not match. 
__ Cmp(cls, RegisterFrom(maybe_temp3_loc)); - __ B(ne, &start_loop, /* far_target */ false); + __ B(ne, &start_loop, /* is_far_target= */ false); break; } @@ -7913,7 +7913,7 @@ void InstructionCodeGeneratorARMVIXL::VisitMonitorOperation(HMonitorOperation* i } else { CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>(); } - codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 18); + codegen_->MaybeGenerateMarkingRegisterCheck(/* code= */ 18); } void LocationsBuilderARMVIXL::VisitAnd(HAnd* instruction) { @@ -8268,7 +8268,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadOneRegister( // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(out + offset) codegen_->GenerateFieldLoadWithBakerReadBarrier( - instruction, out, out_reg, offset, maybe_temp, /* needs_null_check */ false); + instruction, out, out_reg, offset, maybe_temp, /* needs_null_check= */ false); } else { // Load with slow path based read barrier. // Save the value of `out` into `maybe_temp` before overwriting it @@ -8303,7 +8303,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters( // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(obj + offset) codegen_->GenerateFieldLoadWithBakerReadBarrier( - instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check */ false); + instruction, out, obj_reg, offset, maybe_temp, /* needs_null_check= */ false); } else { // Load with slow path based read barrier. // /* HeapReference<Object> */ out = *(obj + offset) @@ -8384,7 +8384,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( // Note that GC roots are not affected by heap poisoning, thus we // do not have to unpoison `root_reg` here. } - MaybeGenerateMarkingRegisterCheck(/* code */ 19); + MaybeGenerateMarkingRegisterCheck(/* code= */ 19); } void CodeGeneratorARMVIXL::GenerateUnsafeCasOldValueAddWithBakerReadBarrier( @@ -8484,7 +8484,7 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i narrow ? 
BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET : BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET); } - MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip)); + MaybeGenerateMarkingRegisterCheck(/* code= */ 20, /* temp_loc= */ LocationFrom(ip)); } void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, @@ -8572,7 +8572,7 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref, DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(), BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET); } - MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip)); + MaybeGenerateMarkingRegisterCheck(/* code= */ 21, /* temp_loc= */ LocationFrom(ip)); } void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) { @@ -8815,12 +8815,12 @@ void CodeGeneratorARMVIXL::GenerateVirtualCall( CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageIntrinsicPatch( uint32_t intrinsic_data) { - return NewPcRelativePatch(/* dex_file */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_); + return NewPcRelativePatch(/* dex_file= */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_); } CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageRelRoPatch( uint32_t boot_image_offset) { - return NewPcRelativePatch(/* dex_file */ nullptr, + return NewPcRelativePatch(/* dex_file= */ nullptr, boot_image_offset, &boot_image_method_patches_); } @@ -8891,7 +8891,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral( return jit_string_patches_.GetOrCreate( StringReference(&dex_file, string_index), [this]() { - return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); + return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); }); } @@ -8902,7 +8902,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitClassLiteral(const DexFil return jit_class_patches_.GetOrCreate( TypeReference(&dex_file, type_index), [this]() { - return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); + return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ 0u); }); } @@ -8916,7 +8916,7 @@ void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg, CodeGeneratorARMVIXL::PcRelativePatchInfo* labels = NewBootImageRelRoPatch(boot_image_reference); EmitMovwMovtPlaceholder(labels, reg); - __ Ldr(reg, MemOperand(reg, /* offset */ 0)); + __ Ldr(reg, MemOperand(reg, /* offset= */ 0)); } else { DCHECK(Runtime::Current()->UseJitCompilation()); gc::Heap* heap = Runtime::Current()->GetHeap(); @@ -9061,7 +9061,7 @@ VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateUint32Literal( return map->GetOrCreate( value, [this, value]() { - return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ value); + return GetAssembler()->CreateLiteralDestroyedWithPool<uint32_t>(/* value= */ value); }); } @@ -9288,9 +9288,9 @@ void CodeGeneratorARMVIXL::EmitMovwMovtPlaceholder( CodeBufferCheckScope::kMaximumSize); // TODO(VIXL): Think about using mov instead of movw. 
__ bind(&labels->movw_label); - __ movw(out, /* placeholder */ 0u); + __ movw(out, /* operand= */ 0u); __ bind(&labels->movt_label); - __ movt(out, /* placeholder */ 0u); + __ movt(out, /* operand= */ 0u); __ bind(&labels->add_pc_label); __ add(out, out, pc); } @@ -9313,7 +9313,7 @@ static void EmitGrayCheckAndFastPath(ArmVIXLAssembler& assembler, static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0"); static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); __ Tst(ip, Operand(LockWord::kReadBarrierStateMaskShifted)); - __ B(ne, slow_path, /* is_far_target */ false); + __ B(ne, slow_path, /* is_far_target= */ false); // To throw NPE, we return to the fast path; the artificial dependence below does not matter. if (throw_npe != nullptr) { __ Bind(throw_npe); @@ -9360,7 +9360,7 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb vixl32::Label* throw_npe = nullptr; if (GetCompilerOptions().GetImplicitNullChecks() && holder_reg.Is(base_reg)) { throw_npe = &throw_npe_label; - __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target */ false); + __ CompareAndBranchIfZero(holder_reg, throw_npe, /* is_far_target= */ false); } // Check if the holder is gray and, if not, add fake dependency to the base register // and return to the LDR instruction to load the reference. Otherwise, use introspection @@ -9437,7 +9437,7 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb UseScratchRegisterScope temps(assembler.GetVIXLAssembler()); temps.Exclude(ip); vixl32::Label return_label, not_marked, forwarding_address; - __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target */ false); + __ CompareAndBranchIfZero(root_reg, &return_label, /* is_far_target= */ false); MemOperand lock_word(root_reg, mirror::Object::MonitorOffset().Int32Value()); __ Ldr(ip, lock_word); __ Tst(ip, LockWord::kMarkBitStateMaskShifted); diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index c536dd3db5..f7f37db26a 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -587,7 +587,7 @@ class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS { mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this, - /* direct */ false); + /* direct= */ false); } __ B(GetExitLabel()); } @@ -681,7 +681,7 @@ class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS { mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this, - /* direct */ false); + /* direct= */ false); // If the new reference is different from the old reference, // update the field in the holder (`*(obj_ + field_offset_)`). 
@@ -1167,9 +1167,9 @@ void ParallelMoveResolverMIPS::EmitSwap(size_t index) { __ Move(r2_l, TMP); __ Move(r2_h, AT); } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) { - Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ false); + Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ false); } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) { - Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot */ true); + Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ true); } else if (loc1.IsSIMDStackSlot() && loc2.IsSIMDStackSlot()) { ExchangeQuadSlots(loc1.GetStackIndex(), loc2.GetStackIndex()); } else if ((loc1.IsRegister() && loc2.IsStackSlot()) || @@ -1654,14 +1654,14 @@ CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageIntrinsic uint32_t intrinsic_data, const PcRelativePatchInfo* info_high) { return NewPcRelativePatch( - /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_); + /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_); } CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageRelRoPatch( uint32_t boot_image_offset, const PcRelativePatchInfo* info_high) { return NewPcRelativePatch( - /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_); + /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_); } CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageMethodPatch( @@ -1737,7 +1737,7 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo __ Bind(&info_high->label); __ Bind(&info_high->pc_rel_label); // Add the high half of a 32-bit offset to PC. - __ Auipc(out, /* placeholder */ 0x1234); + __ Auipc(out, /* imm16= */ 0x1234); __ SetReorder(reordering); } else { // If base is ZERO, emit NAL to obtain the actual base. @@ -1746,7 +1746,7 @@ void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo __ Nal(); } __ Bind(&info_high->label); - __ Lui(out, /* placeholder */ 0x1234); + __ Lui(out, /* imm16= */ 0x1234); // If we emitted the NAL, bind the pc_rel_label, otherwise base is a register holding // the HMipsComputeBaseMethodAddress which has its own label stored in MipsAssembler. 
if (base == ZERO) { @@ -1764,13 +1764,13 @@ void CodeGeneratorMIPS::LoadBootImageAddress(Register reg, uint32_t boot_image_r if (GetCompilerOptions().IsBootImage()) { PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference); PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high); - EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base */ ZERO); - __ Addiu(reg, TMP, /* placeholder */ 0x5678, &info_low->label); + EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base= */ ZERO); + __ Addiu(reg, TMP, /* imm16= */ 0x5678, &info_low->label); } else if (GetCompilerOptions().GetCompilePic()) { PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference); PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high); - EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO); - __ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label); + EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base= */ ZERO); + __ Lw(reg, reg, /* imm16= */ 0x5678, &info_low->label); } else { DCHECK(Runtime::Current()->UseJitCompilation()); gc::Heap* heap = Runtime::Current()->GetHeap(); @@ -1793,8 +1793,8 @@ void CodeGeneratorMIPS::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invo PcRelativePatchInfo* info_high = NewBootImageTypePatch(*target_method.dex_file, type_idx); PcRelativePatchInfo* info_low = NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high); - EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base */ ZERO); - __ Addiu(argument, argument, /* placeholder */ 0x5678, &info_low->label); + EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base= */ ZERO); + __ Addiu(argument, argument, /* imm16= */ 0x5678, &info_low->label); } else { LoadBootImageAddress(argument, boot_image_offset); } @@ -2579,7 +2579,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) { __ Or(dst_high, dst_high, TMP); __ Andi(TMP, rhs_reg, kMipsBitsPerWord); if (isR6) { - __ Beqzc(TMP, &done, /* is_bare */ true); + __ Beqzc(TMP, &done, /* is_bare= */ true); __ Move(dst_high, dst_low); __ Move(dst_low, ZERO); } else { @@ -2595,7 +2595,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) { __ Or(dst_low, dst_low, TMP); __ Andi(TMP, rhs_reg, kMipsBitsPerWord); if (isR6) { - __ Beqzc(TMP, &done, /* is_bare */ true); + __ Beqzc(TMP, &done, /* is_bare= */ true); __ Move(dst_low, dst_high); __ Sra(dst_high, dst_high, 31); } else { @@ -2612,7 +2612,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) { __ Or(dst_low, dst_low, TMP); __ Andi(TMP, rhs_reg, kMipsBitsPerWord); if (isR6) { - __ Beqzc(TMP, &done, /* is_bare */ true); + __ Beqzc(TMP, &done, /* is_bare= */ true); __ Move(dst_low, dst_high); __ Move(dst_high, ZERO); } else { @@ -2631,7 +2631,7 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) { __ Or(dst_high, dst_high, TMP); __ Andi(TMP, rhs_reg, kMipsBitsPerWord); if (isR6) { - __ Beqzc(TMP, &done, /* is_bare */ true); + __ Beqzc(TMP, &done, /* is_bare= */ true); __ Move(TMP, dst_high); __ Move(dst_high, dst_low); __ Move(dst_low, TMP); @@ -2862,7 +2862,7 @@ void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { obj, offset, temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); } else { codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction, out_loc, @@ -2870,7 +2870,7 @@ void 
InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) { data_offset, index, temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); } } else { Register out = out_loc.AsRegister<Register>(); @@ -4104,7 +4104,7 @@ void InstructionCodeGeneratorMIPS::GenerateDivRemWithAnyConstant(HBinaryOperatio int64_t magic; int shift; - CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift); + CalculateMagicAndShiftForDivRem(imm, false /* is_long= */, &magic, &shift); bool isR6 = codegen_->GetInstructionSetFeatures().IsR6(); @@ -5948,7 +5948,7 @@ void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) { nullptr : codegen_->GetLabelOf(true_successor); MipsLabel* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ? nullptr : codegen_->GetLabelOf(false_successor); - GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target); + GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target); } void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) { @@ -5967,9 +5967,9 @@ void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) { SlowPathCodeMIPS* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize); GenerateTestAndBranch(deoptimize, - /* condition_input_index */ 0, + /* condition_input_index= */ 0, slow_path->GetEntryLabel(), - /* false_target */ nullptr); + /* false_target= */ nullptr); } // This function returns true if a conditional move can be generated for HSelect. @@ -5983,7 +5983,7 @@ void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) { // of common logic. static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) { bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition()); - HInstruction* cond = select->InputAt(/* condition_input_index */ 2); + HInstruction* cond = select->InputAt(/* i= */ 2); HCondition* condition = cond->AsCondition(); DataType::Type cond_type = @@ -6216,7 +6216,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) { Location src = locations->InAt(1); Register src_reg = ZERO; Register src_reg_high = ZERO; - HInstruction* cond = select->InputAt(/* condition_input_index */ 2); + HInstruction* cond = select->InputAt(/* i= */ 2); Register cond_reg = TMP; int cond_cc = 0; DataType::Type cond_type = DataType::Type::kInt32; @@ -6224,7 +6224,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) { DataType::Type dst_type = select->GetType(); if (IsBooleanValueOrMaterializedCondition(cond)) { - cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>(); + cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>(); } else { HCondition* condition = cond->AsCondition(); LocationSummary* cond_locations = cond->GetLocations(); @@ -6337,7 +6337,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) { Location dst = locations->Out(); Location false_src = locations->InAt(0); Location true_src = locations->InAt(1); - HInstruction* cond = select->InputAt(/* condition_input_index */ 2); + HInstruction* cond = select->InputAt(/* i= */ 2); Register cond_reg = TMP; FRegister fcond_reg = FTMP; DataType::Type cond_type = DataType::Type::kInt32; @@ -6345,7 +6345,7 @@ void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) { DataType::Type dst_type = select->GetType(); if (IsBooleanValueOrMaterializedCondition(cond)) { 
- cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<Register>(); + cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>(); } else { HCondition* condition = cond->AsCondition(); LocationSummary* cond_locations = cond->GetLocations(); @@ -6526,7 +6526,7 @@ void LocationsBuilderMIPS::VisitSelect(HSelect* select) { void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) { bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6(); - if (CanMoveConditionally(select, is_r6, /* locations_to_set */ nullptr)) { + if (CanMoveConditionally(select, is_r6, /* locations_to_set= */ nullptr)) { if (is_r6) { GenConditionalMoveR6(select); } else { @@ -6536,8 +6536,8 @@ void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) { LocationSummary* locations = select->GetLocations(); MipsLabel false_target; GenerateTestAndBranch(select, - /* condition_input_index */ 2, - /* true_target */ nullptr, + /* condition_input_index= */ 2, + /* true_target= */ nullptr, &false_target); codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType()); __ Bind(&false_target); @@ -6696,7 +6696,7 @@ void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction, obj, offset, temp_loc, - /* needs_null_check */ true); + /* needs_null_check= */ true); if (is_volatile) { GenerateMemoryBarrier(MemBarrierKind::kLoadAny); } @@ -6929,7 +6929,7 @@ void InstructionCodeGeneratorMIPS::GenerateReferenceLoadOneRegister( out_reg, offset, maybe_temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); } else { // Load with slow path based read barrier. // Save the value of `out` into `maybe_temp` before overwriting it @@ -6970,7 +6970,7 @@ void InstructionCodeGeneratorMIPS::GenerateReferenceLoadTwoRegisters( obj_reg, offset, maybe_temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); } else { // Load with slow path based read barrier. // /* HeapReference<Object> */ out = *(obj + offset) @@ -7061,7 +7061,7 @@ void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruc __ AddUpper(base, obj, offset_high); } MipsLabel skip_call; - __ Beqz(T9, &skip_call, /* is_bare */ true); + __ Beqz(T9, &skip_call, /* is_bare= */ true); if (label_low != nullptr) { DCHECK(short_offset); __ Bind(label_low); @@ -7216,11 +7216,11 @@ void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* inst MipsLabel skip_call; if (short_offset) { if (isR6) { - __ Beqzc(T9, &skip_call, /* is_bare */ true); + __ Beqzc(T9, &skip_call, /* is_bare= */ true); __ Nop(); // In forbidden slot. __ Jialc(T9, thunk_disp); } else { - __ Beqz(T9, &skip_call, /* is_bare */ true); + __ Beqz(T9, &skip_call, /* is_bare= */ true); __ Addiu(T9, T9, thunk_disp); // In delay slot. __ Jalr(T9); __ Nop(); // In delay slot. @@ -7228,13 +7228,13 @@ void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* inst __ Bind(&skip_call); } else { if (isR6) { - __ Beqz(T9, &skip_call, /* is_bare */ true); + __ Beqz(T9, &skip_call, /* is_bare= */ true); __ Aui(base, obj, offset_high); // In delay slot. __ Jialc(T9, thunk_disp); __ Bind(&skip_call); } else { __ Lui(base, offset_high); - __ Beqz(T9, &skip_call, /* is_bare */ true); + __ Beqz(T9, &skip_call, /* is_bare= */ true); __ Addiu(T9, T9, thunk_disp); // In delay slot. 
__ Jalr(T9); __ Bind(&skip_call); @@ -7311,7 +7311,7 @@ void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* inst // We will not do the explicit null check in the thunk as some form of a null check // must've been done earlier. DCHECK(!needs_null_check); - const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false); + const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false); // Loading the entrypoint does not require a load acquire since it is only changed when // threads are suspended or running a checkpoint. __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset); @@ -7321,13 +7321,13 @@ void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* inst : index.AsRegister<Register>(); MipsLabel skip_call; if (GetInstructionSetFeatures().IsR6()) { - __ Beqz(T9, &skip_call, /* is_bare */ true); + __ Beqz(T9, &skip_call, /* is_bare= */ true); __ Lsa(TMP, index_reg, obj, scale_factor); // In delay slot. __ Jialc(T9, thunk_disp); __ Bind(&skip_call); } else { __ Sll(TMP, index_reg, scale_factor); - __ Beqz(T9, &skip_call, /* is_bare */ true); + __ Beqz(T9, &skip_call, /* is_bare= */ true); __ Addiu(T9, T9, thunk_disp); // In delay slot. __ Jalr(T9); __ Bind(&skip_call); @@ -7442,7 +7442,7 @@ void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction, ref, obj, - /* field_offset */ index, + /* field_offset= */ index, temp_reg); } else { slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref); @@ -7705,7 +7705,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) { kWithoutReadBarrier); DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ Bne(out, cls.AsRegister<Register>(), slow_path->GetEntryLabel()); __ LoadConst32(out, 1); @@ -7734,7 +7734,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) { // This should also be beneficial for the other cases above. 
DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ B(slow_path->GetEntryLabel()); break; @@ -8001,7 +8001,7 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall( NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high); Register temp_reg = temp.AsRegister<Register>(); EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg); - __ Addiu(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label); + __ Addiu(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label); break; } case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: { @@ -8010,7 +8010,7 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall( PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high); Register temp_reg = temp.AsRegister<Register>(); EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg); - __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label); + __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label); break; } case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: { @@ -8020,7 +8020,7 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall( MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high); Register temp_reg = temp.AsRegister<Register>(); EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg); - __ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label); + __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label); break; } case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress: @@ -8226,7 +8226,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, out, base_or_current_method_reg); - __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label); + __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label); break; } case HLoadClass::LoadKind::kBootImageRelRo: { @@ -8239,7 +8239,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, out, base_or_current_method_reg); - __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label); + __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label); break; } case HLoadClass::LoadKind::kBssEntry: { @@ -8253,7 +8253,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF GenerateGcRootFieldLoad(cls, out_loc, out, - /* placeholder */ 0x5678, + /* offset= */ 0x5678, read_barrier_option, &info_low->label); generate_null_check = true; @@ -8278,12 +8278,12 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF cls->GetClass()); bool reordering = __ SetReorder(false); __ Bind(&info->high_label); - __ Lui(out, /* placeholder */ 0x1234); + __ Lui(out, /* imm16= */ 0x1234); __ SetReorder(reordering); GenerateGcRootFieldLoad(cls, out_loc, out, - /* placeholder */ 0x5678, + /* offset= */ 0x5678, read_barrier_option, &info->low_label); break; @@ -8432,7 +8432,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_ codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, out, base_or_current_method_reg); - __ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label); + __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label); return; } case HLoadString::LoadKind::kBootImageRelRo: { @@ -8445,7 +8445,7 @@ void 
InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_ codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, out, base_or_current_method_reg); - __ Lw(out, out, /* placeholder */ 0x5678, &info_low->label); + __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label); return; } case HLoadString::LoadKind::kBssEntry: { @@ -8460,7 +8460,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_ GenerateGcRootFieldLoad(load, out_loc, out, - /* placeholder */ 0x5678, + /* offset= */ 0x5678, kCompilerReadBarrierOption, &info_low->label); SlowPathCodeMIPS* slow_path = @@ -8489,12 +8489,12 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_ load->GetString()); bool reordering = __ SetReorder(false); __ Bind(&info->high_label); - __ Lui(out, /* placeholder */ 0x1234); + __ Lui(out, /* imm16= */ 0x1234); __ SetReorder(reordering); GenerateGcRootFieldLoad(load, out_loc, out, - /* placeholder */ 0x5678, + /* offset= */ 0x5678, kCompilerReadBarrierOption, &info->low_label); return; diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 016aac791c..8b6328f097 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -953,7 +953,7 @@ CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph, : CodeGenerator(graph, kNumberOfGpuRegisters, kNumberOfFpuRegisters, - /* number_of_register_pairs */ 0, + /* number_of_register_pairs= */ 0, ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves), arraysize(kCoreCalleeSaves)), ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves), @@ -1581,14 +1581,14 @@ CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageIntri uint32_t intrinsic_data, const PcRelativePatchInfo* info_high) { return NewPcRelativePatch( - /* dex_file */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_); + /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_); } CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageRelRoPatch( uint32_t boot_image_offset, const PcRelativePatchInfo* info_high) { return NewPcRelativePatch( - /* dex_file */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_); + /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_); } CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageMethodPatch( @@ -1665,7 +1665,7 @@ void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchIn DCHECK(!info_high->patch_info_high); __ Bind(&info_high->label); // Add the high half of a 32-bit offset to PC. - __ Auipc(out, /* placeholder */ 0x1234); + __ Auipc(out, /* imm16= */ 0x1234); // A following instruction will add the sign-extended low half of the 32-bit // offset to `out` (e.g. ld, jialc, daddiu). 
if (info_low != nullptr) { @@ -1679,13 +1679,13 @@ void CodeGeneratorMIPS64::LoadBootImageAddress(GpuRegister reg, uint32_t boot_im PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference); PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high); EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low); - __ Daddiu(reg, AT, /* placeholder */ 0x5678); + __ Daddiu(reg, AT, /* imm16= */ 0x5678); } else if (GetCompilerOptions().GetCompilePic()) { PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference); PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high); EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low); // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load. - __ Lwu(reg, AT, /* placeholder */ 0x5678); + __ Lwu(reg, AT, /* imm16= */ 0x5678); } else { DCHECK(Runtime::Current()->UseJitCompilation()); gc::Heap* heap = Runtime::Current()->GetHeap(); @@ -1710,7 +1710,7 @@ void CodeGeneratorMIPS64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* in PcRelativePatchInfo* info_low = NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high); EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low); - __ Daddiu(argument, AT, /* placeholder */ 0x5678); + __ Daddiu(argument, AT, /* imm16= */ 0x5678); } else { LoadBootImageAddress(argument, boot_image_offset); } @@ -1724,7 +1724,7 @@ Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_fil ReserveJitStringRoot(StringReference(&dex_file, string_index), handle); return jit_string_patches_.GetOrCreate( StringReference(&dex_file, string_index), - [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); }); + [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); }); } Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file, @@ -1733,7 +1733,7 @@ Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle); return jit_class_patches_.GetOrCreate( TypeReference(&dex_file, type_index), - [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); }); + [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); }); } void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code, @@ -2458,7 +2458,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { obj, offset, temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); } else { codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction, out_loc, @@ -2466,7 +2466,7 @@ void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) { data_offset, index, temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); } } else { GpuRegister out = out_loc.AsRegister<GpuRegister>(); @@ -3337,10 +3337,10 @@ void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) { switch (type) { default: // Integer case. 
- GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ false, locations); + GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ false, locations); return; case DataType::Type::kInt64: - GenerateIntLongCompare(instruction->GetCondition(), /* is64bit */ true, locations); + GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ true, locations); return; case DataType::Type::kFloat32: case DataType::Type::kFloat64: @@ -4449,10 +4449,10 @@ void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruc switch (type) { default: - GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ false, locations, branch_target); + GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ false, locations, branch_target); break; case DataType::Type::kInt64: - GenerateIntLongCompareAndBranch(if_cond, /* is64bit */ true, locations, branch_target); + GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ true, locations, branch_target); break; case DataType::Type::kFloat32: case DataType::Type::kFloat64: @@ -4482,7 +4482,7 @@ void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) { nullptr : codegen_->GetLabelOf(true_successor); Mips64Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ? nullptr : codegen_->GetLabelOf(false_successor); - GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target); + GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target); } void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) { @@ -4501,9 +4501,9 @@ void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) { SlowPathCodeMIPS64* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize); GenerateTestAndBranch(deoptimize, - /* condition_input_index */ 0, + /* condition_input_index= */ 0, slow_path->GetEntryLabel(), - /* false_target */ nullptr); + /* false_target= */ nullptr); } // This function returns true if a conditional move can be generated for HSelect. @@ -4517,7 +4517,7 @@ void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) { // of common logic. 
static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_set) { bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition()); - HInstruction* cond = select->InputAt(/* condition_input_index */ 2); + HInstruction* cond = select->InputAt(/* i= */ 2); HCondition* condition = cond->AsCondition(); DataType::Type cond_type = @@ -4660,7 +4660,7 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) { Location dst = locations->Out(); Location false_src = locations->InAt(0); Location true_src = locations->InAt(1); - HInstruction* cond = select->InputAt(/* condition_input_index */ 2); + HInstruction* cond = select->InputAt(/* i= */ 2); GpuRegister cond_reg = TMP; FpuRegister fcond_reg = FTMP; DataType::Type cond_type = DataType::Type::kInt32; @@ -4668,7 +4668,7 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) { DataType::Type dst_type = select->GetType(); if (IsBooleanValueOrMaterializedCondition(cond)) { - cond_reg = locations->InAt(/* condition_input_index */ 2).AsRegister<GpuRegister>(); + cond_reg = locations->InAt(/* at= */ 2).AsRegister<GpuRegister>(); } else { HCondition* condition = cond->AsCondition(); LocationSummary* cond_locations = cond->GetLocations(); @@ -4677,13 +4677,13 @@ void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) { switch (cond_type) { default: cond_inverted = MaterializeIntLongCompare(if_cond, - /* is64bit */ false, + /* is64bit= */ false, cond_locations, cond_reg); break; case DataType::Type::kInt64: cond_inverted = MaterializeIntLongCompare(if_cond, - /* is64bit */ true, + /* is64bit= */ true, cond_locations, cond_reg); break; @@ -4826,14 +4826,14 @@ void LocationsBuilderMIPS64::VisitSelect(HSelect* select) { } void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) { - if (CanMoveConditionally(select, /* locations_to_set */ nullptr)) { + if (CanMoveConditionally(select, /* locations_to_set= */ nullptr)) { GenConditionalMove(select); } else { LocationSummary* locations = select->GetLocations(); Mips64Label false_target; GenerateTestAndBranch(select, - /* condition_input_index */ 2, - /* true_target */ nullptr, + /* condition_input_index= */ 2, + /* true_target= */ nullptr, &false_target); codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType()); __ Bind(&false_target); @@ -4945,7 +4945,7 @@ void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction, obj, offset, temp_loc, - /* needs_null_check */ true); + /* needs_null_check= */ true); if (is_volatile) { GenerateMemoryBarrier(MemBarrierKind::kLoadAny); } @@ -5101,7 +5101,7 @@ void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadOneRegister( out_reg, offset, maybe_temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); } else { // Load with slow path based read barrier. // Save the value of `out` into `maybe_temp` before overwriting it @@ -5142,7 +5142,7 @@ void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadTwoRegisters( obj_reg, offset, maybe_temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); } else { // Load with slow path based read barrier. 
// /* HeapReference<Object> */ out = *(obj + offset) @@ -5230,7 +5230,7 @@ void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(HInstruction* instr __ Daui(base, obj, offset_high); } Mips64Label skip_call; - __ Beqz(T9, &skip_call, /* is_bare */ true); + __ Beqz(T9, &skip_call, /* is_bare= */ true); if (label_low != nullptr) { DCHECK(short_offset); __ Bind(label_low); @@ -5360,7 +5360,7 @@ void CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* in GpuRegister ref_reg = ref.AsRegister<GpuRegister>(); Mips64Label skip_call; if (short_offset) { - __ Beqzc(T9, &skip_call, /* is_bare */ true); + __ Beqzc(T9, &skip_call, /* is_bare= */ true); __ Nop(); // In forbidden slot. __ Jialc(T9, thunk_disp); __ Bind(&skip_call); @@ -5369,7 +5369,7 @@ void CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* in } else { int16_t offset_low = Low16Bits(offset); int16_t offset_high = High16Bits(offset - offset_low); // Accounts for sign extension in lwu. - __ Beqz(T9, &skip_call, /* is_bare */ true); + __ Beqz(T9, &skip_call, /* is_bare= */ true); __ Daui(TMP, obj, offset_high); // In delay slot. __ Jialc(T9, thunk_disp); __ Bind(&skip_call); @@ -5442,12 +5442,12 @@ void CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* in // We will not do the explicit null check in the thunk as some form of a null check // must've been done earlier. DCHECK(!needs_null_check); - const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset */ false); + const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false); // Loading the entrypoint does not require a load acquire since it is only changed when // threads are suspended or running a checkpoint. __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset); Mips64Label skip_call; - __ Beqz(T9, &skip_call, /* is_bare */ true); + __ Beqz(T9, &skip_call, /* is_bare= */ true); GpuRegister ref_reg = ref.AsRegister<GpuRegister>(); GpuRegister index_reg = index.AsRegister<GpuRegister>(); __ Dlsa(TMP, index_reg, obj, scale_factor); // In delay slot. @@ -5558,7 +5558,7 @@ void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction, ref, obj, - /* field_offset */ index, + /* field_offset= */ index, temp_reg); } else { slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref); @@ -5821,7 +5821,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) { kWithoutReadBarrier); DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ Bnec(out, cls.AsRegister<GpuRegister>(), slow_path->GetEntryLabel()); __ LoadConst32(out, 1); @@ -5850,7 +5850,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) { // This should also be beneficial for the other cases above. 
DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ Bc(slow_path->GetEntryLabel()); break; @@ -6092,7 +6092,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall( CodeGeneratorMIPS64::PcRelativePatchInfo* info_low = NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high); EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low); - __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678); + __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678); break; } case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: { @@ -6101,7 +6101,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall( PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high); EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low); // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load. - __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678); + __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678); break; } case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: { @@ -6110,7 +6110,7 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall( PcRelativePatchInfo* info_low = NewMethodBssEntryPatch( MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high); EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low); - __ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678); + __ Ld(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678); break; } case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress: @@ -6280,7 +6280,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S CodeGeneratorMIPS64::PcRelativePatchInfo* info_low = codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high); codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low); - __ Daddiu(out, AT, /* placeholder */ 0x5678); + __ Daddiu(out, AT, /* imm16= */ 0x5678); break; } case HLoadClass::LoadKind::kBootImageRelRo: { @@ -6291,7 +6291,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S CodeGeneratorMIPS64::PcRelativePatchInfo* info_low = codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high); codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low); - __ Lwu(out, AT, /* placeholder */ 0x5678); + __ Lwu(out, AT, /* imm16= */ 0x5678); break; } case HLoadClass::LoadKind::kBssEntry: { @@ -6303,7 +6303,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S GenerateGcRootFieldLoad(cls, out_loc, out, - /* placeholder */ 0x5678, + /* offset= */ 0x5678, read_barrier_option, &info_low->label); generate_null_check = true; @@ -6427,7 +6427,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA CodeGeneratorMIPS64::PcRelativePatchInfo* info_low = codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high); codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low); - __ Daddiu(out, AT, /* placeholder */ 0x5678); + __ Daddiu(out, AT, /* imm16= */ 0x5678); return; } case HLoadString::LoadKind::kBootImageRelRo: { @@ -6438,7 +6438,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA CodeGeneratorMIPS64::PcRelativePatchInfo* info_low = 
codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high); codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low); - __ Lwu(out, AT, /* placeholder */ 0x5678); + __ Lwu(out, AT, /* imm16= */ 0x5678); return; } case HLoadString::LoadKind::kBssEntry: { @@ -6451,7 +6451,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA GenerateGcRootFieldLoad(load, out_loc, out, - /* placeholder */ 0x5678, + /* offset= */ 0x5678, kCompilerReadBarrierOption, &info_low->label); SlowPathCodeMIPS64* slow_path = diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc index 09e96cc1f4..4e9ba0d3d2 100644 --- a/compiler/optimizing/code_generator_vector_mips.cc +++ b/compiler/optimizing/code_generator_vector_mips.cc @@ -74,19 +74,19 @@ void InstructionCodeGeneratorMIPS::VisitVecReplicateScalar(HVecReplicateScalar* __ InsertW(static_cast<VectorRegister>(FTMP), locations->InAt(0).AsRegisterPairHigh<Register>(), 1); - __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double */ true); + __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double= */ true); break; case DataType::Type::kFloat32: DCHECK_EQ(4u, instruction->GetVectorLength()); __ ReplicateFPToVectorRegister(dst, locations->InAt(0).AsFpuRegister<FRegister>(), - /* is_double */ false); + /* is_double= */ false); break; case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ ReplicateFPToVectorRegister(dst, locations->InAt(0).AsFpuRegister<FRegister>(), - /* is_double */ true); + /* is_double= */ true); break; default: LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); @@ -1344,7 +1344,7 @@ int32_t InstructionCodeGeneratorMIPS::VecAddress(LocationSummary* locations, } void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) { - CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true); } void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) { @@ -1387,7 +1387,7 @@ void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) { } void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) { - CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false); } void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) { diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc index b6873b1a67..6467d3e27f 100644 --- a/compiler/optimizing/code_generator_vector_mips64.cc +++ b/compiler/optimizing/code_generator_vector_mips64.cc @@ -79,13 +79,13 @@ void InstructionCodeGeneratorMIPS64::VisitVecReplicateScalar(HVecReplicateScalar DCHECK_EQ(4u, instruction->GetVectorLength()); __ ReplicateFPToVectorRegister(dst, locations->InAt(0).AsFpuRegister<FpuRegister>(), - /* is_double */ false); + /* is_double= */ false); break; case DataType::Type::kFloat64: DCHECK_EQ(2u, instruction->GetVectorLength()); __ ReplicateFPToVectorRegister(dst, locations->InAt(0).AsFpuRegister<FpuRegister>(), - /* is_double */ true); + /* is_double= */ true); break; default: LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType(); @@ -1342,7 +1342,7 @@ int32_t InstructionCodeGeneratorMIPS64::VecAddress(LocationSummary* locations, } void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) { - 
CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ true); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true); } void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) { @@ -1385,7 +1385,7 @@ void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) { } void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) { - CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load */ false); + CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false); } void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) { diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 1b74d22a81..766ff78fa4 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -1720,7 +1720,7 @@ void InstructionCodeGeneratorX86::VisitIf(HIf* if_instr) { nullptr : codegen_->GetLabelOf(true_successor); Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ? nullptr : codegen_->GetLabelOf(false_successor); - GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target); + GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target); } void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) { @@ -1738,9 +1738,9 @@ void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) { void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) { SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize); GenerateTestAndBranch<Label>(deoptimize, - /* condition_input_index */ 0, + /* condition_input_index= */ 0, slow_path->GetEntryLabel(), - /* false_target */ nullptr); + /* false_target= */ nullptr); } void LocationsBuilderX86::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { @@ -1863,7 +1863,7 @@ void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) { } else { NearLabel false_target; GenerateTestAndBranch<NearLabel>( - select, /* condition_input_index */ 2, /* true_target */ nullptr, &false_target); + select, /* condition_input_index= */ 2, /* true_target= */ nullptr, &false_target); codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType()); __ Bind(&false_target); } @@ -3434,8 +3434,8 @@ void InstructionCodeGeneratorX86::GenerateRemFP(HRem *rem) { // Load the values to the FP stack in reverse order, using temporaries if needed. const bool is_wide = !is_float; - PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp */ true, is_wide); - PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp */ true, is_wide); + PushOntoFPStack(second, elem_size, 2 * elem_size, /* is_fp= */ true, is_wide); + PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp= */ true, is_wide); // Loop doing FPREM until we stabilize. NearLabel retry; @@ -3572,7 +3572,7 @@ void InstructionCodeGeneratorX86::GenerateDivRemWithAnyConstant(HBinaryOperation int64_t magic; int shift; - CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift); + CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift); // Save the numerator. __ movl(num, eax); @@ -4801,7 +4801,7 @@ void CodeGeneratorX86::GenerateMemoryBarrier(MemBarrierKind kind) { } case MemBarrierKind::kNTStoreStore: // Non-Temporal Store/Store needs an explicit fence. 
- MemoryFence(/* non-temporal */ true); + MemoryFence(/* non-temporal= */ true); break; } } @@ -4936,14 +4936,14 @@ void CodeGeneratorX86::GenerateVirtualCall( void CodeGeneratorX86::RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address, uint32_t intrinsic_data) { boot_image_intrinsic_patches_.emplace_back( - method_address, /* target_dex_file */ nullptr, intrinsic_data); + method_address, /* target_dex_file= */ nullptr, intrinsic_data); __ Bind(&boot_image_intrinsic_patches_.back().label); } void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address, uint32_t boot_image_offset) { boot_image_method_patches_.emplace_back( - method_address, /* target_dex_file */ nullptr, boot_image_offset); + method_address, /* target_dex_file= */ nullptr, boot_image_offset); __ Bind(&boot_image_method_patches_.back().label); } @@ -5237,7 +5237,7 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction, // Note that a potential implicit null check is handled in this // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call. codegen_->GenerateFieldLoadWithBakerReadBarrier( - instruction, out, base, offset, /* needs_null_check */ true); + instruction, out, base, offset, /* needs_null_check= */ true); if (is_volatile) { codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); } @@ -5720,7 +5720,7 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { // Note that a potential implicit null check is handled in this // CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call. codegen_->GenerateArrayLoadWithBakerReadBarrier( - instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true); + instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true); } else { Register out = out_loc.AsRegister<Register>(); __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset)); @@ -6582,7 +6582,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE cls, out_loc, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()), - /* fixup_label */ nullptr, + /* fixup_label= */ nullptr, read_barrier_option); break; } @@ -7109,7 +7109,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { } DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ j(kNotEqual, slow_path->GetEntryLabel()); __ movl(out, Immediate(1)); @@ -7141,7 +7141,7 @@ void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) { // This should also be beneficial for the other cases above. DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ jmp(slow_path->GetEntryLabel()); if (zero.IsLinked()) { @@ -7650,7 +7650,7 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister( // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(out + offset) codegen_->GenerateFieldLoadWithBakerReadBarrier( - instruction, out, out_reg, offset, /* needs_null_check */ false); + instruction, out, out_reg, offset, /* needs_null_check= */ false); } else { // Load with slow path based read barrier. 
// Save the value of `out` into `maybe_temp` before overwriting it @@ -7684,7 +7684,7 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters( // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(obj + offset) codegen_->GenerateFieldLoadWithBakerReadBarrier( - instruction, out, obj_reg, offset, /* needs_null_check */ false); + instruction, out, obj_reg, offset, /* needs_null_check= */ false); } else { // Load with slow path based read barrier. // /* HeapReference<Object> */ out = *(obj + offset) @@ -7733,7 +7733,7 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad( // Slow path marking the GC root `root`. SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86( - instruction, root, /* unpoison_ref_before_marking */ false); + instruction, root, /* unpoison_ref_before_marking= */ false); codegen_->AddSlowPath(slow_path); // Test the entrypoint (`Thread::Current()->pReadBarrierMarkReg ## root.reg()`). @@ -7863,10 +7863,10 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i if (always_update_field) { DCHECK(temp != nullptr); slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86( - instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp); + instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp); } else { slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86( - instruction, ref, /* unpoison_ref_before_marking */ true); + instruction, ref, /* unpoison_ref_before_marking= */ true); } AddSlowPath(slow_path); diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 781f2724a5..67a2aa561b 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -992,7 +992,7 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall( // temp = thread->string_init_entrypoint uint32_t offset = GetThreadOffset<kX86_64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value(); - __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip */ true)); + __ gs()->movq(temp.AsRegister<CpuRegister>(), Address::Absolute(offset, /* no_rip= */ true)); break; } case HInvokeStaticOrDirect::MethodLoadKind::kRecursive: @@ -1001,19 +1001,19 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall( case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: DCHECK(GetCompilerOptions().IsBootImage()); __ leal(temp.AsRegister<CpuRegister>(), - Address::Absolute(kDummy32BitOffset, /* no_rip */ false)); + Address::Absolute(kDummy32BitOffset, /* no_rip= */ false)); RecordBootImageMethodPatch(invoke); break; case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: { // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load. 
__ movl(temp.AsRegister<CpuRegister>(), - Address::Absolute(kDummy32BitOffset, /* no_rip */ false)); + Address::Absolute(kDummy32BitOffset, /* no_rip= */ false)); RecordBootImageRelRoPatch(GetBootImageOffset(invoke)); break; } case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: { __ movq(temp.AsRegister<CpuRegister>(), - Address::Absolute(kDummy32BitOffset, /* no_rip */ false)); + Address::Absolute(kDummy32BitOffset, /* no_rip= */ false)); RecordMethodBssEntryPatch(invoke); break; } @@ -1076,12 +1076,12 @@ void CodeGeneratorX86_64::GenerateVirtualCall( } void CodeGeneratorX86_64::RecordBootImageIntrinsicPatch(uint32_t intrinsic_data) { - boot_image_intrinsic_patches_.emplace_back(/* target_dex_file */ nullptr, intrinsic_data); + boot_image_intrinsic_patches_.emplace_back(/* target_dex_file= */ nullptr, intrinsic_data); __ Bind(&boot_image_intrinsic_patches_.back().label); } void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset) { - boot_image_method_patches_.emplace_back(/* target_dex_file */ nullptr, boot_image_offset); + boot_image_method_patches_.emplace_back(/* target_dex_file= */ nullptr, boot_image_offset); __ Bind(&boot_image_method_patches_.back().label); } @@ -1123,10 +1123,10 @@ Label* CodeGeneratorX86_64::NewStringBssEntryPatch(HLoadString* load_string) { void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_image_reference) { if (GetCompilerOptions().IsBootImage()) { - __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false)); + __ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false)); RecordBootImageIntrinsicPatch(boot_image_reference); } else if (GetCompilerOptions().GetCompilePic()) { - __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false)); + __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false)); RecordBootImageRelRoPatch(boot_image_reference); } else { DCHECK(Runtime::Current()->UseJitCompilation()); @@ -1146,7 +1146,7 @@ void CodeGeneratorX86_64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* in DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference); // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative. __ leal(argument, - Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false)); + Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false)); MethodReference target_method = invoke->GetTargetMethod(); dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_; boot_image_type_patches_.emplace_back(target_method.dex_file, type_idx.index_); @@ -1277,7 +1277,7 @@ void CodeGeneratorX86_64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_poin } void CodeGeneratorX86_64::GenerateInvokeRuntime(int32_t entry_point_offset) { - __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip */ true)); + __ gs()->call(Address::Absolute(entry_point_offset, /* no_rip= */ true)); } static constexpr int kNumberOfCpuRegisterPairs = 0; @@ -1799,7 +1799,7 @@ void InstructionCodeGeneratorX86_64::VisitIf(HIf* if_instr) { nullptr : codegen_->GetLabelOf(true_successor); Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ? 
nullptr : codegen_->GetLabelOf(false_successor); - GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target); + GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target); } void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) { @@ -1817,9 +1817,9 @@ void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) { void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) { SlowPathCode* slow_path = deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86_64>(deoptimize); GenerateTestAndBranch<Label>(deoptimize, - /* condition_input_index */ 0, + /* condition_input_index= */ 0, slow_path->GetEntryLabel(), - /* false_target */ nullptr); + /* false_target= */ nullptr); } void LocationsBuilderX86_64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) { @@ -1922,8 +1922,8 @@ void InstructionCodeGeneratorX86_64::VisitSelect(HSelect* select) { } else { NearLabel false_target; GenerateTestAndBranch<NearLabel>(select, - /* condition_input_index */ 2, - /* true_target */ nullptr, + /* condition_input_index= */ 2, + /* true_target= */ nullptr, &false_target); codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType()); __ Bind(&false_target); @@ -3679,7 +3679,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat if (instruction->GetResultType() == DataType::Type::kInt32) { int imm = second.GetConstant()->AsIntConstant()->GetValue(); - CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift); + CalculateMagicAndShiftForDivRem(imm, false /* is_long= */, &magic, &shift); __ movl(numerator, eax); @@ -3716,7 +3716,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat CpuRegister rax = eax; CpuRegister rdx = edx; - CalculateMagicAndShiftForDivRem(imm, true /* is_long */, &magic, &shift); + CalculateMagicAndShiftForDivRem(imm, true /* is_long= */, &magic, &shift); // Save the numerator. __ movq(numerator, rax); @@ -4554,7 +4554,7 @@ void CodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind) { } case MemBarrierKind::kNTStoreStore: // Non-Temporal Store/Store needs an explicit fence. - MemoryFence(/* non-temporal */ true); + MemoryFence(/* non-temporal= */ true); break; } } @@ -4631,7 +4631,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction, // Note that a potential implicit null check is handled in this // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call. codegen_->GenerateFieldLoadWithBakerReadBarrier( - instruction, out, base, offset, /* needs_null_check */ true); + instruction, out, base, offset, /* needs_null_check= */ true); if (is_volatile) { codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); } @@ -5086,7 +5086,7 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { // Note that a potential implicit null check is handled in this // CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call. codegen_->GenerateArrayLoadWithBakerReadBarrier( - instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true); + instruction, out_loc, obj, data_offset, index, /* needs_null_check= */ true); } else { CpuRegister out = out_loc.AsRegister<CpuRegister>(); __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset)); @@ -5486,7 +5486,7 @@ void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp, } // Load the address of the card table into `card`. 
__ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(), - /* no_rip */ true)); + /* no_rip= */ true)); // Calculate the offset (in the card table) of the card corresponding to // `object`. __ movq(temp, object); @@ -5566,7 +5566,7 @@ void InstructionCodeGeneratorX86_64::GenerateSuspendCheck(HSuspendCheck* instruc } __ gs()->cmpw(Address::Absolute(Thread::ThreadFlagsOffset<kX86_64PointerSize>().Int32Value(), - /* no_rip */ true), + /* no_rip= */ true), Immediate(0)); if (successor == nullptr) { __ j(kNotEqual, slow_path->GetEntryLabel()); @@ -5948,25 +5948,25 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S cls, out_loc, Address(current_method, ArtMethod::DeclaringClassOffset().Int32Value()), - /* fixup_label */ nullptr, + /* fixup_label= */ nullptr, read_barrier_option); break; } case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: DCHECK(codegen_->GetCompilerOptions().IsBootImage()); DCHECK_EQ(read_barrier_option, kWithoutReadBarrier); - __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false)); + __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false)); codegen_->RecordBootImageTypePatch(cls); break; case HLoadClass::LoadKind::kBootImageRelRo: { DCHECK(!codegen_->GetCompilerOptions().IsBootImage()); - __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false)); + __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false)); codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(cls)); break; } case HLoadClass::LoadKind::kBssEntry: { Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, - /* no_rip */ false); + /* no_rip= */ false); Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls); // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */ GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option); @@ -5982,7 +5982,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S } case HLoadClass::LoadKind::kJitTableAddress: { Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, - /* no_rip */ true); + /* no_rip= */ true); Label* fixup_label = codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetClass()); // /* GcRoot<mirror::Class> */ out = *address @@ -6107,19 +6107,19 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA switch (load->GetLoadKind()) { case HLoadString::LoadKind::kBootImageLinkTimePcRelative: { DCHECK(codegen_->GetCompilerOptions().IsBootImage()); - __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false)); + __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false)); codegen_->RecordBootImageStringPatch(load); return; } case HLoadString::LoadKind::kBootImageRelRo: { DCHECK(!codegen_->GetCompilerOptions().IsBootImage()); - __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false)); + __ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false)); codegen_->RecordBootImageRelRoPatch(codegen_->GetBootImageOffset(load)); return; } case HLoadString::LoadKind::kBssEntry: { Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, - /* no_rip */ false); + /* no_rip= */ false); Label* fixup_label = codegen_->NewStringBssEntryPatch(load); // /* 
GcRoot<mirror::Class> */ out = *address /* PC-relative */ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); @@ -6138,7 +6138,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA } case HLoadString::LoadKind::kJitTableAddress: { Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, - /* no_rip */ true); + /* no_rip= */ true); Label* fixup_label = codegen_->NewJitRootStringPatch( load->GetDexFile(), load->GetStringIndex(), load->GetString()); // /* GcRoot<mirror::String> */ out = *address @@ -6160,7 +6160,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA static Address GetExceptionTlsAddress() { return Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>().Int32Value(), - /* no_rip */ true); + /* no_rip= */ true); } void LocationsBuilderX86_64::VisitLoadException(HLoadException* load) { @@ -6435,7 +6435,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) { } DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ j(kNotEqual, slow_path->GetEntryLabel()); __ movl(out, Immediate(1)); @@ -6467,7 +6467,7 @@ void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) { // This should also be beneficial for the other cases above. DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64( - instruction, /* is_fatal */ false); + instruction, /* is_fatal= */ false); codegen_->AddSlowPath(slow_path); __ jmp(slow_path->GetEntryLabel()); if (zero.IsLinked()) { @@ -6954,7 +6954,7 @@ void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister( // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(out + offset) codegen_->GenerateFieldLoadWithBakerReadBarrier( - instruction, out, out_reg, offset, /* needs_null_check */ false); + instruction, out, out_reg, offset, /* needs_null_check= */ false); } else { // Load with slow path based read barrier. // Save the value of `out` into `maybe_temp` before overwriting it @@ -6988,7 +6988,7 @@ void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters( // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(obj + offset) codegen_->GenerateFieldLoadWithBakerReadBarrier( - instruction, out, obj_reg, offset, /* needs_null_check */ false); + instruction, out, obj_reg, offset, /* needs_null_check= */ false); } else { // Load with slow path based read barrier. // /* HeapReference<Object> */ out = *(obj + offset) @@ -7037,13 +7037,13 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad( // Slow path marking the GC root `root`. SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64( - instruction, root, /* unpoison_ref_before_marking */ false); + instruction, root, /* unpoison_ref_before_marking= */ false); codegen_->AddSlowPath(slow_path); // Test the `Thread::Current()->pReadBarrierMarkReg ## root.reg()` entrypoint. 
const int32_t entry_point_offset = Thread::ReadBarrierMarkEntryPointsOffset<kX86_64PointerSize>(root.reg()); - __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip */ true), Immediate(0)); + __ gs()->cmpl(Address::Absolute(entry_point_offset, /* no_rip= */ true), Immediate(0)); // The entrypoint is null when the GC is not marking. __ j(kNotEqual, slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); @@ -7169,10 +7169,10 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction DCHECK(temp1 != nullptr); DCHECK(temp2 != nullptr); slow_path = new (GetScopedAllocator()) ReadBarrierMarkAndUpdateFieldSlowPathX86_64( - instruction, ref, obj, src, /* unpoison_ref_before_marking */ true, *temp1, *temp2); + instruction, ref, obj, src, /* unpoison_ref_before_marking= */ true, *temp1, *temp2); } else { slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathX86_64( - instruction, ref, /* unpoison_ref_before_marking */ true); + instruction, ref, /* unpoison_ref_before_marking= */ true); } AddSlowPath(slow_path); diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc index d6c97552dc..f406983fc2 100644 --- a/compiler/optimizing/code_sinking.cc +++ b/compiler/optimizing/code_sinking.cc @@ -180,7 +180,7 @@ static HInstruction* FindIdealPosition(HInstruction* instruction, DCHECK(!instruction->IsPhi()); // Makes no sense for Phi. // Find the target block. - CommonDominator finder(/* start_block */ nullptr); + CommonDominator finder(/* block= */ nullptr); for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) { HInstruction* user = use.GetUser(); if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) { @@ -259,12 +259,12 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) { size_t number_of_instructions = graph_->GetCurrentInstructionId(); ScopedArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc)); - ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false); + ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable= */ false); processed_instructions.ClearAllBits(); - ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false); + ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable= */ false); post_dominated.ClearAllBits(); ArenaBitVector instructions_that_can_move( - &allocator, number_of_instructions, /* expandable */ false); + &allocator, number_of_instructions, /* expandable= */ false); instructions_that_can_move.ClearAllBits(); ScopedArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc)); @@ -414,7 +414,7 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) { } // Find the position of the instruction we're storing into, filtering out this // store and all other stores to that instruction. - position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter */ true); + position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter= */ true); // The position needs to be dominated by the store, in order for the store to move there. 
if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) { @@ -434,7 +434,7 @@ void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) { continue; } MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSunk); - instruction->MoveBefore(position, /* ensure_safety */ false); + instruction->MoveBefore(position, /* do_checks= */ false); } } diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc index b1436f863c..74d9d3a993 100644 --- a/compiler/optimizing/constant_folding_test.cc +++ b/compiler/optimizing/constant_folding_test.cc @@ -70,7 +70,7 @@ class ConstantFoldingTest : public OptimizingUnitTest { check_after_cf(graph_); - HDeadCodeElimination(graph_, nullptr /* stats */, "dead_code_elimination").Run(); + HDeadCodeElimination(graph_, /* stats= */ nullptr, "dead_code_elimination").Run(); GraphChecker graph_checker_dce(graph_); graph_checker_dce.Run(); ASSERT_TRUE(graph_checker_dce.IsValid()); diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc index 277453545a..f5cd4dc27a 100644 --- a/compiler/optimizing/dead_code_elimination_test.cc +++ b/compiler/optimizing/dead_code_elimination_test.cc @@ -43,7 +43,7 @@ void DeadCodeEliminationTest::TestCode(const std::vector<uint16_t>& data, std::string actual_before = printer_before.str(); ASSERT_EQ(actual_before, expected_before); - HDeadCodeElimination(graph, nullptr /* stats */, "dead_code_elimination").Run(); + HDeadCodeElimination(graph, /* stats= */ nullptr, "dead_code_elimination").Run(); GraphChecker graph_checker(graph); graph_checker.Run(); ASSERT_TRUE(graph_checker.IsValid()); diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc index a689f35e0f..01d9603802 100644 --- a/compiler/optimizing/graph_checker.cc +++ b/compiler/optimizing/graph_checker.cc @@ -635,8 +635,8 @@ void GraphChecker::HandleTypeCheckInstruction(HTypeCheckInstruction* check) { } } CheckTypeCheckBitstringInput( - check, /* input_pos */ 2, check_values, expected_path_to_root, "path_to_root"); - CheckTypeCheckBitstringInput(check, /* input_pos */ 3, check_values, expected_mask, "mask"); + check, /* input_pos= */ 2, check_values, expected_path_to_root, "path_to_root"); + CheckTypeCheckBitstringInput(check, /* input_pos= */ 3, check_values, expected_mask, "mask"); } else { if (!input->IsLoadClass()) { AddError(StringPrintf("%s:%d (classic) expects a HLoadClass as second input, not %s:%d.", @@ -931,7 +931,7 @@ void GraphChecker::VisitPhi(HPhi* phi) { // because the BitVector reallocation strategy has very bad worst-case behavior. ArenaBitVector visited(&allocator, GetGraph()->GetCurrentInstructionId(), - /* expandable */ false, + /* expandable= */ false, kArenaAllocGraphChecker); visited.ClearAllBits(); if (!IsConstantEquivalent(phi, other_phi, &visited)) { diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc index a1af2be9de..07966207f7 100644 --- a/compiler/optimizing/graph_visualizer.cc +++ b/compiler/optimizing/graph_visualizer.cc @@ -130,10 +130,10 @@ class HGraphVisualizerDisassembler { // been generated, so we can read data in literal pools. 
disassembler_ = std::unique_ptr<Disassembler>((*create_disassembler)( instruction_set, - new DisassemblerOptions(/* absolute_addresses */ false, + new DisassemblerOptions(/* absolute_addresses= */ false, base_address, end_address, - /* can_read_literals */ true, + /* can_read_literals= */ true, Is64BitInstructionSet(instruction_set) ? &Thread::DumpThreadOffset<PointerSize::k64> : &Thread::DumpThreadOffset<PointerSize::k32>))); @@ -924,8 +924,8 @@ void HGraphVisualizer::DumpGraphWithDisassembly() const { HGraphVisualizerPrinter printer(graph_, *output_, "disassembly", - /* is_after_pass */ true, - /* graph_in_bad_state */ false, + /* is_after_pass= */ true, + /* graph_in_bad_state= */ false, codegen_, codegen_.GetDisassemblyInformation()); printer.Run(); diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc index e6b6326726..3689d1d232 100644 --- a/compiler/optimizing/gvn.cc +++ b/compiler/optimizing/gvn.cc @@ -348,7 +348,7 @@ class GlobalValueNumberer : public ValueObject { side_effects_(side_effects), sets_(graph->GetBlocks().size(), nullptr, allocator_.Adapter(kArenaAllocGvn)), visited_blocks_( - &allocator_, graph->GetBlocks().size(), /* expandable */ false, kArenaAllocGvn) { + &allocator_, graph->GetBlocks().size(), /* expandable= */ false, kArenaAllocGvn) { visited_blocks_.ClearAllBits(); } @@ -546,12 +546,12 @@ HBasicBlock* GlobalValueNumberer::FindVisitedBlockWithRecyclableSet( // that is larger, we return it if no perfectly-matching set is found. // Note that we defer testing WillBeReferencedAgain until all other criteria // have been satisfied because it might be expensive. - if (current_set->CanHoldCopyOf(reference_set, /* exact_match */ true)) { + if (current_set->CanHoldCopyOf(reference_set, /* exact_match= */ true)) { if (!WillBeReferencedAgain(current_block)) { return current_block; } } else if (secondary_match == nullptr && - current_set->CanHoldCopyOf(reference_set, /* exact_match */ false)) { + current_set->CanHoldCopyOf(reference_set, /* exact_match= */ false)) { if (!WillBeReferencedAgain(current_block)) { secondary_match = current_block; } diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc index 55eca2316a..4c78fa8f06 100644 --- a/compiler/optimizing/induction_var_range.cc +++ b/compiler/optimizing/induction_var_range.cc @@ -216,13 +216,13 @@ bool InductionVarRange::GetInductionRange(HInstruction* context, chase_hint_ = chase_hint; bool in_body = context->GetBlock() != loop->GetHeader(); int64_t stride_value = 0; - *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true)); - *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min */ false), chase_hint); + *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true)); + *max_val = SimplifyMax(GetVal(info, trip, in_body, /* is_min= */ false), chase_hint); *needs_finite_test = NeedsTripCount(info, &stride_value) && IsUnsafeTripCount(trip); chase_hint_ = nullptr; // Retry chasing constants for wrap-around (merge sensitive). if (!min_val->is_known && info->induction_class == HInductionVarAnalysis::kWrapAround) { - *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min */ true)); + *min_val = SimplifyMin(GetVal(info, trip, in_body, /* is_min= */ true)); } return true; } @@ -445,8 +445,8 @@ bool InductionVarRange::IsConstant(HInductionVarAnalysis::InductionInfo* info, } // Try range analysis on the invariant, only accept a proper range // to avoid arithmetic wrap-around anomalies. 
- Value min_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ true); - Value max_val = GetVal(info, nullptr, /* in_body */ true, /* is_min */ false); + Value min_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ true); + Value max_val = GetVal(info, nullptr, /* in_body= */ true, /* is_min= */ false); if (IsConstantValue(min_val) && IsConstantValue(max_val) && min_val.b_constant <= max_val.b_constant) { if ((request == kExact && min_val.b_constant == max_val.b_constant) || request == kAtMost) { @@ -791,10 +791,10 @@ InductionVarRange::Value InductionVarRange::GetMul(HInductionVarAnalysis::Induct return MulRangeAndConstant(value, info1, trip, in_body, is_min); } // Interval ranges. - Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true); - Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false); - Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true); - Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false); + Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true); + Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false); + Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true); + Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false); // Positive range vs. positive or negative range. if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) { if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) { @@ -825,10 +825,10 @@ InductionVarRange::Value InductionVarRange::GetDiv(HInductionVarAnalysis::Induct return DivRangeAndConstant(value, info1, trip, in_body, is_min); } // Interval ranges. - Value v1_min = GetVal(info1, trip, in_body, /* is_min */ true); - Value v1_max = GetVal(info1, trip, in_body, /* is_min */ false); - Value v2_min = GetVal(info2, trip, in_body, /* is_min */ true); - Value v2_max = GetVal(info2, trip, in_body, /* is_min */ false); + Value v1_min = GetVal(info1, trip, in_body, /* is_min= */ true); + Value v1_max = GetVal(info1, trip, in_body, /* is_min= */ false); + Value v2_min = GetVal(info2, trip, in_body, /* is_min= */ true); + Value v2_max = GetVal(info2, trip, in_body, /* is_min= */ false); // Positive range vs. positive or negative range. if (IsConstantValue(v1_min) && v1_min.b_constant >= 0) { if (IsConstantValue(v2_min) && v2_min.b_constant >= 0) { @@ -1019,10 +1019,10 @@ bool InductionVarRange::GenerateRangeOrLastValue(HInstruction* context, // Code generation for taken test: generate the code when requested or otherwise analyze // if code generation is feasible when taken test is needed. if (taken_test != nullptr) { - return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min */ false); + return GenerateCode(trip->op_b, nullptr, graph, block, taken_test, in_body, /* is_min= */ false); } else if (*needs_taken_test) { if (!GenerateCode( - trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min */ false)) { + trip->op_b, nullptr, nullptr, nullptr, nullptr, in_body, /* is_min= */ false)) { return false; } } @@ -1030,9 +1030,9 @@ bool InductionVarRange::GenerateRangeOrLastValue(HInstruction* context, return // Success on lower if invariant (not set), or code can be generated. ((info->induction_class == HInductionVarAnalysis::kInvariant) || - GenerateCode(info, trip, graph, block, lower, in_body, /* is_min */ true)) && + GenerateCode(info, trip, graph, block, lower, in_body, /* is_min= */ true)) && // And success on upper. 
- GenerateCode(info, trip, graph, block, upper, in_body, /* is_min */ false); + GenerateCode(info, trip, graph, block, upper, in_body, /* is_min= */ false); } bool InductionVarRange::GenerateLastValuePolynomial(HInductionVarAnalysis::InductionInfo* info, diff --git a/compiler/optimizing/induction_var_range_test.cc b/compiler/optimizing/induction_var_range_test.cc index 223e08e1b4..f6af384af0 100644 --- a/compiler/optimizing/induction_var_range_test.cc +++ b/compiler/optimizing/induction_var_range_test.cc @@ -252,24 +252,24 @@ class InductionVarRangeTest : public OptimizingUnitTest { Value GetMin(HInductionVarAnalysis::InductionInfo* info, HInductionVarAnalysis::InductionInfo* trip) { - return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ true); + return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ true); } Value GetMax(HInductionVarAnalysis::InductionInfo* info, HInductionVarAnalysis::InductionInfo* trip) { - return range_.GetVal(info, trip, /* in_body */ true, /* is_min */ false); + return range_.GetVal(info, trip, /* in_body= */ true, /* is_min= */ false); } Value GetMul(HInductionVarAnalysis::InductionInfo* info1, HInductionVarAnalysis::InductionInfo* info2, bool is_min) { - return range_.GetMul(info1, info2, nullptr, /* in_body */ true, is_min); + return range_.GetMul(info1, info2, nullptr, /* in_body= */ true, is_min); } Value GetDiv(HInductionVarAnalysis::InductionInfo* info1, HInductionVarAnalysis::InductionInfo* info2, bool is_min) { - return range_.GetDiv(info1, info2, nullptr, /* in_body */ true, is_min); + return range_.GetDiv(info1, info2, nullptr, /* in_body= */ true, is_min); } Value GetRem(HInductionVarAnalysis::InductionInfo* info1, diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 417d794264..854228beb9 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -175,7 +175,7 @@ bool HInliner::Run() { if (honor_noinline_directives) { // Debugging case: directives in method names control or assert on inlining. std::string callee_name = outer_compilation_unit_.GetDexFile()->PrettyMethod( - call->GetDexMethodIndex(), /* with_signature */ false); + call->GetDexMethodIndex(), /* with_signature= */ false); // Tests prevent inlining by having $noinline$ in their method names. if (callee_name.find("$noinline$") == std::string::npos) { if (TryInline(call)) { @@ -504,7 +504,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) { bool result = TryInlineAndReplace(invoke_instruction, actual_method, ReferenceTypeInfo::CreateInvalid(), - /* do_rtp */ true, + /* do_rtp= */ true, cha_devirtualize); if (result) { // Successfully inlined. @@ -858,9 +858,9 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction, HBasicBlock* bb_cursor = invoke_instruction->GetBlock(); if (!TryInlineAndReplace(invoke_instruction, resolved_method, - ReferenceTypeInfo::Create(monomorphic_type, /* is_exact */ true), - /* do_rtp */ false, - /* cha_devirtualize */ false)) { + ReferenceTypeInfo::Create(monomorphic_type, /* is_exact= */ true), + /* do_rtp= */ false, + /* cha_devirtualize= */ false)) { return false; } @@ -871,7 +871,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction, class_index, monomorphic_type, invoke_instruction, - /* with_deoptimization */ true); + /* with_deoptimization= */ true); // Run type propagation to get the guard typed, and eventually propagate the // type of the receiver. 
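Every hunk in this change applies the same mechanical rewrite: an argument comment written as /* name */ becomes /* name= */, the spelling that clang-tidy's bugprone-argument-comment check recognizes and verifies against the parameter name in the callee's declaration. A minimal standalone sketch of the effect (the helper and its signature are hypothetical; only the comment style mirrors the patch):

#include <cstdio>

// Hypothetical helper, loosely modeled on calls appearing in this diff.
static void EmitGuard(int dex_pc, bool is_fatal, bool needs_null_check) {
  std::printf("dex_pc=%d is_fatal=%d needs_null_check=%d\n",
              dex_pc, is_fatal, needs_null_check);
}

int main() {
  // New spelling: clang-tidy checks each /* name= */ comment against the
  // declaration above and reports a stale or misspelled name.
  EmitGuard(/* dex_pc= */ 0, /* is_fatal= */ false, /* needs_null_check= */ true);
  // Old spelling being replaced: a plain /* name */ comment is ordinary prose
  // to the check, so it can drift out of sync without any warning.
  EmitGuard(/* dex_pc */ 0, /* is_fatal */ false, /* needs_null_check */ true);
  return 0;
}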
@@ -879,7 +879,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction, outer_compilation_unit_.GetClassLoader(), outer_compilation_unit_.GetDexCache(), handles_, - /* is_first_run */ false); + /* is_first_run= */ false); rtp_fixup.Run(); MaybeRecordStat(stats_, MethodCompilationStat::kInlinedMonomorphicCall); @@ -949,7 +949,7 @@ HInstruction* HInliner::AddTypeGuard(HInstruction* receiver, klass, is_referrer, invoke_instruction->GetDexPc(), - /* needs_access_check */ false); + /* needs_access_check= */ false); HLoadClass::LoadKind kind = HSharpening::ComputeLoadClassKind( load_class, codegen_, caller_compilation_unit_); DCHECK(kind != HLoadClass::LoadKind::kInvalid) @@ -1027,7 +1027,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, if (!class_index.IsValid() || !TryBuildAndInline(invoke_instruction, method, - ReferenceTypeInfo::Create(handle, /* is_exact */ true), + ReferenceTypeInfo::Create(handle, /* is_exact= */ true), &return_replacement)) { all_targets_inlined = false; } else { @@ -1079,7 +1079,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, outer_compilation_unit_.GetClassLoader(), outer_compilation_unit_.GetDexCache(), handles_, - /* is_first_run */ false); + /* is_first_run= */ false); rtp_fixup.Run(); return true; } @@ -1150,14 +1150,14 @@ void HInliner::CreateDiamondPatternForPolymorphicInline(HInstruction* compare, graph_->UpdateLoopAndTryInformationOfNewBlock( - then, original_invoke_block, /* replace_if_back_edge */ false); + then, original_invoke_block, /* replace_if_back_edge= */ false); graph_->UpdateLoopAndTryInformationOfNewBlock( - otherwise, original_invoke_block, /* replace_if_back_edge */ false); + otherwise, original_invoke_block, /* replace_if_back_edge= */ false); // In case the original invoke location was a back edge, we need to update // the loop to now have the merge block as a back edge. graph_->UpdateLoopAndTryInformationOfNewBlock( - merge, original_invoke_block, /* replace_if_back_edge */ true); + merge, original_invoke_block, /* replace_if_back_edge= */ true); } bool HInliner::TryInlinePolymorphicCallToSameTarget( @@ -1275,7 +1275,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget( outer_compilation_unit_.GetClassLoader(), outer_compilation_unit_.GetDexCache(), handles_, - /* is_first_run */ false); + /* is_first_run= */ false); rtp_fixup.Run(); MaybeRecordStat(stats_, MethodCompilationStat::kInlinedPolymorphicCall); @@ -1399,7 +1399,7 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction, outer_compilation_unit_.GetClassLoader(), outer_compilation_unit_.GetDexCache(), handles_, - /* is_first_run */ false).Run(); + /* is_first_run= */ false).Run(); } return true; } @@ -1625,7 +1625,8 @@ bool HInliner::TryPatternSubstitution(HInvoke* invoke_instruction, [](uint16_t index) { return index != DexFile::kDexNoIndex16; })); // Create HInstanceFieldSet for each IPUT that stores non-zero data. 
- HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, /* this */ 0u); + HInstruction* obj = GetInvokeInputForArgVRegIndex(invoke_instruction, + /* arg_vreg_index= */ 0u); bool needs_constructor_barrier = false; for (size_t i = 0; i != number_of_iputs; ++i) { HInstruction* value = GetInvokeInputForArgVRegIndex(invoke_instruction, iput_args[i]); @@ -1667,7 +1668,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index, REQUIRES_SHARED(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ArtField* resolved_field = - class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false); + class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false); DCHECK(resolved_field != nullptr); HInstanceFieldGet* iget = new (graph_->GetAllocator()) HInstanceFieldGet( obj, @@ -1680,7 +1681,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index, *referrer->GetDexFile(), // Read barrier generates a runtime call in slow path and we need a valid // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537. - /* dex_pc */ 0); + /* dex_pc= */ 0); if (iget->GetType() == DataType::Type::kReference) { // Use the same dex_cache that we used for field lookup as the hint_dex_cache. Handle<mirror::DexCache> dex_cache = handles_->NewHandle(referrer->GetDexCache()); @@ -1688,7 +1689,7 @@ HInstanceFieldGet* HInliner::CreateInstanceFieldGet(uint32_t field_index, outer_compilation_unit_.GetClassLoader(), dex_cache, handles_, - /* is_first_run */ false); + /* is_first_run= */ false); rtp.Visit(iget); } return iget; @@ -1702,7 +1703,7 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(uint32_t field_index, REQUIRES_SHARED(Locks::mutator_lock_) { ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); ArtField* resolved_field = - class_linker->LookupResolvedField(field_index, referrer, /* is_static */ false); + class_linker->LookupResolvedField(field_index, referrer, /* is_static= */ false); DCHECK(resolved_field != nullptr); if (is_final != nullptr) { // This information is needed only for constructors. @@ -1721,7 +1722,7 @@ HInstanceFieldSet* HInliner::CreateInstanceFieldSet(uint32_t field_index, *referrer->GetDexFile(), // Read barrier generates a runtime call in slow path and we need a valid // dex pc for the associated stack map. 0 is bogus but valid. Bug: 26854537. 
- /* dex_pc */ 0); + /* dex_pc= */ 0); return iput; } @@ -1777,7 +1778,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, resolved_method->GetDeclaringClass()->GetDexClassDefIndex(), method_index, resolved_method->GetAccessFlags(), - /* verified_method */ nullptr, + /* verified_method= */ nullptr, dex_cache, compiling_class); @@ -1797,7 +1798,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, codegen_->GetCompilerOptions().GetInstructionSet(), invoke_type, graph_->IsDebuggable(), - /* osr */ false, + /* osr= */ false, caller_instruction_counter); callee_graph->SetArtMethod(resolved_method); @@ -1878,7 +1879,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, outer_compilation_unit_.GetClassLoader(), dex_compilation_unit.GetDexCache(), handles_, - /* is_first_run */ false).Run(); + /* is_first_run= */ false).Run(); } RunOptimizations(callee_graph, code_item, dex_compilation_unit); @@ -2102,7 +2103,7 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod* // is more specific than the class which declares the method. if (!resolved_method->IsStatic()) { if (IsReferenceTypeRefinement(GetClassRTI(resolved_method->GetDeclaringClass()), - /* declared_can_be_null */ false, + /* declared_can_be_null= */ false, invoke_instruction->InputAt(0u))) { return true; } @@ -2122,7 +2123,7 @@ bool HInliner::ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod* ObjPtr<mirror::Class> param_cls = resolved_method->LookupResolvedClassFromTypeIndex( param_list->GetTypeItem(param_idx).type_idx_); if (IsReferenceTypeRefinement(GetClassRTI(param_cls), - /* declared_can_be_null */ true, + /* declared_can_be_null= */ true, input)) { return true; } @@ -2139,7 +2140,7 @@ bool HInliner::ReturnTypeMoreSpecific(HInvoke* invoke_instruction, if (return_replacement->GetType() == DataType::Type::kReference) { // Test if the return type is a refinement of the declared return type. if (IsReferenceTypeRefinement(invoke_instruction->GetReferenceTypeInfo(), - /* declared_can_be_null */ true, + /* declared_can_be_null= */ true, return_replacement)) { return true; } else if (return_replacement->IsInstanceFieldGet()) { diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc index 448fed9435..b6ef2b614f 100644 --- a/compiler/optimizing/instruction_builder.cc +++ b/compiler/optimizing/instruction_builder.cc @@ -434,7 +434,7 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) { HInvokeStaticOrDirect::DispatchInfo dispatch_info = { HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall, HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod, - /* method_load_data */ 0u + /* method_load_data= */ 0u }; InvokeType invoke_type = dex_compilation_unit_->IsStatic() ? kStatic : kDirect; HInvokeStaticOrDirect* invoke = new (allocator_) HInvokeStaticOrDirect( @@ -449,7 +449,7 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) { target_method, HInvokeStaticOrDirect::ClinitCheckRequirement::kNone); RangeInstructionOperands operands(graph_->GetNumberOfVRegs() - in_vregs, in_vregs); - HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved */ false); + HandleInvoke(invoke, operands, dex_file_->GetMethodShorty(method_idx), /* is_unresolved= */ false); // Add the return instruction. 
if (return_type_ == DataType::Type::kVoid) { @@ -468,7 +468,7 @@ void HInstructionBuilder::BuildIntrinsic(ArtMethod* method) { ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() { ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_, code_item_accessor_.InsnsSizeInCodeUnits(), - /* expandable */ false, + /* expandable= */ false, kArenaAllocGraphBuilder); locations->ClearAllBits(); // The visitor gets called when the line number changes. @@ -567,7 +567,7 @@ void HInstructionBuilder::InitializeParameters() { referrer_method_id.class_idx_, parameter_index++, DataType::Type::kReference, - /* is_this */ true); + /* is_this= */ true); AppendInstruction(parameter); UpdateLocal(locals_index++, parameter); number_of_parameters--; @@ -584,7 +584,7 @@ void HInstructionBuilder::InitializeParameters() { arg_types->GetTypeItem(shorty_pos - 1).type_idx_, parameter_index++, DataType::FromShorty(shorty[shorty_pos]), - /* is_this */ false); + /* is_this= */ false); ++shorty_pos; AppendInstruction(parameter); // Store the parameter value in the local that the dex code will use @@ -926,7 +926,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction, dex_pc, method_idx, invoke_type); - return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ true); + return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ true); } // Replace calls to String.<init> with StringFactory. @@ -945,10 +945,10 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction, HInvoke* invoke = new (allocator_) HInvokeStaticOrDirect( allocator_, number_of_arguments - 1, - DataType::Type::kReference /*return_type */, + /* return_type= */ DataType::Type::kReference, dex_pc, method_idx, - nullptr /* resolved_method */, + /* resolved_method= */ nullptr, dispatch_info, invoke_type, target_method, @@ -1010,7 +1010,7 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction, resolved_method, ImTable::GetImtIndex(resolved_method)); } - return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false, clinit_check); + return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false, clinit_check); } bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc, @@ -1026,7 +1026,7 @@ bool HInstructionBuilder::BuildInvokePolymorphic(uint32_t dex_pc, return_type, dex_pc, method_idx); - return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false); + return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false); } @@ -1042,7 +1042,7 @@ bool HInstructionBuilder::BuildInvokeCustom(uint32_t dex_pc, call_site_idx, return_type, dex_pc); - return HandleInvoke(invoke, operands, shorty, /* is_unresolved */ false); + return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false); } HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, uint32_t dex_pc) { @@ -1370,7 +1370,7 @@ HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke( klass->GetDexFile(), klass, dex_pc, - /* needs_access_check */ false); + /* needs_access_check= */ false); if (cls != nullptr) { *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit; clinit_check = new (allocator_) HClinitCheck(cls, dex_pc); @@ -1539,7 +1539,7 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio } ScopedObjectAccess soa(Thread::Current()); - ArtField* resolved_field = ResolveField(field_index, /* is_static */ false, is_put); + ArtField* resolved_field = ResolveField(field_index, /* 
is_static= */ false, is_put); // Generate an explicit null check on the reference, unless the field access // is unresolved. In that case, we rely on the runtime to perform various @@ -1673,7 +1673,7 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction, uint16_t field_index = instruction.VRegB_21c(); ScopedObjectAccess soa(Thread::Current()); - ArtField* resolved_field = ResolveField(field_index, /* is_static */ true, is_put); + ArtField* resolved_field = ResolveField(field_index, /* is_static= */ true, is_put); if (resolved_field == nullptr) { MaybeRecordStat(compilation_stats_, @@ -1690,7 +1690,7 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction, klass->GetDexFile(), klass, dex_pc, - /* needs_access_check */ false); + /* needs_access_check= */ false); if (constant == nullptr) { // The class cannot be referenced from this compiled code. Generate @@ -2946,7 +2946,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::IGET_CHAR_QUICK: case Instruction::IGET_SHORT: case Instruction::IGET_SHORT_QUICK: { - if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ false, quicken_index)) { + if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ false, quicken_index)) { return false; } break; @@ -2966,7 +2966,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::IPUT_CHAR_QUICK: case Instruction::IPUT_SHORT: case Instruction::IPUT_SHORT_QUICK: { - if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put */ true, quicken_index)) { + if (!BuildInstanceFieldAccess(instruction, dex_pc, /* is_put= */ true, quicken_index)) { return false; } break; @@ -2979,7 +2979,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::SGET_BYTE: case Instruction::SGET_CHAR: case Instruction::SGET_SHORT: { - BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ false); + BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ false); break; } @@ -2990,7 +2990,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, case Instruction::SPUT_BYTE: case Instruction::SPUT_CHAR: case Instruction::SPUT_SHORT: { - BuildStaticFieldAccess(instruction, dex_pc, /* is_put */ true); + BuildStaticFieldAccess(instruction, dex_pc, /* is_put= */ true); break; } diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index 4c6d6bacd8..a433d7ef73 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -372,7 +372,7 @@ void InstructionSimplifierVisitor::VisitShift(HBinaryOperation* instruction) { // (as defined by shift semantics). This ensures other // optimizations do not need to special case for such situations. DCHECK_EQ(shift_amount->GetType(), DataType::Type::kInt32); - instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index */ 1); + instruction->ReplaceInput(GetGraph()->GetIntConstant(masked_cst), /* index= */ 1); RecordSimplification(); return; } @@ -2361,17 +2361,17 @@ void InstructionSimplifierVisitor::SimplifyStringCharAt(HInvoke* invoke) { ArenaAllocator* allocator = GetGraph()->GetAllocator(); // We treat String as an array to allow DCE and BCE to seamlessly work on strings, // so create the HArrayLength, HBoundsCheck and HArrayGet. 
- HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length */ true); + HArrayLength* length = new (allocator) HArrayLength(str, dex_pc, /* is_string_length= */ true); invoke->GetBlock()->InsertInstructionBefore(length, invoke); HBoundsCheck* bounds_check = new (allocator) HBoundsCheck( - index, length, dex_pc, /* is_string_char_at */ true); + index, length, dex_pc, /* is_string_char_at= */ true); invoke->GetBlock()->InsertInstructionBefore(bounds_check, invoke); HArrayGet* array_get = new (allocator) HArrayGet(str, bounds_check, DataType::Type::kUint16, SideEffects::None(), // Strings are immutable. dex_pc, - /* is_string_char_at */ true); + /* is_string_char_at= */ true); invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, array_get); bounds_check->CopyEnvironmentFrom(invoke->GetEnvironment()); GetGraph()->SetHasBoundsChecks(true); @@ -2383,7 +2383,7 @@ void InstructionSimplifierVisitor::SimplifyStringIsEmptyOrLength(HInvoke* invoke // We treat String as an array to allow DCE and BCE to seamlessly work on strings, // so create the HArrayLength. HArrayLength* length = - new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length */ true); + new (GetGraph()->GetAllocator()) HArrayLength(str, dex_pc, /* is_string_length= */ true); HInstruction* replacement; if (invoke->GetIntrinsic() == Intrinsics::kStringIsEmpty) { // For String.isEmpty(), create the `HEqual` representing the `length == 0`. @@ -2534,28 +2534,28 @@ void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) { SimplifySystemArrayCopy(instruction); break; case Intrinsics::kIntegerRotateRight: - SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt32); + SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt32); break; case Intrinsics::kLongRotateRight: - SimplifyRotate(instruction, /* is_left */ false, DataType::Type::kInt64); + SimplifyRotate(instruction, /* is_left= */ false, DataType::Type::kInt64); break; case Intrinsics::kIntegerRotateLeft: - SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt32); + SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt32); break; case Intrinsics::kLongRotateLeft: - SimplifyRotate(instruction, /* is_left */ true, DataType::Type::kInt64); + SimplifyRotate(instruction, /* is_left= */ true, DataType::Type::kInt64); break; case Intrinsics::kIntegerCompare: - SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt32); + SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt32); break; case Intrinsics::kLongCompare: - SimplifyCompare(instruction, /* is_signum */ false, DataType::Type::kInt64); + SimplifyCompare(instruction, /* is_signum= */ false, DataType::Type::kInt64); break; case Intrinsics::kIntegerSignum: - SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt32); + SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt32); break; case Intrinsics::kLongSignum: - SimplifyCompare(instruction, /* is_signum */ true, DataType::Type::kInt64); + SimplifyCompare(instruction, /* is_signum= */ true, DataType::Type::kInt64); break; case Intrinsics::kFloatIsNaN: case Intrinsics::kDoubleIsNaN: diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc index f968c19cf9..01e9cff6d8 100644 --- a/compiler/optimizing/instruction_simplifier_arm.cc +++ b/compiler/optimizing/instruction_simplifier_arm.cc @@ -43,11 +43,11 @@ class 
InstructionSimplifierArmVisitor : public HGraphVisitor { bool TryMergeIntoUsersShifterOperand(HInstruction* instruction); bool TryMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op, bool do_merge); bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) { - return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false); + return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false); } bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) { DCHECK(CanMergeIntoShifterOperand(use, bitfield_op)); - return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true); + return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true); } /** diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc index b536cb4dc4..e23decbd71 100644 --- a/compiler/optimizing/instruction_simplifier_arm64.cc +++ b/compiler/optimizing/instruction_simplifier_arm64.cc @@ -45,11 +45,11 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor { HInstruction* bitfield_op, bool do_merge); bool CanMergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) { - return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ false); + return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ false); } bool MergeIntoShifterOperand(HInstruction* use, HInstruction* bitfield_op) { DCHECK(CanMergeIntoShifterOperand(use, bitfield_op)); - return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge */ true); + return TryMergeIntoShifterOperand(use, bitfield_op, /* do_merge= */ true); } /** diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc index 0374b4e332..c345624a7a 100644 --- a/compiler/optimizing/intrinsic_objects.cc +++ b/compiler/optimizing/intrinsic_objects.cc @@ -30,7 +30,7 @@ static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(Thread* se ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_) { ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass( - self, "Ljava/lang/Integer$IntegerCache;", /* class_linker */ nullptr); + self, "Ljava/lang/Integer$IntegerCache;", /* class_loader= */ nullptr); if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) { return nullptr; } diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index 619cd8ed41..2721cb5a0f 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -76,7 +76,7 @@ static ObjPtr<mirror::Class> LookupInitializedClass(Thread* self, const char* descriptor) REQUIRES_SHARED(Locks::mutator_lock_) { ObjPtr<mirror::Class> klass = - class_linker->LookupClass(self, descriptor, /* class_loader */ nullptr); + class_linker->LookupClass(self, descriptor, /* class_loader= */ nullptr); DCHECK(klass != nullptr); DCHECK(klass->IsInitialized()); return klass; @@ -166,14 +166,14 @@ void IntrinsicVisitor::ComputeIntegerValueOfLocations(HInvoke* invoke, Thread* self = Thread::Current(); ScopedObjectAccess soa(self); ObjPtr<mirror::Class> cache_class = class_linker->LookupClass( - self, kIntegerCacheDescriptor, /* class_loader */ nullptr); + self, kIntegerCacheDescriptor, /* class_loader= */ nullptr); DCHECK(cache_class != nullptr); if (UNLIKELY(!cache_class->IsInitialized())) { LOG(WARNING) << "Image class " << cache_class->PrettyDescriptor() << " is uninitialized."; return; } ObjPtr<mirror::Class> integer_class = - 
class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader */ nullptr); + class_linker->LookupClass(self, kIntegerDescriptor, /* class_loader= */ nullptr); DCHECK(integer_class != nullptr); if (UNLIKELY(!integer_class->IsInitialized())) { LOG(WARNING) << "Image class " << integer_class->PrettyDescriptor() << " is uninitialized."; diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 7fb69b7463..ae1650e607 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -272,10 +272,10 @@ void IntrinsicLocationsBuilderARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke } void IntrinsicCodeGeneratorARM64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler()); } void IntrinsicCodeGeneratorARM64::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { @@ -286,10 +286,10 @@ void IntrinsicLocationsBuilderARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler()); } void IntrinsicCodeGeneratorARM64::VisitFloatIntBitsToFloat(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler()); } static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { @@ -618,7 +618,7 @@ void IntrinsicLocationsBuilderARM64::VisitMathRoundDouble(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitMathRoundDouble(HInvoke* invoke) { - GenMathRound(invoke, /* is_double */ true, GetVIXLAssembler()); + GenMathRound(invoke, /* is_double= */ true, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) { @@ -626,7 +626,7 @@ void IntrinsicLocationsBuilderARM64::VisitMathRoundFloat(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitMathRoundFloat(HInvoke* invoke) { - GenMathRound(invoke, /* is_double */ false, GetVIXLAssembler()); + GenMathRound(invoke, /* is_double= */ false, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitMemoryPeekByte(HInvoke* invoke) { @@ -752,13 +752,13 @@ static void GenUnsafeGet(HInvoke* invoke, trg_loc, base, MemOperand(temp.X()), - /* needs_null_check */ false, + /* needs_null_check= */ false, is_volatile); } else { // Other cases. 
MemOperand mem_op(base.X(), offset); if (is_volatile) { - codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check */ true); + codegen->LoadAcquire(invoke, trg, mem_op, /* needs_null_check= */ true); } else { codegen->Load(type, trg, mem_op); } @@ -813,22 +813,22 @@ void IntrinsicLocationsBuilderARM64::VisitUnsafeGetObjectVolatile(HInvoke* invok } void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_); } static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) { @@ -896,7 +896,7 @@ static void GenUnsafePut(HInvoke* invoke, } if (is_volatile || is_ordered) { - codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check */ false); + codegen->StoreRelease(invoke, type, source, mem_op, /* needs_null_check= */ false); } else { codegen->Store(type, source, mem_op); } @@ -911,64 +911,64 @@ static void GenUnsafePut(HInvoke* invoke, void IntrinsicCodeGeneratorARM64::VisitUnsafePut(HInvoke* invoke) { GenUnsafePut(invoke, DataType::Type::kInt32, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutOrdered(HInvoke* invoke) { GenUnsafePut(invoke, DataType::Type::kInt32, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutVolatile(HInvoke* invoke) { GenUnsafePut(invoke, DataType::Type::kInt32, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutObject(HInvoke* invoke) { GenUnsafePut(invoke, DataType::Type::kReference, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { GenUnsafePut(invoke, DataType::Type::kReference, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, codegen_); } void 
IntrinsicCodeGeneratorARM64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { GenUnsafePut(invoke, DataType::Type::kReference, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutLong(HInvoke* invoke) { GenUnsafePut(invoke, DataType::Type::kInt64, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongOrdered(HInvoke* invoke) { GenUnsafePut(invoke, DataType::Type::kInt64, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, codegen_); } void IntrinsicCodeGeneratorARM64::VisitUnsafePutLongVolatile(HInvoke* invoke) { GenUnsafePut(invoke, DataType::Type::kInt64, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, codegen_); } @@ -1638,7 +1638,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOf(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitStringIndexOf(HInvoke* invoke) { - GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ true); + GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ true); } void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) { @@ -1654,7 +1654,7 @@ void IntrinsicLocationsBuilderARM64::VisitStringIndexOfAfter(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitStringIndexOfAfter(HInvoke* invoke) { - GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero */ false); + GenerateVisitStringIndexOf(invoke, GetVIXLAssembler(), codegen_, /* start_at_zero= */ false); } void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) { @@ -2456,8 +2456,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { src.W(), class_offset, temp3_loc, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); // Bail out if the source is not a non primitive array. // /* HeapReference<Class> */ temp1 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke, @@ -2465,8 +2465,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { temp1, component_offset, temp3_loc, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); __ Cbz(temp1, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `temp1` has been unpoisoned // by the the previous call to GenerateFieldLoadWithBakerReadBarrier. @@ -2482,8 +2482,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { dest.W(), class_offset, temp3_loc, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); if (!optimizations.GetDestinationIsNonPrimitiveArray()) { // Bail out if the destination is not a non primitive array. 
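Calls like the GenerateFieldLoadWithBakerReadBarrier ones in this hunk take several adjacent bool flags (needs_null_check, use_load_acquire), which is where the checked comments earn their keep: transposing two flags still compiles, but the /* name= */ comments then disagree with the declaration and clang-tidy flags the call. A small illustration, with the flag names borrowed from this hunk and a stand-in helper:

#include <cstdio>

// Stand-in for a load helper with two adjacent flag parameters.
static void LoadWithBarrier(bool needs_null_check, bool use_load_acquire) {
  std::printf("needs_null_check=%d use_load_acquire=%d\n",
              needs_null_check, use_load_acquire);
}

int main() {
  // Comments match the parameter order of the declaration: accepted.
  LoadWithBarrier(/* needs_null_check= */ false, /* use_load_acquire= */ true);
  // Arguments accidentally transposed: this still compiles and silently runs
  // with the wrong flags, but each comment now names the other parameter, so
  // bugprone-argument-comment reports the mismatch.
  LoadWithBarrier(/* use_load_acquire= */ true, /* needs_null_check= */ false);
  return 0;
}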
@@ -2499,8 +2499,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { temp1, component_offset, temp3_loc, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); __ Cbz(temp2, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `temp2` has been unpoisoned // by the the previous call to GenerateFieldLoadWithBakerReadBarrier. @@ -2518,8 +2518,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { src.W(), class_offset, temp3_loc, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); // Note: if heap poisoning is on, we are comparing two unpoisoned references here. __ Cmp(temp1, temp2); @@ -2532,8 +2532,8 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { temp1, component_offset, temp3_loc, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); // /* HeapReference<Class> */ temp1 = temp1->super_class_ // We do not need to emit a read barrier for the following // heap reference load, as `temp1` is only used in a @@ -2616,16 +2616,16 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { src.W(), class_offset, temp3_loc, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); // /* HeapReference<Class> */ temp2 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke, temp2_loc, temp1, component_offset, temp3_loc, - /* needs_null_check */ false, - /* use_load_acquire */ false); + /* needs_null_check= */ false, + /* use_load_acquire= */ false); __ Cbz(temp2, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `temp2` has been unpoisoned // by the the previous call to GenerateFieldLoadWithBakerReadBarrier. @@ -2779,7 +2779,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { } // We only need one card marking on the destination array. 
- codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null */ false); + codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null= */ false); __ Bind(intrinsic_slow_path->GetExitLabel()); } @@ -2812,7 +2812,7 @@ void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) { - GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler()); + GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) { @@ -2820,7 +2820,7 @@ void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) { } void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) { - GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler()); + GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetVIXLAssembler()); } void IntrinsicLocationsBuilderARM64::VisitIntegerValueOf(HInvoke* invoke) { diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc index 95752fcd01..396ff62a16 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.cc +++ b/compiler/optimizing/intrinsics_arm_vixl.cc @@ -229,7 +229,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL { assembler->MaybePoisonHeapReference(tmp); __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex)); __ Cmp(src_curr_addr, src_stop_addr); - __ B(ne, &loop, /* far_target */ false); + __ B(ne, &loop, /* is_far_target= */ false); __ B(GetExitLabel()); } @@ -298,10 +298,10 @@ void IntrinsicLocationsBuilderARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invo } void IntrinsicCodeGeneratorARMVIXL::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } void IntrinsicCodeGeneratorARMVIXL::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } void IntrinsicLocationsBuilderARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) { @@ -312,10 +312,10 @@ void IntrinsicLocationsBuilderARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) } void IntrinsicCodeGeneratorARMVIXL::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } void IntrinsicCodeGeneratorARMVIXL::VisitFloatIntBitsToFloat(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { @@ -355,7 +355,7 @@ static void GenNumberOfLeadingZeros(HInvoke* invoke, vixl32::Label end; vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end); __ Clz(out, in_reg_hi); - __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* far_target */ false); + __ CompareAndBranchIfNonZero(in_reg_hi, final_label, /* is_far_target= */ false); __ Clz(out, in_reg_lo); __ Add(out, out, 32); if (end.IsReferenced()) { @@ -398,7 +398,7 @@ static void GenNumberOfTrailingZeros(HInvoke* invoke, vixl32::Label* final_label = codegen->GetFinalLabel(invoke, &end); __ Rbit(out, in_reg_lo); __ 
Clz(out, out); - __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* far_target */ false); + __ CompareAndBranchIfNonZero(in_reg_lo, final_label, /* is_far_target= */ false); __ Rbit(out, in_reg_hi); __ Clz(out, out); __ Add(out, out, 32); @@ -476,7 +476,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathRoundFloat(HInvoke* invoke) { // For positive, zero or NaN inputs, rounding is done. __ Cmp(out_reg, 0); - __ B(ge, final_label, /* far_target */ false); + __ B(ge, final_label, /* is_far_target= */ false); // Handle input < 0 cases. // If input is negative but not a tie, previous result (round to nearest) is valid. @@ -642,7 +642,7 @@ static void GenUnsafeGet(HInvoke* invoke, __ Add(RegisterFrom(temp), base, Operand(offset)); MemOperand src(RegisterFrom(temp), 0); codegen->GenerateFieldLoadWithBakerReadBarrier( - invoke, trg_loc, base, src, /* needs_null_check */ false); + invoke, trg_loc, base, src, /* needs_null_check= */ false); if (is_volatile) { __ Dmb(vixl32::ISH); } @@ -733,22 +733,22 @@ void IntrinsicLocationsBuilderARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* inv } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_); } static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, @@ -778,39 +778,39 @@ static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePut(HInvoke* invoke) { CreateIntIntIntIntToVoid( - allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoid( - allocator_, features_, DataType::Type::kInt32, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoid( - allocator_, features_, DataType::Type::kInt32, /* is_volatile */ true, invoke); + allocator_, features_, DataType::Type::kInt32, /* is_volatile= */ true, invoke); } 
void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObject(HInvoke* invoke) { CreateIntIntIntIntToVoid( - allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoid( - allocator_, features_, DataType::Type::kReference, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kReference, /* is_volatile= */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoid( - allocator_, features_, DataType::Type::kReference, /* is_volatile */ true, invoke); + allocator_, features_, DataType::Type::kReference, /* is_volatile= */ true, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLong(HInvoke* invoke) { CreateIntIntIntIntToVoid( - allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoid( - allocator_, features_, DataType::Type::kInt64, /* is_volatile */ false, invoke); + allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ false, invoke); } void IntrinsicLocationsBuilderARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoid( - allocator_, features_, DataType::Type::kInt64, /* is_volatile */ true, invoke); + allocator_, features_, DataType::Type::kInt64, /* is_volatile= */ true, invoke); } static void GenUnsafePut(LocationSummary* locations, @@ -844,7 +844,7 @@ static void GenUnsafePut(LocationSummary* locations, __ Ldrexd(temp_lo, temp_hi, MemOperand(temp_reg)); __ Strexd(temp_lo, value_lo, value_hi, MemOperand(temp_reg)); __ Cmp(temp_lo, 0); - __ B(ne, &loop_head, /* far_target */ false); + __ B(ne, &loop_head, /* is_far_target= */ false); } else { __ Strd(value_lo, value_hi, MemOperand(base, offset)); } @@ -875,64 +875,64 @@ static void GenUnsafePut(LocationSummary* locations, void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePut(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObject(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kReference, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kReference, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, codegen_); } void 
IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutObjectVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kReference, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLong(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, codegen_); } void IntrinsicCodeGeneratorARMVIXL::VisitUnsafePutLongVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, codegen_); } @@ -1026,7 +1026,7 @@ class BakerReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL { __ Strex(tmp, value, MemOperand(tmp_ptr)); assembler->MaybeUnpoisonHeapReference(value); __ Cmp(tmp, 0); - __ B(ne, &loop_head, /* far_target */ false); + __ B(ne, &loop_head, /* is_far_target= */ false); __ B(GetExitLabel()); } }; @@ -1102,7 +1102,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c assembler->MaybeUnpoisonHeapReference(value); } __ Cmp(tmp, 0); - __ B(ne, &loop_head, /* far_target */ false); + __ B(ne, &loop_head, /* is_far_target= */ false); __ Bind(loop_exit); @@ -1113,7 +1113,7 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMVIXL* c __ Lsr(out, out, WhichPowerOf2(out.GetSizeInBits())); if (type == DataType::Type::kReference) { - codegen->MaybeGenerateMarkingRegisterCheck(/* code */ 128); + codegen->MaybeGenerateMarkingRegisterCheck(/* code= */ 128); } } @@ -1308,23 +1308,23 @@ static void GenerateStringCompareToLoop(ArmVIXLAssembler* assembler, __ Ldr(temp_reg, MemOperand(str, temp1)); __ Ldr(temp2, MemOperand(arg, temp1)); __ Cmp(temp_reg, temp2); - __ B(ne, &find_char_diff, /* far_target */ false); + __ B(ne, &find_char_diff, /* is_far_target= */ false); __ Add(temp1, temp1, char_size * 2); __ Ldr(temp_reg, MemOperand(str, temp1)); __ Ldr(temp2, MemOperand(arg, temp1)); __ Cmp(temp_reg, temp2); - __ B(ne, &find_char_diff_2nd_cmp, /* far_target */ false); + __ B(ne, &find_char_diff_2nd_cmp, /* is_far_target= */ false); __ Add(temp1, temp1, char_size * 2); // With string compression, we have compared 8 bytes, otherwise 4 chars. __ Subs(temp0, temp0, (mirror::kUseStringCompression ? 8 : 4)); - __ B(hi, &loop, /* far_target */ false); + __ B(hi, &loop, /* is_far_target= */ false); __ B(end); __ Bind(&find_char_diff_2nd_cmp); if (mirror::kUseStringCompression) { __ Subs(temp0, temp0, 4); // 4 bytes previously compared. - __ B(ls, end, /* far_target */ false); // Was the second comparison fully beyond the end? + __ B(ls, end, /* is_far_target= */ false); // Was the second comparison fully beyond the end? } else { // Without string compression, we can start treating temp0 as signed // and rely on the signed comparison below. @@ -1352,7 +1352,7 @@ static void GenerateStringCompareToLoop(ArmVIXLAssembler* assembler, // the remaining string data, so just return length diff (out). // The comparison is unsigned for string compression, otherwise signed. 
__ Cmp(temp0, Operand(temp1, vixl32::LSR, (mirror::kUseStringCompression ? 3 : 4))); - __ B((mirror::kUseStringCompression ? ls : le), end, /* far_target */ false); + __ B((mirror::kUseStringCompression ? ls : le), end, /* is_far_target= */ false); // Extract the characters and calculate the difference. if (mirror::kUseStringCompression) { @@ -1419,9 +1419,9 @@ static void GenerateStringCompareToLoop(ArmVIXLAssembler* assembler, __ Ldrb(temp_reg, MemOperand(temp1, c_char_size, PostIndex)); __ Ldrh(temp3, MemOperand(temp2, char_size, PostIndex)); __ Cmp(temp_reg, temp3); - __ B(ne, &different_compression_diff, /* far_target */ false); + __ B(ne, &different_compression_diff, /* is_far_target= */ false); __ Subs(temp0, temp0, 2); - __ B(hi, &different_compression_loop, /* far_target */ false); + __ B(hi, &different_compression_loop, /* is_far_target= */ false); __ B(end); // Calculate the difference. @@ -1517,12 +1517,12 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) { StringEqualsOptimizations optimizations(invoke); if (!optimizations.GetArgumentNotNull()) { // Check if input is null, return false if it is. - __ CompareAndBranchIfZero(arg, &return_false, /* far_target */ false); + __ CompareAndBranchIfZero(arg, &return_false, /* is_far_target= */ false); } // Reference equality check, return true if same reference. __ Cmp(str, arg); - __ B(eq, &return_true, /* far_target */ false); + __ B(eq, &return_true, /* is_far_target= */ false); if (!optimizations.GetArgumentIsString()) { // Instanceof check for the argument by comparing class fields. @@ -1532,7 +1532,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) { __ Ldr(temp, MemOperand(str, class_offset)); __ Ldr(out, MemOperand(arg, class_offset)); __ Cmp(temp, out); - __ B(ne, &return_false, /* far_target */ false); + __ B(ne, &return_false, /* is_far_target= */ false); } // Check if one of the inputs is a const string. Do not special-case both strings @@ -1555,7 +1555,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) { // Also compares the compression style, if differs return false. __ Ldr(temp, MemOperand(arg, count_offset)); __ Cmp(temp, Operand(mirror::String::GetFlaggedCount(const_string_length, is_compressed))); - __ B(ne, &return_false, /* far_target */ false); + __ B(ne, &return_false, /* is_far_target= */ false); } else { // Load `count` fields of this and argument strings. __ Ldr(temp, MemOperand(str, count_offset)); @@ -1563,7 +1563,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) { // Check if `count` fields are equal, return false if they're not. // Also compares the compression style, if differs return false. __ Cmp(temp, out); - __ B(ne, &return_false, /* far_target */ false); + __ B(ne, &return_false, /* is_far_target= */ false); } // Assertions that must hold in order to compare strings 4 bytes at a time. 
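The hunks above and below all make the same mechanical change: the block comment on a boolean or otherwise non-obvious argument gains a trailing '=', so that it spells the exact name of the parameter being passed (for example '/* is_volatile= */ true' rather than '/* is_volatile */ true'). The following is a minimal, self-contained sketch of that convention only; CopyRegion is a hypothetical stand-in and not an ART or VIXL function.

#include <iostream>

// Hypothetical helper standing in for functions like GenUnsafePut(); the
// parameter names below are what the argument comments are checked against.
void CopyRegion(int src_pos, int dest_pos, bool is_volatile, bool is_ordered) {
  std::cout << src_pos << ' ' << dest_pos << ' '
            << is_volatile << ' ' << is_ordered << '\n';
}

int main() {
  // Old spelling: a free-text comment, nothing ties it to the declaration.
  CopyRegion(0, 16, /* is_volatile */ false, /* is_ordered */ true);

  // New spelling: '/* name= */' names the parameter exactly, so a
  // signature-aware checker (or a reader with the header open) can verify
  // that the comment still matches the callee's parameter list.
  CopyRegion(0, 16, /* is_volatile= */ false, /* is_ordered= */ true);
  return 0;
}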
@@ -1586,9 +1586,9 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) { __ Ldrd(temp, temp1, MemOperand(str, offset)); __ Ldrd(temp2, out, MemOperand(arg, offset)); __ Cmp(temp, temp2); - __ B(ne, &return_false, /* far_label */ false); + __ B(ne, &return_false, /* is_far_target= */ false); __ Cmp(temp1, out); - __ B(ne, &return_false, /* far_label */ false); + __ B(ne, &return_false, /* is_far_target= */ false); offset += 2u * sizeof(uint32_t); remaining_bytes -= 2u * sizeof(uint32_t); } @@ -1596,13 +1596,13 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) { __ Ldr(temp, MemOperand(str, offset)); __ Ldr(out, MemOperand(arg, offset)); __ Cmp(temp, out); - __ B(ne, &return_false, /* far_label */ false); + __ B(ne, &return_false, /* is_far_target= */ false); } } else { // Return true if both strings are empty. Even with string compression `count == 0` means empty. static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u, "Expecting 0=compressed, 1=uncompressed"); - __ CompareAndBranchIfZero(temp, &return_true, /* far_target */ false); + __ CompareAndBranchIfZero(temp, &return_true, /* is_far_target= */ false); if (mirror::kUseStringCompression) { // For string compression, calculate the number of bytes to compare (not chars). @@ -1628,10 +1628,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringEquals(HInvoke* invoke) { __ Ldr(temp2, MemOperand(arg, temp1)); __ Add(temp1, temp1, Operand::From(sizeof(uint32_t))); __ Cmp(out, temp2); - __ B(ne, &return_false, /* far_target */ false); + __ B(ne, &return_false, /* is_far_target= */ false); // With string compression, we have compared 4 bytes, otherwise 2 chars. __ Subs(temp, temp, mirror::kUseStringCompression ? 4 : 2); - __ B(hi, &loop, /* far_target */ false); + __ B(hi, &loop, /* is_far_target= */ false); } // Return true and exit the function. 
@@ -1712,7 +1712,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOf(HInvoke* invoke) { } void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOf(HInvoke* invoke) { - GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true); + GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true); } void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) { @@ -1728,7 +1728,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) } void IntrinsicCodeGeneratorARMVIXL::VisitStringIndexOfAfter(HInvoke* invoke) { - GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false); + GenerateVisitStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false); } void IntrinsicLocationsBuilderARMVIXL::VisitStringNewStringFromBytes(HInvoke* invoke) { @@ -1950,7 +1950,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { } else { if (!optimizations.GetDestinationIsSource()) { __ Cmp(src, dest); - __ B(ne, &conditions_on_positions_validated, /* far_target */ false); + __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false); } __ Cmp(RegisterFrom(dest_pos), src_pos_constant); __ B(gt, intrinsic_slow_path->GetEntryLabel()); @@ -1958,7 +1958,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { } else { if (!optimizations.GetDestinationIsSource()) { __ Cmp(src, dest); - __ B(ne, &conditions_on_positions_validated, /* far_target */ false); + __ B(ne, &conditions_on_positions_validated, /* is_far_target= */ false); } if (dest_pos.IsConstant()) { int32_t dest_pos_constant = Int32ConstantFrom(dest_pos); @@ -2018,11 +2018,11 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { if (!optimizations.GetSourceIsNonPrimitiveArray()) { // /* HeapReference<Class> */ temp1 = src->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false); + invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false); // Bail out if the source is not a non primitive array. // /* HeapReference<Class> */ temp1 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false); + invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false); __ CompareAndBranchIfZero(temp1, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `temp1` has been unpoisoned // by the the previous call to GenerateFieldLoadWithBakerReadBarrier. @@ -2034,7 +2034,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { // /* HeapReference<Class> */ temp1 = dest->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check */ false); + invoke, temp1_loc, dest, class_offset, temp2_loc, /* needs_null_check= */ false); if (!optimizations.GetDestinationIsNonPrimitiveArray()) { // Bail out if the destination is not a non primitive array. @@ -2046,7 +2046,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { // temporaries such a `temp1`. 
// /* HeapReference<Class> */ temp2 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check */ false); + invoke, temp2_loc, temp1, component_offset, temp3_loc, /* needs_null_check= */ false); __ CompareAndBranchIfZero(temp2, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `temp2` has been unpoisoned // by the the previous call to GenerateFieldLoadWithBakerReadBarrier. @@ -2060,16 +2060,16 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below. // /* HeapReference<Class> */ temp2 = src->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check */ false); + invoke, temp2_loc, src, class_offset, temp3_loc, /* needs_null_check= */ false); // Note: if heap poisoning is on, we are comparing two unpoisoned references here. __ Cmp(temp1, temp2); if (optimizations.GetDestinationIsTypedObjectArray()) { vixl32::Label do_copy; - __ B(eq, &do_copy, /* far_target */ false); + __ B(eq, &do_copy, /* is_far_target= */ false); // /* HeapReference<Class> */ temp1 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false); + invoke, temp1_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false); // /* HeapReference<Class> */ temp1 = temp1->super_class_ // We do not need to emit a read barrier for the following // heap reference load, as `temp1` is only used in a @@ -2126,7 +2126,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { if (optimizations.GetDestinationIsTypedObjectArray()) { vixl32::Label do_copy; - __ B(eq, &do_copy, /* far_target */ false); + __ B(eq, &do_copy, /* is_far_target= */ false); if (!did_unpoison) { assembler->MaybeUnpoisonHeapReference(temp1); } @@ -2148,10 +2148,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // /* HeapReference<Class> */ temp1 = src->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check */ false); + invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false); // /* HeapReference<Class> */ temp3 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check */ false); + invoke, temp3_loc, temp1, component_offset, temp2_loc, /* needs_null_check= */ false); __ CompareAndBranchIfZero(temp3, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `temp3` has been unpoisoned // by the the previous call to GenerateFieldLoadWithBakerReadBarrier. @@ -2179,7 +2179,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { if (length.IsRegister()) { // Don't enter the copy loop if the length is null. 
- __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target */ false); + __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target= */ false); } if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { @@ -2256,7 +2256,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex)); } __ Cmp(temp1, temp3); - __ B(ne, &loop, /* far_target */ false); + __ B(ne, &loop, /* is_far_target= */ false); __ Bind(read_barrier_slow_path->GetExitLabel()); } else { @@ -2278,13 +2278,13 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex)); } __ Cmp(temp1, temp3); - __ B(ne, &loop, /* far_target */ false); + __ B(ne, &loop, /* is_far_target= */ false); } __ Bind(&done); } // We only need one card marking on the destination array. - codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* value_can_be_null */ false); + codegen_->MarkGCCard(temp1, temp2, dest, NoReg, /* can_be_null= */ false); __ Bind(intrinsic_slow_path->GetExitLabel()); } @@ -2814,7 +2814,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) __ Subs(num_chr, srcEnd, srcBegin); // Early out for valid zero-length retrievals. - __ B(eq, final_label, /* far_target */ false); + __ B(eq, final_label, /* is_far_target= */ false); // src range to copy. __ Add(src_ptr, srcObj, value_offset); @@ -2830,7 +2830,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) __ Ldr(temp, MemOperand(srcObj, count_offset)); __ Tst(temp, 1); temps.Release(temp); - __ B(eq, &compressed_string_preloop, /* far_target */ false); + __ B(eq, &compressed_string_preloop, /* is_far_target= */ false); } __ Add(src_ptr, src_ptr, Operand(srcBegin, vixl32::LSL, 1)); @@ -2840,7 +2840,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) temp = temps.Acquire(); // Save repairing the value of num_chr on the < 4 character path. __ Subs(temp, num_chr, 4); - __ B(lt, &remainder, /* far_target */ false); + __ B(lt, &remainder, /* is_far_target= */ false); // Keep the result of the earlier subs, we are going to fetch at least 4 characters. __ Mov(num_chr, temp); @@ -2855,10 +2855,10 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) __ Ldr(temp, MemOperand(src_ptr, char_size * 4, PostIndex)); __ Str(temp, MemOperand(dst_ptr, char_size * 4, PostIndex)); temps.Release(temp); - __ B(ge, &loop, /* far_target */ false); + __ B(ge, &loop, /* is_far_target= */ false); __ Adds(num_chr, num_chr, 4); - __ B(eq, final_label, /* far_target */ false); + __ B(eq, final_label, /* is_far_target= */ false); // Main loop for < 4 character case and remainder handling. Loads and stores one // 16-bit Java character at a time. 
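The ARM VIXL hunks above also rename the comment on the branch helpers from '/* far_target */' (and in one spot '/* far_label */') to '/* is_far_target= */', so that the '=' form names the actual parameter. A short sketch of why the stale spelling matters once the '=' is added; Branch here is a hypothetical stand-in for the B()/CompareAndBranchIfZero() helpers, not the real VIXL API.

#include <cstdio>

// Hypothetical branch helper; the last parameter is named is_far_target.
void Branch(const char* label, bool is_far_target) {
  std::printf("B %s (far=%d)\n", label, is_far_target);
}

int main() {
  // Matches the declared parameter name: nothing for a checker to flag.
  Branch("loop", /* is_far_target= */ false);

  // By contrast, '/* far_target= */ false' would not match the parameter
  // name 'is_far_target'; that drift is what the '=' suffix makes
  // detectable, and what the renamed comments above avoid.
  return 0;
}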
@@ -2868,7 +2868,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) __ Subs(num_chr, num_chr, 1); __ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex)); temps.Release(temp); - __ B(gt, &remainder, /* far_target */ false); + __ B(gt, &remainder, /* is_far_target= */ false); if (mirror::kUseStringCompression) { __ B(final_label); @@ -2884,7 +2884,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringGetCharsNoCheck(HInvoke* invoke) __ Strh(temp, MemOperand(dst_ptr, char_size, PostIndex)); temps.Release(temp); __ Subs(num_chr, num_chr, 1); - __ B(gt, &compressed_string_loop, /* far_target */ false); + __ B(gt, &compressed_string_loop, /* is_far_target= */ false); } if (done.IsReferenced()) { @@ -3004,7 +3004,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitIntegerValueOf(HInvoke* invoke) { __ Add(out, in, -info.low); __ Cmp(out, info.length); vixl32::Label allocate, done; - __ B(hs, &allocate, /* is_far_target */ false); + __ B(hs, &allocate, /* is_far_target= */ false); // If the value is within the bounds, load the j.l.Integer directly from the array. codegen_->LoadBootImageAddress(temp, info.array_data_boot_image_reference); codegen_->LoadFromShiftedRegOffset(DataType::Type::kReference, locations->Out(), temp, out); @@ -3037,7 +3037,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitThreadInterrupted(HInvoke* invoke) { vixl32::Register temp = temps.Acquire(); vixl32::Label done; vixl32::Label* const final_label = codegen_->GetFinalLabel(invoke, &done); - __ CompareAndBranchIfZero(out, final_label, /* far_target */ false); + __ CompareAndBranchIfZero(out, final_label, /* is_far_target= */ false); __ Dmb(vixl32::ISH); __ Mov(temp, 0); assembler->StoreToOffset(kStoreWord, temp, tr, offset); diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc index 8092a1c030..5b359741d9 100644 --- a/compiler/optimizing/intrinsics_mips.cc +++ b/compiler/optimizing/intrinsics_mips.cc @@ -185,7 +185,7 @@ void IntrinsicLocationsBuilderMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invo } void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } // int java.lang.Float.floatToRawIntBits(float) @@ -194,7 +194,7 @@ void IntrinsicLocationsBuilderMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) } void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { @@ -226,7 +226,7 @@ void IntrinsicLocationsBuilderMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) } void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } // float java.lang.Float.intBitsToFloat(int) @@ -235,7 +235,7 @@ void IntrinsicLocationsBuilderMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } static void CreateIntToIntLocations(ArenaAllocator* allocator, @@ 
-411,7 +411,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) { DataType::Type::kInt32, IsR2OrNewer(), IsR6(), - /* reverseBits */ false, + /* reverseBits= */ false, GetAssembler()); } @@ -425,7 +425,7 @@ void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) { DataType::Type::kInt64, IsR2OrNewer(), IsR6(), - /* reverseBits */ false, + /* reverseBits= */ false, GetAssembler()); } @@ -439,7 +439,7 @@ void IntrinsicCodeGeneratorMIPS::VisitShortReverseBytes(HInvoke* invoke) { DataType::Type::kInt16, IsR2OrNewer(), IsR6(), - /* reverseBits */ false, + /* reverseBits= */ false, GetAssembler()); } @@ -479,7 +479,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* in } void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler()); + GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler()); } // int java.lang.Long.numberOfLeadingZeros(long i) @@ -488,7 +488,7 @@ void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invok } void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler()); + GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler()); } static void GenNumberOfTrailingZeroes(LocationSummary* locations, @@ -566,7 +566,7 @@ void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* i } void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, IsR6(), GetAssembler()); + GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler()); } // int java.lang.Long.numberOfTrailingZeros(long i) @@ -575,7 +575,7 @@ void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invo } void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, IsR6(), GetAssembler()); + GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler()); } // int java.lang.Integer.reverse(int) @@ -588,7 +588,7 @@ void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) { DataType::Type::kInt32, IsR2OrNewer(), IsR6(), - /* reverseBits */ true, + /* reverseBits= */ true, GetAssembler()); } @@ -602,7 +602,7 @@ void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) { DataType::Type::kInt64, IsR2OrNewer(), IsR6(), - /* reverseBits */ true, + /* reverseBits= */ true, GetAssembler()); } @@ -1055,11 +1055,11 @@ static void GenUnsafeGet(HInvoke* invoke, codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke, trg_loc, base, - /* offset */ 0U, - /* index */ offset_loc, + /* offset= */ 0U, + /* index= */ offset_loc, TIMES_1, temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); if (is_volatile) { __ Sync(0); } @@ -1077,8 +1077,8 @@ static void GenUnsafeGet(HInvoke* invoke, trg_loc, trg_loc, base_loc, - /* offset */ 0U, - /* index */ offset_loc); + /* offset= */ 0U, + /* index= */ offset_loc); } } else { if (is_R6) { @@ -1107,7 +1107,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, 
DataType::Type::kInt32, /* is_volatile */ false, IsR6(), codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, IsR6(), codegen_); } // int sun.misc.Unsafe.getIntVolatile(Object o, long offset) @@ -1116,7 +1116,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, IsR6(), codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, IsR6(), codegen_); } // long sun.misc.Unsafe.getLong(Object o, long offset) @@ -1125,7 +1125,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, IsR6(), codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, IsR6(), codegen_); } // Object sun.misc.Unsafe.getObject(Object o, long offset) @@ -1134,7 +1134,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, IsR6(), codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, IsR6(), codegen_); } // Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset) @@ -1143,7 +1143,7 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke } void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, IsR6(), codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, IsR6(), codegen_); } static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) { @@ -1225,8 +1225,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, IsR6(), codegen_); } @@ -1239,8 +1239,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, IsR6(), codegen_); } @@ -1253,8 +1253,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, IsR6(), codegen_); } @@ -1267,8 +1267,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kReference, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, IsR6(), codegen_); } @@ -1281,8 +1281,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) void 
IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kReference, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, IsR6(), codegen_); } @@ -1295,8 +1295,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kReference, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, IsR6(), codegen_); } @@ -1309,8 +1309,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, IsR6(), codegen_); } @@ -1323,8 +1323,8 @@ void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, IsR6(), codegen_); } @@ -1388,12 +1388,12 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS* code invoke, out_loc, // Unused, used only as a "temporary" within the read barrier. base, - /* offset */ 0u, - /* index */ offset_loc, + /* offset= */ 0u, + /* index= */ offset_loc, ScaleFactor::TIMES_1, temp, - /* needs_null_check */ false, - /* always_update_field */ true); + /* needs_null_check= */ false, + /* always_update_field= */ true); } } @@ -1706,7 +1706,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) { - GenerateStringIndexOf(invoke, /* start_at_zero */ true, GetAssembler(), codegen_); + GenerateStringIndexOf(invoke, /* start_at_zero= */ true, GetAssembler(), codegen_); } // int java.lang.String.indexOf(int ch, int fromIndex) @@ -1727,7 +1727,7 @@ void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) { - GenerateStringIndexOf(invoke, /* start_at_zero */ false, GetAssembler(), codegen_); + GenerateStringIndexOf(invoke, /* start_at_zero= */ false, GetAssembler(), codegen_); } // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount) diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc index f5577c3efc..afaa4ca4c8 100644 --- a/compiler/optimizing/intrinsics_mips64.cc +++ b/compiler/optimizing/intrinsics_mips64.cc @@ -169,7 +169,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* in } void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } // int java.lang.Float.floatToRawIntBits(float) @@ -178,7 +178,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invok } void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + 
MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) { @@ -205,7 +205,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invok } void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } // float java.lang.Float.intBitsToFloat(int) @@ -214,7 +214,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) } void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { @@ -295,7 +295,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* } void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } // int java.lang.Long.numberOfLeadingZeros(long i) @@ -304,7 +304,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* inv } void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } static void GenNumberOfTrailingZeroes(LocationSummary* locations, @@ -332,7 +332,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* } void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } // int java.lang.Long.numberOfTrailingZeros(long i) @@ -341,7 +341,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* in } void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } static void GenReverse(LocationSummary* locations, @@ -911,11 +911,11 @@ static void GenUnsafeGet(HInvoke* invoke, codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke, trg_loc, base, - /* offset */ 0U, - /* index */ offset_loc, + /* offset= */ 0U, + /* index= */ offset_loc, TIMES_1, temp, - /* needs_null_check */ false); + /* needs_null_check= */ false); if (is_volatile) { __ Sync(0); } @@ -928,8 +928,8 @@ static void GenUnsafeGet(HInvoke* invoke, trg_loc, trg_loc, base_loc, - /* offset */ 0U, - /* index */ offset_loc); + /* offset= */ 0U, + /* index= */ offset_loc); } } else { __ Lwu(trg, TMP, 0); @@ -952,7 +952,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_); } // 
int sun.misc.Unsafe.getIntVolatile(Object o, long offset) @@ -961,7 +961,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_); } // long sun.misc.Unsafe.getLong(Object o, long offset) @@ -970,7 +970,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_); } // long sun.misc.Unsafe.getLongVolatile(Object o, long offset) @@ -979,7 +979,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_); } // Object sun.misc.Unsafe.getObject(Object o, long offset) @@ -988,7 +988,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_); } // Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset) @@ -997,7 +997,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invo } void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_); } static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) { @@ -1067,8 +1067,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, codegen_); } @@ -1080,8 +1080,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, codegen_); } @@ -1093,8 +1093,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, codegen_); } @@ -1106,8 +1106,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kReference, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, codegen_); 
} @@ -1119,8 +1119,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invok void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kReference, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, codegen_); } @@ -1132,8 +1132,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invo void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kReference, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, codegen_); } @@ -1145,8 +1145,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) { void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, - /* is_volatile */ false, - /* is_ordered */ false, + /* is_volatile= */ false, + /* is_ordered= */ false, codegen_); } @@ -1158,8 +1158,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, - /* is_volatile */ false, - /* is_ordered */ true, + /* is_volatile= */ false, + /* is_ordered= */ true, codegen_); } @@ -1171,8 +1171,8 @@ void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) { GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, - /* is_volatile */ true, - /* is_ordered */ false, + /* is_volatile= */ true, + /* is_ordered= */ false, codegen_); } @@ -1234,12 +1234,12 @@ static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS64* co invoke, out_loc, // Unused, used only as a "temporary" within the read barrier. 
base, - /* offset */ 0u, - /* index */ offset_loc, + /* offset= */ 0u, + /* index= */ offset_loc, ScaleFactor::TIMES_1, temp, - /* needs_null_check */ false, - /* always_update_field */ true); + /* needs_null_check= */ false, + /* always_update_field= */ true); } } @@ -1548,7 +1548,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) { - GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true); + GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true); } // int java.lang.String.indexOf(int ch, int fromIndex) @@ -1566,7 +1566,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) { - GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false); + GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false); } // java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount) @@ -1667,7 +1667,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitFloatIsInfinite(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) { - GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } // boolean java.lang.Double.isInfinite(double) @@ -1676,7 +1676,7 @@ void IntrinsicLocationsBuilderMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) { } void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) { - GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } // void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin) diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index 5ad94697e8..8747f061d3 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -223,31 +223,31 @@ static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86Assembler* } void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - CreateFPToIntLocations(allocator_, invoke, /* is64bit */ true); + CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ true); } void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - CreateIntToFPLocations(allocator_, invoke, /* is64bit */ true); + CreateIntToFPLocations(allocator_, invoke, /* is64bit= */ true); } void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - CreateFPToIntLocations(allocator_, invoke, /* is64bit */ false); + CreateFPToIntLocations(allocator_, invoke, /* is64bit= */ false); } void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) { - CreateIntToFPLocations(allocator_, invoke, /* is64bit */ false); + CreateIntToFPLocations(allocator_, 
invoke, /* is64bit= */ false); } void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { @@ -1317,19 +1317,19 @@ static void GenerateStringIndexOf(HInvoke* invoke, } void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) { - CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true); + CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true); } void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) { - GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true); + GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true); } void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) { - CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false); + CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false); } void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) { - GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false); + GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false); } void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) { @@ -1691,7 +1691,7 @@ static void GenUnsafeGet(HInvoke* invoke, if (kUseBakerReadBarrier) { Address src(base, offset, ScaleFactor::TIMES_1, 0); codegen->GenerateReferenceLoadWithBakerReadBarrier( - invoke, output_loc, base, src, /* needs_null_check */ false); + invoke, output_loc, base, src, /* needs_null_check= */ false); } else { __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0)); codegen->GenerateReadBarrierSlow( @@ -1762,45 +1762,45 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) { CreateIntIntIntToIntLocations( - allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ false); + allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile */ true); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /* is_volatile= */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) { CreateIntIntIntToIntLocations( - allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ false); + allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile */ true); + CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /* is_volatile= */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) { CreateIntIntIntToIntLocations( - allocator_, invoke, DataType::Type::kReference, /* is_volatile */ false); + allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ 
false); } void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { CreateIntIntIntToIntLocations( - allocator_, invoke, DataType::Type::kReference, /* is_volatile */ true); + allocator_, invoke, DataType::Type::kReference, /* is_volatile= */ true); } void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_); } @@ -1827,39 +1827,39 @@ static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* allocator void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - allocator_, DataType::Type::kInt32, invoke, /* is_volatile */ true); + allocator_, DataType::Type::kInt32, invoke, /* is_volatile= */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - allocator_, DataType::Type::kReference, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - allocator_, DataType::Type::kReference, invoke, /* is_volatile */ true); + allocator_, DataType::Type::kReference, invoke, /* is_volatile= */ true); } void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) { 
CreateIntIntIntIntToVoidPlusTempsLocations( - allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ false); + allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ false); } void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) { CreateIntIntIntIntToVoidPlusTempsLocations( - allocator_, DataType::Type::kInt64, invoke, /* is_volatile */ true); + allocator_, DataType::Type::kInt64, invoke, /* is_volatile= */ true); } // We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86 @@ -1911,34 +1911,34 @@ static void GenUnsafePut(LocationSummary* locations, } void IntrinsicCodeGeneratorX86::VisitUnsafePut(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutOrdered(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutVolatile(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutObject(HInvoke* invoke) { GenUnsafePut( - invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_); + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) { GenUnsafePut( - invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_); + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) { GenUnsafePut( - invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_); + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutLong(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutLongOrdered(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_); } static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator, @@ -2035,8 +2035,8 @@ static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codeg temp1_loc, // 
Unused, used only as a "temporary" within the read barrier. base, field_addr, - /* needs_null_check */ false, - /* always_update_field */ true, + /* needs_null_check= */ false, + /* always_update_field= */ true, &temp2); } @@ -2267,19 +2267,19 @@ static void GenBitCount(X86Assembler* assembler, } void IntrinsicLocationsBuilderX86::VisitIntegerBitCount(HInvoke* invoke) { - CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ false); + CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ false); } void IntrinsicCodeGeneratorX86::VisitIntegerBitCount(HInvoke* invoke) { - GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false); + GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false); } void IntrinsicLocationsBuilderX86::VisitLongBitCount(HInvoke* invoke) { - CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long */ true); + CreateBitCountLocations(allocator_, codegen_, invoke, /* is_long= */ true); } void IntrinsicCodeGeneratorX86::VisitLongBitCount(HInvoke* invoke) { - GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true); + GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true); } static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) { @@ -2371,19 +2371,19 @@ static void GenLeadingZeros(X86Assembler* assembler, } void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ false); + CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ false); } void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false); + GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false); } void IntrinsicLocationsBuilderX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - CreateLeadingZeroLocations(allocator_, invoke, /* is_long */ true); + CreateLeadingZeroLocations(allocator_, invoke, /* is_long= */ true); } void IntrinsicCodeGeneratorX86::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true); + GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true); } static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_long) { @@ -2462,19 +2462,19 @@ static void GenTrailingZeros(X86Assembler* assembler, } void IntrinsicLocationsBuilderX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ false); + CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ false); } void IntrinsicCodeGeneratorX86::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false); + GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false); } void IntrinsicLocationsBuilderX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - CreateTrailingZeroLocations(allocator_, invoke, /* is_long */ true); + CreateTrailingZeroLocations(allocator_, invoke, /* is_long= */ true); } void IntrinsicCodeGeneratorX86::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true); + GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true); } static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) { @@ -2682,11 +2682,11 @@ void 
IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // /* HeapReference<Class> */ temp1 = src->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, src, class_offset, /* needs_null_check */ false); + invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false); // Bail out if the source is not a non primitive array. // /* HeapReference<Class> */ temp1 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false); + invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false); __ testl(temp1, temp1); __ j(kEqual, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `temp1` has been unpoisoned @@ -2719,7 +2719,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { // /* HeapReference<Class> */ temp1 = dest->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false); + invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false); if (!optimizations.GetDestinationIsNonPrimitiveArray()) { // Bail out if the destination is not a non primitive array. @@ -2731,7 +2731,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { // temporaries such a `temp1`. // /* HeapReference<Class> */ temp2 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp2_loc, temp1, component_offset, /* needs_null_check */ false); + invoke, temp2_loc, temp1, component_offset, /* needs_null_check= */ false); __ testl(temp2, temp2); __ j(kEqual, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `temp2` has been unpoisoned @@ -2744,7 +2744,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below. // /* HeapReference<Class> */ temp2 = src->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp2_loc, src, class_offset, /* needs_null_check */ false); + invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false); // Note: if heap poisoning is on, we are comparing two unpoisoned references here. 
__ cmpl(temp1, temp2); @@ -2753,7 +2753,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { __ j(kEqual, &do_copy); // /* HeapReference<Class> */ temp1 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false); + invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false); // We do not need to emit a read barrier for the following // heap reference load, as `temp1` is only used in a // comparison with null below, and this reference is not @@ -2807,10 +2807,10 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // /* HeapReference<Class> */ temp1 = src->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, src, class_offset, /* needs_null_check */ false); + invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false); // /* HeapReference<Class> */ temp1 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false); + invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false); __ testl(temp1, temp1); __ j(kEqual, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `temp1` has been unpoisoned @@ -2943,7 +2943,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { } // We only need one card marking on the destination array. - codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null */ false); + codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null= */ false); __ Bind(intrinsic_slow_path->GetExitLabel()); } diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index 62ccd49adf..167c1d8632 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -162,10 +162,10 @@ void IntrinsicLocationsBuilderX86_64::VisitDoubleLongBitsToDouble(HInvoke* invok } void IntrinsicCodeGeneratorX86_64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } void IntrinsicCodeGeneratorX86_64::VisitDoubleLongBitsToDouble(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ true, GetAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler()); } void IntrinsicLocationsBuilderX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { @@ -176,10 +176,10 @@ void IntrinsicLocationsBuilderX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) } void IntrinsicCodeGeneratorX86_64::VisitFloatFloatToRawIntBits(HInvoke* invoke) { - MoveFPToInt(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } void IntrinsicCodeGeneratorX86_64::VisitFloatIntBitsToFloat(HInvoke* invoke) { - MoveIntToFP(invoke->GetLocations(), /* is64bit */ false, GetAssembler()); + MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler()); } static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { @@ -430,12 +430,12 @@ void IntrinsicCodeGeneratorX86_64::VisitMathRoundDouble(HInvoke* invoke) { // direct x86 instruction, since NaN should map to 0 and large positive // values need to be clipped to the extreme value. 
codegen_->Load64BitValue(out, kPrimLongMax); - __ cvtsi2sd(t2, out, /* is64bit */ true); + __ cvtsi2sd(t2, out, /* is64bit= */ true); __ comisd(t1, t2); __ j(kAboveEqual, &done); // clipped to max (already in out), does not jump on unordered __ movl(out, Immediate(0)); // does not change flags, implicit zero extension to 64-bit __ j(kUnordered, &done); // NaN mapped to 0 (just moved in out) - __ cvttsd2si(out, t1, /* is64bit */ true); + __ cvttsd2si(out, t1, /* is64bit= */ true); __ Bind(&done); } @@ -979,7 +979,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // /* HeapReference<Class> */ temp1 = dest->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, dest, class_offset, /* needs_null_check */ false); + invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false); // Register `temp1` is not trashed by the read barrier emitted // by GenerateFieldLoadWithBakerReadBarrier below, as that // method produces a call to a ReadBarrierMarkRegX entry point, @@ -987,7 +987,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { // temporaries such a `temp1`. // /* HeapReference<Class> */ temp2 = src->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp2_loc, src, class_offset, /* needs_null_check */ false); + invoke, temp2_loc, src, class_offset, /* needs_null_check= */ false); // If heap poisoning is enabled, `temp1` and `temp2` have been // unpoisoned by the the previous calls to // GenerateFieldLoadWithBakerReadBarrier. @@ -1011,7 +1011,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // /* HeapReference<Class> */ TMP = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false); + invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false); __ testl(CpuRegister(TMP), CpuRegister(TMP)); __ j(kEqual, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `TMP` has been unpoisoned by @@ -1034,7 +1034,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below. 
// /* HeapReference<Class> */ TMP = temp2->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, TMP_loc, temp2, component_offset, /* needs_null_check */ false); + invoke, TMP_loc, temp2, component_offset, /* needs_null_check= */ false); __ testl(CpuRegister(TMP), CpuRegister(TMP)); __ j(kEqual, intrinsic_slow_path->GetEntryLabel()); // If heap poisoning is enabled, `TMP` has been unpoisoned by @@ -1058,7 +1058,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // /* HeapReference<Class> */ temp1 = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, temp1, component_offset, /* needs_null_check */ false); + invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false); // We do not need to emit a read barrier for the following // heap reference load, as `temp1` is only used in a // comparison with null below, and this reference is not @@ -1086,10 +1086,10 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // /* HeapReference<Class> */ temp1 = src->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, temp1_loc, src, class_offset, /* needs_null_check */ false); + invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false); // /* HeapReference<Class> */ TMP = temp1->component_type_ codegen_->GenerateFieldLoadWithBakerReadBarrier( - invoke, TMP_loc, temp1, component_offset, /* needs_null_check */ false); + invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false); __ testl(CpuRegister(TMP), CpuRegister(TMP)); __ j(kEqual, intrinsic_slow_path->GetEntryLabel()); } else { @@ -1198,7 +1198,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { } // We only need one card marking on the destination array. - codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null */ false); + codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null= */ false); __ Bind(intrinsic_slow_path->GetExitLabel()); } @@ -1444,7 +1444,7 @@ static void GenerateStringIndexOf(HInvoke* invoke, // Ensure we have a start index >= 0; __ xorl(counter, counter); __ cmpl(start_index, Immediate(0)); - __ cmov(kGreater, counter, start_index, /* is64bit */ false); // 32-bit copy is enough. + __ cmov(kGreater, counter, start_index, /* is64bit= */ false); // 32-bit copy is enough. 
if (mirror::kUseStringCompression) { NearLabel modify_counter, offset_uncompressed_label; @@ -1506,19 +1506,19 @@ static void GenerateStringIndexOf(HInvoke* invoke, } void IntrinsicLocationsBuilderX86_64::VisitStringIndexOf(HInvoke* invoke) { - CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ true); + CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ true); } void IntrinsicCodeGeneratorX86_64::VisitStringIndexOf(HInvoke* invoke) { - GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ true); + GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true); } void IntrinsicLocationsBuilderX86_64::VisitStringIndexOfAfter(HInvoke* invoke) { - CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero */ false); + CreateStringIndexOfLocations(invoke, allocator_, /* start_at_zero= */ false); } void IntrinsicCodeGeneratorX86_64::VisitStringIndexOfAfter(HInvoke* invoke) { - GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero */ false); + GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false); } void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) { @@ -1832,7 +1832,7 @@ void IntrinsicLocationsBuilderX86_64::VisitThreadCurrentThread(HInvoke* invoke) void IntrinsicCodeGeneratorX86_64::VisitThreadCurrentThread(HInvoke* invoke) { CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>(); GetAssembler()->gs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86_64PointerSize>(), - /* no_rip */ true)); + /* no_rip= */ true)); } static void GenUnsafeGet(HInvoke* invoke, @@ -1858,7 +1858,7 @@ static void GenUnsafeGet(HInvoke* invoke, if (kUseBakerReadBarrier) { Address src(base, offset, ScaleFactor::TIMES_1, 0); codegen->GenerateReferenceLoadWithBakerReadBarrier( - invoke, output_loc, base, src, /* needs_null_check */ false); + invoke, output_loc, base, src, /* needs_null_check= */ false); } else { __ movl(output, Address(base, offset, ScaleFactor::TIMES_1, 0)); codegen->GenerateReadBarrierSlow( @@ -1922,22 +1922,22 @@ void IntrinsicLocationsBuilderX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invo void IntrinsicCodeGeneratorX86_64::VisitUnsafeGet(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLong(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObject(HInvoke* invoke) { - GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile */ false, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - GenUnsafeGet(invoke, 
DataType::Type::kReference, /* is_volatile */ true, codegen_); + GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_); } @@ -2020,34 +2020,34 @@ static void GenUnsafePut(LocationSummary* locations, DataType::Type type, bool i } void IntrinsicCodeGeneratorX86_64::VisitUnsafePut(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutOrdered(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutVolatile(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile */ true, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt32, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObject(HInvoke* invoke) { GenUnsafePut( - invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_); + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectOrdered(HInvoke* invoke) { GenUnsafePut( - invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ false, codegen_); + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutObjectVolatile(HInvoke* invoke) { GenUnsafePut( - invoke->GetLocations(), DataType::Type::kReference, /* is_volatile */ true, codegen_); + invoke->GetLocations(), DataType::Type::kReference, /* is_volatile= */ true, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLong(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongOrdered(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ false, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ false, codegen_); } void IntrinsicCodeGeneratorX86_64::VisitUnsafePutLongVolatile(HInvoke* invoke) { - GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile */ true, codegen_); + GenUnsafePut(invoke->GetLocations(), DataType::Type::kInt64, /* is_volatile= */ true, codegen_); } static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator, @@ -2132,8 +2132,8 @@ static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86_64* co out_loc, // Unused, used only as a "temporary" within the read barrier. 
base, field_addr, - /* needs_null_check */ false, - /* always_update_field */ true, + /* needs_null_check= */ false, + /* always_update_field= */ true, &temp1, &temp2); } @@ -2361,7 +2361,7 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerBitCount(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86_64::VisitIntegerBitCount(HInvoke* invoke) { - GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ false); + GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ false); } void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) { @@ -2369,7 +2369,7 @@ void IntrinsicLocationsBuilderX86_64::VisitLongBitCount(HInvoke* invoke) { } void IntrinsicCodeGeneratorX86_64::VisitLongBitCount(HInvoke* invoke) { - GenBitCount(GetAssembler(), codegen_, invoke, /* is_long */ true); + GenBitCount(GetAssembler(), codegen_, invoke, /* is_long= */ true); } static void CreateOneBitLocations(ArenaAllocator* allocator, HInvoke* invoke, bool is_high) { @@ -2476,35 +2476,35 @@ static void GenOneBit(X86_64Assembler* assembler, } void IntrinsicLocationsBuilderX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) { - CreateOneBitLocations(allocator_, invoke, /* is_high */ true); + CreateOneBitLocations(allocator_, invoke, /* is_high= */ true); } void IntrinsicCodeGeneratorX86_64::VisitIntegerHighestOneBit(HInvoke* invoke) { - GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ false); + GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ false); } void IntrinsicLocationsBuilderX86_64::VisitLongHighestOneBit(HInvoke* invoke) { - CreateOneBitLocations(allocator_, invoke, /* is_high */ true); + CreateOneBitLocations(allocator_, invoke, /* is_high= */ true); } void IntrinsicCodeGeneratorX86_64::VisitLongHighestOneBit(HInvoke* invoke) { - GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ true, /* is_long */ true); + GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ true, /* is_long= */ true); } void IntrinsicLocationsBuilderX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) { - CreateOneBitLocations(allocator_, invoke, /* is_high */ false); + CreateOneBitLocations(allocator_, invoke, /* is_high= */ false); } void IntrinsicCodeGeneratorX86_64::VisitIntegerLowestOneBit(HInvoke* invoke) { - GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ false); + GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ false); } void IntrinsicLocationsBuilderX86_64::VisitLongLowestOneBit(HInvoke* invoke) { - CreateOneBitLocations(allocator_, invoke, /* is_high */ false); + CreateOneBitLocations(allocator_, invoke, /* is_high= */ false); } void IntrinsicCodeGeneratorX86_64::VisitLongLowestOneBit(HInvoke* invoke) { - GenOneBit(GetAssembler(), codegen_, invoke, /* is_high */ false, /* is_long */ true); + GenOneBit(GetAssembler(), codegen_, invoke, /* is_high= */ false, /* is_long= */ true); } static void CreateLeadingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) { @@ -2569,7 +2569,7 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* } void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) { - GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false); + GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false); } void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { @@ -2577,7 +2577,7 @@ void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfLeadingZeros(HInvoke* 
inv } void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) { - GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true); + GenLeadingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true); } static void CreateTrailingZeroLocations(ArenaAllocator* allocator, HInvoke* invoke) { @@ -2637,7 +2637,7 @@ void IntrinsicLocationsBuilderX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* } void IntrinsicCodeGeneratorX86_64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) { - GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ false); + GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ false); } void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { @@ -2645,7 +2645,7 @@ void IntrinsicLocationsBuilderX86_64::VisitLongNumberOfTrailingZeros(HInvoke* in } void IntrinsicCodeGeneratorX86_64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) { - GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long */ true); + GenTrailingZeros(GetAssembler(), codegen_, invoke, /* is_long= */ true); } void IntrinsicLocationsBuilderX86_64::VisitIntegerValueOf(HInvoke* invoke) { @@ -2716,7 +2716,7 @@ void IntrinsicCodeGeneratorX86_64::VisitThreadInterrupted(HInvoke* invoke) { X86_64Assembler* assembler = GetAssembler(); CpuRegister out = invoke->GetLocations()->Out().AsRegister<CpuRegister>(); Address address = Address::Absolute - (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip */ true); + (Thread::InterruptedOffset<kX86_64PointerSize>().Int32Value(), /* no_rip= */ true); NearLabel done; __ gs()->movl(out, address); __ testl(out, out); diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc index c7cc661303..310d98b5b0 100644 --- a/compiler/optimizing/loop_optimization_test.cc +++ b/compiler/optimizing/loop_optimization_test.cc @@ -30,7 +30,7 @@ class LoopOptimizationTest : public OptimizingUnitTest { : graph_(CreateGraph()), iva_(new (GetAllocator()) HInductionVarAnalysis(graph_)), loop_opt_(new (GetAllocator()) HLoopOptimization( - graph_, /* compiler_options */ nullptr, iva_, /* stats */ nullptr)) { + graph_, /* compiler_options= */ nullptr, iva_, /* stats= */ nullptr)) { BuildGraph(); } diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index d1fba31792..e9a2f96798 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -44,7 +44,7 @@ void HGraph::InitializeInexactObjectRTI(VariableSizedHandleScope* handles) { // Create the inexact Object reference type and store it in the HGraph. inexact_object_rti_ = ReferenceTypeInfo::Create( handles->NewHandle(GetClassRoot<mirror::Object>()), - /* is_exact */ false); + /* is_exact= */ false); } void HGraph::AddBlock(HBasicBlock* block) { @@ -60,7 +60,7 @@ void HGraph::FindBackEdges(ArenaBitVector* visited) { ScopedArenaAllocator allocator(GetArenaStack()); // Nodes that we're currently visiting, indexed by block id. ArenaBitVector visiting( - &allocator, blocks_.size(), /* expandable */ false, kArenaAllocGraphBuilder); + &allocator, blocks_.size(), /* expandable= */ false, kArenaAllocGraphBuilder); visiting.ClearAllBits(); // Number of successors visited from a given node, indexed by block id. 
ScopedArenaVector<size_t> successors_visited(blocks_.size(), @@ -826,7 +826,7 @@ void HLoopInformation::Populate() { ScopedArenaAllocator allocator(graph->GetArenaStack()); ArenaBitVector visited(&allocator, graph->GetBlocks().size(), - /* expandable */ false, + /* expandable= */ false, kArenaAllocGraphBuilder); visited.ClearAllBits(); // Stop marking blocks at the loop header. @@ -2527,7 +2527,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { current->SetGraph(outer_graph); outer_graph->AddBlock(current); outer_graph->reverse_post_order_[++index_of_at] = current; - UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge */ false); + UpdateLoopAndTryInformationOfNewBlock(current, at, /* replace_if_back_edge= */ false); } } @@ -2537,7 +2537,7 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { outer_graph->reverse_post_order_[++index_of_at] = to; // Only `to` can become a back edge, as the inlined blocks // are predecessors of `to`. - UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge */ true); + UpdateLoopAndTryInformationOfNewBlock(to, at, /* replace_if_back_edge= */ true); // Update all predecessors of the exit block (now the `to` block) // to not `HReturn` but `HGoto` instead. Special case throwing blocks @@ -2711,13 +2711,13 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) { DCHECK((old_pre_header->GetLoopInformation() == nullptr) || !old_pre_header->GetLoopInformation()->IsBackEdge(*old_pre_header)); UpdateLoopAndTryInformationOfNewBlock( - if_block, old_pre_header, /* replace_if_back_edge */ false); + if_block, old_pre_header, /* replace_if_back_edge= */ false); UpdateLoopAndTryInformationOfNewBlock( - true_block, old_pre_header, /* replace_if_back_edge */ false); + true_block, old_pre_header, /* replace_if_back_edge= */ false); UpdateLoopAndTryInformationOfNewBlock( - false_block, old_pre_header, /* replace_if_back_edge */ false); + false_block, old_pre_header, /* replace_if_back_edge= */ false); UpdateLoopAndTryInformationOfNewBlock( - new_pre_header, old_pre_header, /* replace_if_back_edge */ false); + new_pre_header, old_pre_header, /* replace_if_back_edge= */ false); } HBasicBlock* HGraph::TransformLoopForVectorization(HBasicBlock* header, diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 13c8684cc0..686a2deb0b 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -3242,7 +3242,7 @@ class HDeoptimize final : public HVariableInputSizeInstruction { SideEffects::All(), dex_pc, allocator, - /* number_of_inputs */ 1, + /* number_of_inputs= */ 1, kArenaAllocMisc) { SetPackedFlag<kFieldCanBeMoved>(false); SetPackedField<DeoptimizeKindField>(kind); @@ -3267,7 +3267,7 @@ class HDeoptimize final : public HVariableInputSizeInstruction { SideEffects::CanTriggerGC(), dex_pc, allocator, - /* number_of_inputs */ 2, + /* number_of_inputs= */ 2, kArenaAllocMisc) { SetPackedFlag<kFieldCanBeMoved>(true); SetPackedField<DeoptimizeKindField>(kind); @@ -4399,7 +4399,7 @@ class HInvokeUnresolved final : public HInvoke { : HInvoke(kInvokeUnresolved, allocator, number_of_arguments, - 0u /* number_of_other_inputs */, + /* number_of_other_inputs= */ 0u, return_type, dex_pc, dex_method_index, @@ -4425,7 +4425,7 @@ class HInvokePolymorphic final : public HInvoke { : HInvoke(kInvokePolymorphic, allocator, number_of_arguments, - 0u /* number_of_other_inputs */, + /* number_of_other_inputs= */ 0u, return_type, dex_pc, dex_method_index, @@ -4451,11 +4451,11 
@@ class HInvokeCustom final : public HInvoke { : HInvoke(kInvokeCustom, allocator, number_of_arguments, - /* number_of_other_inputs */ 0u, + /* number_of_other_inputs= */ 0u, return_type, dex_pc, - /* dex_method_index */ dex::kDexNoIndex, - /* resolved_method */ nullptr, + /* dex_method_index= */ dex::kDexNoIndex, + /* resolved_method= */ nullptr, kStatic), call_site_index_(call_site_index) { } @@ -5894,7 +5894,7 @@ class HArrayGet final : public HExpression<2> { type, SideEffects::ArrayReadOfType(type), dex_pc, - /* is_string_char_at */ false) { + /* is_string_char_at= */ false) { } HArrayGet(HInstruction* array, @@ -6336,7 +6336,7 @@ class HLoadClass final : public HInstruction { ReferenceTypeInfo GetLoadedClassRTI() { if (GetPackedFlag<kFlagValidLoadedClassRTI>()) { // Note: The is_exact flag from the return value should not be used. - return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true); + return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true); } else { return ReferenceTypeInfo::CreateInvalid(); } @@ -7089,7 +7089,7 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction { side_effects, dex_pc, allocator, - /* number_of_inputs */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u, + /* number_of_inputs= */ check_kind == TypeCheckKind::kBitstringCheck ? 4u : 2u, kArenaAllocTypeCheckInputs), klass_(klass) { SetPackedField<TypeCheckKindField>(check_kind); @@ -7145,7 +7145,7 @@ class HTypeCheckInstruction : public HVariableInputSizeInstruction { ReferenceTypeInfo GetTargetClassRTI() { if (GetPackedFlag<kFlagValidTargetClassRTI>()) { // Note: The is_exact flag from the return value should not be used. - return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact */ true); + return ReferenceTypeInfo::CreateUnchecked(klass_, /* is_exact= */ true); } else { return ReferenceTypeInfo::CreateInvalid(); } @@ -7458,7 +7458,7 @@ class HConstructorFence final : public HVariableInputSizeInstruction { SideEffects::AllReads(), dex_pc, allocator, - /* number_of_inputs */ 1, + /* number_of_inputs= */ 1, kArenaAllocConstructorFenceInputs) { DCHECK(fence_object != nullptr); SetRawInputAt(0, fence_object); diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h index cd4f45e830..efe4d6b000 100644 --- a/compiler/optimizing/nodes_vector.h +++ b/compiler/optimizing/nodes_vector.h @@ -207,7 +207,7 @@ class HVecUnaryOperation : public HVecOperation { allocator, packed_type, SideEffects::None(), - /* number_of_inputs */ 1, + /* number_of_inputs= */ 1, vector_length, dex_pc) { SetRawInputAt(0, input); @@ -235,7 +235,7 @@ class HVecBinaryOperation : public HVecOperation { allocator, packed_type, SideEffects::None(), - /* number_of_inputs */ 2, + /* number_of_inputs= */ 2, vector_length, dex_pc) { SetRawInputAt(0, left); @@ -948,7 +948,7 @@ class HVecMultiplyAccumulate final : public HVecOperation { allocator, packed_type, SideEffects::None(), - /* number_of_inputs */ 3, + /* number_of_inputs= */ 3, vector_length, dex_pc), op_kind_(op) { @@ -1002,7 +1002,7 @@ class HVecSADAccumulate final : public HVecOperation { allocator, packed_type, SideEffects::None(), - /* number_of_inputs */ 3, + /* number_of_inputs= */ 3, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(accumulator, packed_type)); @@ -1049,7 +1049,7 @@ class HVecDotProd final : public HVecOperation { allocator, packed_type, SideEffects::None(), - /* number_of_inputs */ 3, + /* number_of_inputs= */ 3, vector_length, dex_pc) { 
DCHECK(HasConsistentPackedTypes(accumulator, packed_type)); @@ -1097,7 +1097,7 @@ class HVecLoad final : public HVecMemoryOperation { allocator, packed_type, side_effects, - /* number_of_inputs */ 2, + /* number_of_inputs= */ 2, vector_length, dex_pc) { SetRawInputAt(0, base); @@ -1143,7 +1143,7 @@ class HVecStore final : public HVecMemoryOperation { allocator, packed_type, side_effects, - /* number_of_inputs */ 3, + /* number_of_inputs= */ 3, vector_length, dex_pc) { DCHECK(HasConsistentPackedTypes(value, packed_type)); diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc index b75afad4c8..8864a12301 100644 --- a/compiler/optimizing/optimization.cc +++ b/compiler/optimizing/optimization.cc @@ -260,9 +260,9 @@ ArenaVector<HOptimization*> ConstructOptimizations( handles, stats, accessor.RegistersSize(), - /* total_number_of_instructions */ 0, - /* parent */ nullptr, - /* depth */ 0, + /* total_number_of_instructions= */ 0, + /* parent= */ nullptr, + /* depth= */ 0, pass_name); break; } diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 4936a6d3c0..a8fa370993 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -162,7 +162,7 @@ class PassObserver : public ValueObject { VLOG(compiler) << "Starting pass: " << pass_name; // Dump graph first, then start timer. if (visualizer_enabled_) { - visualizer_.DumpGraph(pass_name, /* is_after_pass */ false, graph_in_bad_state_); + visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_); FlushVisualizer(); } if (timing_logger_enabled_) { @@ -184,7 +184,7 @@ class PassObserver : public ValueObject { timing_logger_.EndTiming(); } if (visualizer_enabled_) { - visualizer_.DumpGraph(pass_name, /* is_after_pass */ true, graph_in_bad_state_); + visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_); FlushVisualizer(); } @@ -964,7 +964,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic( compiler_options.GetInstructionSet(), kInvalidInvokeType, compiler_options.GetDebuggable(), - /* osr */ false); + /* osr= */ false); DCHECK(Runtime::Current()->IsAotCompiler()); DCHECK(method != nullptr); @@ -994,7 +994,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic( &dex_compilation_unit, codegen.get(), compilation_stats_.get(), - /* interpreter_metadata */ ArrayRef<const uint8_t>(), + /* interpreter_metadata= */ ArrayRef<const uint8_t>(), handles); builder.BuildIntrinsicGraph(method); } @@ -1161,7 +1161,7 @@ static ScopedArenaVector<uint8_t> CreateJniStackMap(ScopedArenaAllocator* alloca jni_compiled_method.GetFrameSize(), jni_compiled_method.GetCoreSpillMask(), jni_compiled_method.GetFpSpillMask(), - /* num_dex_registers */ 0); + /* num_dex_registers= */ 0); stack_map_stream->EndMethod(); return stack_map_stream->Encode(); } @@ -1208,7 +1208,7 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags, CompiledMethod* compiled_method = Emit(&allocator, &code_allocator, codegen.get(), - /* code_item_for_osr_check */ nullptr); + /* item= */ nullptr); compiled_method->MarkAsIntrinsic(); return compiled_method; } @@ -1228,7 +1228,7 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags, jni_compiled_method.GetCode(), ArrayRef<const uint8_t>(stack_map), jni_compiled_method.GetCfi(), - /* patches */ ArrayRef<const linker::LinkerPatch>()); + /* patches= */ ArrayRef<const linker::LinkerPatch>()); } Compiler* 
CreateOptimizingCompiler(CompilerDriver* driver) { @@ -1277,7 +1277,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, uint8_t* roots_data = nullptr; uint32_t data_size = code_cache->ReserveData(self, stack_map.size(), - /* number_of_roots */ 0, + /* number_of_roots= */ 0, method, &stack_map_data, &roots_data); @@ -1297,7 +1297,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, data_size, osr, roots, - /* has_should_deoptimize_flag */ false, + /* has_should_deoptimize_flag= */ false, cha_single_implementation_list); if (code == nullptr) { return false; diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h index f903f82d50..4e376b1c57 100644 --- a/compiler/optimizing/optimizing_unit_test.h +++ b/compiler/optimizing/optimizing_unit_test.h @@ -165,13 +165,13 @@ class OptimizingUnitTestHelper { const DexCompilationUnit* dex_compilation_unit = new (graph->GetAllocator()) DexCompilationUnit( handles_->NewHandle<mirror::ClassLoader>(nullptr), - /* class_linker */ nullptr, + /* class_linker= */ nullptr, graph->GetDexFile(), code_item, - /* class_def_index */ DexFile::kDexNoIndex16, - /* method_idx */ dex::kDexNoIndex, - /* access_flags */ 0u, - /* verified_method */ nullptr, + /* class_def_index= */ DexFile::kDexNoIndex16, + /* method_idx= */ dex::kDexNoIndex, + /* access_flags= */ 0u, + /* verified_method= */ nullptr, handles_->NewHandle<mirror::DexCache>(nullptr)); CodeItemDebugInfoAccessor accessor(graph->GetDexFile(), code_item, /*dex_method_idx*/ 0u); HGraphBuilder builder(graph, dex_compilation_unit, accessor, handles_.get(), return_type); diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index 12db8a06de..fbdbf9d086 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -87,9 +87,9 @@ void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) { if (GetGraph()->GetArtMethod() != char_at_method) { ArenaAllocator* allocator = GetGraph()->GetAllocator(); HEnvironment* environment = new (allocator) HEnvironment(allocator, - /* number_of_vregs */ 0u, + /* number_of_vregs= */ 0u, char_at_method, - /* dex_pc */ dex::kDexNoIndex, + /* dex_pc= */ dex::kDexNoIndex, check); check->InsertRawEnvironment(environment); } diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 9079658e1c..61e7a60ea9 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -278,7 +278,7 @@ static void BoundTypeIn(HInstruction* receiver, if (ShouldCreateBoundType( insert_point, receiver, class_rti, start_instruction, start_block)) { bound_type = new (receiver->GetBlock()->GetGraph()->GetAllocator()) HBoundType(receiver); - bound_type->SetUpperBound(class_rti, /* bound_can_be_null */ false); + bound_type->SetUpperBound(class_rti, /* can_be_null= */ false); start_block->InsertInstructionBefore(bound_type, insert_point); // To comply with the RTP algorithm, don't type the bound type just yet, it will // be handled in RTPVisitor::VisitBoundType. @@ -350,7 +350,7 @@ static void BoundTypeForClassCheck(HInstruction* check) { HBasicBlock* trueBlock = compare->IsEqual() ? 
check->AsIf()->IfTrueSuccessor() : check->AsIf()->IfFalseSuccessor(); - BoundTypeIn(receiver, trueBlock, /* start_instruction */ nullptr, class_rti); + BoundTypeIn(receiver, trueBlock, /* start_instruction= */ nullptr, class_rti); } else { DCHECK(check->IsDeoptimize()); if (compare->IsEqual() && check->AsDeoptimize()->GuardsAnInput()) { @@ -427,9 +427,9 @@ void ReferenceTypePropagation::RTPVisitor::BoundTypeForIfNotNull(HBasicBlock* bl : ifInstruction->IfFalseSuccessor(); ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create( - handle_cache_->GetObjectClassHandle(), /* is_exact */ false); + handle_cache_->GetObjectClassHandle(), /* is_exact= */ false); - BoundTypeIn(obj, notNullBlock, /* start_instruction */ nullptr, object_rti); + BoundTypeIn(obj, notNullBlock, /* start_instruction= */ nullptr, object_rti); } // Returns true if one of the patterns below has been recognized. If so, the @@ -538,10 +538,10 @@ void ReferenceTypePropagation::RTPVisitor::BoundTypeForIfInstanceOf(HBasicBlock* { ScopedObjectAccess soa(Thread::Current()); if (!class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) { - class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false); + class_rti = ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false); } } - BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction */ nullptr, class_rti); + BoundTypeIn(obj, instanceOfTrueBlock, /* start_instruction= */ nullptr, class_rti); } void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* instr, @@ -561,7 +561,7 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst // Use a null loader, the target method is in a boot classpath dex file. Handle<mirror::ClassLoader> loader(hs.NewHandle<mirror::ClassLoader>(nullptr)); ArtMethod* method = cl->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>( - dex_method_index, dex_cache, loader, /* referrer */ nullptr, kDirect); + dex_method_index, dex_cache, loader, /* referrer= */ nullptr, kDirect); DCHECK(method != nullptr); ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass(); DCHECK(declaring_class != nullptr); @@ -571,7 +571,7 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst << "Expected String.<init>: " << method->PrettyMethod(); } instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true)); + ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true)); } else if (IsAdmissible(klass)) { ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass); is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes(); @@ -600,12 +600,12 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* void ReferenceTypePropagation::RTPVisitor::VisitNewInstance(HNewInstance* instr) { ScopedObjectAccess soa(Thread::Current()); - SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true); + SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true); } void ReferenceTypePropagation::RTPVisitor::VisitNewArray(HNewArray* instr) { ScopedObjectAccess soa(Thread::Current()); - SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact */ true); + SetClassAsTypeInfo(instr, instr->GetLoadClass()->GetClass().Get(), /* is_exact= */ true); } void ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* instr) { @@ -614,7 +614,7 @@ void 
ReferenceTypePropagation::RTPVisitor::VisitParameterValue(HParameterValue* UpdateReferenceTypeInfo(instr, instr->GetTypeIndex(), instr->GetDexFile(), - /* is_exact */ false); + /* is_exact= */ false); } } @@ -632,7 +632,7 @@ void ReferenceTypePropagation::RTPVisitor::UpdateFieldAccessTypeInfo(HInstructio klass = info.GetField()->LookupResolvedType(); } - SetClassAsTypeInfo(instr, klass, /* is_exact */ false); + SetClassAsTypeInfo(instr, klass, /* is_exact= */ false); } void ReferenceTypePropagation::RTPVisitor::VisitInstanceFieldGet(HInstanceFieldGet* instr) { @@ -665,7 +665,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitLoadClass(HLoadClass* instr) { instr->SetValidLoadedClassRTI(); } instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact */ true)); + ReferenceTypeInfo::Create(handle_cache_->GetClassClassHandle(), /* is_exact= */ true)); } void ReferenceTypePropagation::RTPVisitor::VisitInstanceOf(HInstanceOf* instr) { @@ -682,17 +682,17 @@ void ReferenceTypePropagation::RTPVisitor::VisitClinitCheck(HClinitCheck* instr) void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodHandle(HLoadMethodHandle* instr) { instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create( handle_cache_->GetMethodHandleClassHandle(), - /* is_exact */ true)); + /* is_exact= */ true)); } void ReferenceTypePropagation::RTPVisitor::VisitLoadMethodType(HLoadMethodType* instr) { instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact */ true)); + ReferenceTypeInfo::Create(handle_cache_->GetMethodTypeClassHandle(), /* is_exact= */ true)); } void ReferenceTypePropagation::RTPVisitor::VisitLoadString(HLoadString* instr) { instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true)); + ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact= */ true)); } void ReferenceTypePropagation::RTPVisitor::VisitLoadException(HLoadException* instr) { @@ -701,12 +701,12 @@ void ReferenceTypePropagation::RTPVisitor::VisitLoadException(HLoadException* in if (catch_info->IsCatchAllTypeIndex()) { instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact */ false)); + ReferenceTypeInfo::Create(handle_cache_->GetThrowableClassHandle(), /* is_exact= */ false)); } else { UpdateReferenceTypeInfo(instr, catch_info->GetCatchTypeIndex(), catch_info->GetCatchDexFile(), - /* is_exact */ false); + /* is_exact= */ false); } } @@ -736,7 +736,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitBoundType(HBoundType* instr) { // bound type is dead. To not confuse potential other optimizations, we mark // the bound as non-exact. instr->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false)); + ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact= */ false)); } } else { // Object not typed yet. Leave BoundType untyped for now rather than @@ -914,7 +914,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitInvoke(HInvoke* instr) { ScopedObjectAccess soa(Thread::Current()); ArtMethod* method = instr->GetResolvedMethod(); ObjPtr<mirror::Class> klass = (method == nullptr) ? 
 nullptr : method->LookupResolvedReturnType();
-  SetClassAsTypeInfo(instr, klass, /* is_exact */ false);
+  SetClassAsTypeInfo(instr, klass, /* is_exact= */ false);
 }
 void ReferenceTypePropagation::RTPVisitor::VisitArrayGet(HArrayGet* instr) {
@@ -947,7 +947,7 @@ void ReferenceTypePropagation::RTPVisitor::UpdateBoundType(HBoundType* instr) {
   // bound type is dead. To not confuse potential other optimizations, we mark
   // the bound as non-exact.
   instr->SetReferenceTypeInfo(
-      ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact */ false));
+      ReferenceTypeInfo::Create(upper_bound_rti.GetTypeHandle(), /* is_exact= */ false));
   }
 }
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index 27f9ac3990..b1f0a1add9 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -280,16 +280,16 @@ size_t RegisterAllocationResolver::CalculateMaximumSafepointSpillSize(
     LocationSummary* locations = instruction->GetLocations();
     if (locations->OnlyCallsOnSlowPath()) {
       size_t core_spills =
-          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
+          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true);
       size_t fp_spills =
-          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
+          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false);
       size_t spill_size =
           core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
       maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
     } else if (locations->CallsOnMainAndSlowPath()) {
       // Nothing to spill on the slow path if the main path already clobbers caller-saves.
-      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
-      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
+      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ true));
+      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers= */ false));
     }
   }
   return maximum_safepoint_spill_size;
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 1e00003701..0d6c5a3eff 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -252,7 +252,7 @@ void RegisterAllocatorLinearScan::ProcessInstruction(HInstruction* instruction)
       temp_intervals_.push_back(interval);
       interval->AddTempUse(instruction, i);
       if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) {
-        interval->AddHighInterval(/* is_temp */ true);
+        interval->AddHighInterval(/* is_temp= */ true);
         LiveInterval* high = interval->GetHighInterval();
         temp_intervals_.push_back(high);
         unhandled_fp_intervals_.push_back(high);
@@ -284,7 +284,7 @@
   }
   if (locations->WillCall()) {
-    BlockRegisters(position, position + 1, /* caller_save_only */ true);
+    BlockRegisters(position, position + 1, /* caller_save_only= */ true);
   }
   for (size_t i = 0; i < locations->GetInputCount(); ++i) {
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index be5304c7c3..79eb082cd7 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -68,11 +68,11 @@ class RegisterAllocatorTest : public OptimizingUnitTest {
   bool ValidateIntervals(const ScopedArenaVector<LiveInterval*>& intervals,
                          const CodeGenerator& codegen) {
     return RegisterAllocator::ValidateIntervals(ArrayRef<LiveInterval* const>(intervals),
-                                                /* number_of_spill_slots */ 0u,
-                                                /* number_of_out_slots */ 0u,
+                                                /* number_of_spill_slots= */ 0u,
+                                                /* number_of_out_slots= */ 0u,
                                                 codegen,
-                                                /* processing_core_registers */ true,
-                                                /* log_fatal_on_failure */ false);
+                                                /* processing_core_registers= */ true,
+                                                /* log_fatal_on_failure= */ false);
   }
 };
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index df897a4904..fdef45ec8b 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -680,7 +680,7 @@ static void MoveAfterInBlock(HInstruction* instruction, HInstruction* cursor) {
   DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
   DCHECK(!instruction->IsControlFlow());
   DCHECK(!cursor->IsControlFlow());
-  instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
+  instruction->MoveBefore(cursor->GetNext(), /* do_checks= */ false);
 }
 void HScheduler::Schedule(HInstruction* instruction) {
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index d89d1171a1..858a555e97 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -563,7 +563,7 @@ void SchedulingLatencyVisitorARM::HandleGenerateDataProc(HDataProcWithShifterOp*
     last_visited_internal_latency_ = kArmIntegerOpLatency;
     last_visited_latency_ = kArmIntegerOpLatency;
   } else {
-    HandleGenerateDataProcInstruction(/* internal_latency */ true);
+    HandleGenerateDataProcInstruction(/* internal_latency= */ true);
     HandleGenerateDataProcInstruction();
   }
 }
@@ -585,8 +585,8 @@ void SchedulingLatencyVisitorARM::HandleGenerateLongDataProc(HDataProcWithShifte
   DCHECK_LT(shift_value, 32U);
   if (kind == HInstruction::kOr || kind == HInstruction::kXor) {
-    HandleGenerateDataProcInstruction(/* internal_latency */ true);
-    HandleGenerateDataProcInstruction(/* internal_latency */ true);
+    HandleGenerateDataProcInstruction(/* internal_latency= */ true);
+    HandleGenerateDataProcInstruction(/* internal_latency= */ true);
     HandleGenerateDataProcInstruction();
   } else {
     last_visited_internal_latency_ += 2 * kArmIntegerOpLatency;
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 981fcc42a7..e0e265a04c 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -148,7 +148,7 @@ class SchedulerTest : public OptimizingUnitTest {
     SchedulingGraph scheduling_graph(scheduler,
                                      GetScopedAllocator(),
-                                     /* heap_location_collector */ nullptr);
+                                     /* heap_location_collector= */ nullptr);
     // Instructions must be inserted in reverse order into the scheduling graph.
     for (HInstruction* instr : ReverseRange(block_instructions)) {
       scheduling_graph.AddNode(instr);
diff --git a/compiler/optimizing/side_effects_test.cc b/compiler/optimizing/side_effects_test.cc
index 4b0be07f3b..cf26e79c69 100644
--- a/compiler/optimizing/side_effects_test.cc
+++ b/compiler/optimizing/side_effects_test.cc
@@ -141,13 +141,13 @@ TEST(SideEffectsTest, NoDependences) {
 TEST(SideEffectsTest, VolatileDependences) {
   SideEffects volatile_write =
-      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ true);
+      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ true);
   SideEffects any_write =
-      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false);
+      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false);
   SideEffects volatile_read =
-      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ true);
+      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ true);
   SideEffects any_read =
-      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile */ false);
+      SideEffects::FieldReadOfType(DataType::Type::kInt8, /* is_volatile= */ false);
   EXPECT_FALSE(volatile_write.MayDependOn(any_read));
   EXPECT_TRUE(any_read.MayDependOn(volatile_write));
@@ -163,15 +163,15 @@ TEST(SideEffectsTest, VolatileDependences) {
 TEST(SideEffectsTest, SameWidthTypesNoAlias) {
   // Type I/F.
   testNoWriteAndReadDependence(
-      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile */ false),
-      SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile */ false));
+      SideEffects::FieldWriteOfType(DataType::Type::kInt32, /* is_volatile= */ false),
+      SideEffects::FieldReadOfType(DataType::Type::kFloat32, /* is_volatile= */ false));
   testNoWriteAndReadDependence(
       SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
       SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
   // Type L/D.
   testNoWriteAndReadDependence(
-      SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false),
-      SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile */ false));
+      SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false),
+      SideEffects::FieldReadOfType(DataType::Type::kFloat64, /* is_volatile= */ false));
   testNoWriteAndReadDependence(
       SideEffects::ArrayWriteOfType(DataType::Type::kInt64),
       SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
@@ -181,9 +181,9 @@ TEST(SideEffectsTest, AllWritesAndReads) {
   SideEffects s = SideEffects::None();
   // Keep taking the union of different writes and reads.
   for (DataType::Type type : kTestTypes) {
-    s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile */ false));
+    s = s.Union(SideEffects::FieldWriteOfType(type, /* is_volatile= */ false));
     s = s.Union(SideEffects::ArrayWriteOfType(type));
-    s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile */ false));
+    s = s.Union(SideEffects::FieldReadOfType(type, /* is_volatile= */ false));
     s = s.Union(SideEffects::ArrayReadOfType(type));
   }
   EXPECT_TRUE(s.DoesAllReadWrite());
@@ -254,10 +254,10 @@ TEST(SideEffectsTest, BitStrings) {
       "||I|||||",
       SideEffects::ArrayReadOfType(DataType::Type::kInt32).ToString().c_str());
   SideEffects s = SideEffects::None();
-  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile */ false));
-  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile */ false));
+  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kUint16, /* is_volatile= */ false));
+  s = s.Union(SideEffects::FieldWriteOfType(DataType::Type::kInt64, /* is_volatile= */ false));
   s = s.Union(SideEffects::ArrayWriteOfType(DataType::Type::kInt16));
-  s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile */ false));
+  s = s.Union(SideEffects::FieldReadOfType(DataType::Type::kInt32, /* is_volatile= */ false));
   s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat32));
   s = s.Union(SideEffects::ArrayReadOfType(DataType::Type::kFloat64));
   EXPECT_STREQ("||DF|I||S|JC|", s.ToString().c_str());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index cef234a399..0d0e1ecf1f 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -391,7 +391,7 @@ bool SsaBuilder::FixAmbiguousArrayOps() {
         // succeed in code validated by the verifier.
         HInstruction* equivalent = GetFloatOrDoubleEquivalent(value, array_type);
         DCHECK(equivalent != nullptr);
-        aset->ReplaceInput(equivalent, /* input_index */ 2);
+        aset->ReplaceInput(equivalent, /* index= */ 2);
         if (equivalent->IsPhi()) {
           // Returned equivalent is a phi which may not have had its inputs
           // replaced yet. We need to run primitive type propagation on it.
@@ -525,7 +525,7 @@ GraphAnalysisResult SsaBuilder::BuildSsa() {
                            class_loader_,
                            dex_cache_,
                            handles_,
-                           /* is_first_run */ true).Run();
+                           /* is_first_run= */ true).Run();
   // HInstructionBuilder duplicated ArrayGet instructions with ambiguous type
   // (int/float or long/double) and marked ArraySets with ambiguous input type.
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 62a70d6b12..7b2c3a939c 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -120,7 +120,7 @@ void SsaLivenessAnalysis::RecursivelyProcessInputs(HInstruction* current,
       DCHECK(input->HasSsaIndex());
       // `input` generates a result used by `current`. Add use and update
      // the live-in set.
-      input->GetLiveInterval()->AddUse(current, /* environment */ nullptr, i, actual_user);
+      input->GetLiveInterval()->AddUse(current, /* environment= */ nullptr, i, actual_user);
       live_in->SetBit(input->GetSsaIndex());
     } else if (has_out_location) {
       // `input` generates a result but it is not used by `current`.
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 4b525531da..352c44f63a 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -94,25 +94,25 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) {
   HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
   block->AddInstruction(null_check);
   HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-      /* number_of_vregs */ 5,
-      /* method */ nullptr,
-      /* dex_pc */ 0u,
+      /* number_of_vregs= */ 5,
+      /* method= */ nullptr,
+      /* dex_pc= */ 0u,
       null_check);
   null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   null_check->SetRawEnvironment(null_check_env);
   HInstruction* length = new (GetAllocator()) HArrayLength(array, 0);
   block->AddInstruction(length);
-  HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc */ 0u);
+  HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(index, length, /* dex_pc= */ 0u);
   block->AddInstruction(bounds_check);
   HEnvironment* bounds_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-      /* number_of_vregs */ 5,
-      /* method */ nullptr,
-      /* dex_pc */ 0u,
+      /* number_of_vregs= */ 5,
+      /* method= */ nullptr,
+      /* dex_pc= */ 0u,
       bounds_check);
   bounds_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   bounds_check->SetRawEnvironment(bounds_check_env);
   HInstruction* array_set =
-      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
   block->AddInstruction(array_set);
   graph_->BuildDominatorTree();
@@ -163,9 +163,9 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
   HInstruction* null_check = new (GetAllocator()) HNullCheck(array, 0);
   block->AddInstruction(null_check);
   HEnvironment* null_check_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-      /* number_of_vregs */ 5,
-      /* method */ nullptr,
-      /* dex_pc */ 0u,
+      /* number_of_vregs= */ 5,
+      /* method= */ nullptr,
+      /* dex_pc= */ 0u,
       null_check);
   null_check_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   null_check->SetRawEnvironment(null_check_env);
@@ -175,17 +175,17 @@ TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
   HInstruction* ae = new (GetAllocator()) HAboveOrEqual(index, length);
   block->AddInstruction(ae);
   HInstruction* deoptimize = new(GetAllocator()) HDeoptimize(
-      GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
+      GetAllocator(), ae, DeoptimizationKind::kBlockBCE, /* dex_pc= */ 0u);
   block->AddInstruction(deoptimize);
   HEnvironment* deoptimize_env = new (GetAllocator()) HEnvironment(GetAllocator(),
-      /* number_of_vregs */ 5,
-      /* method */ nullptr,
-      /* dex_pc */ 0u,
+      /* number_of_vregs= */ 5,
+      /* method= */ nullptr,
+      /* dex_pc= */ 0u,
       deoptimize);
   deoptimize_env->CopyFrom(ArrayRef<HInstruction* const>(args));
   deoptimize->SetRawEnvironment(deoptimize_env);
   HInstruction* array_set =
-      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc */ 0);
+      new (GetAllocator()) HArraySet(array, index, value, DataType::Type::kInt32, /* dex_pc= */ 0);
   block->AddInstruction(array_set);
   graph_->BuildDominatorTree();
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 5370f43b4f..3fcb72e4fb 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -141,7 +141,7 @@ bool SsaRedundantPhiElimination::Run() {
     ArenaBitVector visited_phis_in_cycle(&allocator,
                                          graph_->GetCurrentInstructionId(),
-                                         /* expandable */ false,
+                                         /* expandable= */ false,
                                          kArenaAllocSsaPhiElimination);
     visited_phis_in_cycle.ClearAllBits();
     ScopedArenaVector<HPhi*> cycle_worklist(allocator.Adapter(kArenaAllocSsaPhiElimination));
diff --git a/compiler/optimizing/superblock_cloner.h b/compiler/optimizing/superblock_cloner.h
index f21172131b..dbe9008e92 100644
--- a/compiler/optimizing/superblock_cloner.h
+++ b/compiler/optimizing/superblock_cloner.h
@@ -372,8 +372,8 @@ class PeelUnrollHelper : public ValueObject {
   // Returns whether the loop can be peeled/unrolled.
   bool IsLoopClonable() const { return cloner_.IsSubgraphClonable(); }
-  HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll */ false); }
-  HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll */ true); }
+  HBasicBlock* DoPeeling() { return DoPeelUnrollImpl(/* to_unroll= */ false); }
+  HBasicBlock* DoUnrolling() { return DoPeelUnrollImpl(/* to_unroll= */ true); }
   HLoopInformation* GetRegionToBeAdjusted() const { return cloner_.GetRegionToBeAdjusted(); }
 protected:
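As context for the pattern applied throughout these hunks: clang-tidy's bugprone-argument-comment check matches a comment of the form /* name= */ placed immediately before an argument against the parameter name in the callee's declaration, so a stale or transposed comment is reported. The sketch below only illustrates that convention; the function, file name, and build command are hypothetical and not part of the ART sources or of this change.

// argument_comment_demo.cc - standalone illustration (assumed names, not ART code).
// Possible check invocation (assumption):
//   clang-tidy argument_comment_demo.cc -checks=bugprone-argument-comment --
#include <cstdint>
#include <iostream>

// A callee whose boolean parameters are easy to transpose at call sites.
void Configure(bool is_exact, bool is_volatile, uint32_t dex_pc) {
  std::cout << is_exact << ' ' << is_volatile << ' ' << dex_pc << '\n';
}

int main() {
  // Old style, as it appears on the '-' lines above: the comment names the
  // parameter but lacks the '=' suffix, so it is not in the canonical form the
  // check verifies against the declaration:
  //   Configure(/* is_exact */ false, /* is_volatile */ true, /* dex_pc */ 0u);
  //
  // New style, as on the '+' lines: '/* name= */' is matched against the
  // parameter list, so swapping the first two comments here would be flagged.
  Configure(/* is_exact= */ false, /* is_volatile= */ true, /* dex_pc= */ 0u);
  return 0;
}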