Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.cc                 |  2
-rw-r--r--  compiler/optimizing/code_generator.h                  | 10
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc           | 48
-rw-r--r--  compiler/optimizing/code_generator_arm64.h            |  8
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc        | 53
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h         |  2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc             | 52
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc          | 48
-rw-r--r--  compiler/optimizing/instruction_simplifier_shared.cc  |  2
-rw-r--r--  compiler/optimizing/intrinsics.cc                     |  2
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc               | 64
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc            | 66
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc                 | 66
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc              | 58
-rw-r--r--  compiler/optimizing/scheduler_arm.cc                  |  4
15 files changed, 246 insertions, 239 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index af08ddd150..b514f9bf9f 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -1672,7 +1672,7 @@ void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint, // When (non-Baker) read barriers are enabled, some instructions // use a slow path to emit a read barrier, which does not trigger // GC. - (kEmitCompilerReadBarrier && + (gUseReadBarrier && !kUseBakerReadBarrier && (instruction->IsInstanceFieldGet() || instruction->IsPredicatedInstanceFieldGet() || diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index b09219a2ed..7b46e13a44 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -57,8 +57,8 @@ static int32_t constexpr kPrimIntMax = 0x7fffffff; // Maximum value for a primitive long. static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff); -static constexpr ReadBarrierOption kCompilerReadBarrierOption = - kEmitCompilerReadBarrier ? kWithReadBarrier : kWithoutReadBarrier; +static const ReadBarrierOption gCompilerReadBarrierOption = + gUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier; class Assembler; class CodeGenerator; @@ -461,7 +461,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { // If the target class is in the boot image, it's non-moveable and it doesn't matter // if we compare it with a from-space or to-space reference, the result is the same. // It's OK to traverse a class hierarchy jumping between from-space and to-space. - return kEmitCompilerReadBarrier && !instance_of->GetTargetClass()->IsInBootImage(); + return gUseReadBarrier && !instance_of->GetTargetClass()->IsInBootImage(); } static ReadBarrierOption ReadBarrierOptionForInstanceOf(HInstanceOf* instance_of) { @@ -476,7 +476,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { case TypeCheckKind::kArrayObjectCheck: case TypeCheckKind::kInterfaceCheck: { bool needs_read_barrier = - kEmitCompilerReadBarrier && !check_cast->GetTargetClass()->IsInBootImage(); + gUseReadBarrier && !check_cast->GetTargetClass()->IsInBootImage(); // We do not emit read barriers for HCheckCast, so we can get false negatives // and the slow path shall re-check and simply return if the cast is actually OK. return !needs_read_barrier; @@ -679,7 +679,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { return LocationSummary::kCallOnMainOnly; case HLoadString::LoadKind::kJitTableAddress: DCHECK(!load->NeedsEnvironment()); - return kEmitCompilerReadBarrier + return gUseReadBarrier ? 
LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; break; diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index d3031b71e5..eb95541db1 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -583,7 +583,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 { obj_(obj), offset_(offset), index_(index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // If `obj` is equal to `out` or `ref`, it means the initial object // has been overwritten by (or after) the heap object reference load // to be instrumented, e.g.: @@ -762,7 +762,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 { public: ReadBarrierForRootSlowPathARM64(HInstruction* instruction, Location out, Location root) : SlowPathCodeARM64(instruction), out_(out), root_(root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } void EmitNativeCode(CodeGenerator* codegen) override { @@ -2058,7 +2058,7 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction, bool is_predicated = instruction->IsPredicatedInstanceFieldGet(); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, object_field_get_with_read_barrier @@ -2114,7 +2114,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction, MemOperand field = HeapOperand(InputRegisterAt(instruction, receiver_input), field_info.GetFieldOffset()); - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && + if (gUseReadBarrier && kUseBakerReadBarrier && load_type == DataType::Type::kReference) { // Object FieldGet with Baker's read barrier case. // /* HeapReference<Object> */ out = *(base + offset) @@ -2556,7 +2556,7 @@ void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, object_array_get_with_read_barrier @@ -2612,10 +2612,10 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { // does not support the HIntermediateAddress instruction. DCHECK(!((type == DataType::Type::kReference) && instruction->GetArray()->IsIntermediateAddress() && - kEmitCompilerReadBarrier && + gUseReadBarrier && !kUseBakerReadBarrier)); - if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) { // Object ArrayGet with Baker's read barrier case. // Note that a potential implicit null check is handled in the // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call. @@ -3905,7 +3905,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* ins // Temp is used for read barrier. 
static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { - if (kEmitCompilerReadBarrier && + if (gUseReadBarrier && (kUseBakerReadBarrier || type_check_kind == TypeCheckKind::kAbstractClassCheck || type_check_kind == TypeCheckKind::kClassHierarchyCheck || @@ -5320,7 +5320,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) { load_kind == HLoadClass::LoadKind::kBssEntryPublic || load_kind == HLoadClass::LoadKind::kBssEntryPackage); - const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage(); + const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage(); LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; @@ -5334,7 +5334,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) { } locations->SetOut(Location::RequiresRegister()); if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the type resolution or initialization and marking to save everything we need. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -5361,7 +5361,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA const ReadBarrierOption read_barrier_option = cls->IsInBootImage() ? kWithoutReadBarrier - : kCompilerReadBarrierOption; + : gCompilerReadBarrierOption; bool generate_null_check = false; switch (load_kind) { case HLoadClass::LoadKind::kReferrersClass: { @@ -5530,7 +5530,7 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) { } else { locations->SetOut(Location::RequiresRegister()); if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the pResolveString and marking to save everything we need. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -5584,7 +5584,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD temp, /* offset placeholder */ 0u, ldr_label, - kCompilerReadBarrierOption); + gCompilerReadBarrierOption); SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load); codegen_->AddSlowPath(slow_path); @@ -5608,7 +5608,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD out.X(), /* offset= */ 0, /* fixup_label= */ nullptr, - kCompilerReadBarrierOption); + gCompilerReadBarrierOption); return; } default: @@ -6469,7 +6469,7 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister( DataType::Type type = DataType::Type::kReference; Register out_reg = RegisterFrom(out, type); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(out + offset) @@ -6510,7 +6510,7 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters( Register out_reg = RegisterFrom(out, type); Register obj_reg = RegisterFrom(obj, type); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. 
// /* HeapReference<Object> */ out = *(obj + offset) @@ -6545,7 +6545,7 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad( DCHECK(fixup_label == nullptr || offset == 0u); Register root_reg = RegisterFrom(root, DataType::Type::kReference); if (read_barrier_option == kWithReadBarrier) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Fast path implementation of art::ReadBarrier::BarrierForRoot when // Baker's read barrier are used. @@ -6611,7 +6611,7 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad( void CodeGeneratorARM64::GenerateIntrinsicCasMoveWithBakerReadBarrier( vixl::aarch64::Register marked_old_value, vixl::aarch64::Register old_value) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR. @@ -6633,7 +6633,7 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins const vixl::aarch64::MemOperand& src, bool needs_null_check, bool use_load_acquire) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the @@ -6729,7 +6729,7 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instru uint32_t data_offset, Location index, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); static_assert( @@ -6807,7 +6807,7 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instru void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) { // The following condition is a compile-time one, so it does not have a run-time cost. - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) { + if (kIsDebugBuild && gUseReadBarrier && kUseBakerReadBarrier) { // The following condition is a run-time one; it is executed after the // previous compile-time test, to avoid penalizing non-debug builds. if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) { @@ -6836,7 +6836,7 @@ void CodeGeneratorARM64::GenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the reference load. // @@ -6861,7 +6861,7 @@ void CodeGeneratorARM64::MaybeGenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { // Baker's read barriers shall be handled by the fast path // (CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier). DCHECK(!kUseBakerReadBarrier); @@ -6876,7 +6876,7 @@ void CodeGeneratorARM64::MaybeGenerateReadBarrierSlow(HInstruction* instruction, void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the GC root load. // diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index f4d652c29c..c1984e354f 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -92,7 +92,11 @@ const vixl::aarch64::CPURegList runtime_reserved_core_registers = vixl::aarch64::CPURegList( tr, // Reserve X20 as Marking Register when emitting Baker read barriers. 
- ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) ? mr : vixl::aarch64::NoCPUReg), + // TODO: We don't need to reserve marking-register for userfaultfd GC. But + // that would require some work in the assembler code as the right GC is + // chosen at load-time and not compile time. + ((gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier + ? mr : vixl::aarch64::NoCPUReg), kImplicitSuspendCheckRegister, vixl::aarch64::lr); @@ -111,7 +115,7 @@ inline Location FixedTempLocation() { const vixl::aarch64::CPURegList callee_saved_core_registers( vixl::aarch64::CPURegister::kRegister, vixl::aarch64::kXRegSize, - ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) + ((gUseReadBarrier && kUseBakerReadBarrier) ? vixl::aarch64::x21.GetCode() : vixl::aarch64::x20.GetCode()), vixl::aarch64::x30.GetCode()); diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index 00a6c837d9..bf8e896a9c 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -744,7 +744,7 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL { obj_(obj), offset_(offset), index_(index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // If `obj` is equal to `out` or `ref`, it means the initial object // has been overwritten by (or after) the heap object reference load // to be instrumented, e.g.: @@ -922,7 +922,7 @@ class ReadBarrierForRootSlowPathARMVIXL : public SlowPathCodeARMVIXL { public: ReadBarrierForRootSlowPathARMVIXL(HInstruction* instruction, Location out, Location root) : SlowPathCodeARMVIXL(instruction), out_(out), root_(root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } void EmitNativeCode(CodeGenerator* codegen) override { @@ -2101,7 +2101,10 @@ void CodeGeneratorARMVIXL::SetupBlockedRegisters() const { blocked_core_registers_[LR] = true; blocked_core_registers_[PC] = true; - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + // TODO: We don't need to reserve marking-register for userfaultfd GC. But + // that would require some work in the assembler code as the right GC is + // chosen at load-time and not compile time. + if ((gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier) { // Reserve marking register. blocked_core_registers_[MR] = true; } @@ -5911,7 +5914,7 @@ void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction, instruction->IsPredicatedInstanceFieldGet()); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference); + gUseReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference); bool is_predicated = instruction->IsPredicatedInstanceFieldGet(); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, @@ -6082,7 +6085,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction, case DataType::Type::kReference: { // /* HeapReference<Object> */ out = *(base + offset) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { Location maybe_temp = (locations->GetTempCount() != 0) ? locations->GetTemp(0) : Location(); // Note that a potential implicit null check is handled in this // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call. 
@@ -6386,7 +6389,7 @@ void CodeGeneratorARMVIXL::StoreToShiftedRegOffset(DataType::Type type, void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, object_array_get_with_read_barrier @@ -6534,14 +6537,14 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { // The read barrier instrumentation of object ArrayGet // instructions does not support the HIntermediateAddress // instruction. - DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier)); + DCHECK(!(has_intermediate_address && gUseReadBarrier)); static_assert( sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); // /* HeapReference<Object> */ out = // *(obj + data_offset + index * sizeof(HeapReference<Object>)) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this // CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier call. DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0))); @@ -7459,7 +7462,7 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) { load_kind == HLoadClass::LoadKind::kBssEntryPublic || load_kind == HLoadClass::LoadKind::kBssEntryPackage); - const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage(); + const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage(); LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; @@ -7473,7 +7476,7 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) { } locations->SetOut(Location::RequiresRegister()); if (load_kind == HLoadClass::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the type resolution or initialization and marking to save everything we need. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -7501,7 +7504,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ const ReadBarrierOption read_barrier_option = cls->IsInBootImage() ? kWithoutReadBarrier - : kCompilerReadBarrierOption; + : gCompilerReadBarrierOption; bool generate_null_check = false; switch (load_kind) { case HLoadClass::LoadKind::kReferrersClass: { @@ -7721,7 +7724,7 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) { } else { locations->SetOut(Location::RequiresRegister()); if (load_kind == HLoadString::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the pResolveString and marking to save everything we need, including temps. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -7760,7 +7763,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE codegen_->EmitMovwMovtPlaceholder(labels, out); // All aligned loads are implicitly atomic consume operations on ARM. 
codegen_->GenerateGcRootFieldLoad( - load, out_loc, out, /*offset=*/ 0, kCompilerReadBarrierOption); + load, out_loc, out, /*offset=*/ 0, gCompilerReadBarrierOption); LoadStringSlowPathARMVIXL* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load); codegen_->AddSlowPath(slow_path); @@ -7781,7 +7784,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE load->GetString())); // /* GcRoot<mirror::String> */ out = *out codegen_->GenerateGcRootFieldLoad( - load, out_loc, out, /*offset=*/ 0, kCompilerReadBarrierOption); + load, out_loc, out, /*offset=*/ 0, gCompilerReadBarrierOption); return; } default: @@ -7838,7 +7841,7 @@ void InstructionCodeGeneratorARMVIXL::VisitThrow(HThrow* instruction) { // Temp is used for read barrier. static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { - if (kEmitCompilerReadBarrier && + if (gUseReadBarrier && (kUseBakerReadBarrier || type_check_kind == TypeCheckKind::kAbstractClassCheck || type_check_kind == TypeCheckKind::kClassHierarchyCheck || @@ -8773,7 +8776,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadOneRegister( ReadBarrierOption read_barrier_option) { vixl32::Register out_reg = RegisterFrom(out); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); DCHECK(maybe_temp.IsRegister()) << maybe_temp; if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. @@ -8808,7 +8811,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters( vixl32::Register out_reg = RegisterFrom(out); vixl32::Register obj_reg = RegisterFrom(obj); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { DCHECK(maybe_temp.IsRegister()) << maybe_temp; // Load with fast path based Baker's read barrier. @@ -8837,7 +8840,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( ReadBarrierOption read_barrier_option) { vixl32::Register root_reg = RegisterFrom(root); if (read_barrier_option == kWithReadBarrier) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Fast path implementation of art::ReadBarrier::BarrierForRoot when // Baker's read barrier are used. @@ -8901,7 +8904,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( void CodeGeneratorARMVIXL::GenerateIntrinsicCasMoveWithBakerReadBarrier( vixl::aarch32::Register marked_old_value, vixl::aarch32::Register old_value) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR. 
@@ -8935,7 +8938,7 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i vixl32::Register obj, const vixl32::MemOperand& src, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the @@ -9028,7 +9031,7 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref, Location index, Location temp, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); static_assert( @@ -9094,7 +9097,7 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref, void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) { // The following condition is a compile-time one, so it does not have a run-time cost. - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) { + if (kIsDebugBuild && gUseReadBarrier && kUseBakerReadBarrier) { // The following condition is a run-time one; it is executed after the // previous compile-time test, to avoid penalizing non-debug builds. if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) { @@ -9124,7 +9127,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the reference load. // @@ -9150,7 +9153,7 @@ void CodeGeneratorARMVIXL::MaybeGenerateReadBarrierSlow(HInstruction* instructio Location obj, uint32_t offset, Location index) { - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { // Baker's read barriers shall be handled by the fast path // (CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier). DCHECK(!kUseBakerReadBarrier); @@ -9165,7 +9168,7 @@ void CodeGeneratorARMVIXL::MaybeGenerateReadBarrierSlow(HInstruction* instructio void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the GC root load. // diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h index 790ad0f8f7..62a4d368b7 100644 --- a/compiler/optimizing/code_generator_arm_vixl.h +++ b/compiler/optimizing/code_generator_arm_vixl.h @@ -84,7 +84,7 @@ static const vixl::aarch32::RegisterList kCoreCalleeSaves = vixl::aarch32::Regis vixl::aarch32::r6, vixl::aarch32::r7), // Do not consider r8 as a callee-save register with Baker read barriers. - ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) + ((gUseReadBarrier && kUseBakerReadBarrier) ? 
vixl::aarch32::RegisterList() : vixl::aarch32::RegisterList(vixl::aarch32::r8)), vixl::aarch32::RegisterList(vixl::aarch32::r10, diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 8c4a11c5ed..f4529bec7a 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -503,7 +503,7 @@ class ReadBarrierMarkSlowPathX86 : public SlowPathCode { : SlowPathCode(instruction), ref_(ref), unpoison_ref_before_marking_(unpoison_ref_before_marking) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; } @@ -590,7 +590,7 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86 : public SlowPathCode { field_addr_(field_addr), unpoison_ref_before_marking_(unpoison_ref_before_marking), temp_(temp) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; } @@ -744,7 +744,7 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode { obj_(obj), offset_(offset), index_(index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // If `obj` is equal to `out` or `ref`, it means the initial object // has been overwritten by (or after) the heap object reference load // to be instrumented, e.g.: @@ -918,7 +918,7 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode { public: ReadBarrierForRootSlowPathX86(HInstruction* instruction, Location out, Location root) : SlowPathCode(instruction), out_(out), root_(root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } void EmitNativeCode(CodeGenerator* codegen) override { @@ -1619,7 +1619,7 @@ void CodeGeneratorX86::LoadFromMemoryNoBarrier(DataType::Type dst_type, __ movsd(dst.AsFpuRegister<XmmRegister>(), src); break; case DataType::Type::kReference: - DCHECK(!kEmitCompilerReadBarrier); + DCHECK(!gUseReadBarrier); __ movl(dst.AsRegister<Register>(), src); __ MaybeUnpoisonHeapReference(dst.AsRegister<Register>()); break; @@ -5731,11 +5731,11 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI instruction->IsPredicatedInstanceFieldGet()); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); bool is_predicated = instruction->IsPredicatedInstanceFieldGet(); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, - kEmitCompilerReadBarrier + gUseReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { @@ -5793,7 +5793,7 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction, if (load_type == DataType::Type::kReference) { // /* HeapReference<Object> */ out = *(base + offset) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call. 
codegen_->GenerateFieldLoadWithBakerReadBarrier( @@ -6202,7 +6202,7 @@ void InstructionCodeGeneratorX86::VisitNullCheck(HNullCheck* instruction) { void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, object_array_get_with_read_barrier @@ -6244,7 +6244,7 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); // /* HeapReference<Object> */ out = // *(obj + data_offset + index * sizeof(HeapReference<Object>)) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this // CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call. codegen_->GenerateArrayLoadWithBakerReadBarrier( @@ -7057,7 +7057,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) { load_kind == HLoadClass::LoadKind::kBssEntryPublic || load_kind == HLoadClass::LoadKind::kBssEntryPackage); - const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage(); + const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage(); LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; @@ -7071,7 +7071,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) { } locations->SetOut(Location::RequiresRegister()); if (call_kind == LocationSummary::kCallOnSlowPath && cls->HasPcRelativeLoadKind()) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the type resolution and/or initialization to save everything. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -7109,7 +7109,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE bool generate_null_check = false; const ReadBarrierOption read_barrier_option = cls->IsInBootImage() ? kWithoutReadBarrier - : kCompilerReadBarrierOption; + : gCompilerReadBarrierOption; switch (load_kind) { case HLoadClass::LoadKind::kReferrersClass: { DCHECK(!cls->CanCallRuntime()); @@ -7296,7 +7296,7 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) { } else { locations->SetOut(Location::RequiresRegister()); if (load_kind == HLoadString::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the pResolveString to save everything. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -7345,7 +7345,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S Address address = Address(method_address, CodeGeneratorX86::kPlaceholder32BitOffset); Label* fixup_label = codegen_->NewStringBssEntryPatch(load); // /* GcRoot<mirror::String> */ out = *address /* PC-relative */ - GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption); // No need for memory fence, thanks to the x86 memory model. 
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86(load); codegen_->AddSlowPath(slow_path); @@ -7365,7 +7365,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S Label* fixup_label = codegen_->NewJitRootStringPatch( load->GetDexFile(), load->GetStringIndex(), load->GetString()); // /* GcRoot<mirror::String> */ out = *address - GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption); return; } default: @@ -7416,7 +7416,7 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) { // Temp is used for read barrier. static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { - if (kEmitCompilerReadBarrier && + if (gUseReadBarrier && !kUseBakerReadBarrier && (type_check_kind == TypeCheckKind::kAbstractClassCheck || type_check_kind == TypeCheckKind::kClassHierarchyCheck || @@ -8188,7 +8188,7 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister( ReadBarrierOption read_barrier_option) { Register out_reg = out.AsRegister<Register>(); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(out + offset) @@ -8222,7 +8222,7 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters( Register out_reg = out.AsRegister<Register>(); Register obj_reg = obj.AsRegister<Register>(); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(obj + offset) @@ -8250,7 +8250,7 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad( ReadBarrierOption read_barrier_option) { Register root_reg = root.AsRegister<Register>(); if (read_barrier_option == kWithReadBarrier) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Fast path implementation of art::ReadBarrier::BarrierForRoot when // Baker's read barrier are used: @@ -8314,7 +8314,7 @@ void CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instr Register obj, uint32_t offset, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // /* HeapReference<Object> */ ref = *(obj + offset) @@ -8328,7 +8328,7 @@ void CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instr uint32_t data_offset, Location index, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); static_assert( @@ -8347,7 +8347,7 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i bool needs_null_check, bool always_update_field, Register* temp) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // In slow path based read barriers, the read barrier call is @@ -8428,7 +8428,7 @@ void CodeGeneratorX86::GenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the reference load. 
// @@ -8455,7 +8455,7 @@ void CodeGeneratorX86::MaybeGenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { // Baker's read barriers shall be handled by the fast path // (CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier). DCHECK(!kUseBakerReadBarrier); @@ -8470,7 +8470,7 @@ void CodeGeneratorX86::MaybeGenerateReadBarrierSlow(HInstruction* instruction, void CodeGeneratorX86::GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the GC root load. // diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 5987410893..d31a6303b4 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -510,7 +510,7 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode { : SlowPathCode(instruction), ref_(ref), unpoison_ref_before_marking_(unpoison_ref_before_marking) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86_64"; } @@ -601,7 +601,7 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86_64 : public SlowPathCode { unpoison_ref_before_marking_(unpoison_ref_before_marking), temp1_(temp1), temp2_(temp2) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } const char* GetDescription() const override { @@ -761,7 +761,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode { obj_(obj), offset_(offset), index_(index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // If `obj` is equal to `out` or `ref`, it means the initial // object has been overwritten by (or after) the heap object // reference load to be instrumented, e.g.: @@ -937,7 +937,7 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode { public: ReadBarrierForRootSlowPathX86_64(HInstruction* instruction, Location out, Location root) : SlowPathCode(instruction), out_(out), root_(root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } void EmitNativeCode(CodeGenerator* codegen) override { @@ -5013,7 +5013,7 @@ void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) { instruction->IsPredicatedInstanceFieldGet()); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); bool is_predicated = instruction->IsPredicatedInstanceFieldGet(); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, @@ -5064,7 +5064,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction, if (load_type == DataType::Type::kReference) { // /* HeapReference<Object> */ out = *(base + offset) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call. 
codegen_->GenerateFieldLoadWithBakerReadBarrier( @@ -5513,7 +5513,7 @@ void InstructionCodeGeneratorX86_64::VisitNullCheck(HNullCheck* instruction) { void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, object_array_get_with_read_barrier @@ -5551,7 +5551,7 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); // /* HeapReference<Object> */ out = // *(obj + data_offset + index * sizeof(HeapReference<Object>)) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this // CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call. codegen_->GenerateArrayLoadWithBakerReadBarrier( @@ -6352,7 +6352,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) { load_kind == HLoadClass::LoadKind::kBssEntryPublic || load_kind == HLoadClass::LoadKind::kBssEntryPackage); - const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage(); + const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage(); LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; @@ -6366,7 +6366,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) { } locations->SetOut(Location::RequiresRegister()); if (load_kind == HLoadClass::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the type resolution and/or initialization to save everything. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -6403,7 +6403,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S const ReadBarrierOption read_barrier_option = cls->IsInBootImage() ? kWithoutReadBarrier - : kCompilerReadBarrierOption; + : gCompilerReadBarrierOption; bool generate_null_check = false; switch (load_kind) { case HLoadClass::LoadKind::kReferrersClass: { @@ -6550,7 +6550,7 @@ void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) { } else { locations->SetOut(Location::RequiresRegister()); if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the pResolveString to save everything. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -6598,7 +6598,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA /* no_rip= */ false); Label* fixup_label = codegen_->NewStringBssEntryPatch(load); // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */ - GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption); // No need for memory fence, thanks to the x86-64 memory model. 
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86_64(load); codegen_->AddSlowPath(slow_path); @@ -6619,7 +6619,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA Label* fixup_label = codegen_->NewJitRootStringPatch( load->GetDexFile(), load->GetStringIndex(), load->GetString()); // /* GcRoot<mirror::String> */ out = *address - GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption); return; } default: @@ -6672,7 +6672,7 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) { // Temp is used for read barrier. static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { - if (kEmitCompilerReadBarrier && + if (gUseReadBarrier && !kUseBakerReadBarrier && (type_check_kind == TypeCheckKind::kAbstractClassCheck || type_check_kind == TypeCheckKind::kClassHierarchyCheck || @@ -7426,7 +7426,7 @@ void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister( ReadBarrierOption read_barrier_option) { CpuRegister out_reg = out.AsRegister<CpuRegister>(); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(out + offset) @@ -7460,7 +7460,7 @@ void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters( CpuRegister out_reg = out.AsRegister<CpuRegister>(); CpuRegister obj_reg = obj.AsRegister<CpuRegister>(); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(obj + offset) @@ -7488,7 +7488,7 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad( ReadBarrierOption read_barrier_option) { CpuRegister root_reg = root.AsRegister<CpuRegister>(); if (read_barrier_option == kWithReadBarrier) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Fast path implementation of art::ReadBarrier::BarrierForRoot when // Baker's read barrier are used: @@ -7552,7 +7552,7 @@ void CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* in CpuRegister obj, uint32_t offset, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // /* HeapReference<Object> */ ref = *(obj + offset) @@ -7566,7 +7566,7 @@ void CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* in uint32_t data_offset, Location index, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); static_assert( @@ -7586,7 +7586,7 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction bool always_update_field, CpuRegister* temp1, CpuRegister* temp2) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // In slow path based read barriers, the read barrier call is @@ -7668,7 +7668,7 @@ void CodeGeneratorX86_64::GenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the reference load. 
// @@ -7695,7 +7695,7 @@ void CodeGeneratorX86_64::MaybeGenerateReadBarrierSlow(HInstruction* instruction Location obj, uint32_t offset, Location index) { - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { // Baker's read barriers shall be handled by the fast path // (CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier). DCHECK(!kUseBakerReadBarrier); @@ -7710,7 +7710,7 @@ void CodeGeneratorX86_64::MaybeGenerateReadBarrierSlow(HInstruction* instruction void CodeGeneratorX86_64::GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the GC root load. // diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc index dc60ba62bb..fb8b01b75a 100644 --- a/compiler/optimizing/instruction_simplifier_shared.cc +++ b/compiler/optimizing/instruction_simplifier_shared.cc @@ -244,7 +244,7 @@ bool TryExtractArrayAccessAddress(HInstruction* access, // The access may require a runtime call or the original array pointer. return false; } - if (kEmitCompilerReadBarrier && + if (gUseReadBarrier && !kUseBakerReadBarrier && access->IsArrayGet() && access->GetType() == DataType::Type::kReference) { diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index f2d2b45da9..0feb92d734 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -392,7 +392,7 @@ void IntrinsicVisitor::CreateReferenceGetReferentLocations(HInvoke* invoke, } void IntrinsicVisitor::CreateReferenceRefersToLocations(HInvoke* invoke) { - if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) { + if (gUseReadBarrier && !kUseBakerReadBarrier) { // Unimplemented for non-Baker read barrier. return; } diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 646f4f2ea7..0ce082b12b 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -92,7 +92,7 @@ class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 { public: ReadBarrierSystemArrayCopySlowPathARM64(HInstruction* instruction, Location tmp) : SlowPathCodeARM64(instruction), tmp_(tmp) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); } @@ -711,7 +711,7 @@ static void GenUnsafeGet(HInvoke* invoke, Location trg_loc = locations->Out(); Register trg = RegisterFrom(trg_loc, type); - if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) { // UnsafeGetObject/UnsafeGetObjectVolatile with Baker's read barrier case. 
Register temp = WRegisterFrom(locations->GetTemp(0)); MacroAssembler* masm = codegen->GetVIXLAssembler(); @@ -754,7 +754,7 @@ static bool UnsafeGetIntrinsicOnCallList(Intrinsics intrinsic) { } static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) { - bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic()); + bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic()); LocationSummary* locations = new (allocator) LocationSummary(invoke, can_call @@ -1096,7 +1096,7 @@ void IntrinsicCodeGeneratorARM64::VisitJdkUnsafePutLongRelease(HInvoke* invoke) } static void CreateUnsafeCASLocations(ArenaAllocator* allocator, HInvoke* invoke) { - const bool can_call = kEmitCompilerReadBarrier && IsUnsafeCASObject(invoke); + const bool can_call = gUseReadBarrier && IsUnsafeCASObject(invoke); LocationSummary* locations = new (allocator) LocationSummary(invoke, can_call @@ -1448,7 +1448,7 @@ static void GenUnsafeCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARM6 vixl::aarch64::Label* exit_loop = &exit_loop_label; vixl::aarch64::Label* cmp_failure = &exit_loop_label; - if (kEmitCompilerReadBarrier && type == DataType::Type::kReference) { + if (gUseReadBarrier && type == DataType::Type::kReference) { // We need to store the `old_value` in a non-scratch register to make sure // the read barrier in the slow path does not clobber it. old_value = WRegisterFrom(locations->GetTemp(0)); // The old value from main path. @@ -1523,12 +1523,12 @@ void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeCompareAndSetLong(HInvoke* in } void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) { // The only supported read barrier implementation is the Baker-style read barriers. - if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) { + if (gUseReadBarrier && !kUseBakerReadBarrier) { return; } CreateUnsafeCASLocations(allocator_, invoke); - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { // We need two non-scratch temporary registers for read barrier. LocationSummary* locations = invoke->GetLocations(); if (kUseBakerReadBarrier) { @@ -1578,7 +1578,7 @@ void IntrinsicCodeGeneratorARM64::VisitJdkUnsafeCompareAndSetLong(HInvoke* invok } void IntrinsicCodeGeneratorARM64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) { // The only supported read barrier implementation is the Baker-style read barriers. - DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier); + DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier); GenUnsafeCas(invoke, DataType::Type::kReference, codegen_); } @@ -2814,7 +2814,7 @@ static constexpr int32_t kSystemArrayCopyThreshold = 128; void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) { // The only read barrier implementation supporting the // SystemArrayCopy intrinsic is the Baker-style read barriers. 
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) { + if (gUseReadBarrier && !kUseBakerReadBarrier) { return; } @@ -2866,7 +2866,7 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) { locations->AddTemp(Location::RequiresRegister()); locations->AddTemp(Location::RequiresRegister()); - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Temporary register IP0, obtained from the VIXL scratch register // pool, cannot be used in ReadBarrierSystemArrayCopySlowPathARM64 // (because that register is clobbered by ReadBarrierMarkRegX @@ -2884,7 +2884,7 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) { void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { // The only read barrier implementation supporting the // SystemArrayCopy intrinsic is the Baker-style read barriers. - DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier); + DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier); MacroAssembler* masm = GetVIXLAssembler(); LocationSummary* locations = invoke->GetLocations(); @@ -2991,7 +2991,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { UseScratchRegisterScope temps(masm); Location temp3_loc; // Used only for Baker read barrier. Register temp3; - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { temp3_loc = locations->GetTemp(2); temp3 = WRegisterFrom(temp3_loc); } else { @@ -3004,7 +3004,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { // or the destination is Object[]. If none of these checks succeed, we go to the // slow path. - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { if (!optimizations.GetSourceIsNonPrimitiveArray()) { // /* HeapReference<Class> */ temp1 = src->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke, @@ -3165,7 +3165,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { } else if (!optimizations.GetSourceIsNonPrimitiveArray()) { DCHECK(optimizations.GetDestinationIsNonPrimitiveArray()); // Bail out if the source is not a non primitive array. - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // /* HeapReference<Class> */ temp1 = src->klass_ codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke, temp1_loc, @@ -3215,7 +3215,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { __ Cbz(WRegisterFrom(length), &done); } - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // TODO: Also convert this intrinsic to the IsGcMarking strategy? 
// SystemArrayCopy implementation for Baker read barriers (see @@ -3451,7 +3451,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) { void IntrinsicLocationsBuilderARM64::VisitReferenceGetReferent(HInvoke* invoke) { IntrinsicVisitor::CreateReferenceGetReferentLocations(invoke, codegen_); - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && invoke->GetLocations() != nullptr) { + if (gUseReadBarrier && kUseBakerReadBarrier && invoke->GetLocations() != nullptr) { invoke->GetLocations()->AddTemp(Location::RequiresRegister()); } } @@ -3466,7 +3466,7 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) { SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke); codegen_->AddSlowPath(slow_path); - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { // Check self->GetWeakRefAccessEnabled(). UseScratchRegisterScope temps(masm); Register temp = temps.AcquireW(); @@ -3493,7 +3493,7 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) { // Load the value from the field. uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value(); - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke, out, WRegisterFrom(obj), @@ -3533,7 +3533,7 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceRefersTo(HInvoke* invoke) { __ Cmp(tmp, other); - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { DCHECK(kUseBakerReadBarrier); vixl::aarch64::Label calculate_result; @@ -4629,7 +4629,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke, method.X(), ArtField::DeclaringClassOffset().Int32Value(), /*fixup_label=*/ nullptr, - kCompilerReadBarrierOption); + gCompilerReadBarrierOption); } } } else { @@ -4683,7 +4683,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke) { } // Add a temporary for offset. - if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) && + if ((gUseReadBarrier && !kUseBakerReadBarrier) && GetExpectedVarHandleCoordinatesCount(invoke) == 0u) { // For static fields. // To preserve the offset value across the non-Baker read barrier slow path // for loading the declaring class, use a fixed callee-save register. @@ -4706,7 +4706,7 @@ static void CreateVarHandleGetLocations(HInvoke* invoke) { return; } - if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) && + if ((gUseReadBarrier && !kUseBakerReadBarrier) && invoke->GetType() == DataType::Type::kReference && invoke->GetIntrinsic() != Intrinsics::kVarHandleGet && invoke->GetIntrinsic() != Intrinsics::kVarHandleGetOpaque) { @@ -4746,7 +4746,7 @@ static void GenerateVarHandleGet(HInvoke* invoke, DCHECK(use_load_acquire || order == std::memory_order_relaxed); // Load the value from the target location. - if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) { // Piggy-back on the field load path using introspection for the Baker read barrier. // The `target.offset` is a temporary, use it for field address. 
@@ -4746,7 +4746,7 @@ static void GenerateVarHandleGet(HInvoke* invoke,
   DCHECK(use_load_acquire || order == std::memory_order_relaxed);

   // Load the value from the target location.
-  if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
     // Piggy-back on the field load path using introspection for the Baker read barrier.
     // The `target.offset` is a temporary, use it for field address.
     Register tmp_ptr = target.offset.X();
@@ -4947,7 +4947,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
   uint32_t number_of_arguments = invoke->GetNumberOfArguments();
   DataType::Type value_type = GetDataTypeFromShorty(invoke, number_of_arguments - 1u);

-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       value_type == DataType::Type::kReference) {
     // Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
     // the passed reference and reloads it from the field. This breaks the read barriers
@@ -4961,7 +4961,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo

   LocationSummary* locations = CreateVarHandleCommonLocations(invoke);

-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     // We need callee-save registers for both the class object and offset instead of
     // the temporaries reserved in CreateVarHandleCommonLocations().
     static_assert(POPCOUNT(kArm64CalleeSaveRefSpills) >= 2u);
@@ -5002,7 +5002,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
       locations->AddTemp(Location::RequiresRegister());
     }
   }
-  if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     // Add a temporary for the `old_value_temp` in slow path.
     locations->AddTemp(Location::RequiresRegister());
   }
@@ -5068,7 +5068,7 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
   // except for references that need the offset for the read barrier.
   UseScratchRegisterScope temps(masm);
   Register tmp_ptr = target.offset.X();
-  if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     tmp_ptr = temps.AcquireX();
   }
   __ Add(tmp_ptr, target.object.X(), target.offset.X());
@@ -5151,7 +5151,7 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
   vixl::aarch64::Label* exit_loop = &exit_loop_label;
   vixl::aarch64::Label* cmp_failure = &exit_loop_label;

-  if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     // The `old_value_temp` is used first for the marked `old_value` and then for the unmarked
     // reloaded old value for subsequent CAS in the slow path. It cannot be a scratch register.
     size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
@@ -5296,7 +5296,7 @@ static void CreateVarHandleGetAndUpdateLocations(HInvoke* invoke,
     return;
   }

-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       invoke->GetType() == DataType::Type::kReference) {
     // Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
     // the passed reference and reloads it from the field, thus seeing the new value
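The `old_value_temp` comments above hint at the subtle part of a reference CAS under a concurrent copying GC: the field may hold a from-space pointer while `expected` is a to-space reference. A rough model of the slow-path loop, with Mark() standing in for the read barrier (identity here so the sketch compiles; the real barrier would return the to-space copy):

#include <atomic>

struct Obj {};

Obj* Mark(Obj* ref) { return ref; }  // stand-in for the read barrier

// Compare after marking, but retry the hardware CAS with the exact bits
// currently stored in the field, so a from-space old value cannot cause a
// spurious failure against a to-space `expected`.
bool CasObject(std::atomic<Obj*>& field, Obj* expected, Obj* desired) {
  Obj* old_value = field.load(std::memory_order_relaxed);
  while (Mark(old_value) == expected) {
    if (field.compare_exchange_weak(old_value, desired,
                                    std::memory_order_seq_cst)) {
      return true;
    }
    // On failure, compare_exchange_weak reloaded `old_value`; it is
    // re-marked and re-compared on the next iteration.
  }
  return false;  // the field holds no copy of `expected`
}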
@@ -5372,7 +5372,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
   // except for references that need the offset for the non-Baker read barrier.
   UseScratchRegisterScope temps(masm);
   Register tmp_ptr = target.offset.X();
-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       value_type == DataType::Type::kReference) {
     tmp_ptr = temps.AcquireX();
   }
@@ -5402,7 +5402,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
     // the new value unless it is zero bit pattern (+0.0f or +0.0) and need another one
     // in GenerateGetAndUpdate(). We have allocated a normal temporary to handle that.
     old_value = CPURegisterFrom(locations->GetTemp(1u), load_store_type);
-  } else if ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) &&
+  } else if ((gUseReadBarrier && kUseBakerReadBarrier) &&
              value_type == DataType::Type::kReference) {
     // Load the old value initially to a scratch register.
     // We shall move it to `out` later with a read barrier.
@@ -5450,7 +5450,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
     __ Sxtb(out.W(), old_value.W());
   } else if (value_type == DataType::Type::kInt16) {
     __ Sxth(out.W(), old_value.W());
-  } else if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     if (kUseBakerReadBarrier) {
       codegen->GenerateIntrinsicCasMoveWithBakerReadBarrier(out.W(), old_value.W());
     } else {
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index d850cadc2b..da47fa6cf0 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -120,7 +120,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
  public:
   explicit ReadBarrierSystemArrayCopySlowPathARMVIXL(HInstruction* instruction)
       : SlowPathCodeARMVIXL(instruction) {
-    DCHECK(kEmitCompilerReadBarrier);
+    DCHECK(gUseReadBarrier);
     DCHECK(kUseBakerReadBarrier);
   }
@@ -1242,7 +1242,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromString(HInvoke* invo
 void IntrinsicLocationsBuilderARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -1265,7 +1265,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
   if (length != nullptr && !assembler_->ShifterOperandCanAlwaysHold(length->GetValue())) {
     locations->SetInAt(4, Location::RequiresRegister());
   }
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // Temporary register IP cannot be used in
     // ReadBarrierSystemArrayCopySlowPathARM (because that register
     // is clobbered by ReadBarrierMarkRegX entry points). Get an extra
@@ -1339,7 +1339,7 @@ static void CheckPosition(ArmVIXLAssembler* assembler,
 void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   ArmVIXLAssembler* assembler = GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -1453,7 +1453,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
     // or the destination is Object[]. If none of these checks succeed, we go to the
     // slow path.
-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       if (!optimizations.GetSourceIsNonPrimitiveArray()) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
@@ -1584,7 +1584,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
     } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
       DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
       // Bail out if the source is not a non primitive array.
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
             invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
@@ -1621,7 +1621,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
       __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target= */ false);
     }

-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       // TODO: Also convert this intrinsic to the IsGcMarking strategy?

       // SystemArrayCopy implementation for Baker read barriers (see
@@ -2511,7 +2511,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitReferenceGetReferent(HInvoke* invoke) {
   SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
   codegen_->AddSlowPath(slow_path);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     // Check self->GetWeakRefAccessEnabled().
     UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
     vixl32::Register temp = temps.Acquire();
@@ -2539,7 +2539,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitReferenceGetReferent(HInvoke* invoke) {
   // Load the value from the field.
   uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
                                                     out,
                                                     RegisterFrom(obj),
@@ -2587,7 +2587,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitReferenceRefersTo(HInvoke* invoke) {
   assembler->MaybeUnpoisonHeapReference(tmp);
   codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);  // `referent` is volatile.

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     DCHECK(kUseBakerReadBarrier);

     vixl32::Label calculate_result;
@@ -2613,7 +2613,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitReferenceRefersTo(HInvoke* invoke) {
     __ Bind(&calculate_result);
   } else {
-    DCHECK(!kEmitCompilerReadBarrier);
+    DCHECK(!gUseReadBarrier);
     __ Sub(out, tmp, other);
   }
@@ -2732,7 +2732,7 @@ static void GenerateIntrinsicGet(HInvoke* invoke,
       }
       break;
     case DataType::Type::kReference:
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
        // Piggy-back on the field load path using introspection for the Baker read barrier.
         vixl32::Register temp = RegisterFrom(maybe_temp);
         __ Add(temp, base, offset);
@@ -2777,7 +2777,7 @@ static void GenerateIntrinsicGet(HInvoke* invoke,
     codegen->GenerateMemoryBarrier(
         seq_cst_barrier ? MemBarrierKind::kAnyAny : MemBarrierKind::kLoadAny);
   }
-  if (type == DataType::Type::kReference && !(kEmitCompilerReadBarrier && kUseBakerReadBarrier)) {
+  if (type == DataType::Type::kReference && !(gUseReadBarrier && kUseBakerReadBarrier)) {
     Location base_loc = LocationFrom(base);
     Location index_loc = LocationFrom(offset);
     codegen->MaybeGenerateReadBarrierSlow(invoke, out, out, base_loc, /* offset=*/ 0u, index_loc);
@@ -2802,7 +2802,7 @@ static void CreateUnsafeGetLocations(HInvoke* invoke,
                                      CodeGeneratorARMVIXL* codegen,
                                      DataType::Type type,
                                      bool atomic) {
-  bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+  bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
   ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
@@ -2818,7 +2818,7 @@ static void CreateUnsafeGetLocations(HInvoke* invoke,
   locations->SetInAt(2, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(),
                     (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
-  if ((kEmitCompilerReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
+  if ((gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
      (type == DataType::Type::kInt64 && Use64BitExclusiveLoadStore(atomic, codegen))) {
     // We need a temporary register for the read barrier marking slow
     // path in CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier,
@@ -2837,7 +2837,7 @@ static void GenUnsafeGet(HInvoke* invoke,
   vixl32::Register offset = LowRegisterFrom(locations->InAt(2));  // Long offset, lo part only.
   Location out = locations->Out();
   Location maybe_temp = Location::NoLocation();
-  if ((kEmitCompilerReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
+  if ((gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
       (type == DataType::Type::kInt64 && Use64BitExclusiveLoadStore(atomic, codegen))) {
     maybe_temp = locations->GetTemp(0);
   }
@@ -3470,7 +3470,7 @@ static void GenerateCompareAndSet(CodeGeneratorARMVIXL* codegen,
   // branch goes to the read barrier slow path that clobbers `success` anyway.
   bool init_failure_for_cmp =
       success.IsValid() &&
-      !(kEmitCompilerReadBarrier && type == DataType::Type::kReference && expected.IsRegister());
+      !(gUseReadBarrier && type == DataType::Type::kReference && expected.IsRegister());
   // Instruction scheduling: Loading a constant between LDREX* and using the loaded value
   // is essentially free, so prepare the failure value here if we can.
   bool init_failure_for_cmp_early =
@@ -3655,7 +3655,7 @@ class ReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL {
 };

 static void CreateUnsafeCASLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  const bool can_call = kEmitCompilerReadBarrier && IsUnsafeCASObject(invoke);
+  const bool can_call = gUseReadBarrier && IsUnsafeCASObject(invoke);
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -3706,7 +3706,7 @@ static void GenUnsafeCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMV
   vixl32::Label* exit_loop = &exit_loop_label;
   vixl32::Label* cmp_failure = &exit_loop_label;

-  if (kEmitCompilerReadBarrier && type == DataType::Type::kReference) {
+  if (gUseReadBarrier && type == DataType::Type::kReference) {
     // If marking, check if the stored reference is a from-space reference to the same
     // object as the to-space reference `expected`. If so, perform a custom CAS loop.
     ReadBarrierCasSlowPathARMVIXL* slow_path =
@@ -3770,7 +3770,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitJdkUnsafeCompareAndSetInt(HInvoke* i
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers (b/173104084).
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -3798,7 +3798,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitJdkUnsafeCompareAndSetInt(HInvoke* invo
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers (b/173104084).
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
 }
@@ -4351,7 +4351,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
                             LocationFrom(target.object),
                             method,
                             ArtField::DeclaringClassOffset().Int32Value(),
-                            kCompilerReadBarrierOption);
+                            gCompilerReadBarrierOption);
       }
     }
   } else {
@@ -4403,7 +4403,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke) {
   }

   // Add a temporary for offset.
-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       GetExpectedVarHandleCoordinatesCount(invoke) == 0u) {  // For static fields.
     // To preserve the offset value across the non-Baker read barrier slow path
     // for loading the declaring class, use a fixed callee-save register.
@@ -4428,7 +4428,7 @@ static void CreateVarHandleGetLocations(HInvoke* invoke,
     return;
   }

-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       invoke->GetType() == DataType::Type::kReference &&
       invoke->GetIntrinsic() != Intrinsics::kVarHandleGet &&
       invoke->GetIntrinsic() != Intrinsics::kVarHandleGetOpaque) {
@@ -4476,7 +4476,7 @@ static void GenerateVarHandleGet(HInvoke* invoke,
   Location maybe_temp = Location::NoLocation();
   Location maybe_temp2 = Location::NoLocation();
   Location maybe_temp3 = Location::NoLocation();
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) {
+  if (gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) {
     // Reuse the offset temporary.
     maybe_temp = LocationFrom(target.offset);
   } else if (DataType::Is64BitType(type) && Use64BitExclusiveLoadStore(atomic, codegen)) {
@@ -4749,7 +4749,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
   uint32_t number_of_arguments = invoke->GetNumberOfArguments();
   DataType::Type value_type = GetDataTypeFromShorty(invoke, number_of_arguments - 1u);

-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       value_type == DataType::Type::kReference) {
     // Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
     // the passed reference and reloads it from the field. This breaks the read barriers
@@ -4763,7 +4763,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo

   LocationSummary* locations = CreateVarHandleCommonLocations(invoke);

-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     // We need callee-save registers for both the class object and offset instead of
     // the temporaries reserved in CreateVarHandleCommonLocations().
     static_assert(POPCOUNT(kArmCalleeSaveRefSpills) >= 2u);
@@ -4799,7 +4799,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
       locations->AddRegisterTemps(2u);
     }
   }
-  if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     // Add a temporary for store result, also used for the `old_value_temp` in slow path.
     locations->AddTemp(Location::RequiresRegister());
   }
@@ -4930,7 +4930,7 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
   vixl32::Label* exit_loop = &exit_loop_label;
   vixl32::Label* cmp_failure = &exit_loop_label;

-  if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     // The `old_value_temp` is used first for the marked `old_value` and then for the unmarked
     // reloaded old value for subsequent CAS in the slow path. This must not clobber `old_value`.
     vixl32::Register old_value_temp = return_success ? RegisterFrom(out) : store_result;
@@ -5086,7 +5086,7 @@ static void CreateVarHandleGetAndUpdateLocations(HInvoke* invoke,
     return;
   }

-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       invoke->GetType() == DataType::Type::kReference) {
     // Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
     // the passed reference and reloads it from the field, thus seeing the new value
@@ -5107,7 +5107,7 @@ static void CreateVarHandleGetAndUpdateLocations(HInvoke* invoke,
     // Add temps needed to do the GenerateGetAndUpdate() with core registers.
     size_t temps_needed = (value_type == DataType::Type::kFloat64) ? 5u : 3u;
     locations->AddRegisterTemps(temps_needed - locations->GetTempCount());
-  } else if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  } else if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
              value_type == DataType::Type::kReference) {
     // We need to preserve the declaring class (if present) and offset for read barrier
     // slow paths, so we must use a separate temporary for the exclusive store result.
@@ -5213,7 +5213,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
     if (byte_swap) {
       GenerateReverseBytes(assembler, DataType::Type::kInt32, arg, arg);
     }
-  } else if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     if (kUseBakerReadBarrier) {
       // Load the old value initially to a temporary register.
       // We shall move it to `out` later with a read barrier.
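GenerateIntrinsicGet's barrier selection above (`seq_cst_barrier ? kAnyAny : kLoadAny`) has a close C++ analogy; this sketch is illustrative only, not the emitted ARM code:

#include <atomic>

// A volatile/seq-cst get needs a full fence after the load ("any-any"),
// while an acquire get only needs to order the load against later accesses
// ("load-any").
int LoadWithOrder(const std::atomic<int>& field, bool seq_cst_barrier) {
  int value = field.load(std::memory_order_relaxed);
  std::atomic_thread_fence(seq_cst_barrier ? std::memory_order_seq_cst
                                           : std::memory_order_acquire);
  return value;
}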
@@ -5296,7 +5296,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
     } else {
       __ Vmov(SRegisterFrom(out), RegisterFrom(old_value));
     }
-  } else if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     if (kUseBakerReadBarrier) {
       codegen->GenerateIntrinsicCasMoveWithBakerReadBarrier(RegisterFrom(out),
                                                             RegisterFrom(old_value));
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 7d90aae984..0f6eb8638a 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -75,7 +75,7 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode {
  public:
   explicit ReadBarrierSystemArrayCopySlowPathX86(HInstruction* instruction)
       : SlowPathCode(instruction) {
-    DCHECK(kEmitCompilerReadBarrier);
+    DCHECK(gUseReadBarrier);
     DCHECK(kUseBakerReadBarrier);
   }
@@ -1699,7 +1699,7 @@ static void GenUnsafeGet(HInvoke* invoke,
     case DataType::Type::kReference: {
       Register output = output_loc.AsRegister<Register>();
-      if (kEmitCompilerReadBarrier) {
+      if (gUseReadBarrier) {
         if (kUseBakerReadBarrier) {
           Address src(base, offset, ScaleFactor::TIMES_1, 0);
           codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -1757,7 +1757,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
                                           HInvoke* invoke,
                                           DataType::Type type,
                                           bool is_volatile) {
-  bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+  bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -2103,7 +2103,7 @@ void IntrinsicCodeGeneratorX86::VisitJdkUnsafePutLongRelease(HInvoke* invoke) {
 static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
                                        DataType::Type type,
                                        HInvoke* invoke) {
-  const bool can_call = kEmitCompilerReadBarrier &&
+  const bool can_call = gUseReadBarrier &&
       kUseBakerReadBarrier &&
       IsUnsafeCASObject(invoke);
   LocationSummary* locations =
@@ -2175,7 +2175,7 @@ void IntrinsicLocationsBuilderX86::VisitJdkUnsafeCompareAndSetLong(HInvoke* invo
 void IntrinsicLocationsBuilderX86::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -2304,7 +2304,7 @@ static void GenReferenceCAS(HInvoke* invoke,
   DCHECK_EQ(expected, EAX);
   DCHECK_NE(temp, temp2);

-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // Need to make sure the reference stored in the field is a to-space
     // one before attempting the CAS or the CAS could fail incorrectly.
     codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -2391,7 +2391,7 @@ static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codeg
   if (type == DataType::Type::kReference) {
     // The only read barrier implementation supporting the
     // UnsafeCASObject intrinsic is the Baker-style read barriers.
-    DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+    DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

     Register temp = locations->GetTemp(0).AsRegister<Register>();
     Register temp2 = locations->GetTemp(1).AsRegister<Register>();
@@ -2413,7 +2413,7 @@ void IntrinsicCodeGeneratorX86::VisitUnsafeCASLong(HInvoke* invoke) {
 void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // UnsafeCASObject intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   GenCAS(DataType::Type::kReference, invoke, codegen_);
 }
@@ -2443,7 +2443,7 @@ void IntrinsicCodeGeneratorX86::VisitJdkUnsafeCompareAndSetLong(HInvoke* invoke)
 void IntrinsicCodeGeneratorX86::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   GenCAS(DataType::Type::kReference, invoke, codegen_);
 }
@@ -2843,7 +2843,7 @@ static void GenSystemArrayCopyEndAddress(X86Assembler* assembler,
 void IntrinsicLocationsBuilderX86::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -2875,7 +2875,7 @@ void IntrinsicLocationsBuilderX86::VisitSystemArrayCopy(HInvoke* invoke) {
 void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -2995,7 +2995,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
     // slow path.

     if (!optimizations.GetSourceIsNonPrimitiveArray()) {
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
             invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -3022,7 +3022,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
       __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
     }

-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       if (length.Equals(Location::RegisterLocation(temp3))) {
         // When Baker read barriers are enabled, register `temp3`,
         // which in the present case contains the `length` parameter,
@@ -3120,7 +3120,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
     } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
       DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
       // Bail out if the source is not a non primitive array.
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
             invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -3151,7 +3151,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
     // Compute the base source address in `temp1`.
     GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);

-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       // If it is needed (in the case of the fast-path loop), the base
       // destination address is computed later, as `temp2` is used for
       // intermediate computations.
@@ -3377,7 +3377,7 @@ void IntrinsicCodeGeneratorX86::VisitReferenceGetReferent(HInvoke* invoke) {
   SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
   codegen_->AddSlowPath(slow_path);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     // Check self->GetWeakRefAccessEnabled().
     ThreadOffset32 offset = Thread::WeakRefAccessEnabledOffset<kX86PointerSize>();
     __ fs()->cmpl(Address::Absolute(offset),
@@ -3400,7 +3400,7 @@ void IntrinsicCodeGeneratorX86::VisitReferenceGetReferent(HInvoke* invoke) {
   // Load the value from the field.
   uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
                                                     out,
                                                     obj.AsRegister<Register>(),
@@ -3442,7 +3442,7 @@ void IntrinsicCodeGeneratorX86::VisitReferenceRefersTo(HInvoke* invoke) {
   NearLabel end, return_true, return_false;
   __ cmpl(out, other);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     DCHECK(kUseBakerReadBarrier);

     __ j(kEqual, &return_true);
@@ -3781,7 +3781,7 @@ static Register GenerateVarHandleFieldReference(HInvoke* invoke,
                           Location::RegisterLocation(temp),
                           Address(temp, declaring_class_offset),
                           /* fixup_label= */ nullptr,
-                          kCompilerReadBarrierOption);
+                          gCompilerReadBarrierOption);
   return temp;
 }
@@ -3794,7 +3794,7 @@ static Register GenerateVarHandleFieldReference(HInvoke* invoke,
 static void CreateVarHandleGetLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
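The refersTo() hunks above split on the same flag: without read barriers a raw pointer comparison decides the result, while with Baker read barriers an unequal raw comparison may still name the same object through its from-space copy. A hedged, simplified model (it omits the "is the GC marking" fast check that the real code performs; Mark() again stands in for the read barrier):

struct Obj {};

Obj* Mark(Obj* ref) { return ref; }  // stand-in; would return the to-space copy
bool gUseReadBarrier = true;         // illustrative stand-in

bool RefersTo(Obj* referent, Obj* other) {
  if (referent == other) {
    return true;
  }
  // Only worth re-comparing when a concurrent copying GC may have two
  // copies of the object; otherwise unequal pointers mean distinct objects.
  return gUseReadBarrier && Mark(referent) == other;
}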
@@ -3836,7 +3836,7 @@ static void GenerateVarHandleGet(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -3860,7 +3860,7 @@ static void GenerateVarHandleGet(HInvoke* invoke, CodeGeneratorX86* codegen) {
   Address field_addr(ref, offset, TIMES_1, 0);

   // Load the value from the field
-  if (type == DataType::Type::kReference && kCompilerReadBarrierOption == kWithReadBarrier) {
+  if (type == DataType::Type::kReference && gCompilerReadBarrierOption == kWithReadBarrier) {
     codegen->GenerateReferenceLoadWithBakerReadBarrier(
         invoke, out, ref, field_addr, /* needs_null_check= */ false);
   } else if (type == DataType::Type::kInt64 &&
@@ -3917,7 +3917,7 @@ void IntrinsicCodeGeneratorX86::VisitVarHandleGetOpaque(HInvoke* invoke) {
 static void CreateVarHandleSetLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -3990,7 +3990,7 @@ static void CreateVarHandleSetLocations(HInvoke* invoke) {
 static void GenerateVarHandleSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -4087,7 +4087,7 @@ void IntrinsicCodeGeneratorX86::VisitVarHandleSetOpaque(HInvoke* invoke) {
 static void CreateVarHandleGetAndSetLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -4135,7 +4135,7 @@ static void CreateVarHandleGetAndSetLocations(HInvoke* invoke) {
 static void GenerateVarHandleGetAndSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -4194,7 +4194,7 @@ static void GenerateVarHandleGetAndSet(HInvoke* invoke, CodeGeneratorX86* codege
       __ movd(locations->Out().AsFpuRegister<XmmRegister>(), EAX);
       break;
     case DataType::Type::kReference: {
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // Need to make sure the reference stored in the field is a to-space
         // one before attempting the CAS or the CAS could fail incorrectly.
         codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -4258,7 +4258,7 @@ void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndSetRelease(HInvoke* invoke)
 static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -4322,7 +4322,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke) {
 static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -4441,7 +4441,7 @@ void IntrinsicCodeGeneratorX86::VisitVarHandleCompareAndExchangeRelease(HInvoke*
 static void CreateVarHandleGetAndAddLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -4490,7 +4490,7 @@ static void CreateVarHandleGetAndAddLocations(HInvoke* invoke) {
 static void GenerateVarHandleGetAndAdd(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -4591,7 +4591,7 @@ void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndAddRelease(HInvoke* invoke)
 static void CreateVarHandleGetAndBitwiseOpLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -4659,7 +4659,7 @@ static void GenerateBitwiseOp(HInvoke* invoke,
 static void GenerateVarHandleGetAndBitwiseOp(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 3c31374f67..9921d907d5 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -71,7 +71,7 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode {
  public:
   explicit ReadBarrierSystemArrayCopySlowPathX86_64(HInstruction* instruction)
       : SlowPathCode(instruction) {
-    DCHECK(kEmitCompilerReadBarrier);
+    DCHECK(gUseReadBarrier);
     DCHECK(kUseBakerReadBarrier);
   }
@@ -836,7 +836,7 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopyInt(HInvoke* invoke) {
 void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -887,7 +887,7 @@ static void GenSystemArrayCopyAddresses(X86_64Assembler* assembler,
 void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86_64Assembler* assembler = GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -1002,7 +1002,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
     // slow path.

     bool did_unpoison = false;
-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = dest->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
           invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
@@ -1034,7 +1034,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
     if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
       // Bail out if the destination is not a non primitive array.
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ TMP = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
             invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
@@ -1055,7 +1055,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
     if (!optimizations.GetSourceIsNonPrimitiveArray()) {
       // Bail out if the source is not a non primitive array.
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // For the same reason given earlier, `temp1` is not trashed by the
         // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
         // /* HeapReference<Class> */ TMP = temp2->component_type_
@@ -1081,7 +1081,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
     if (optimizations.GetDestinationIsTypedObjectArray()) {
       NearLabel do_copy;
       __ j(kEqual, &do_copy);
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = temp1->component_type_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
             invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
@@ -1109,7 +1109,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
     } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
       DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
       // Bail out if the source is not a non primitive array.
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
             invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -1141,7 +1141,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
     GenSystemArrayCopyAddresses(
         GetAssembler(), type, src, src_pos, dest, dest_pos, length, temp1, temp2, temp3);

-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       // SystemArrayCopy implementation for Baker read barriers (see
       // also CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier):
       //
@@ -1888,7 +1888,7 @@ static void GenUnsafeGet(HInvoke* invoke,
       break;

     case DataType::Type::kReference: {
-      if (kEmitCompilerReadBarrier) {
+      if (gUseReadBarrier) {
         if (kUseBakerReadBarrier) {
           Address src(base, offset, ScaleFactor::TIMES_1, 0);
           codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -1930,7 +1930,7 @@ static bool UnsafeGetIntrinsicOnCallList(Intrinsics intrinsic) {
 }

 static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+  bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -2230,7 +2230,7 @@ void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafePutLongRelease(HInvoke* invoke)
 static void CreateUnsafeCASLocations(ArenaAllocator* allocator,
                                      DataType::Type type,
                                      HInvoke* invoke) {
-  const bool can_call = kEmitCompilerReadBarrier &&
+  const bool can_call = gUseReadBarrier &&
      kUseBakerReadBarrier &&
      IsUnsafeCASObject(invoke);
   LocationSummary* locations =
@@ -2253,7 +2253,7 @@ static void CreateUnsafeCASLocations(ArenaAllocator* allocator,
     // Need two temporaries for MarkGCCard.
     locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     locations->AddTemp(Location::RequiresRegister());
-    if (kEmitCompilerReadBarrier) {
+    if (gUseReadBarrier) {
       // Need three temporaries for GenerateReferenceLoadWithBakerReadBarrier.
       DCHECK(kUseBakerReadBarrier);
       locations->AddTemp(Location::RequiresRegister());
@@ -2298,7 +2298,7 @@ void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeCompareAndSetLong(HInvoke* i
 void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
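Sketch of the temp-count bookkeeping in CreateUnsafeCASLocations above, with the LocationSummary API reduced to a counter (illustrative only; the temp roles follow the comments in the hunk):

bool gUseReadBarrier = true;  // illustrative stand-in

// A reference CAS always reserves two temporaries for MarkGCCard (one may
// double for reference poisoning); Baker read barriers add three more for
// GenerateReferenceLoadWithBakerReadBarrier.
int ReferenceCasTempCount() {
  int temps = 2;
  if (gUseReadBarrier) {
    temps += 3;  // only the Baker flavor reaches this code
  }
  return temps;
}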
@@ -2438,7 +2438,7 @@ static void GenCompareAndSetOrExchangeRef(CodeGeneratorX86_64* codegen,
                                           CpuRegister temp3,
                                           bool is_cmpxchg) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
@@ -2447,7 +2447,7 @@ static void GenCompareAndSetOrExchangeRef(CodeGeneratorX86_64* codegen,
   codegen->MarkGCCard(temp1, temp2, base, value, value_can_be_null);

   Address field_addr(base, offset, TIMES_1, 0);
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // Need to make sure the reference stored in the field is a to-space
     // one before attempting the CAS or the CAS could fail incorrectly.
     codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -2556,7 +2556,7 @@ static void GenCompareAndSetOrExchange(CodeGeneratorX86_64* codegen,
   CpuRegister new_value_reg = new_value.AsRegister<CpuRegister>();
   CpuRegister temp1 = locations->GetTemp(temp1_index).AsRegister<CpuRegister>();
   CpuRegister temp2 = locations->GetTemp(temp2_index).AsRegister<CpuRegister>();
-  CpuRegister temp3 = kEmitCompilerReadBarrier
+  CpuRegister temp3 = gUseReadBarrier
       ? locations->GetTemp(temp3_index).AsRegister<CpuRegister>()
       : CpuRegister(kNoRegister);
   DCHECK(RegsAreAllDifferent({base, offset, temp1, temp2, temp3}));
@@ -2624,7 +2624,7 @@ void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafeCompareAndSetLong(HInvoke* invo
 void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   GenCAS(DataType::Type::kReference, invoke, codegen_);
 }
@@ -3128,7 +3128,7 @@ void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
   SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
   codegen_->AddSlowPath(slow_path);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     // Check self->GetWeakRefAccessEnabled().
     ThreadOffset64 offset = Thread::WeakRefAccessEnabledOffset<kX86_64PointerSize>();
     __ gs()->cmpl(Address::Absolute(offset, /* no_rip= */ true),
@@ -3150,7 +3150,7 @@ void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
   // Load the value from the field.
   uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
                                                     out,
                                                     obj.AsRegister<CpuRegister>(),
@@ -3191,7 +3191,7 @@ void IntrinsicCodeGeneratorX86_64::VisitReferenceRefersTo(HInvoke* invoke) {
   __ cmpl(out, other);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     DCHECK(kUseBakerReadBarrier);

     NearLabel calculate_result;
@@ -3771,7 +3771,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
                             Location::RegisterLocation(target.object),
                             Address(method, ArtField::DeclaringClassOffset()),
                             /*fixup_label=*/ nullptr,
-                            kCompilerReadBarrierOption);
+                            gCompilerReadBarrierOption);
       }
     }
   } else {
@@ -3790,7 +3790,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
 static bool HasVarHandleIntrinsicImplementation(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return false;
   }
@@ -3876,7 +3876,7 @@ static void GenerateVarHandleGet(HInvoke* invoke,
   Location out = locations->Out();

   if (type == DataType::Type::kReference) {
-    if (kEmitCompilerReadBarrier) {
+    if (gUseReadBarrier) {
       DCHECK(kUseBakerReadBarrier);
       codegen->GenerateReferenceLoadWithBakerReadBarrier(
           invoke, out, CpuRegister(target.object), src, /* needs_null_check= */ false);
@@ -4070,7 +4070,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke) {
     // Need two temporaries for MarkGCCard.
     locations->AddTemp(Location::RequiresRegister());
     locations->AddTemp(Location::RequiresRegister());
-    if (kEmitCompilerReadBarrier) {
+    if (gUseReadBarrier) {
       // Need three temporaries for GenerateReferenceLoadWithBakerReadBarrier.
       DCHECK(kUseBakerReadBarrier);
       locations->AddTemp(Location::RequiresRegister());
@@ -4085,7 +4085,7 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
                                                      CodeGeneratorX86_64* codegen,
                                                      bool is_cmpxchg,
                                                      bool byte_swap = false) {
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86_64Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -4218,7 +4218,7 @@ static void CreateVarHandleGetAndSetLocations(HInvoke* invoke) {
     // Need two temporaries for MarkGCCard.
     locations->AddTemp(Location::RequiresRegister());
     locations->AddTemp(Location::RequiresRegister());
-    if (kEmitCompilerReadBarrier) {
+    if (gUseReadBarrier) {
       // Need a third temporary for GenerateReferenceLoadWithBakerReadBarrier.
       DCHECK(kUseBakerReadBarrier);
       locations->AddTemp(Location::RequiresRegister());
@@ -4267,7 +4267,7 @@ static void GenerateVarHandleGetAndSet(HInvoke* invoke,
   CpuRegister temp2 = locations->GetTemp(temp_count - 2).AsRegister<CpuRegister>();
   CpuRegister valreg = value.AsRegister<CpuRegister>();

-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     codegen->GenerateReferenceLoadWithBakerReadBarrier(
         invoke,
         locations->GetTemp(temp_count - 3),
@@ -4647,7 +4647,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
                                           bool need_any_store_barrier,
                                           bool need_any_any_barrier,
                                           bool byte_swap = false) {
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86_64Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 965e1bd9f4..25dd1047e5 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -669,7 +669,7 @@ void SchedulingLatencyVisitorARM::VisitArrayGet(HArrayGet* instruction) {
     }

     case DataType::Type::kReference: {
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         last_visited_latency_ = kArmLoadWithBakerReadBarrierLatency;
       } else {
         if (index->IsConstant()) {
@@ -937,7 +937,7 @@ void SchedulingLatencyVisitorARM::HandleFieldGetLatencies(HInstruction* instruct
       break;

     case DataType::Type::kReference:
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         last_visited_internal_latency_ = kArmMemoryLoadLatency + kArmIntegerOpLatency;
         last_visited_latency_ = kArmMemoryLoadLatency;
       } else {
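A small illustrative model of the scheduler split above: a reference load is priced as a plain memory load unless a Baker read barrier will be emitted, which the ARM scheduler charges at a higher fixed latency. The numeric values below are placeholders, not ART's tuned constants:

bool gUseReadBarrier = true;                 // illustrative stand-in
constexpr bool kUseBakerReadBarrier = true;  // illustrative stand-in
constexpr int kArmMemoryLoadLatency = 9;                 // placeholder value
constexpr int kArmLoadWithBakerReadBarrierLatency = 18;  // placeholder value

int ReferenceLoadLatency() {
  return (gUseReadBarrier && kUseBakerReadBarrier)
             ? kArmLoadWithBakerReadBarrierLatency
             : kArmMemoryLoadLatency;
}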