author     2022-04-20 01:39:28 +0000
committer  2022-08-10 18:06:05 +0000
commit     ca5ed9f281a5758814d2495da80178de56945720
tree       3633a7ace321b10dd2ae6ffadd69d525c7a530e5
parent     1325bb173c3160b8ff2ecfc2d1aefd185c773ee4
Convert kUseReadBarrier to static const from constexpr
This CL compiles both the CC and userfaultfd GCs into the ART library,
enabling us to choose either of the two at boot time, depending on
whether the device has the userfaultfd kernel feature.

CC is still chosen unless ART_USE_READ_BARRIER=false is set at build
time. This behavior will later be changed so that CC is chosen *only*
if ART_USE_READ_BARRIER=true is set explicitly; in all other cases, the
userfaultfd GC will be chosen if the device supports it.
Bug: 160737021
Bug: 230021033
Test: art/test/testrunner/testrunner.py
Change-Id: I370f1a9f6b8cdff8c2ce3cf7aa936bccd7ed675f
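
The boot-time detection the message refers to comes down to probing the
userfaultfd(2) syscall. A minimal sketch of such a probe (the helper name
`KernelSupportsUserfaultfd` is hypothetical, not the detection code added
by this CL):

```cpp
#include <fcntl.h>        // O_CLOEXEC, O_NONBLOCK
#include <sys/syscall.h>  // __NR_userfaultfd
#include <unistd.h>       // syscall(), close()

// Try to create a userfaultfd descriptor once during boot. Kernels without
// the feature fail with ENOSYS (or EPERM where unprivileged use is
// restricted), so a failed probe simply means "fall back to the CC GC".
static bool KernelSupportsUserfaultfd() {
  int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
  if (fd >= 0) {
    close(fd);
    return true;
  }
  return false;
}
```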
73 files changed, 464 insertions, 431 deletions
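
The bulk of the diff below mechanically renames the compile-time constants
kUseReadBarrier / kEmitCompilerReadBarrier to the load-time globals
gUseReadBarrier / gUseUserfaultfd (and kCompilerReadBarrierOption to
gCompilerReadBarrierOption). As a sketch of what the title's
constexpr-to-const conversion means; only the names gUseReadBarrier,
gUseUserfaultfd, and ART_FORCE_USE_READ_BARRIER come from this CL, and the
initialization shown (reusing the hypothetical probe above) is an
assumption for illustration:

```cpp
// Before: a compile-time constant, letting the compiler statically strip
// the code paths of whichever GC was not selected at build time.
//   static constexpr bool kUseReadBarrier = ...;

// After: a const initialized when the runtime library is loaded, so both
// CC and the userfaultfd GC stay compiled in and one is picked at boot.
#ifdef ART_FORCE_USE_READ_BARRIER
const bool gUseReadBarrier = true;  // CC forced at build time (current default).
#else
const bool gUseReadBarrier = !KernelSupportsUserfaultfd();  // hypothetical
#endif
const bool gUseUserfaultfd = !gUseReadBarrier;
```

Since gUseReadBarrier is no longer usable in constant expressions, every
DCHECK(kEmitCompilerReadBarrier)-style assertion in the diff becomes a
genuine run-time check, but its semantics are otherwise unchanged.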
diff --git a/build/art.go b/build/art.go index c39b7e3f38..56df14239f 100644 --- a/build/art.go +++ b/build/art.go @@ -38,19 +38,13 @@ func globalFlags(ctx android.LoadHookContext) ([]string, []string) { opt := ctx.Config().GetenvWithDefault("ART_NDEBUG_OPT_FLAG", "-O3") cflags = append(cflags, opt) - tlab := false - - gcType := ctx.Config().GetenvWithDefault("ART_DEFAULT_GC_TYPE", "CMS") + gcType := ctx.Config().GetenvWithDefault("ART_DEFAULT_GC_TYPE", "CMC") if ctx.Config().IsEnvTrue("ART_TEST_DEBUG_GC") { gcType = "SS" - tlab = true } cflags = append(cflags, "-DART_DEFAULT_GC_TYPE_IS_"+gcType) - if tlab { - cflags = append(cflags, "-DART_USE_TLAB=1") - } if ctx.Config().IsEnvTrue("ART_HEAP_POISONING") { cflags = append(cflags, "-DART_HEAP_POISONING=1") @@ -70,11 +64,18 @@ func globalFlags(ctx android.LoadHookContext) ([]string, []string) { asflags = append(asflags, "-DART_USE_READ_BARRIER=1", "-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1") - } - if !ctx.Config().IsEnvFalse("ART_USE_GENERATIONAL_CC") { - cflags = append(cflags, "-DART_USE_GENERATIONAL_CC=1") + if !ctx.Config().IsEnvFalse("ART_USE_GENERATIONAL_CC") { + cflags = append(cflags, "-DART_USE_GENERATIONAL_CC=1") + } + // For now force CC as we don't want to make userfaultfd GC the default. + // Eventually, make it such that we force CC only if ART_USE_READ_BARRIER + // was set to true explicitly during build time. + cflags = append(cflags, "-DART_FORCE_USE_READ_BARRIER=1") } + // The only GC which does not want ART_USE_TLAB set is CMS, which isn't actually used. + // When read-barrier is not set, we use userfaultfd GC. + cflags = append(cflags, "-DART_USE_TLAB=1") cdexLevel := ctx.Config().GetenvWithDefault("ART_DEFAULT_COMPACT_DEX_LEVEL", "fast") cflags = append(cflags, "-DART_DEFAULT_COMPACT_DEX_LEVEL="+cdexLevel) diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index 42072eb6e0..c8be993f5d 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -198,7 +198,7 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp // Skip this for @CriticalNative because we're not passing a `jclass` to the native method. std::unique_ptr<JNIMacroLabel> jclass_read_barrier_slow_path; std::unique_ptr<JNIMacroLabel> jclass_read_barrier_return; - if (kUseReadBarrier && is_static && LIKELY(!is_critical_native)) { + if (gUseReadBarrier && is_static && LIKELY(!is_critical_native)) { jclass_read_barrier_slow_path = __ CreateLabel(); jclass_read_barrier_return = __ CreateLabel(); @@ -592,7 +592,7 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp // 8.1. Read barrier slow path for the declaring class in the method for a static call. // Skip this for @CriticalNative because we're not passing a `jclass` to the native method. - if (kUseReadBarrier && is_static && !is_critical_native) { + if (gUseReadBarrier && is_static && !is_critical_native) { __ Bind(jclass_read_barrier_slow_path.get()); // Construct slow path for read barrier: diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index af08ddd150..b514f9bf9f 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -1672,7 +1672,7 @@ void CodeGenerator::ValidateInvokeRuntime(QuickEntrypointEnum entrypoint, // When (non-Baker) read barriers are enabled, some instructions // use a slow path to emit a read barrier, which does not trigger // GC. 
- (kEmitCompilerReadBarrier && + (gUseReadBarrier && !kUseBakerReadBarrier && (instruction->IsInstanceFieldGet() || instruction->IsPredicatedInstanceFieldGet() || diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index b09219a2ed..7b46e13a44 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -57,8 +57,8 @@ static int32_t constexpr kPrimIntMax = 0x7fffffff; // Maximum value for a primitive long. static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff); -static constexpr ReadBarrierOption kCompilerReadBarrierOption = - kEmitCompilerReadBarrier ? kWithReadBarrier : kWithoutReadBarrier; +static const ReadBarrierOption gCompilerReadBarrierOption = + gUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier; class Assembler; class CodeGenerator; @@ -461,7 +461,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { // If the target class is in the boot image, it's non-moveable and it doesn't matter // if we compare it with a from-space or to-space reference, the result is the same. // It's OK to traverse a class hierarchy jumping between from-space and to-space. - return kEmitCompilerReadBarrier && !instance_of->GetTargetClass()->IsInBootImage(); + return gUseReadBarrier && !instance_of->GetTargetClass()->IsInBootImage(); } static ReadBarrierOption ReadBarrierOptionForInstanceOf(HInstanceOf* instance_of) { @@ -476,7 +476,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { case TypeCheckKind::kArrayObjectCheck: case TypeCheckKind::kInterfaceCheck: { bool needs_read_barrier = - kEmitCompilerReadBarrier && !check_cast->GetTargetClass()->IsInBootImage(); + gUseReadBarrier && !check_cast->GetTargetClass()->IsInBootImage(); // We do not emit read barriers for HCheckCast, so we can get false negatives // and the slow path shall re-check and simply return if the cast is actually OK. return !needs_read_barrier; @@ -679,7 +679,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { return LocationSummary::kCallOnMainOnly; case HLoadString::LoadKind::kJitTableAddress: DCHECK(!load->NeedsEnvironment()); - return kEmitCompilerReadBarrier + return gUseReadBarrier ? 
LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; break; diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index d3031b71e5..eb95541db1 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -583,7 +583,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 { obj_(obj), offset_(offset), index_(index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // If `obj` is equal to `out` or `ref`, it means the initial object // has been overwritten by (or after) the heap object reference load // to be instrumented, e.g.: @@ -762,7 +762,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 { public: ReadBarrierForRootSlowPathARM64(HInstruction* instruction, Location out, Location root) : SlowPathCodeARM64(instruction), out_(out), root_(root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } void EmitNativeCode(CodeGenerator* codegen) override { @@ -2058,7 +2058,7 @@ void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction, bool is_predicated = instruction->IsPredicatedInstanceFieldGet(); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, object_field_get_with_read_barrier @@ -2114,7 +2114,7 @@ void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction, MemOperand field = HeapOperand(InputRegisterAt(instruction, receiver_input), field_info.GetFieldOffset()); - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && + if (gUseReadBarrier && kUseBakerReadBarrier && load_type == DataType::Type::kReference) { // Object FieldGet with Baker's read barrier case. // /* HeapReference<Object> */ out = *(base + offset) @@ -2556,7 +2556,7 @@ void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, object_array_get_with_read_barrier @@ -2612,10 +2612,10 @@ void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) { // does not support the HIntermediateAddress instruction. DCHECK(!((type == DataType::Type::kReference) && instruction->GetArray()->IsIntermediateAddress() && - kEmitCompilerReadBarrier && + gUseReadBarrier && !kUseBakerReadBarrier)); - if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) { // Object ArrayGet with Baker's read barrier case. // Note that a potential implicit null check is handled in the // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call. @@ -3905,7 +3905,7 @@ void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* ins // Temp is used for read barrier. 
static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { - if (kEmitCompilerReadBarrier && + if (gUseReadBarrier && (kUseBakerReadBarrier || type_check_kind == TypeCheckKind::kAbstractClassCheck || type_check_kind == TypeCheckKind::kClassHierarchyCheck || @@ -5320,7 +5320,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) { load_kind == HLoadClass::LoadKind::kBssEntryPublic || load_kind == HLoadClass::LoadKind::kBssEntryPackage); - const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage(); + const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage(); LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; @@ -5334,7 +5334,7 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) { } locations->SetOut(Location::RequiresRegister()); if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the type resolution or initialization and marking to save everything we need. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -5361,7 +5361,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA const ReadBarrierOption read_barrier_option = cls->IsInBootImage() ? kWithoutReadBarrier - : kCompilerReadBarrierOption; + : gCompilerReadBarrierOption; bool generate_null_check = false; switch (load_kind) { case HLoadClass::LoadKind::kReferrersClass: { @@ -5530,7 +5530,7 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) { } else { locations->SetOut(Location::RequiresRegister()); if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the pResolveString and marking to save everything we need. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -5584,7 +5584,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD temp, /* offset placeholder */ 0u, ldr_label, - kCompilerReadBarrierOption); + gCompilerReadBarrierOption); SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load); codegen_->AddSlowPath(slow_path); @@ -5608,7 +5608,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD out.X(), /* offset= */ 0, /* fixup_label= */ nullptr, - kCompilerReadBarrierOption); + gCompilerReadBarrierOption); return; } default: @@ -6469,7 +6469,7 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister( DataType::Type type = DataType::Type::kReference; Register out_reg = RegisterFrom(out, type); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(out + offset) @@ -6510,7 +6510,7 @@ void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters( Register out_reg = RegisterFrom(out, type); Register obj_reg = RegisterFrom(obj, type); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. 
// /* HeapReference<Object> */ out = *(obj + offset) @@ -6545,7 +6545,7 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad( DCHECK(fixup_label == nullptr || offset == 0u); Register root_reg = RegisterFrom(root, DataType::Type::kReference); if (read_barrier_option == kWithReadBarrier) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Fast path implementation of art::ReadBarrier::BarrierForRoot when // Baker's read barrier are used. @@ -6611,7 +6611,7 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad( void CodeGeneratorARM64::GenerateIntrinsicCasMoveWithBakerReadBarrier( vixl::aarch64::Register marked_old_value, vixl::aarch64::Register old_value) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR. @@ -6633,7 +6633,7 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins const vixl::aarch64::MemOperand& src, bool needs_null_check, bool use_load_acquire) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the @@ -6729,7 +6729,7 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instru uint32_t data_offset, Location index, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); static_assert( @@ -6807,7 +6807,7 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HArrayGet* instru void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) { // The following condition is a compile-time one, so it does not have a run-time cost. - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) { + if (kIsDebugBuild && gUseReadBarrier && kUseBakerReadBarrier) { // The following condition is a run-time one; it is executed after the // previous compile-time test, to avoid penalizing non-debug builds. if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) { @@ -6836,7 +6836,7 @@ void CodeGeneratorARM64::GenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the reference load. // @@ -6861,7 +6861,7 @@ void CodeGeneratorARM64::MaybeGenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { // Baker's read barriers shall be handled by the fast path // (CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier). DCHECK(!kUseBakerReadBarrier); @@ -6876,7 +6876,7 @@ void CodeGeneratorARM64::MaybeGenerateReadBarrierSlow(HInstruction* instruction, void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the GC root load. // diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index f4d652c29c..c1984e354f 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -92,7 +92,11 @@ const vixl::aarch64::CPURegList runtime_reserved_core_registers = vixl::aarch64::CPURegList( tr, // Reserve X20 as Marking Register when emitting Baker read barriers. 
- ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) ? mr : vixl::aarch64::NoCPUReg), + // TODO: We don't need to reserve marking-register for userfaultfd GC. But + // that would require some work in the assembler code as the right GC is + // chosen at load-time and not compile time. + ((gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier + ? mr : vixl::aarch64::NoCPUReg), kImplicitSuspendCheckRegister, vixl::aarch64::lr); @@ -111,7 +115,7 @@ inline Location FixedTempLocation() { const vixl::aarch64::CPURegList callee_saved_core_registers( vixl::aarch64::CPURegister::kRegister, vixl::aarch64::kXRegSize, - ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) + ((gUseReadBarrier && kUseBakerReadBarrier) ? vixl::aarch64::x21.GetCode() : vixl::aarch64::x20.GetCode()), vixl::aarch64::x30.GetCode()); diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index 00a6c837d9..bf8e896a9c 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -744,7 +744,7 @@ class ReadBarrierForHeapReferenceSlowPathARMVIXL : public SlowPathCodeARMVIXL { obj_(obj), offset_(offset), index_(index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // If `obj` is equal to `out` or `ref`, it means the initial object // has been overwritten by (or after) the heap object reference load // to be instrumented, e.g.: @@ -922,7 +922,7 @@ class ReadBarrierForRootSlowPathARMVIXL : public SlowPathCodeARMVIXL { public: ReadBarrierForRootSlowPathARMVIXL(HInstruction* instruction, Location out, Location root) : SlowPathCodeARMVIXL(instruction), out_(out), root_(root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } void EmitNativeCode(CodeGenerator* codegen) override { @@ -2101,7 +2101,10 @@ void CodeGeneratorARMVIXL::SetupBlockedRegisters() const { blocked_core_registers_[LR] = true; blocked_core_registers_[PC] = true; - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + // TODO: We don't need to reserve marking-register for userfaultfd GC. But + // that would require some work in the assembler code as the right GC is + // chosen at load-time and not compile time. + if ((gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier) { // Reserve marking register. blocked_core_registers_[MR] = true; } @@ -5911,7 +5914,7 @@ void LocationsBuilderARMVIXL::HandleFieldGet(HInstruction* instruction, instruction->IsPredicatedInstanceFieldGet()); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference); + gUseReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference); bool is_predicated = instruction->IsPredicatedInstanceFieldGet(); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, @@ -6082,7 +6085,7 @@ void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction, case DataType::Type::kReference: { // /* HeapReference<Object> */ out = *(base + offset) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { Location maybe_temp = (locations->GetTempCount() != 0) ? locations->GetTemp(0) : Location(); // Note that a potential implicit null check is handled in this // CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call. 
@@ -6386,7 +6389,7 @@ void CodeGeneratorARMVIXL::StoreToShiftedRegOffset(DataType::Type type, void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, object_array_get_with_read_barrier @@ -6534,14 +6537,14 @@ void InstructionCodeGeneratorARMVIXL::VisitArrayGet(HArrayGet* instruction) { // The read barrier instrumentation of object ArrayGet // instructions does not support the HIntermediateAddress // instruction. - DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier)); + DCHECK(!(has_intermediate_address && gUseReadBarrier)); static_assert( sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); // /* HeapReference<Object> */ out = // *(obj + data_offset + index * sizeof(HeapReference<Object>)) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this // CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier call. DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0))); @@ -7459,7 +7462,7 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) { load_kind == HLoadClass::LoadKind::kBssEntryPublic || load_kind == HLoadClass::LoadKind::kBssEntryPackage); - const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage(); + const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage(); LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; @@ -7473,7 +7476,7 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) { } locations->SetOut(Location::RequiresRegister()); if (load_kind == HLoadClass::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the type resolution or initialization and marking to save everything we need. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -7501,7 +7504,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_ const ReadBarrierOption read_barrier_option = cls->IsInBootImage() ? kWithoutReadBarrier - : kCompilerReadBarrierOption; + : gCompilerReadBarrierOption; bool generate_null_check = false; switch (load_kind) { case HLoadClass::LoadKind::kReferrersClass: { @@ -7721,7 +7724,7 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) { } else { locations->SetOut(Location::RequiresRegister()); if (load_kind == HLoadString::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the pResolveString and marking to save everything we need, including temps. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -7760,7 +7763,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE codegen_->EmitMovwMovtPlaceholder(labels, out); // All aligned loads are implicitly atomic consume operations on ARM. 
codegen_->GenerateGcRootFieldLoad( - load, out_loc, out, /*offset=*/ 0, kCompilerReadBarrierOption); + load, out_loc, out, /*offset=*/ 0, gCompilerReadBarrierOption); LoadStringSlowPathARMVIXL* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load); codegen_->AddSlowPath(slow_path); @@ -7781,7 +7784,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE load->GetString())); // /* GcRoot<mirror::String> */ out = *out codegen_->GenerateGcRootFieldLoad( - load, out_loc, out, /*offset=*/ 0, kCompilerReadBarrierOption); + load, out_loc, out, /*offset=*/ 0, gCompilerReadBarrierOption); return; } default: @@ -7838,7 +7841,7 @@ void InstructionCodeGeneratorARMVIXL::VisitThrow(HThrow* instruction) { // Temp is used for read barrier. static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { - if (kEmitCompilerReadBarrier && + if (gUseReadBarrier && (kUseBakerReadBarrier || type_check_kind == TypeCheckKind::kAbstractClassCheck || type_check_kind == TypeCheckKind::kClassHierarchyCheck || @@ -8773,7 +8776,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadOneRegister( ReadBarrierOption read_barrier_option) { vixl32::Register out_reg = RegisterFrom(out); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); DCHECK(maybe_temp.IsRegister()) << maybe_temp; if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. @@ -8808,7 +8811,7 @@ void InstructionCodeGeneratorARMVIXL::GenerateReferenceLoadTwoRegisters( vixl32::Register out_reg = RegisterFrom(out); vixl32::Register obj_reg = RegisterFrom(obj); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { DCHECK(maybe_temp.IsRegister()) << maybe_temp; // Load with fast path based Baker's read barrier. @@ -8837,7 +8840,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( ReadBarrierOption read_barrier_option) { vixl32::Register root_reg = RegisterFrom(root); if (read_barrier_option == kWithReadBarrier) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Fast path implementation of art::ReadBarrier::BarrierForRoot when // Baker's read barrier are used. @@ -8901,7 +8904,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad( void CodeGeneratorARMVIXL::GenerateIntrinsicCasMoveWithBakerReadBarrier( vixl::aarch32::Register marked_old_value, vixl::aarch32::Register old_value) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR. 
@@ -8935,7 +8938,7 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i vixl32::Register obj, const vixl32::MemOperand& src, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the @@ -9028,7 +9031,7 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref, Location index, Location temp, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); static_assert( @@ -9094,7 +9097,7 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref, void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) { // The following condition is a compile-time one, so it does not have a run-time cost. - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) { + if (kIsDebugBuild && gUseReadBarrier && kUseBakerReadBarrier) { // The following condition is a run-time one; it is executed after the // previous compile-time test, to avoid penalizing non-debug builds. if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) { @@ -9124,7 +9127,7 @@ void CodeGeneratorARMVIXL::GenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the reference load. // @@ -9150,7 +9153,7 @@ void CodeGeneratorARMVIXL::MaybeGenerateReadBarrierSlow(HInstruction* instructio Location obj, uint32_t offset, Location index) { - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { // Baker's read barriers shall be handled by the fast path // (CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier). DCHECK(!kUseBakerReadBarrier); @@ -9165,7 +9168,7 @@ void CodeGeneratorARMVIXL::MaybeGenerateReadBarrierSlow(HInstruction* instructio void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the GC root load. // diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h index 790ad0f8f7..62a4d368b7 100644 --- a/compiler/optimizing/code_generator_arm_vixl.h +++ b/compiler/optimizing/code_generator_arm_vixl.h @@ -84,7 +84,7 @@ static const vixl::aarch32::RegisterList kCoreCalleeSaves = vixl::aarch32::Regis vixl::aarch32::r6, vixl::aarch32::r7), // Do not consider r8 as a callee-save register with Baker read barriers. - ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) + ((gUseReadBarrier && kUseBakerReadBarrier) ? 
vixl::aarch32::RegisterList() : vixl::aarch32::RegisterList(vixl::aarch32::r8)), vixl::aarch32::RegisterList(vixl::aarch32::r10, diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 8c4a11c5ed..f4529bec7a 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -503,7 +503,7 @@ class ReadBarrierMarkSlowPathX86 : public SlowPathCode { : SlowPathCode(instruction), ref_(ref), unpoison_ref_before_marking_(unpoison_ref_before_marking) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; } @@ -590,7 +590,7 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86 : public SlowPathCode { field_addr_(field_addr), unpoison_ref_before_marking_(unpoison_ref_before_marking), temp_(temp) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; } @@ -744,7 +744,7 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode { obj_(obj), offset_(offset), index_(index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // If `obj` is equal to `out` or `ref`, it means the initial object // has been overwritten by (or after) the heap object reference load // to be instrumented, e.g.: @@ -918,7 +918,7 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode { public: ReadBarrierForRootSlowPathX86(HInstruction* instruction, Location out, Location root) : SlowPathCode(instruction), out_(out), root_(root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } void EmitNativeCode(CodeGenerator* codegen) override { @@ -1619,7 +1619,7 @@ void CodeGeneratorX86::LoadFromMemoryNoBarrier(DataType::Type dst_type, __ movsd(dst.AsFpuRegister<XmmRegister>(), src); break; case DataType::Type::kReference: - DCHECK(!kEmitCompilerReadBarrier); + DCHECK(!gUseReadBarrier); __ movl(dst.AsRegister<Register>(), src); __ MaybeUnpoisonHeapReference(dst.AsRegister<Register>()); break; @@ -5731,11 +5731,11 @@ void LocationsBuilderX86::HandleFieldGet(HInstruction* instruction, const FieldI instruction->IsPredicatedInstanceFieldGet()); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); bool is_predicated = instruction->IsPredicatedInstanceFieldGet(); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, - kEmitCompilerReadBarrier + gUseReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall); if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { @@ -5793,7 +5793,7 @@ void InstructionCodeGeneratorX86::HandleFieldGet(HInstruction* instruction, if (load_type == DataType::Type::kReference) { // /* HeapReference<Object> */ out = *(base + offset) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this // CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call. 
codegen_->GenerateFieldLoadWithBakerReadBarrier( @@ -6202,7 +6202,7 @@ void InstructionCodeGeneratorX86::VisitNullCheck(HNullCheck* instruction) { void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, object_array_get_with_read_barrier @@ -6244,7 +6244,7 @@ void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) { "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); // /* HeapReference<Object> */ out = // *(obj + data_offset + index * sizeof(HeapReference<Object>)) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this // CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call. codegen_->GenerateArrayLoadWithBakerReadBarrier( @@ -7057,7 +7057,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) { load_kind == HLoadClass::LoadKind::kBssEntryPublic || load_kind == HLoadClass::LoadKind::kBssEntryPackage); - const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage(); + const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage(); LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; @@ -7071,7 +7071,7 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) { } locations->SetOut(Location::RequiresRegister()); if (call_kind == LocationSummary::kCallOnSlowPath && cls->HasPcRelativeLoadKind()) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the type resolution and/or initialization to save everything. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -7109,7 +7109,7 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE bool generate_null_check = false; const ReadBarrierOption read_barrier_option = cls->IsInBootImage() ? kWithoutReadBarrier - : kCompilerReadBarrierOption; + : gCompilerReadBarrierOption; switch (load_kind) { case HLoadClass::LoadKind::kReferrersClass: { DCHECK(!cls->CanCallRuntime()); @@ -7296,7 +7296,7 @@ void LocationsBuilderX86::VisitLoadString(HLoadString* load) { } else { locations->SetOut(Location::RequiresRegister()); if (load_kind == HLoadString::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the pResolveString to save everything. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -7345,7 +7345,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S Address address = Address(method_address, CodeGeneratorX86::kPlaceholder32BitOffset); Label* fixup_label = codegen_->NewStringBssEntryPatch(load); // /* GcRoot<mirror::String> */ out = *address /* PC-relative */ - GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption); // No need for memory fence, thanks to the x86 memory model. 
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86(load); codegen_->AddSlowPath(slow_path); @@ -7365,7 +7365,7 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S Label* fixup_label = codegen_->NewJitRootStringPatch( load->GetDexFile(), load->GetStringIndex(), load->GetString()); // /* GcRoot<mirror::String> */ out = *address - GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption); return; } default: @@ -7416,7 +7416,7 @@ void InstructionCodeGeneratorX86::VisitThrow(HThrow* instruction) { // Temp is used for read barrier. static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { - if (kEmitCompilerReadBarrier && + if (gUseReadBarrier && !kUseBakerReadBarrier && (type_check_kind == TypeCheckKind::kAbstractClassCheck || type_check_kind == TypeCheckKind::kClassHierarchyCheck || @@ -8188,7 +8188,7 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadOneRegister( ReadBarrierOption read_barrier_option) { Register out_reg = out.AsRegister<Register>(); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(out + offset) @@ -8222,7 +8222,7 @@ void InstructionCodeGeneratorX86::GenerateReferenceLoadTwoRegisters( Register out_reg = out.AsRegister<Register>(); Register obj_reg = obj.AsRegister<Register>(); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(obj + offset) @@ -8250,7 +8250,7 @@ void InstructionCodeGeneratorX86::GenerateGcRootFieldLoad( ReadBarrierOption read_barrier_option) { Register root_reg = root.AsRegister<Register>(); if (read_barrier_option == kWithReadBarrier) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Fast path implementation of art::ReadBarrier::BarrierForRoot when // Baker's read barrier are used: @@ -8314,7 +8314,7 @@ void CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instr Register obj, uint32_t offset, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // /* HeapReference<Object> */ ref = *(obj + offset) @@ -8328,7 +8328,7 @@ void CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instr uint32_t data_offset, Location index, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); static_assert( @@ -8347,7 +8347,7 @@ void CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* i bool needs_null_check, bool always_update_field, Register* temp) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // In slow path based read barriers, the read barrier call is @@ -8428,7 +8428,7 @@ void CodeGeneratorX86::GenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the reference load. 
// @@ -8455,7 +8455,7 @@ void CodeGeneratorX86::MaybeGenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { // Baker's read barriers shall be handled by the fast path // (CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier). DCHECK(!kUseBakerReadBarrier); @@ -8470,7 +8470,7 @@ void CodeGeneratorX86::MaybeGenerateReadBarrierSlow(HInstruction* instruction, void CodeGeneratorX86::GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the GC root load. // diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 5987410893..d31a6303b4 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -510,7 +510,7 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode { : SlowPathCode(instruction), ref_(ref), unpoison_ref_before_marking_(unpoison_ref_before_marking) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86_64"; } @@ -601,7 +601,7 @@ class ReadBarrierMarkAndUpdateFieldSlowPathX86_64 : public SlowPathCode { unpoison_ref_before_marking_(unpoison_ref_before_marking), temp1_(temp1), temp2_(temp2) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } const char* GetDescription() const override { @@ -761,7 +761,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode { obj_(obj), offset_(offset), index_(index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // If `obj` is equal to `out` or `ref`, it means the initial // object has been overwritten by (or after) the heap object // reference load to be instrumented, e.g.: @@ -937,7 +937,7 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode { public: ReadBarrierForRootSlowPathX86_64(HInstruction* instruction, Location out, Location root) : SlowPathCode(instruction), out_(out), root_(root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); } void EmitNativeCode(CodeGenerator* codegen) override { @@ -5013,7 +5013,7 @@ void LocationsBuilderX86_64::HandleFieldGet(HInstruction* instruction) { instruction->IsPredicatedInstanceFieldGet()); bool object_field_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); bool is_predicated = instruction->IsPredicatedInstanceFieldGet(); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, @@ -5064,7 +5064,7 @@ void InstructionCodeGeneratorX86_64::HandleFieldGet(HInstruction* instruction, if (load_type == DataType::Type::kReference) { // /* HeapReference<Object> */ out = *(base + offset) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this // CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call. 
codegen_->GenerateFieldLoadWithBakerReadBarrier( @@ -5513,7 +5513,7 @@ void InstructionCodeGeneratorX86_64::VisitNullCheck(HNullCheck* instruction) { void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) { bool object_array_get_with_read_barrier = - kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference); + gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference); LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction, object_array_get_with_read_barrier @@ -5551,7 +5551,7 @@ void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) { "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); // /* HeapReference<Object> */ out = // *(obj + data_offset + index * sizeof(HeapReference<Object>)) - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // Note that a potential implicit null check is handled in this // CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call. codegen_->GenerateArrayLoadWithBakerReadBarrier( @@ -6352,7 +6352,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) { load_kind == HLoadClass::LoadKind::kBssEntryPublic || load_kind == HLoadClass::LoadKind::kBssEntryPackage); - const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage(); + const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage(); LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier) ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; @@ -6366,7 +6366,7 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) { } locations->SetOut(Location::RequiresRegister()); if (load_kind == HLoadClass::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the type resolution and/or initialization to save everything. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -6403,7 +6403,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S const ReadBarrierOption read_barrier_option = cls->IsInBootImage() ? kWithoutReadBarrier - : kCompilerReadBarrierOption; + : gCompilerReadBarrierOption; bool generate_null_check = false; switch (load_kind) { case HLoadClass::LoadKind::kReferrersClass: { @@ -6550,7 +6550,7 @@ void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) { } else { locations->SetOut(Location::RequiresRegister()); if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) { - if (!kUseReadBarrier || kUseBakerReadBarrier) { + if (!gUseReadBarrier || kUseBakerReadBarrier) { // Rely on the pResolveString to save everything. locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves()); } else { @@ -6598,7 +6598,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA /* no_rip= */ false); Label* fixup_label = codegen_->NewStringBssEntryPatch(load); // /* GcRoot<mirror::Class> */ out = *address /* PC-relative */ - GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption); // No need for memory fence, thanks to the x86-64 memory model. 
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86_64(load); codegen_->AddSlowPath(slow_path); @@ -6619,7 +6619,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA Label* fixup_label = codegen_->NewJitRootStringPatch( load->GetDexFile(), load->GetStringIndex(), load->GetString()); // /* GcRoot<mirror::String> */ out = *address - GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption); + GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption); return; } default: @@ -6672,7 +6672,7 @@ void InstructionCodeGeneratorX86_64::VisitThrow(HThrow* instruction) { // Temp is used for read barrier. static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) { - if (kEmitCompilerReadBarrier && + if (gUseReadBarrier && !kUseBakerReadBarrier && (type_check_kind == TypeCheckKind::kAbstractClassCheck || type_check_kind == TypeCheckKind::kClassHierarchyCheck || @@ -7426,7 +7426,7 @@ void InstructionCodeGeneratorX86_64::GenerateReferenceLoadOneRegister( ReadBarrierOption read_barrier_option) { CpuRegister out_reg = out.AsRegister<CpuRegister>(); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(out + offset) @@ -7460,7 +7460,7 @@ void InstructionCodeGeneratorX86_64::GenerateReferenceLoadTwoRegisters( CpuRegister out_reg = out.AsRegister<CpuRegister>(); CpuRegister obj_reg = obj.AsRegister<CpuRegister>(); if (read_barrier_option == kWithReadBarrier) { - CHECK(kEmitCompilerReadBarrier); + CHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Load with fast path based Baker's read barrier. // /* HeapReference<Object> */ out = *(obj + offset) @@ -7488,7 +7488,7 @@ void InstructionCodeGeneratorX86_64::GenerateGcRootFieldLoad( ReadBarrierOption read_barrier_option) { CpuRegister root_reg = root.AsRegister<CpuRegister>(); if (read_barrier_option == kWithReadBarrier) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); if (kUseBakerReadBarrier) { // Fast path implementation of art::ReadBarrier::BarrierForRoot when // Baker's read barrier are used: @@ -7552,7 +7552,7 @@ void CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* in CpuRegister obj, uint32_t offset, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // /* HeapReference<Object> */ ref = *(obj + offset) @@ -7566,7 +7566,7 @@ void CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* in uint32_t data_offset, Location index, bool needs_null_check) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); static_assert( @@ -7586,7 +7586,7 @@ void CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction bool always_update_field, CpuRegister* temp1, CpuRegister* temp2) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); // In slow path based read barriers, the read barrier call is @@ -7668,7 +7668,7 @@ void CodeGeneratorX86_64::GenerateReadBarrierSlow(HInstruction* instruction, Location obj, uint32_t offset, Location index) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the reference load. 
// @@ -7695,7 +7695,7 @@ void CodeGeneratorX86_64::MaybeGenerateReadBarrierSlow(HInstruction* instruction Location obj, uint32_t offset, Location index) { - if (kEmitCompilerReadBarrier) { + if (gUseReadBarrier) { // Baker's read barriers shall be handled by the fast path // (CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier). DCHECK(!kUseBakerReadBarrier); @@ -7710,7 +7710,7 @@ void CodeGeneratorX86_64::MaybeGenerateReadBarrierSlow(HInstruction* instruction void CodeGeneratorX86_64::GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); // Insert a slow path based read barrier *after* the GC root load. // diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc index dc60ba62bb..fb8b01b75a 100644 --- a/compiler/optimizing/instruction_simplifier_shared.cc +++ b/compiler/optimizing/instruction_simplifier_shared.cc @@ -244,7 +244,7 @@ bool TryExtractArrayAccessAddress(HInstruction* access, // The access may require a runtime call or the original array pointer. return false; } - if (kEmitCompilerReadBarrier && + if (gUseReadBarrier && !kUseBakerReadBarrier && access->IsArrayGet() && access->GetType() == DataType::Type::kReference) { diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc index f2d2b45da9..0feb92d734 100644 --- a/compiler/optimizing/intrinsics.cc +++ b/compiler/optimizing/intrinsics.cc @@ -392,7 +392,7 @@ void IntrinsicVisitor::CreateReferenceGetReferentLocations(HInvoke* invoke, } void IntrinsicVisitor::CreateReferenceRefersToLocations(HInvoke* invoke) { - if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) { + if (gUseReadBarrier && !kUseBakerReadBarrier) { // Unimplemented for non-Baker read barrier. return; } diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 646f4f2ea7..0ce082b12b 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -92,7 +92,7 @@ class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 { public: ReadBarrierSystemArrayCopySlowPathARM64(HInstruction* instruction, Location tmp) : SlowPathCodeARM64(instruction), tmp_(tmp) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); DCHECK(kUseBakerReadBarrier); } @@ -711,7 +711,7 @@ static void GenUnsafeGet(HInvoke* invoke, Location trg_loc = locations->Out(); Register trg = RegisterFrom(trg_loc, type); - if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) { // UnsafeGetObject/UnsafeGetObjectVolatile with Baker's read barrier case. 
  Register temp = WRegisterFrom(locations->GetTemp(0));
  MacroAssembler* masm = codegen->GetVIXLAssembler();
@@ -754,7 +754,7 @@ static bool UnsafeGetIntrinsicOnCallList(Intrinsics intrinsic) {
 }

 static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+  bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -1096,7 +1096,7 @@ void IntrinsicCodeGeneratorARM64::VisitJdkUnsafePutLongRelease(HInvoke* invoke)
 }

 static void CreateUnsafeCASLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  const bool can_call = kEmitCompilerReadBarrier && IsUnsafeCASObject(invoke);
+  const bool can_call = gUseReadBarrier && IsUnsafeCASObject(invoke);
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -1448,7 +1448,7 @@ static void GenUnsafeCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARM6
   vixl::aarch64::Label* exit_loop = &exit_loop_label;
   vixl::aarch64::Label* cmp_failure = &exit_loop_label;

-  if (kEmitCompilerReadBarrier && type == DataType::Type::kReference) {
+  if (gUseReadBarrier && type == DataType::Type::kReference) {
     // We need to store the `old_value` in a non-scratch register to make sure
     // the read barrier in the slow path does not clobber it.
     old_value = WRegisterFrom(locations->GetTemp(0));  // The old value from main path.
@@ -1523,12 +1523,12 @@ void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeCompareAndSetLong(HInvoke* in
 }
 void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }

   CreateUnsafeCASLocations(allocator_, invoke);
-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     // We need two non-scratch temporary registers for read barrier.
     LocationSummary* locations = invoke->GetLocations();
     if (kUseBakerReadBarrier) {
@@ -1578,7 +1578,7 @@ void IntrinsicCodeGeneratorARM64::VisitJdkUnsafeCompareAndSetLong(HInvok
 }
 void IntrinsicCodeGeneratorARM64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
 }
@@ -2814,7 +2814,7 @@ static constexpr int32_t kSystemArrayCopyThreshold = 128;
 void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -2866,7 +2866,7 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) {
   locations->AddTemp(Location::RequiresRegister());
   locations->AddTemp(Location::RequiresRegister());
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // Temporary register IP0, obtained from the VIXL scratch register
     // pool, cannot be used in ReadBarrierSystemArrayCopySlowPathARM64
     // (because that register is clobbered by ReadBarrierMarkRegX
@@ -2884,7 +2884,7 @@ void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) {
 void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   MacroAssembler* masm = GetVIXLAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -2991,7 +2991,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
     UseScratchRegisterScope temps(masm);
     Location temp3_loc;  // Used only for Baker read barrier.
     Register temp3;
-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       temp3_loc = locations->GetTemp(2);
       temp3 = WRegisterFrom(temp3_loc);
     } else {
@@ -3004,7 +3004,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
     // or the destination is Object[]. If none of these checks succeed, we go to the
     // slow path.

-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       if (!optimizations.GetSourceIsNonPrimitiveArray()) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
@@ -3165,7 +3165,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
     } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
       DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
       // Bail out if the source is not a non primitive array.
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
                                                         temp1_loc,
@@ -3215,7 +3215,7 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
       __ Cbz(WRegisterFrom(length), &done);
     }

-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       // TODO: Also convert this intrinsic to the IsGcMarking strategy?
       // SystemArrayCopy implementation for Baker read barriers (see
@@ -3451,7 +3451,7 @@ void IntrinsicCodeGeneratorARM64::VisitIntegerValueOf(HInvoke* invoke) {
 void IntrinsicLocationsBuilderARM64::VisitReferenceGetReferent(HInvoke* invoke) {
   IntrinsicVisitor::CreateReferenceGetReferentLocations(invoke, codegen_);

-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && invoke->GetLocations() != nullptr) {
+  if (gUseReadBarrier && kUseBakerReadBarrier && invoke->GetLocations() != nullptr) {
     invoke->GetLocations()->AddTemp(Location::RequiresRegister());
   }
 }
@@ -3466,7 +3466,7 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) {
   SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
   codegen_->AddSlowPath(slow_path);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     // Check self->GetWeakRefAccessEnabled().
     UseScratchRegisterScope temps(masm);
     Register temp = temps.AcquireW();
@@ -3493,7 +3493,7 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceGetReferent(HInvoke* invoke) {
   // Load the value from the field.
   uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
                                                     out,
                                                     WRegisterFrom(obj),
@@ -3533,7 +3533,7 @@ void IntrinsicCodeGeneratorARM64::VisitReferenceRefersTo(HInvoke* invoke) {
   __ Cmp(tmp, other);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     DCHECK(kUseBakerReadBarrier);

     vixl::aarch64::Label calculate_result;
@@ -4629,7 +4629,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
                             method.X(),
                             ArtField::DeclaringClassOffset().Int32Value(),
                             /*fixup_label=*/ nullptr,
-                            kCompilerReadBarrierOption);
+                            gCompilerReadBarrierOption);
       }
     }
   } else {
@@ -4683,7 +4683,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke) {
   }

   // Add a temporary for offset.
-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       GetExpectedVarHandleCoordinatesCount(invoke) == 0u) {  // For static fields.
     // To preserve the offset value across the non-Baker read barrier slow path
     // for loading the declaring class, use a fixed callee-save register.
@@ -4706,7 +4706,7 @@ static void CreateVarHandleGetLocations(HInvoke* invoke) {
     return;
   }

-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       invoke->GetType() == DataType::Type::kReference &&
       invoke->GetIntrinsic() != Intrinsics::kVarHandleGet &&
       invoke->GetIntrinsic() != Intrinsics::kVarHandleGetOpaque) {
@@ -4746,7 +4746,7 @@ static void GenerateVarHandleGet(HInvoke* invoke,
   DCHECK(use_load_acquire || order == std::memory_order_relaxed);

   // Load the value from the target location.
-  if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
     // Piggy-back on the field load path using introspection for the Baker read barrier.
     // The `target.offset` is a temporary, use it for field address.
     Register tmp_ptr = target.offset.X();
@@ -4947,7 +4947,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
   uint32_t number_of_arguments = invoke->GetNumberOfArguments();
   DataType::Type value_type = GetDataTypeFromShorty(invoke, number_of_arguments - 1u);
-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       value_type == DataType::Type::kReference) {
     // Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
     // the passed reference and reloads it from the field. This breaks the read barriers
@@ -4961,7 +4961,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
   LocationSummary* locations = CreateVarHandleCommonLocations(invoke);

-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     // We need callee-save registers for both the class object and offset instead of
     // the temporaries reserved in CreateVarHandleCommonLocations().
     static_assert(POPCOUNT(kArm64CalleeSaveRefSpills) >= 2u);
@@ -5002,7 +5002,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
       locations->AddTemp(Location::RequiresRegister());
     }
   }
-  if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     // Add a temporary for the `old_value_temp` in slow path.
     locations->AddTemp(Location::RequiresRegister());
   }
@@ -5068,7 +5068,7 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
   // except for references that need the offset for the read barrier.
   UseScratchRegisterScope temps(masm);
   Register tmp_ptr = target.offset.X();
-  if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     tmp_ptr = temps.AcquireX();
   }
   __ Add(tmp_ptr, target.object.X(), target.offset.X());
@@ -5151,7 +5151,7 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
   vixl::aarch64::Label* exit_loop = &exit_loop_label;
   vixl::aarch64::Label* cmp_failure = &exit_loop_label;

-  if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     // The `old_value_temp` is used first for the marked `old_value` and then for the unmarked
     // reloaded old value for subsequent CAS in the slow path. It cannot be a scratch register.
     size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
@@ -5296,7 +5296,7 @@ static void CreateVarHandleGetAndUpdateLocations(HInvoke* invoke,
     return;
   }

-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       invoke->GetType() == DataType::Type::kReference) {
     // Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
     // the passed reference and reloads it from the field, thus seeing the new value
@@ -5372,7 +5372,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
   // except for references that need the offset for the non-Baker read barrier.
   UseScratchRegisterScope temps(masm);
   Register tmp_ptr = target.offset.X();
-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       value_type == DataType::Type::kReference) {
     tmp_ptr = temps.AcquireX();
   }
@@ -5402,7 +5402,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
     // the new value unless it is zero bit pattern (+0.0f or +0.0) and need another one
     // in GenerateGetAndUpdate(). We have allocated a normal temporary to handle that.
     old_value = CPURegisterFrom(locations->GetTemp(1u), load_store_type);
-  } else if ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) &&
+  } else if ((gUseReadBarrier && kUseBakerReadBarrier) &&
      value_type == DataType::Type::kReference) {
     // Load the old value initially to a scratch register.
     // We shall move it to `out` later with a read barrier.
@@ -5450,7 +5450,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
     __ Sxtb(out.W(), old_value.W());
   } else if (value_type == DataType::Type::kInt16) {
     __ Sxth(out.W(), old_value.W());
-  } else if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     if (kUseBakerReadBarrier) {
       codegen->GenerateIntrinsicCasMoveWithBakerReadBarrier(out.W(), old_value.W());
     } else {
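The hunks above are representative of the whole change: a compile-time constant test becomes a test of a runtime global that is fixed once during boot. A minimal sketch of that conversion, with simplified declarations (the real ones live in ART's read-barrier configuration headers; ChooseGcAtBoot() here is a hypothetical stand-in for the actual boot-time selection logic):

// Before: a build-time constant; the compiler folds away every branch on it.
// constexpr bool kUseReadBarrier = ...;
//
// After: a global written exactly once during startup, before the compiler
// or any mutator thread reads it, so branches on it are real but stable.
bool gUseReadBarrier = true;   // Hypothetical default: CC remains the default GC.
bool gUseUserfaultfd = false;  // Set when the userfaultfd GC is selected instead.

void ChooseGcAtBoot(bool kernel_has_userfaultfd, bool force_read_barrier) {
  // Sketch only: keep CC unless the device supports userfaultfd and CC was
  // not forced at build time.
  gUseUserfaultfd = kernel_has_userfaultfd && !force_read_barrier;
  gUseReadBarrier = !gUseUserfaultfd;
}

Since JIT and AOT compilation happen after boot, the flag is still effectively constant by the time any of these intrinsic code generators run; only the constant folding that constexpr used to guarantee is lost.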
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index d850cadc2b..da47fa6cf0 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -120,7 +120,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL {
  public:
  explicit ReadBarrierSystemArrayCopySlowPathARMVIXL(HInstruction* instruction)
      : SlowPathCodeARMVIXL(instruction) {
-    DCHECK(kEmitCompilerReadBarrier);
+    DCHECK(gUseReadBarrier);
    DCHECK(kUseBakerReadBarrier);
  }
@@ -1242,7 +1242,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitStringNewStringFromString(HInvoke* invo
 void IntrinsicLocationsBuilderARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -1265,7 +1265,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
   if (length != nullptr && !assembler_->ShifterOperandCanAlwaysHold(length->GetValue())) {
     locations->SetInAt(4, Location::RequiresRegister());
   }
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // Temporary register IP cannot be used in
     // ReadBarrierSystemArrayCopySlowPathARM (because that register
     // is clobbered by ReadBarrierMarkRegX entry points). Get an extra
@@ -1339,7 +1339,7 @@ static void CheckPosition(ArmVIXLAssembler* assembler,
 void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   ArmVIXLAssembler* assembler = GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -1453,7 +1453,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
     // or the destination is Object[]. If none of these checks succeed, we go to the
     // slow path.

-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       if (!optimizations.GetSourceIsNonPrimitiveArray()) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
@@ -1584,7 +1584,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
     } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
       DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
       // Bail out if the source is not a non primitive array.
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
             invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
@@ -1621,7 +1621,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
     __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target= */ false);
   }

-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // TODO: Also convert this intrinsic to the IsGcMarking strategy?
     // SystemArrayCopy implementation for Baker read barriers (see
@@ -2511,7 +2511,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitReferenceGetReferent(HInvoke* invoke) {
   SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
   codegen_->AddSlowPath(slow_path);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     // Check self->GetWeakRefAccessEnabled().
     UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
     vixl32::Register temp = temps.Acquire();
@@ -2539,7 +2539,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitReferenceGetReferent(HInvoke* invoke) {
   // Load the value from the field.
   uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
                                                     out,
                                                     RegisterFrom(obj),
@@ -2587,7 +2587,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitReferenceRefersTo(HInvoke* invoke) {
   assembler->MaybeUnpoisonHeapReference(tmp);
   codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);  // `referent` is volatile.

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     DCHECK(kUseBakerReadBarrier);

     vixl32::Label calculate_result;
@@ -2613,7 +2613,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitReferenceRefersTo(HInvoke* invoke) {
     __ Bind(&calculate_result);
   } else {
-    DCHECK(!kEmitCompilerReadBarrier);
+    DCHECK(!gUseReadBarrier);
     __ Sub(out, tmp, other);
   }
@@ -2732,7 +2732,7 @@ static void GenerateIntrinsicGet(HInvoke* invoke,
       }
       break;
     case DataType::Type::kReference:
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
        // Piggy-back on the field load path using introspection for the Baker read barrier.
         vixl32::Register temp = RegisterFrom(maybe_temp);
         __ Add(temp, base, offset);
@@ -2777,7 +2777,7 @@ static void GenerateIntrinsicGet(HInvoke* invoke,
     codegen->GenerateMemoryBarrier(
         seq_cst_barrier ? MemBarrierKind::kAnyAny : MemBarrierKind::kLoadAny);
   }
-  if (type == DataType::Type::kReference && !(kEmitCompilerReadBarrier && kUseBakerReadBarrier)) {
+  if (type == DataType::Type::kReference && !(gUseReadBarrier && kUseBakerReadBarrier)) {
     Location base_loc = LocationFrom(base);
     Location index_loc = LocationFrom(offset);
     codegen->MaybeGenerateReadBarrierSlow(invoke, out, out, base_loc, /* offset=*/ 0u, index_loc);
   }
 }
@@ -2802,7 +2802,7 @@ static void CreateUnsafeGetLocations(HInvoke* invoke,
                                      CodeGeneratorARMVIXL* codegen,
                                      DataType::Type type,
                                      bool atomic) {
-  bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+  bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
   ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
@@ -2818,7 +2818,7 @@ static void CreateUnsafeGetLocations(HInvoke* invoke,
   locations->SetInAt(2, Location::RequiresRegister());
   locations->SetOut(Location::RequiresRegister(),
                     (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
-  if ((kEmitCompilerReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
+  if ((gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
       (type == DataType::Type::kInt64 && Use64BitExclusiveLoadStore(atomic, codegen))) {
     // We need a temporary register for the read barrier marking slow
     // path in CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier,
@@ -2837,7 +2837,7 @@ static void GenUnsafeGet(HInvoke* invoke,
   vixl32::Register offset = LowRegisterFrom(locations->InAt(2));  // Long offset, lo part only.
   Location out = locations->Out();
   Location maybe_temp = Location::NoLocation();
-  if ((kEmitCompilerReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
+  if ((gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
       (type == DataType::Type::kInt64 && Use64BitExclusiveLoadStore(atomic, codegen))) {
     maybe_temp = locations->GetTemp(0);
   }
@@ -3470,7 +3470,7 @@ static void GenerateCompareAndSet(CodeGeneratorARMVIXL* codegen,
   // branch goes to the read barrier slow path that clobbers `success` anyway.
   bool init_failure_for_cmp =
       success.IsValid() &&
-      !(kEmitCompilerReadBarrier && type == DataType::Type::kReference && expected.IsRegister());
+      !(gUseReadBarrier && type == DataType::Type::kReference && expected.IsRegister());
   // Instruction scheduling: Loading a constant between LDREX* and using the loaded value
   // is essentially free, so prepare the failure value here if we can.
   bool init_failure_for_cmp_early =
@@ -3655,7 +3655,7 @@ class ReadBarrierCasSlowPathARMVIXL : public SlowPathCodeARMVIXL {
 };

 static void CreateUnsafeCASLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  const bool can_call = kEmitCompilerReadBarrier && IsUnsafeCASObject(invoke);
+  const bool can_call = gUseReadBarrier && IsUnsafeCASObject(invoke);
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -3706,7 +3706,7 @@ static void GenUnsafeCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARMV
   vixl32::Label* exit_loop = &exit_loop_label;
   vixl32::Label* cmp_failure = &exit_loop_label;

-  if (kEmitCompilerReadBarrier && type == DataType::Type::kReference) {
+  if (gUseReadBarrier && type == DataType::Type::kReference) {
     // If marking, check if the stored reference is a from-space reference to the same
     // object as the to-space reference `expected`. If so, perform a custom CAS loop.
     ReadBarrierCasSlowPathARMVIXL* slow_path =
@@ -3770,7 +3770,7 @@ void IntrinsicLocationsBuilderARMVIXL::VisitJdkUnsafeCompareAndSetInt(HInvoke* i
 }
 void IntrinsicLocationsBuilderARMVIXL::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers (b/173104084).
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -3798,7 +3798,7 @@ void IntrinsicCodeGeneratorARMVIXL::VisitJdkUnsafeCompareAndSetInt(HInvo
 }
 void IntrinsicCodeGeneratorARMVIXL::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers (b/173104084).
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
 }
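The two visitors just shown form a recurring pair throughout this change: the locations-builder half declines the intrinsic when the configuration is unsupported, so the code-generator half can assert the configuration rather than re-check it. Reduced to a schematic (illustrative names, not the actual ART classes; DCHECK_IMPLIES(a, b) behaves like DCHECK(!(a) || (b))):

// Builder half: allocate no LocationSummary under a non-Baker read barrier;
// the invoke then stays an ordinary call instead of becoming an intrinsic.
void VisitFooLocations(HInvoke* invoke) {
  if (gUseReadBarrier && !kUseBakerReadBarrier) {
    return;  // No locations => no intrinsic code will be generated.
  }
  CreateFooLocations(invoke);  // Hypothetical helper.
}

// Codegen half: only reached when locations exist, so the unsupported
// configuration is impossible here and a debug assert suffices.
void VisitFooCodegen(HInvoke* invoke) {
  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
  GenerateFoo(invoke);  // Hypothetical helper.
}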
@@ -4351,7 +4351,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
                           LocationFrom(target.object),
                           method,
                           ArtField::DeclaringClassOffset().Int32Value(),
-                          kCompilerReadBarrierOption);
+                          gCompilerReadBarrierOption);
       }
     }
   } else {
@@ -4403,7 +4403,7 @@ static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke) {
   }

   // Add a temporary for offset.
-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       GetExpectedVarHandleCoordinatesCount(invoke) == 0u) {  // For static fields.
     // To preserve the offset value across the non-Baker read barrier slow path
     // for loading the declaring class, use a fixed callee-save register.
@@ -4428,7 +4428,7 @@ static void CreateVarHandleGetLocations(HInvoke* invoke,
     return;
   }

-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       invoke->GetType() == DataType::Type::kReference &&
       invoke->GetIntrinsic() != Intrinsics::kVarHandleGet &&
       invoke->GetIntrinsic() != Intrinsics::kVarHandleGetOpaque) {
@@ -4476,7 +4476,7 @@ static void GenerateVarHandleGet(HInvoke* invoke,
   Location maybe_temp = Location::NoLocation();
   Location maybe_temp2 = Location::NoLocation();
   Location maybe_temp3 = Location::NoLocation();
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) {
+  if (gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) {
     // Reuse the offset temporary.
     maybe_temp = LocationFrom(target.offset);
   } else if (DataType::Is64BitType(type) && Use64BitExclusiveLoadStore(atomic, codegen)) {
@@ -4749,7 +4749,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
   uint32_t number_of_arguments = invoke->GetNumberOfArguments();
   DataType::Type value_type = GetDataTypeFromShorty(invoke, number_of_arguments - 1u);
-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       value_type == DataType::Type::kReference) {
     // Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
     // the passed reference and reloads it from the field. This breaks the read barriers
@@ -4763,7 +4763,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
   LocationSummary* locations = CreateVarHandleCommonLocations(invoke);

-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     // We need callee-save registers for both the class object and offset instead of
     // the temporaries reserved in CreateVarHandleCommonLocations().
     static_assert(POPCOUNT(kArmCalleeSaveRefSpills) >= 2u);
@@ -4799,7 +4799,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, boo
       locations->AddRegisterTemps(2u);
     }
   }
-  if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     // Add a temporary for store result, also used for the `old_value_temp` in slow path.
     locations->AddTemp(Location::RequiresRegister());
   }
@@ -4930,7 +4930,7 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
   vixl32::Label* exit_loop = &exit_loop_label;
   vixl32::Label* cmp_failure = &exit_loop_label;

-  if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     // The `old_value_temp` is used first for the marked `old_value` and then for the unmarked
     // reloaded old value for subsequent CAS in the slow path. This must not clobber `old_value`.
     vixl32::Register old_value_temp = return_success ? RegisterFrom(out) : store_result;
@@ -5086,7 +5086,7 @@ static void CreateVarHandleGetAndUpdateLocations(HInvoke* invoke,
     return;
   }

-  if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       invoke->GetType() == DataType::Type::kReference) {
     // Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
     // the passed reference and reloads it from the field, thus seeing the new value
@@ -5107,7 +5107,7 @@ static void CreateVarHandleGetAndUpdateLocations(HInvoke* invoke,
     // Add temps needed to do the GenerateGetAndUpdate() with core registers.
     size_t temps_needed = (value_type == DataType::Type::kFloat64) ? 5u : 3u;
     locations->AddRegisterTemps(temps_needed - locations->GetTempCount());
-  } else if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+  } else if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
       value_type == DataType::Type::kReference) {
     // We need to preserve the declaring class (if present) and offset for read barrier
     // slow paths, so we must use a separate temporary for the exclusive store result.
@@ -5213,7 +5213,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
     if (byte_swap) {
       GenerateReverseBytes(assembler, DataType::Type::kInt32, arg, arg);
     }
-  } else if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     if (kUseBakerReadBarrier) {
       // Load the old value initially to a temporary register.
       // We shall move it to `out` later with a read barrier.
@@ -5296,7 +5296,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
     } else {
       __ Vmov(SRegisterFrom(out), RegisterFrom(old_value));
     }
-  } else if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+  } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
     if (kUseBakerReadBarrier) {
       codegen->GenerateIntrinsicCasMoveWithBakerReadBarrier(RegisterFrom(out),
                                                             RegisterFrom(old_value));
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 7d90aae984..0f6eb8638a 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -75,7 +75,7 @@ class ReadBarrierSystemArrayCopySlowPathX86 : public SlowPathCode {
  public:
  explicit ReadBarrierSystemArrayCopySlowPathX86(HInstruction* instruction)
      : SlowPathCode(instruction) {
-    DCHECK(kEmitCompilerReadBarrier);
+    DCHECK(gUseReadBarrier);
    DCHECK(kUseBakerReadBarrier);
  }
@@ -1699,7 +1699,7 @@ static void GenUnsafeGet(HInvoke* invoke,
     case DataType::Type::kReference: {
       Register output = output_loc.AsRegister<Register>();
-      if (kEmitCompilerReadBarrier) {
+      if (gUseReadBarrier) {
         if (kUseBakerReadBarrier) {
           Address src(base, offset, ScaleFactor::TIMES_1, 0);
           codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -1757,7 +1757,7 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
                                           HInvoke* invoke,
                                           DataType::Type type,
                                           bool is_volatile) {
-  bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+  bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -2103,7 +2103,7 @@ void IntrinsicCodeGeneratorX86::VisitJdkUnsafePutLongRelease(HInvoke* invoke) {
 static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
                                        DataType::Type type,
                                        HInvoke* invoke) {
-  const bool can_call = kEmitCompilerReadBarrier &&
+  const bool can_call = gUseReadBarrier &&
       kUseBakerReadBarrier &&
       IsUnsafeCASObject(invoke);
   LocationSummary* locations =
@@ -2175,7 +2175,7 @@ void IntrinsicLocationsBuilderX86::VisitJdkUnsafeCompareAndSetLong(HInvo
 void IntrinsicLocationsBuilderX86::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -2304,7 +2304,7 @@ static void GenReferenceCAS(HInvoke* invoke,
   DCHECK_EQ(expected, EAX);
   DCHECK_NE(temp, temp2);

-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // Need to make sure the reference stored in the field is a to-space
     // one before attempting the CAS or the CAS could fail incorrectly.
     codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -2391,7 +2391,7 @@ static void GenCAS(DataType::Type type, HInvoke* invoke, CodeGeneratorX86* codeg
   if (type == DataType::Type::kReference) {
     // The only read barrier implementation supporting the
     // UnsafeCASObject intrinsic is the Baker-style read barriers.
-    DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+    DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

     Register temp = locations->GetTemp(0).AsRegister<Register>();
     Register temp2 = locations->GetTemp(1).AsRegister<Register>();
@@ -2413,7 +2413,7 @@ void IntrinsicCodeGeneratorX86::VisitUnsafeCASLong(HInvoke* invoke) {
 void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // UnsafeCASObject intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   GenCAS(DataType::Type::kReference, invoke, codegen_);
 }
@@ -2443,7 +2443,7 @@ void IntrinsicCodeGeneratorX86::VisitJdkUnsafeCompareAndSetLong(HInvoke* invoke)
 void IntrinsicCodeGeneratorX86::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   GenCAS(DataType::Type::kReference, invoke, codegen_);
 }
@@ -2843,7 +2843,7 @@ static void GenSystemArrayCopyEndAddress(X86Assembler* assembler,
 void IntrinsicLocationsBuilderX86::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -2875,7 +2875,7 @@ void IntrinsicLocationsBuilderX86::VisitSystemArrayCopy(HInvoke* invoke) {
 void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -2995,7 +2995,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
     // slow path.

     if (!optimizations.GetSourceIsNonPrimitiveArray()) {
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // /* HeapReference<Class> */ temp1 = src->klass_
         codegen_->GenerateFieldLoadWithBakerReadBarrier(
             invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -3022,7 +3022,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
       __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
     }

-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       if (length.Equals(Location::RegisterLocation(temp3))) {
         // When Baker read barriers are enabled, register `temp3`,
         // which in the present case contains the `length` parameter,
@@ -3120,7 +3120,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
   } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
     DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
     // Bail out if the source is not a non primitive array.
-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
           invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -3151,7 +3151,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
   // Compute the base source address in `temp1`.
   GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);

-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // If it is needed (in the case of the fast-path loop), the base
     // destination address is computed later, as `temp2` is used for
     // intermediate computations.
@@ -3377,7 +3377,7 @@ void IntrinsicCodeGeneratorX86::VisitReferenceGetReferent(HInvoke* invoke) {
   SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
   codegen_->AddSlowPath(slow_path);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     // Check self->GetWeakRefAccessEnabled().
     ThreadOffset32 offset = Thread::WeakRefAccessEnabledOffset<kX86PointerSize>();
     __ fs()->cmpl(Address::Absolute(offset),
@@ -3400,7 +3400,7 @@ void IntrinsicCodeGeneratorX86::VisitReferenceGetReferent(HInvoke* invoke) {
   // Load the value from the field.
   uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
                                                     out,
                                                     obj.AsRegister<Register>(),
@@ -3442,7 +3442,7 @@ void IntrinsicCodeGeneratorX86::VisitReferenceRefersTo(HInvoke* invoke) {
   NearLabel end, return_true, return_false;
   __ cmpl(out, other);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     DCHECK(kUseBakerReadBarrier);

     __ j(kEqual, &return_true);
@@ -3781,7 +3781,7 @@ static Register GenerateVarHandleFieldReference(HInvoke* invoke,
                         Location::RegisterLocation(temp),
                         Address(temp, declaring_class_offset),
                         /* fixup_label= */ nullptr,
-                        kCompilerReadBarrierOption);
+                        gCompilerReadBarrierOption);
   return temp;
 }
@@ -3794,7 +3794,7 @@ static Register GenerateVarHandleFieldReference(HInvoke* invoke,
 static void CreateVarHandleGetLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -3836,7 +3836,7 @@ static void CreateVarHandleGetLocations(HInvoke* invoke) {
 static void GenerateVarHandleGet(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -3860,7 +3860,7 @@ static void GenerateVarHandleGet(HInvoke* invoke, CodeGeneratorX86* codegen) {
   Address field_addr(ref, offset, TIMES_1, 0);

   // Load the value from the field
-  if (type == DataType::Type::kReference && kCompilerReadBarrierOption == kWithReadBarrier) {
+  if (type == DataType::Type::kReference && gCompilerReadBarrierOption == kWithReadBarrier) {
     codegen->GenerateReferenceLoadWithBakerReadBarrier(
         invoke, out, ref, field_addr, /* needs_null_check= */ false);
   } else if (type == DataType::Type::kInt64 &&
@@ -3917,7 +3917,7 @@ void IntrinsicCodeGeneratorX86::VisitVarHandleGetOpaque(HInvoke* invoke) {
 static void CreateVarHandleSetLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -3990,7 +3990,7 @@ static void CreateVarHandleSetLocations(HInvoke* invoke) {
 static void GenerateVarHandleSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -4087,7 +4087,7 @@ void IntrinsicCodeGeneratorX86::VisitVarHandleSetOpaque(HInvoke* invoke) {
 static void CreateVarHandleGetAndSetLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -4135,7 +4135,7 @@ static void CreateVarHandleGetAndSetLocations(HInvoke* invoke) {
 static void GenerateVarHandleGetAndSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -4194,7 +4194,7 @@ static void GenerateVarHandleGetAndSet(HInvoke* invoke, CodeGeneratorX86* codege
       __ movd(locations->Out().AsFpuRegister<XmmRegister>(), EAX);
       break;
     case DataType::Type::kReference: {
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         // Need to make sure the reference stored in the field is a to-space
         // one before attempting the CAS or the CAS could fail incorrectly.
         codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -4258,7 +4258,7 @@ void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndSetRelease(HInvoke* invoke)
 static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -4322,7 +4322,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke) {
 static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -4441,7 +4441,7 @@ void IntrinsicCodeGeneratorX86::VisitVarHandleCompareAndExchangeRelease(HInvoke*
 static void CreateVarHandleGetAndAddLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -4490,7 +4490,7 @@ static void CreateVarHandleGetAndAddLocations(HInvoke* invoke) {
 static void GenerateVarHandleGetAndAdd(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -4591,7 +4591,7 @@ void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndAddRelease(HInvoke* invoke)
 static void CreateVarHandleGetAndBitwiseOpLocations(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -4659,7 +4659,7 @@ static void GenerateBitwiseOp(HInvoke* invoke,
 static void GenerateVarHandleGetAndBitwiseOp(HInvoke* invoke, CodeGeneratorX86* codegen) {
   // The only read barrier implementation supporting the
   // VarHandleGet intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
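Each back end computes the same can_call predicate before building locations: when a read barrier may divert a reference load to a runtime mark entrypoint, the intrinsic must be registered as calling on the slow path and its output must not alias inputs that the slow path still needs. The hunks above condense to roughly this shape (a sketch assembled from the fragments shown, using ART's LocationSummary API):

static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
  // With read barriers enabled, the listed Unsafe getters may call the
  // runtime to mark a reference; without them the intrinsic never calls out.
  bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
  LocationSummary* locations =
      new (allocator) LocationSummary(invoke,
                                      can_call ? LocationSummary::kCallOnSlowPath
                                               : LocationSummary::kNoCall,
                                      kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  // A potentially-calling intrinsic must not let the output share a register
  // with an input the slow path re-reads.
  locations->SetOut(Location::RequiresRegister(),
                    can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap);
}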
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 3c31374f67..9921d907d5 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -71,7 +71,7 @@ class ReadBarrierSystemArrayCopySlowPathX86_64 : public SlowPathCode {
  public:
  explicit ReadBarrierSystemArrayCopySlowPathX86_64(HInstruction* instruction)
      : SlowPathCode(instruction) {
-    DCHECK(kEmitCompilerReadBarrier);
+    DCHECK(gUseReadBarrier);
    DCHECK(kUseBakerReadBarrier);
  }
@@ -836,7 +836,7 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopyInt(HInvoke* invoke) {
 void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -887,7 +887,7 @@ static void GenSystemArrayCopyAddresses(X86_64Assembler* assembler,
 void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   // The only read barrier implementation supporting the
   // SystemArrayCopy intrinsic is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86_64Assembler* assembler = GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -1002,7 +1002,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   // slow path.

   bool did_unpoison = false;
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // /* HeapReference<Class> */ temp1 = dest->klass_
     codegen_->GenerateFieldLoadWithBakerReadBarrier(
         invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
@@ -1034,7 +1034,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
     // Bail out if the destination is not a non primitive array.
-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ TMP = temp1->component_type_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
           invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
@@ -1055,7 +1055,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   if (!optimizations.GetSourceIsNonPrimitiveArray()) {
     // Bail out if the source is not a non primitive array.
-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       // For the same reason given earlier, `temp1` is not trashed by the
       // read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
       // /* HeapReference<Class> */ TMP = temp2->component_type_
@@ -1081,7 +1081,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   if (optimizations.GetDestinationIsTypedObjectArray()) {
     NearLabel do_copy;
     __ j(kEqual, &do_copy);
-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = temp1->component_type_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
           invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
@@ -1109,7 +1109,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   } else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
     DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
     // Bail out if the source is not a non primitive array.
-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+    if (gUseReadBarrier && kUseBakerReadBarrier) {
       // /* HeapReference<Class> */ temp1 = src->klass_
       codegen_->GenerateFieldLoadWithBakerReadBarrier(
           invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -1141,7 +1141,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
   GenSystemArrayCopyAddresses(
       GetAssembler(), type, src, src_pos, dest, dest_pos, length, temp1, temp2, temp3);

-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // SystemArrayCopy implementation for Baker read barriers (see
     // also CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier):
     //
@@ -1888,7 +1888,7 @@ static void GenUnsafeGet(HInvoke* invoke,
       break;

     case DataType::Type::kReference: {
-      if (kEmitCompilerReadBarrier) {
+      if (gUseReadBarrier) {
         if (kUseBakerReadBarrier) {
           Address src(base, offset, ScaleFactor::TIMES_1, 0);
           codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -1930,7 +1930,7 @@ static bool UnsafeGetIntrinsicOnCallList(Intrinsics intrinsic) {
 }

 static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+  bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
   LocationSummary* locations =
       new (allocator) LocationSummary(invoke,
                                       can_call
@@ -2230,7 +2230,7 @@ void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafePutLongRelease(HInvoke* invoke)
 static void CreateUnsafeCASLocations(ArenaAllocator* allocator,
                                      DataType::Type type,
                                      HInvoke* invoke) {
-  const bool can_call = kEmitCompilerReadBarrier &&
+  const bool can_call = gUseReadBarrier &&
       kUseBakerReadBarrier &&
       IsUnsafeCASObject(invoke);
   LocationSummary* locations =
@@ -2253,7 +2253,7 @@ static void CreateUnsafeCASLocations(ArenaAllocator* allocator,
     // Need two temporaries for MarkGCCard.
     locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     locations->AddTemp(Location::RequiresRegister());
-    if (kEmitCompilerReadBarrier) {
+    if (gUseReadBarrier) {
       // Need three temporaries for GenerateReferenceLoadWithBakerReadBarrier.
       DCHECK(kUseBakerReadBarrier);
       locations->AddTemp(Location::RequiresRegister());
@@ -2298,7 +2298,7 @@ void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeCompareAndSetLong(HInvoke* i
 void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return;
   }
@@ -2438,7 +2438,7 @@ static void GenCompareAndSetOrExchangeRef(CodeGeneratorX86_64* codegen,
                                           CpuRegister temp3,
                                           bool is_cmpxchg) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
@@ -2447,7 +2447,7 @@ static void GenCompareAndSetOrExchangeRef(CodeGeneratorX86_64* codegen,
   codegen->MarkGCCard(temp1, temp2, base, value, value_can_be_null);

   Address field_addr(base, offset, TIMES_1, 0);
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     // Need to make sure the reference stored in the field is a to-space
     // one before attempting the CAS or the CAS could fail incorrectly.
     codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -2556,7 +2556,7 @@ static void GenCompareAndSetOrExchange(CodeGeneratorX86_64* codegen,
   CpuRegister new_value_reg = new_value.AsRegister<CpuRegister>();
   CpuRegister temp1 = locations->GetTemp(temp1_index).AsRegister<CpuRegister>();
   CpuRegister temp2 = locations->GetTemp(temp2_index).AsRegister<CpuRegister>();
-  CpuRegister temp3 = kEmitCompilerReadBarrier
+  CpuRegister temp3 = gUseReadBarrier
       ? locations->GetTemp(temp3_index).AsRegister<CpuRegister>()
       : CpuRegister(kNoRegister);
   DCHECK(RegsAreAllDifferent({base, offset, temp1, temp2, temp3}));
@@ -2624,7 +2624,7 @@ void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafeCompareAndSetLong(HInvoke* invo
 void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   GenCAS(DataType::Type::kReference, invoke, codegen_);
 }
@@ -3128,7 +3128,7 @@ void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
   SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
   codegen_->AddSlowPath(slow_path);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     // Check self->GetWeakRefAccessEnabled().
     ThreadOffset64 offset = Thread::WeakRefAccessEnabledOffset<kX86_64PointerSize>();
     __ gs()->cmpl(Address::Absolute(offset, /* no_rip= */ true),
@@ -3150,7 +3150,7 @@ void IntrinsicCodeGeneratorX86_64::VisitReferenceGetReferent(HInvoke* invoke) {
   // Load the value from the field.
   uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
                                                     out,
                                                     obj.AsRegister<CpuRegister>(),
@@ -3191,7 +3191,7 @@ void IntrinsicCodeGeneratorX86_64::VisitReferenceRefersTo(HInvoke* invoke) {
   __ cmpl(out, other);

-  if (kEmitCompilerReadBarrier) {
+  if (gUseReadBarrier) {
     DCHECK(kUseBakerReadBarrier);

     NearLabel calculate_result;
@@ -3771,7 +3771,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
                         Location::RegisterLocation(target.object),
                         Address(method, ArtField::DeclaringClassOffset()),
                         /*fixup_label=*/ nullptr,
-                        kCompilerReadBarrierOption);
+                        gCompilerReadBarrierOption);
       }
     }
   } else {
@@ -3790,7 +3790,7 @@ static void GenerateVarHandleTarget(HInvoke* invoke,
 static bool HasVarHandleIntrinsicImplementation(HInvoke* invoke) {
   // The only supported read barrier implementation is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+  if (gUseReadBarrier && !kUseBakerReadBarrier) {
     return false;
   }
@@ -3876,7 +3876,7 @@ static void GenerateVarHandleGet(HInvoke* invoke,
   Location out = locations->Out();

   if (type == DataType::Type::kReference) {
-    if (kEmitCompilerReadBarrier) {
+    if (gUseReadBarrier) {
       DCHECK(kUseBakerReadBarrier);
       codegen->GenerateReferenceLoadWithBakerReadBarrier(
           invoke, out, CpuRegister(target.object), src, /* needs_null_check= */ false);
@@ -4070,7 +4070,7 @@ static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke) {
     // Need two temporaries for MarkGCCard.
     locations->AddTemp(Location::RequiresRegister());
     locations->AddTemp(Location::RequiresRegister());
-    if (kEmitCompilerReadBarrier) {
+    if (gUseReadBarrier) {
       // Need three temporaries for GenerateReferenceLoadWithBakerReadBarrier.
       DCHECK(kUseBakerReadBarrier);
       locations->AddTemp(Location::RequiresRegister());
@@ -4085,7 +4085,7 @@ static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke,
                                                      CodeGeneratorX86_64* codegen,
                                                      bool is_cmpxchg,
                                                      bool byte_swap = false) {
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86_64Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
@@ -4218,7 +4218,7 @@ static void CreateVarHandleGetAndSetLocations(HInvoke* invoke) {
     // Need two temporaries for MarkGCCard.
     locations->AddTemp(Location::RequiresRegister());
     locations->AddTemp(Location::RequiresRegister());
-    if (kEmitCompilerReadBarrier) {
+    if (gUseReadBarrier) {
       // Need a third temporary for GenerateReferenceLoadWithBakerReadBarrier.
       DCHECK(kUseBakerReadBarrier);
       locations->AddTemp(Location::RequiresRegister());
@@ -4267,7 +4267,7 @@ static void GenerateVarHandleGetAndSet(HInvoke* invoke,
   CpuRegister temp2 = locations->GetTemp(temp_count - 2).AsRegister<CpuRegister>();
   CpuRegister valreg = value.AsRegister<CpuRegister>();

-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  if (gUseReadBarrier && kUseBakerReadBarrier) {
     codegen->GenerateReferenceLoadWithBakerReadBarrier(
         invoke,
         locations->GetTemp(temp_count - 3),
@@ -4647,7 +4647,7 @@ static void GenerateVarHandleGetAndUpdate(HInvoke* invoke,
                                           bool need_any_store_barrier,
                                           bool need_any_any_barrier,
                                           bool byte_swap = false) {
-  DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+  DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);

   X86_64Assembler* assembler = codegen->GetAssembler();
   LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 965e1bd9f4..25dd1047e5 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -669,7 +669,7 @@ void SchedulingLatencyVisitorARM::VisitArrayGet(HArrayGet* instruction) {
     }

     case DataType::Type::kReference: {
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         last_visited_latency_ = kArmLoadWithBakerReadBarrierLatency;
       } else {
         if (index->IsConstant()) {
@@ -937,7 +937,7 @@ void SchedulingLatencyVisitorARM::HandleFieldGetLatencies(HInstruction* instruct
       break;

     case DataType::Type::kReference:
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+      if (gUseReadBarrier && kUseBakerReadBarrier) {
         last_visited_internal_latency_ = kArmMemoryLoadLatency + kArmIntegerOpLatency;
         last_visited_latency_ = kArmMemoryLoadLatency;
       } else {
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 77f5d7081a..0271db9ce1 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -82,7 +82,7 @@ void ArmVIXLAssembler::MaybeUnpoisonHeapReference(vixl32::Register reg) {
 void ArmVIXLAssembler::GenerateMarkingRegisterCheck(vixl32::Register temp, int code) {
   // The Marking Register is only used in the Baker read barrier configuration.
-  DCHECK(kEmitCompilerReadBarrier);
+  DCHECK(gUseReadBarrier);
   DCHECK(kUseBakerReadBarrier);

   vixl32::Label mr_is_ok;
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index a4fddbce20..035e13d231 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -155,7 +155,7 @@ void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size,
   // Pop LR to PC unless we need to emit some read barrier code just before returning.
   bool emit_code_before_return =
-      (kEmitCompilerReadBarrier && kUseBakerReadBarrier) &&
+      (gUseReadBarrier && kUseBakerReadBarrier) &&
       (may_suspend || (kIsDebugBuild && emit_run_time_checks_in_debug_mode_));
   if ((core_spill_mask & (1u << lr.GetCode())) != 0u && !emit_code_before_return) {
     DCHECK_EQ(core_spill_mask & (1u << pc.GetCode()), 0u);
@@ -215,7 +215,9 @@ void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size,
     }
   }

-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  // Emit marking register refresh even with uffd-GC as we are still using the
+  // register due to nterp's dependency.
+  if ((gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier) {
     if (may_suspend) {
       // The method may be suspended; refresh the Marking Register.
       ___ Ldr(mr, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
@@ -1172,7 +1174,7 @@ void ArmVIXLJNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnary
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   vixl32::Register test_reg;
   DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
-  DCHECK(kUseReadBarrier);
+  DCHECK(gUseReadBarrier);
   if (kUseBakerReadBarrier) {
     // TestGcMarking() is used in the JNI stub entry when the marking register is up to date.
     if (kIsDebugBuild && emit_run_time_checks_in_debug_mode_) {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 6100ed9855..df05838e2c 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -188,7 +188,7 @@ void Arm64Assembler::MaybeUnpoisonHeapReference(Register reg) {
 void Arm64Assembler::GenerateMarkingRegisterCheck(Register temp, int code) {
   // The Marking Register is only used in the Baker read barrier configuration.
-  DCHECK(kEmitCompilerReadBarrier);
+  DCHECK(gUseReadBarrier);
   DCHECK(kUseBakerReadBarrier);

   vixl::aarch64::Register mr = reg_x(MR);  // Marking Register.
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index 0f6e5ebaf7..4043a3360d 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -992,7 +992,7 @@ void Arm64JNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCo
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   Register test_reg;
   DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
-  DCHECK(kUseReadBarrier);
+  DCHECK(gUseReadBarrier);
   if (kUseBakerReadBarrier) {
     // TestGcMarking() is used in the JNI stub entry when the marking register is up to date.
     if (kIsDebugBuild && emit_run_time_checks_in_debug_mode_) {
@@ -1118,7 +1118,9 @@ void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
   asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size);
   asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);

-  if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  // Emit marking register refresh even with uffd-GC as we are still using the
+  // register due to nterp's dependency.
+  if ((gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier) {
     vixl::aarch64::Register mr = reg_x(MR);  // Marking Register.
     vixl::aarch64::Register tr = reg_x(TR);  // Thread Register.
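Both JNI stub epilogues above now refresh the marking register under a widened predicate: with the userfaultfd GC the register no longer tracks concurrent-copying marking, but nterp still reads it, so it must stay in sync whenever either GC is in use. The guard reduces to a single predicate (a sketch; gUseUserfaultfd is the runtime flag this change introduces, and MR is ART's dedicated marking register, e.g. w20 on arm64):

// True when the JNI stub must reload MR from Thread::IsGcMarkingOffset()
// before returning to managed code.
inline bool NeedsMarkingRegisterRefresh() {
  // kUseBakerReadBarrier stays a build-time constant; the other two flags
  // are fixed at boot. nterp consults MR even under the userfaultfd GC,
  // hence the second disjunct.
  return (gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier;
}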
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 27bae654c3..a25fd1d3cc 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -962,7 +962,7 @@ class Dex2Oat final {
                           compiler_options_->GetNativeDebuggable());
     key_value_store_->Put(OatHeader::kCompilerFilter,
                           CompilerFilter::NameOfFilter(compiler_options_->GetCompilerFilter()));
-    key_value_store_->Put(OatHeader::kConcurrentCopying, kUseReadBarrier);
+    key_value_store_->Put(OatHeader::kConcurrentCopying, gUseReadBarrier);
     if (invocation_file_.get() != -1) {
       std::ostringstream oss;
       for (int i = 0; i < argc; ++i) {
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64.cc b/dex2oat/linker/arm64/relative_patcher_arm64.cc
index 4028f758b9..5794040d14 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64.cc
@@ -251,7 +251,7 @@ void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
     } else {
       if ((insn & 0xfffffc00) == 0x91000000) {
         // ADD immediate, 64-bit with imm12 == 0 (unset).
-        if (!kEmitCompilerReadBarrier) {
+        if (!gUseReadBarrier) {
           DCHECK(patch.GetType() == LinkerPatch::Type::kIntrinsicReference ||
                  patch.GetType() == LinkerPatch::Type::kMethodRelative ||
                  patch.GetType() == LinkerPatch::Type::kTypeRelative ||
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index fc19cd131c..e0c37d800f 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -50,7 +50,6 @@
 #if defined(__linux__)
 #include <linux/unistd.h>
 #include <sys/syscall.h>
-#include <sys/utsname.h>
 #endif

 #if defined(_WIN32)
@@ -158,6 +157,17 @@ bool FlushCpuCaches(void* begin, void* end) {
 #endif

+#if defined(__linux__)
+bool IsKernelVersionAtLeast(int reqd_major, int reqd_minor) {
+  struct utsname uts;
+  int major, minor;
+  CHECK_EQ(uname(&uts), 0);
+  CHECK_EQ(strcmp(uts.sysname, "Linux"), 0);
+  CHECK_EQ(sscanf(uts.release, "%d.%d", &major, &minor), 2);
+  return major > reqd_major || (major == reqd_major && minor >= reqd_minor);
+}
+#endif
+
 bool CacheOperationsMaySegFault() {
 #if defined(__linux__) && defined(__aarch64__)
   // Avoid issue on older ARM64 kernels where data cache operations could be classified as writes
@@ -167,18 +177,10 @@ bool CacheOperationsMaySegFault() {
   //
   // This behaviour means we should avoid the dual view JIT on the device. This is just
   // an issue when running tests on devices that have an old kernel.
-  static constexpr int kRequiredMajor = 3;
-  static constexpr int kRequiredMinor = 12;
-  struct utsname uts;
-  int major, minor;
-  if (uname(&uts) != 0 ||
-      strcmp(uts.sysname, "Linux") != 0 ||
-      sscanf(uts.release, "%d.%d", &major, &minor) != 2 ||
-      (major < kRequiredMajor || (major == kRequiredMajor && minor < kRequiredMinor))) {
-    return true;
-  }
-#endif
+  return !IsKernelVersionAtLeast(3, 12);
+#else
   return false;
+#endif
 }

 uint32_t GetTid() {
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 0e8231a92c..90eb2daa4d 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -31,6 +31,10 @@
 #include "globals.h"
 #include "macros.h"

+#if defined(__linux__)
+#include <sys/utsname.h>
+#endif
+
 namespace art {

 static inline uint32_t PointerToLowMemUInt32(const void* p) {
@@ -125,6 +129,10 @@ NO_RETURN void SleepForever();
 // Flush CPU caches. Returns true on success, false if flush failed.
 WARN_UNUSED bool FlushCpuCaches(void* begin, void* end);

+#if defined(__linux__)
+bool IsKernelVersionAtLeast(int reqd_major, int reqd_minor);
+#endif
+
 // On some old kernels, a cache operation may segfault.
 WARN_UNUSED bool CacheOperationsMaySegFault();
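The new IsKernelVersionAtLeast() helper factors out the uname() parsing that CacheOperationsMaySegFault() previously performed inline, and gives boot-time feature selection a reusable probe for version-gated kernel behavior. A hypothetical caller, with an illustrative version threshold (not the actual userfaultfd requirement, which is detected differently):

#include "base/utils.h"

bool KernelNewEnoughForFeatureX() {
#if defined(__linux__)
  // Illustrative only: gate a feature on kernel 5.4+. Real userfaultfd
  // detection in ART exercises the syscall rather than trusting versions.
  return art::IsKernelVersionAtLeast(/*reqd_major=*/5, /*reqd_minor=*/4);
#else
  return false;
#endif
}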
@@ -156,7 +156,7 @@ bool JvmtiWeakTable<T>::SetLocked(art::Thread* self, art::ObjPtr<art::mirror::Ob return true; } - if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) { + if (art::gUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) { // Under concurrent GC, there is a window between moving objects and sweeping of system // weaks in which mutators are active. We may receive a to-space object pointer in obj, // but still have from-space pointers in the table. Explicitly update the table once. diff --git a/openjdkjvmti/jvmti_weak_table.h b/openjdkjvmti/jvmti_weak_table.h index ea0d023728..afa2d1da0a 100644 --- a/openjdkjvmti/jvmti_weak_table.h +++ b/openjdkjvmti/jvmti_weak_table.h @@ -152,7 +152,7 @@ class JvmtiWeakTable : public art::gc::SystemWeakHolder { // Performance optimization: To avoid multiple table updates, ensure that during GC we // only update once. See the comment on the implementation of GetTagSlowPath. - if (art::kUseReadBarrier && + if (art::gUseReadBarrier && self != nullptr && self->GetIsGcMarking() && !update_since_last_sweep_) { diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc index b0b0064643..555babec78 100644 --- a/runtime/arch/arm/entrypoints_init_arm.cc +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -91,7 +91,7 @@ void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) { qpoints->SetReadBarrierMarkReg10(is_active ? art_quick_read_barrier_mark_reg10 : nullptr); qpoints->SetReadBarrierMarkReg11(is_active ? art_quick_read_barrier_mark_reg11 : nullptr); - if (kUseReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { // For the alignment check, strip the Thumb mode bit. DCHECK_ALIGNED(reinterpret_cast<intptr_t>(art_quick_read_barrier_mark_introspection) - 1u, 256u); diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 8921577a99..ccf4ff2704 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -2116,7 +2116,7 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) { const bool tracing_enabled = Trace::IsTracingEnabled(); Thread* const self = Thread::Current(); WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); - if (kUseReadBarrier) { + if (gUseReadBarrier) { // We do not track new roots for CC. 
DCHECK_EQ(0, flags & (kVisitRootFlagNewRoots | kVisitRootFlagClearRootLog | @@ -2152,7 +2152,7 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) { root.VisitRoot(visitor, RootInfo(kRootVMInternal)); } } - } else if (!kUseReadBarrier && (flags & kVisitRootFlagNewRoots) != 0) { + } else if (!gUseReadBarrier && (flags & kVisitRootFlagNewRoots) != 0) { for (auto& root : new_class_roots_) { ObjPtr<mirror::Class> old_ref = root.Read<kWithoutReadBarrier>(); root.VisitRoot(visitor, RootInfo(kRootStickyClass)); @@ -2173,13 +2173,13 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) { } } } - if (!kUseReadBarrier && (flags & kVisitRootFlagClearRootLog) != 0) { + if (!gUseReadBarrier && (flags & kVisitRootFlagClearRootLog) != 0) { new_class_roots_.clear(); new_bss_roots_boot_oat_files_.clear(); } - if (!kUseReadBarrier && (flags & kVisitRootFlagStartLoggingNewRoots) != 0) { + if (!gUseReadBarrier && (flags & kVisitRootFlagStartLoggingNewRoots) != 0) { log_new_roots_ = true; - } else if (!kUseReadBarrier && (flags & kVisitRootFlagStopLoggingNewRoots) != 0) { + } else if (!gUseReadBarrier && (flags & kVisitRootFlagStopLoggingNewRoots) != 0) { log_new_roots_ = false; } // We deliberately ignore the class roots in the image since we diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h index 9fa9c5d7e5..e1360730d6 100644 --- a/runtime/common_runtime_test.h +++ b/runtime/common_runtime_test.h @@ -305,7 +305,7 @@ class CheckJniAbortCatcher { } #define TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS() \ - if (!kEmitCompilerReadBarrier || !kUseBakerReadBarrier) { \ + if (!gUseReadBarrier || !kUseBakerReadBarrier) { \ printf("WARNING: TEST DISABLED FOR GC WITHOUT BAKER READ BARRIER\n"); \ return; \ } @@ -317,7 +317,7 @@ class CheckJniAbortCatcher { } #define TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING_WITHOUT_READ_BARRIERS() \ - if (kRunningOnMemoryTool && kPoisonHeapReferences && !kEmitCompilerReadBarrier) { \ + if (kRunningOnMemoryTool && kPoisonHeapReferences && !gUseReadBarrier) { \ printf("WARNING: TEST DISABLED FOR MEMORY TOOL WITH HEAP POISONING WITHOUT READ BARRIERS\n"); \ return; \ } diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index 0bc913874b..03fd4232c8 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -448,7 +448,7 @@ void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method, InvokeType type } static bool IsValidReadBarrierImplicitCheck(uintptr_t addr) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); uint32_t monitor_offset = mirror::Object::MonitorOffset().Uint32Value(); if (kUseBakerReadBarrier && (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64)) { @@ -483,7 +483,7 @@ static bool IsValidImplicitCheck(uintptr_t addr, const Instruction& instr) } case Instruction::IGET_OBJECT: - if (kEmitCompilerReadBarrier && IsValidReadBarrierImplicitCheck(addr)) { + if (gUseReadBarrier && IsValidReadBarrierImplicitCheck(addr)) { return true; } FALLTHROUGH_INTENDED; @@ -507,7 +507,7 @@ static bool IsValidImplicitCheck(uintptr_t addr, const Instruction& instr) } case Instruction::AGET_OBJECT: - if (kEmitCompilerReadBarrier && IsValidReadBarrierImplicitCheck(addr)) { + if (gUseReadBarrier && IsValidReadBarrierImplicitCheck(addr)) { return true; } FALLTHROUGH_INTENDED; diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc index e7b362339a..81a9e21ab0 100644 --- 
a/runtime/entrypoints/quick/quick_field_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc @@ -435,7 +435,7 @@ extern "C" int artSet16InstanceFromCode(uint32_t field_idx, } extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); return ReadBarrier::Mark(obj); } @@ -443,14 +443,12 @@ extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUS mirror::Object* obj, uint32_t offset) { // Used only in connection with non-volatile loads. - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset; mirror::HeapReference<mirror::Object>* ref_addr = reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr); - constexpr ReadBarrierOption kReadBarrierOption = - kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier; mirror::Object* result = - ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kReadBarrierOption>( + ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kWithReadBarrier>( obj, MemberOffset(offset), ref_addr); @@ -458,7 +456,7 @@ extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUS } extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root) { - DCHECK(kEmitCompilerReadBarrier); + DCHECK(gUseReadBarrier); return root->Read(); } diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc index 8569124307..fafa3c702b 100644 --- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -47,7 +47,7 @@ static_assert(sizeof(IRTSegmentState) == sizeof(uint32_t), "IRTSegmentState size static_assert(std::is_trivial<IRTSegmentState>::value, "IRTSegmentState not trivial"); extern "C" void artJniReadBarrier(ArtMethod* method) { - DCHECK(kUseReadBarrier); + DCHECK(gUseReadBarrier); mirror::CompressedReference<mirror::Object>* declaring_class = method->GetDeclaringClassAddressWithoutBarrier(); if (kUseBakerReadBarrier) { diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 91d252dc02..9e21007387 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -1941,7 +1941,7 @@ class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor { // The declaring class must be marked. 
auto* declaring_class = reinterpret_cast<mirror::CompressedReference<mirror::Class>*>( method->GetDeclaringClassAddressWithoutBarrier()); - if (kUseReadBarrier) { + if (gUseReadBarrier) { artJniReadBarrier(method); } sm_.AdvancePointer(declaring_class); diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc index 561eae7bb8..9586e9d70a 100644 --- a/runtime/gc/allocation_record.cc +++ b/runtime/gc/allocation_record.cc @@ -141,13 +141,13 @@ void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) { } void AllocRecordObjectMap::AllowNewAllocationRecords() { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); allow_new_record_ = true; new_record_condition_.Broadcast(Thread::Current()); } void AllocRecordObjectMap::DisallowNewAllocationRecords() { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); allow_new_record_ = false; } @@ -240,8 +240,8 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self, // Since nobody seemed to really notice or care it might not be worth the trouble. // Wait for GC's sweeping to complete and allow new records. - while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) || - (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) { + while (UNLIKELY((!gUseReadBarrier && !allow_new_record_) || + (gUseReadBarrier && !self->GetWeakRefAccessEnabled()))) { // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the // presence of threads blocking for weak ref access. self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_); diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc index 2820ae0f51..0701ceb698 100644 --- a/runtime/gc/collector/mark_compact.cc +++ b/runtime/gc/collector/mark_compact.cc @@ -18,6 +18,7 @@ #include "base/quasi_atomic.h" #include "base/systrace.h" +#include "base/utils.h" #include "gc/accounting/mod_union_table-inl.h" #include "gc/reference_processor.h" #include "gc/space/bump_pointer_space.h" @@ -25,6 +26,7 @@ #include "gc/verification-inl.h" #include "jit/jit_code_cache.h" #include "mirror/object-refvisitor-inl.h" +#include "read_barrier_config.h" #include "scoped_thread_state_change-inl.h" #include "sigchain.h" #include "thread_list.h" @@ -37,10 +39,6 @@ #include <fstream> #include <numeric> -namespace art { -namespace gc { -namespace collector { - #ifndef __BIONIC__ #ifndef MREMAP_DONTUNMAP #define MREMAP_DONTUNMAP 4 @@ -59,7 +57,52 @@ namespace collector { #endif #endif // __NR_userfaultfd #endif // __BIONIC__ -// Turn of kCheckLocks when profiling the GC as it slows down the GC + +namespace art { + +#ifndef ART_FORCE_USE_READ_BARRIER +static bool ShouldUseUserfaultfd() { +#if !defined(__linux__) + return false; +#elif !defined(ART_TARGET) + // We require MREMAP_DONTUNMAP functionality in mremap syscall, which was + // introduced in 5.13 kernel version. Check for that on host. Not required + // checking on target as MREMAP_DONTUNMAP and userfaultfd were enabled + // together. + if (!IsKernelVersionAtLeast(5, 13)) { + return false; + } +#endif + int fd = syscall(__NR_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY); +#ifndef ART_TARGET + // On host we may not have the kernel patches that restrict userfaultfd to + // user mode. But that is not a security concern as we are on host. + // Therefore, attempt one more time without UFFD_USER_MODE_ONLY. 
+ if (fd == -1 && errno == EINVAL) { + fd = syscall(__NR_userfaultfd, O_CLOEXEC); + } +#endif + if (fd >= 0) { + close(fd); + return true; + } else { + return false; + } +} +#endif + +#ifdef ART_FORCE_USE_READ_BARRIER +const bool gUseReadBarrier = kUseBakerReadBarrier || kUseTableLookupReadBarrier; +#else +const bool gUseReadBarrier = (kUseBakerReadBarrier || kUseTableLookupReadBarrier) + && !ShouldUseUserfaultfd(); +#endif +const bool gUseUserfaultfd = !gUseReadBarrier; + +namespace gc { +namespace collector { + +// Turn off kCheckLocks when profiling the GC as it slows down the GC // significantly. static constexpr bool kCheckLocks = kDebugLocking; static constexpr bool kVerifyRootsMarked = kIsDebugBuild; diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h index 992a0bae3b..8fdb524f8e 100644 --- a/runtime/gc/collector_type.h +++ b/runtime/gc/collector_type.h @@ -65,11 +65,11 @@ enum CollectorType { std::ostream& operator<<(std::ostream& os, CollectorType collector_type); static constexpr CollectorType kCollectorTypeDefault = -#if ART_DEFAULT_GC_TYPE_IS_CMS - kCollectorTypeCMS +#if ART_DEFAULT_GC_TYPE_IS_CMC + kCollectorTypeCMC #elif ART_DEFAULT_GC_TYPE_IS_SS kCollectorTypeSS -#else +#elif ART_DEFAULT_GC_TYPE_IS_CMS kCollectorTypeCMS +#else #error "ART default GC type must be set" #endif diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h index 9e1524e657..9c76060062 100644 --- a/runtime/gc/heap-inl.h +++ b/runtime/gc/heap-inl.h @@ -209,10 +209,8 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, } // IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for the // BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be - // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant - // since the allocator_type should be constant propagated. - if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent() - && UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) { + // optimized out. + if (IsGcConcurrent() && UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) { need_gc = true; } GetMetrics()->TotalBytesAllocated()->Add(bytes_tl_bulk_allocated); @@ -442,7 +440,7 @@ inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_co return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass()); } -inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, +inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type ATTRIBUTE_UNUSED, size_t alloc_size, bool grow) { size_t old_target = target_footprint_.load(std::memory_order_relaxed); @@ -457,7 +455,7 @@ return true; } // We are between target_footprint_ and growth_limit_ . - if (AllocatorMayHaveConcurrentGC(allocator_type) && IsGcConcurrent()) { + if (IsGcConcurrent()) { return false; } else { if (grow) { diff --git a/runtime/gc/heap-visit-objects-inl.h b/runtime/gc/heap-visit-objects-inl.h index e20d981fa3..a235c44033 100644 --- a/runtime/gc/heap-visit-objects-inl.h +++ b/runtime/gc/heap-visit-objects-inl.h @@ -118,7 +118,7 @@ inline void Heap::VisitObjectsInternal(Visitor&& visitor) { // For speed reasons, only perform it when Rosalloc could possibly be used. // (Disabled for read barriers because it never uses Rosalloc). // (See the DCHECK in RosAllocSpace constructor).
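The ShouldUseUserfaultfd() probe completed above is the boot-time detection the commit message describes: open a userfaultfd in user-mode-only, close it again, and remember whether that worked. A minimal standalone sketch of the same probe (ProbeUserfaultfd is an illustrative name, and the UFFD_USER_MODE_ONLY fallback definition is only for older headers that predate <linux/userfaultfd.h> exporting it):

#include <cerrno>
#include <cstdio>
#include <fcntl.h>        // O_CLOEXEC
#include <sys/syscall.h>  // __NR_userfaultfd
#include <unistd.h>       // syscall(), close()

#ifndef UFFD_USER_MODE_ONLY
#define UFFD_USER_MODE_ONLY 1  // normally from <linux/userfaultfd.h>
#endif

// Mirrors the probe above: ask the kernel for a user-mode-only uffd and close
// it immediately. Returns true iff userfaultfd is usable.
static bool ProbeUserfaultfd(bool allow_kernel_mode_fallback) {
  int fd = syscall(__NR_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY);
  if (fd == -1 && errno == EINVAL && allow_kernel_mode_fallback) {
    // Kernels predating the user-mode-only flag reject it with EINVAL;
    // ART only retries like this on host, where that is not a security concern.
    fd = syscall(__NR_userfaultfd, O_CLOEXEC);
  }
  if (fd >= 0) {
    close(fd);
    return true;
  }
  return false;
}

int main() {
  printf("userfaultfd available: %s\n", ProbeUserfaultfd(true) ? "yes" : "no");
  return 0;
}

Because the answer depends on the running kernel, gUseReadBarrier can no longer be a compile-time constant, which is exactly why the definitions above are plain const globals initialized at load time.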
- if (!kUseReadBarrier) { + if (!gUseReadBarrier) { // Rosalloc has a race in allocation. Objects can be written into the allocation // stack before their header writes are visible to this thread. // See b/28790624 for more details. diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 6937d1cf15..a8195a393f 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -417,7 +417,7 @@ Heap::Heap(size_t initial_size, if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { LOG(INFO) << "Heap() entering"; } - if (kUseReadBarrier) { + if (gUseReadBarrier) { CHECK_EQ(foreground_collector_type_, kCollectorTypeCC); CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground); } else if (background_collector_type_ != gc::kCollectorTypeHomogeneousSpaceCompact) { @@ -999,7 +999,7 @@ void Heap::IncrementDisableThreadFlip(Thread* self) { } void Heap::EnsureObjectUserfaulted(ObjPtr<mirror::Object> obj) { - if (kUseUserfaultfd) { + if (gUseUserfaultfd) { // Use volatile to ensure that compiler loads from memory to trigger userfaults, if required. volatile uint8_t volatile_sum; volatile uint8_t* start = reinterpret_cast<volatile uint8_t*>(obj.Ptr()); @@ -1533,7 +1533,7 @@ void Heap::DoPendingCollectorTransition() { VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state"; } } else if (desired_collector_type == kCollectorTypeCCBackground) { - DCHECK(kUseReadBarrier); + DCHECK(gUseReadBarrier); if (!CareAboutPauseTimes()) { // Invoke CC full compaction. CollectGarbageInternal(collector::kGcTypeFull, @@ -4251,7 +4251,7 @@ void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const { } void Heap::AllowNewAllocationRecords() const { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); AllocRecordObjectMap* allocation_records = GetAllocationRecords(); if (allocation_records != nullptr) { @@ -4260,7 +4260,7 @@ void Heap::AllowNewAllocationRecords() const { } void Heap::DisallowNewAllocationRecords() const { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_); AllocRecordObjectMap* allocation_records = GetAllocationRecords(); if (allocation_records != nullptr) { @@ -4637,7 +4637,7 @@ void Heap::PostForkChildAction(Thread* self) { uint64_t last_adj_time = NanoTime(); next_gc_type_ = NonStickyGcType(); // Always start with a full gc. - if (kUseUserfaultfd) { + if (gUseUserfaultfd) { DCHECK_NE(mark_compact_, nullptr); mark_compact_->CreateUserfaultfd(/*post_fork*/true); } diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 0fc04a94ec..044999d33b 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -1034,15 +1034,6 @@ class Heap { allocator_type != kAllocatorTypeTLAB && allocator_type != kAllocatorTypeRegion; } - static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) { - if (kUseUserfaultfd || kUseReadBarrier) { - // May have the TLAB allocator but is always concurrent. TODO: clean this up. 
- return true; - } - return - allocator_type != kAllocatorTypeTLAB && - allocator_type != kAllocatorTypeBumpPointer; - } static bool IsMovingGc(CollectorType collector_type) { return collector_type == kCollectorTypeCC || diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc index 5e41ee4ef8..772174f885 100644 --- a/runtime/gc/reference_processor.cc +++ b/runtime/gc/reference_processor.cc @@ -90,7 +90,7 @@ void ReferenceProcessor::BroadcastForSlowPath(Thread* self) { ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self, ObjPtr<mirror::Reference> reference) { auto slow_path_required = [this, self]() REQUIRES_SHARED(Locks::mutator_lock_) { - return kUseReadBarrier ? !self->GetWeakRefAccessEnabled() : SlowPathEnabled(); + return gUseReadBarrier ? !self->GetWeakRefAccessEnabled() : SlowPathEnabled(); }; if (!slow_path_required()) { return reference->GetReferent(); @@ -118,10 +118,10 @@ ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self, // Keeping reference_processor_lock_ blocks the broadcast when we try to reenable the fast path. while (slow_path_required()) { DCHECK(collector_ != nullptr); - constexpr bool kOtherReadBarrier = kUseReadBarrier && !kUseBakerReadBarrier; + const bool other_read_barrier = !kUseBakerReadBarrier && gUseReadBarrier; if (UNLIKELY(reference->IsFinalizerReferenceInstance() || rp_state_ == RpState::kStarting /* too early to determine mark state */ - || (kOtherReadBarrier && reference->IsPhantomReferenceInstance()))) { + || (other_read_barrier && reference->IsPhantomReferenceInstance()))) { // Odd cases in which it doesn't hurt to just wait, or the wait is likely to be very brief. // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the @@ -210,7 +210,7 @@ void ReferenceProcessor::ProcessReferences(Thread* self, TimingLogger* timings) } { MutexLock mu(self, *Locks::reference_processor_lock_); - if (!kUseReadBarrier) { + if (!gUseReadBarrier) { CHECK_EQ(SlowPathEnabled(), concurrent_) << "Slow path must be enabled iff concurrent"; } else { // Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent_ == false). @@ -305,7 +305,7 @@ void ReferenceProcessor::ProcessReferences(Thread* self, TimingLogger* timings) // could result in a stale is_marked_callback_ being called before the reference processing // starts since there is a small window of time where slow_path_enabled_ is enabled but the // callback isn't yet set. - if (!kUseReadBarrier && concurrent_) { + if (!gUseReadBarrier && concurrent_) { // Done processing, disable the slow path and broadcast to the waiters. DisableSlowPath(self); } @@ -418,8 +418,8 @@ void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) { void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) { // Wait until we are done processing reference. - while ((!kUseReadBarrier && SlowPathEnabled()) || - (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) { + while ((!gUseReadBarrier && SlowPathEnabled()) || + (gUseReadBarrier && !self->GetWeakRefAccessEnabled())) { // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the // presence of threads blocking for weak ref access. 
self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_); diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h index ef85b3942f..77b9548211 100644 --- a/runtime/gc/system_weak.h +++ b/runtime/gc/system_weak.h @@ -48,7 +48,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder { void Allow() override REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!allow_disallow_lock_) { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); MutexLock mu(Thread::Current(), allow_disallow_lock_); allow_new_system_weak_ = true; new_weak_condition_.Broadcast(Thread::Current()); @@ -57,7 +57,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder { void Disallow() override REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!allow_disallow_lock_) { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); MutexLock mu(Thread::Current(), allow_disallow_lock_); allow_new_system_weak_ = false; } @@ -78,8 +78,8 @@ class SystemWeakHolder : public AbstractSystemWeakHolder { REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(allow_disallow_lock_) { // Wait for GC's sweeping to complete and allow new records - while (UNLIKELY((!kUseReadBarrier && !allow_new_system_weak_) || - (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) { + while (UNLIKELY((!gUseReadBarrier && !allow_new_system_weak_) || + (gUseReadBarrier && !self->GetWeakRefAccessEnabled()))) { // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the // presence of threads blocking for weak ref access. self->CheckEmptyCheckpointFromWeakRefAccess(&allow_disallow_lock_); diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc index 49733550fe..10b2d65f45 100644 --- a/runtime/intern_table.cc +++ b/runtime/intern_table.cc @@ -190,8 +190,8 @@ void InternTable::WaitUntilAccessible(Thread* self) { { ScopedThreadSuspension sts(self, ThreadState::kWaitingWeakGcRootRead); MutexLock mu(self, *Locks::intern_table_lock_); - while ((!kUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) || - (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) { + while ((!gUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) || + (gUseReadBarrier && !self->GetWeakRefAccessEnabled())) { weak_intern_condition_.Wait(self); } } @@ -218,7 +218,7 @@ ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s, if (strong != nullptr) { return strong; } - if (kUseReadBarrier ? self->GetWeakRefAccessEnabled() + if (gUseReadBarrier ? 
self->GetWeakRefAccessEnabled() : weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) { break; } @@ -230,7 +230,7 @@ ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s, auto h = hs.NewHandleWrapper(&s); WaitUntilAccessible(self); } - if (!kUseReadBarrier) { + if (!gUseReadBarrier) { CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal); } else { CHECK(self->GetWeakRefAccessEnabled()); @@ -429,7 +429,7 @@ void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) { } void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); weak_root_state_ = new_state; if (new_state != gc::kWeakRootStateNoReadsOrWrites) { weak_intern_condition_.Broadcast(Thread::Current()); diff --git a/runtime/interpreter/interpreter_cache-inl.h b/runtime/interpreter/interpreter_cache-inl.h index cea8157d26..1dda78b0ab 100644 --- a/runtime/interpreter/interpreter_cache-inl.h +++ b/runtime/interpreter/interpreter_cache-inl.h @@ -39,7 +39,7 @@ inline void InterpreterCache::Set(Thread* self, const void* key, size_t value) { // For simplicity, only update the cache if weak ref accesses are enabled. If // they are disabled, this means the GC is processing the cache, and is // reading it concurrently. - if (kUseReadBarrier && self->GetWeakRefAccessEnabled()) { + if (gUseReadBarrier && self->GetWeakRefAccessEnabled()) { data_[IndexOf(key)] = Entry{key, value}; } } diff --git a/runtime/interpreter/mterp/nterp.cc b/runtime/interpreter/mterp/nterp.cc index 5922efd5b8..3938d54d62 100644 --- a/runtime/interpreter/mterp/nterp.cc +++ b/runtime/interpreter/mterp/nterp.cc @@ -34,7 +34,7 @@ namespace art { namespace interpreter { bool IsNterpSupported() { - return !kPoisonHeapReferences && kUseReadBarrier; + return !kPoisonHeapReferences && gUseReadBarrier; } bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) { diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc index cec6f1dc8d..4d3e0304c0 100644 --- a/runtime/interpreter/unstarted_runtime.cc +++ b/runtime/interpreter/unstarted_runtime.cc @@ -1557,7 +1557,7 @@ void UnstartedRuntime::UnstartedJdkUnsafeCompareAndSwapObject( mirror::Object* new_value = shadow_frame->GetVRegReference(arg_offset + 5); // Must use non transactional mode. - if (kUseReadBarrier) { + if (gUseReadBarrier) { // Need to make sure the reference stored in the field is a to-space one before attempting the // CAS or the CAS could fail incorrectly. mirror::HeapReference<mirror::Object>* field_addr = diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index 199a00b18a..ad8cc9ab63 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -559,7 +559,7 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) { } bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const { - return kUseReadBarrier + return gUseReadBarrier ? 
self->GetWeakRefAccessEnabled() : is_weak_access_enabled_.load(std::memory_order_seq_cst); } @@ -582,13 +582,13 @@ void JitCodeCache::BroadcastForInlineCacheAccess() { } void JitCodeCache::AllowInlineCacheAccess() { - DCHECK(!kUseReadBarrier); + DCHECK(!gUseReadBarrier); is_weak_access_enabled_.store(true, std::memory_order_seq_cst); BroadcastForInlineCacheAccess(); } void JitCodeCache::DisallowInlineCacheAccess() { - DCHECK(!kUseReadBarrier); + DCHECK(!gUseReadBarrier); is_weak_access_enabled_.store(false, std::memory_order_seq_cst); } diff --git a/runtime/jni/java_vm_ext-inl.h b/runtime/jni/java_vm_ext-inl.h index 29cdf1b773..c98a5532f6 100644 --- a/runtime/jni/java_vm_ext-inl.h +++ b/runtime/jni/java_vm_ext-inl.h @@ -26,7 +26,7 @@ namespace art { inline bool JavaVMExt::MayAccessWeakGlobals(Thread* self) const { DCHECK(self != nullptr); - return kUseReadBarrier + return gUseReadBarrier ? self->GetWeakRefAccessEnabled() : allow_accessing_weak_globals_.load(std::memory_order_seq_cst); } diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc index f41b6c06fa..39d5729698 100644 --- a/runtime/jni/java_vm_ext.cc +++ b/runtime/jni/java_vm_ext.cc @@ -729,8 +729,8 @@ jweak JavaVMExt::AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) { MutexLock mu(self, *Locks::jni_weak_globals_lock_); // CMS needs this to block for concurrent reference processing because an object allocated during // the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak - // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant. - if (!kUseReadBarrier) { + // ref. But CC (gUseReadBarrier == true) doesn't because of the to-space invariant. + if (!gUseReadBarrier) { WaitForWeakGlobalsAccess(self); } std::string error_msg; @@ -809,7 +809,7 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) { } void JavaVMExt::DisallowNewWeakGlobals() { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); Thread* const self = Thread::Current(); MutexLock mu(self, *Locks::jni_weak_globals_lock_); // DisallowNewWeakGlobals is only called by CMS during the pause. It is required to have the @@ -820,7 +820,7 @@ void JavaVMExt::DisallowNewWeakGlobals() { } void JavaVMExt::AllowNewWeakGlobals() { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); Thread* self = Thread::Current(); MutexLock mu(self, *Locks::jni_weak_globals_lock_); allow_accessing_weak_globals_.store(true, std::memory_order_seq_cst); @@ -876,7 +876,7 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalDuringShutdown(Thread* self, I return DecodeWeakGlobal(self, ref); } // self can be null during a runtime shutdown. ~Runtime()->~ClassLinker()->DecodeWeakGlobal(). 
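The weak-reference gating idiom that the allocation-record, system-weak, intern-table, reference-processor, JIT inline-cache and weak-global hunks above all repeat has one shape: with CC (gUseReadBarrier) the per-thread weak-ref-access flag gates access, with every other GC a per-subsystem allow flag guarded by a condition variable does, and either way the empty checkpoint runs before blocking. Condensed into a standalone sketch (every name below is a stand-in, not an ART type):

#include <condition_variable>
#include <mutex>

struct MockThread {
  bool weak_ref_access_enabled = true;  // GetWeakRefAccessEnabled() stand-in
  void CheckEmptyCheckpoint() {}        // CheckEmptyCheckpointFromWeakRefAccess() stand-in
};

struct WeakAccessGate {
  bool use_read_barrier = true;  // art::gUseReadBarrier stand-in
  bool allow_access = true;      // per-subsystem flag used by the non-CC GCs
  std::mutex lock;
  std::condition_variable cond;

  // Mirrors the Block()/WaitUntilAccessible() loops in the hunks above.
  void WaitUntilAccessible(MockThread* self) {
    std::unique_lock<std::mutex> mu(lock);
    while ((!use_read_barrier && !allow_access) ||
           (use_read_barrier && !self->weak_ref_access_enabled)) {
      // Run the empty checkpoint before blocking so checkpoints still work
      // in the presence of threads waiting for weak-ref access.
      self->CheckEmptyCheckpoint();
      cond.wait(mu);
    }
  }
};

int main() {
  MockThread self;
  WeakAccessGate gate;
  gate.WaitUntilAccessible(&self);  // returns at once: access is enabled
  return 0;
}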
- if (!kUseReadBarrier) { + if (!gUseReadBarrier) { DCHECK(allow_accessing_weak_globals_.load(std::memory_order_seq_cst)); } return weak_globals_.SynchronizedGet(ref); diff --git a/runtime/lock_word.h b/runtime/lock_word.h index 84f45c2dc6..599a5994df 100644 --- a/runtime/lock_word.h +++ b/runtime/lock_word.h @@ -183,8 +183,7 @@ class LockWord { LockState GetState() const { CheckReadBarrierState(); - if ((!kUseReadBarrier && UNLIKELY(value_ == 0)) || - (kUseReadBarrier && UNLIKELY((value_ & kGCStateMaskShiftedToggled) == 0))) { + if (UNLIKELY((value_ & kGCStateMaskShiftedToggled) == 0)) { return kUnlocked; } else { uint32_t internal_state = (value_ >> kStateShift) & kStateMask; @@ -288,7 +287,7 @@ class LockWord { void CheckReadBarrierState() const { if (kIsDebugBuild && ((value_ >> kStateShift) & kStateMask) != kStateForwardingAddress) { uint32_t rb_state = ReadBarrierState(); - if (!kUseReadBarrier) { + if (!gUseReadBarrier) { DCHECK_EQ(rb_state, 0U); } else { DCHECK(rb_state == ReadBarrier::NonGrayState() || diff --git a/runtime/mirror/class-refvisitor-inl.h b/runtime/mirror/class-refvisitor-inl.h index 9bcfd03f3b..ee5c11f99f 100644 --- a/runtime/mirror/class-refvisitor-inl.h +++ b/runtime/mirror/class-refvisitor-inl.h @@ -55,7 +55,7 @@ template<ReadBarrierOption kReadBarrierOption, bool kVisitProxyMethod, class Vis void Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) { VisitFields<kReadBarrierOption>([&](ArtField* field) REQUIRES_SHARED(art::Locks::mutator_lock_) { field->VisitRoots(visitor); - if (kIsDebugBuild && !kUseUserfaultfd && IsResolved()) { + if (kIsDebugBuild && !gUseUserfaultfd && IsResolved()) { CHECK_EQ(field->GetDeclaringClass<kReadBarrierOption>(), this) << GetStatus() << field->GetDeclaringClass()->PrettyClass() << " != " << PrettyClass(); } diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h index 74f9ccbbda..b937c2cb95 100644 --- a/runtime/mirror/dex_cache-inl.h +++ b/runtime/mirror/dex_cache-inl.h @@ -60,7 +60,7 @@ T* DexCache::AllocArray(MemberOffset obj_offset, MemberOffset num_offset, size_t return nullptr; } mirror::DexCache* dex_cache = this; - if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) { + if (gUseReadBarrier && Thread::Current()->GetIsGcMarking()) { // Several code paths use DexCache without read-barrier for performance. // We have to check the "to-space" object here to avoid allocating twice. dex_cache = reinterpret_cast<DexCache*>(ReadBarrier::Mark(dex_cache)); diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index 0ac71bfa2c..318a811fa1 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -104,7 +104,7 @@ inline void Object::Wait(Thread* self, int64_t ms, int32_t ns) { } inline uint32_t Object::GetMarkBit() { - CHECK(kUseReadBarrier); + CHECK(gUseReadBarrier); return GetLockWord(false).MarkBitState(); } diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc index ede1c66577..bb9e85dd0e 100644 --- a/runtime/mirror/object.cc +++ b/runtime/mirror/object.cc @@ -115,7 +115,7 @@ ObjPtr<Object> Object::CopyObject(ObjPtr<mirror::Object> dest, } } - if (kUseReadBarrier) { + if (gUseReadBarrier) { // We need a RB here. After copying the whole object above, copy references fields one by one // again with a RB to make sure there are no from space refs. TODO: Optimize this later? 
CopyReferenceFieldsWithReadBarrierVisitor visitor(dest); diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h index 1bd0185743..87f24eb230 100644 --- a/runtime/mirror/object_array-inl.h +++ b/runtime/mirror/object_array-inl.h @@ -121,7 +121,7 @@ inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos, if (copy_forward) { // Forward copy. bool baker_non_gray_case = false; - if (kUseReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { uintptr_t fake_address_dependency; if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) { baker_non_gray_case = true; @@ -146,7 +146,7 @@ inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos, } else { // Backward copy. bool baker_non_gray_case = false; - if (kUseReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { uintptr_t fake_address_dependency; if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) { baker_non_gray_case = true; @@ -196,7 +196,7 @@ inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos, // We can't use memmove since it does not handle read barriers and may do by per byte copying. // See b/32012820. bool baker_non_gray_case = false; - if (kUseReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { uintptr_t fake_address_dependency; if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) { baker_non_gray_case = true; @@ -244,7 +244,7 @@ inline void ObjectArray<T>::AssignableCheckingMemcpy(int32_t dst_pos, ObjPtr<T> o = nullptr; int i = 0; bool baker_non_gray_case = false; - if (kUseReadBarrier && kUseBakerReadBarrier) { + if (gUseReadBarrier && kUseBakerReadBarrier) { uintptr_t fake_address_dependency; if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) { baker_non_gray_case = true; diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc index d36a2abadc..68d329d15b 100644 --- a/runtime/mirror/var_handle.cc +++ b/runtime/mirror/var_handle.cc @@ -205,7 +205,7 @@ static ObjPtr<Class> GetReturnType(VarHandle::AccessModeTemplate access_mode_tem // Method to insert a read barrier for accessors to reference fields. inline void ReadBarrierForVarHandleAccess(ObjPtr<Object> obj, MemberOffset field_offset) REQUIRES_SHARED(Locks::mutator_lock_) { - if (kUseReadBarrier) { + if (gUseReadBarrier) { // We need to ensure that the reference stored in the field is a to-space one before attempting // the CompareAndSet/CompareAndExchange/Exchange operation otherwise it will fail incorrectly // if obj is in the process of being moved. diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 0cad79b6e3..4e64c95b8c 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -1139,7 +1139,7 @@ ObjPtr<mirror::Object> Monitor::MonitorEnter(Thread* self, lock_word.GCState())); // Only this thread pays attention to the count. Thus there is no need for stronger // than relaxed memory ordering. - if (!kUseReadBarrier) { + if (!gUseReadBarrier) { h_obj->SetLockWord(thin_locked, /* as_volatile= */ false); AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false); return h_obj.Get(); // Success! @@ -1239,7 +1239,7 @@ bool Monitor::MonitorExit(Thread* self, ObjPtr<mirror::Object> obj) { } else { new_lw = LockWord::FromDefault(lock_word.GCState()); } - if (!kUseReadBarrier) { + if (!gUseReadBarrier) { DCHECK_EQ(new_lw.ReadBarrierState(), 0U); // TODO: This really only needs memory_order_release, but we currently have // no way to specify that. 
In fact there seem to be no legitimate uses of SetLockWord @@ -1409,7 +1409,7 @@ ThreadState Monitor::FetchState(const Thread* thread, { ObjPtr<mirror::Object> lock_object = thread->GetMonitorEnterObject(); if (lock_object != nullptr) { - if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) { + if (gUseReadBarrier && Thread::Current()->GetIsGcMarking()) { // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack // may have not been flipped yet and "pretty_object" may be a from-space (stale) ref, in // which case the GetLockOwnerThreadId() call below will crash. So explicitly mark/forward @@ -1613,13 +1613,13 @@ MonitorList::~MonitorList() { } void MonitorList::DisallowNewMonitors() { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); MutexLock mu(Thread::Current(), monitor_list_lock_); allow_new_monitors_ = false; } void MonitorList::AllowNewMonitors() { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); Thread* self = Thread::Current(); MutexLock mu(self, monitor_list_lock_); allow_new_monitors_ = true; @@ -1637,8 +1637,8 @@ void MonitorList::Add(Monitor* m) { MutexLock mu(self, monitor_list_lock_); // CMS needs this to block for concurrent reference processing because an object allocated during // the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak - // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant. - while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) { + // ref. But CC (gUseReadBarrier == true) doesn't because of the to-space invariant. + while (!gUseReadBarrier && UNLIKELY(!allow_new_monitors_)) { // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the // presence of threads blocking for weak ref access. self->CheckEmptyCheckpointFromWeakRefAccess(&monitor_list_lock_); diff --git a/runtime/monitor_objects_stack_visitor.cc b/runtime/monitor_objects_stack_visitor.cc index 2e75e37bd1..524c0ec62f 100644 --- a/runtime/monitor_objects_stack_visitor.cc +++ b/runtime/monitor_objects_stack_visitor.cc @@ -90,7 +90,7 @@ bool MonitorObjectsStackVisitor::VisitFrame() { void MonitorObjectsStackVisitor::VisitLockedObject(ObjPtr<mirror::Object> o, void* context) { MonitorObjectsStackVisitor* self = reinterpret_cast<MonitorObjectsStackVisitor*>(context); if (o != nullptr) { - if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) { + if (gUseReadBarrier && Thread::Current()->GetIsGcMarking()) { // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack // may have not been flipped yet and "o" may be a from-space (stale) ref, in which case the // IdentityHashCode call below will crash. So explicitly mark/forward it here. diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc index f23010bf48..8b5635d2ec 100644 --- a/runtime/native/java_lang_ref_Reference.cc +++ b/runtime/native/java_lang_ref_Reference.cc @@ -37,7 +37,7 @@ static jobject Reference_getReferent(JNIEnv* env, jobject javaThis) { } static jboolean Reference_refersTo0(JNIEnv* env, jobject javaThis, jobject o) { - if (kUseReadBarrier && !kUseBakerReadBarrier) { + if (gUseReadBarrier && !kUseBakerReadBarrier) { // Fall back to naive implementation that may block and needlessly preserve javaThis. 
return env->IsSameObject(Reference_getReferent(env, javaThis), o); } @@ -48,7 +48,7 @@ static jboolean Reference_refersTo0(JNIEnv* env, jobject javaThis, jobject o) { if (referent == other) { return JNI_TRUE; } - if (!kUseReadBarrier || referent.IsNull() || other.IsNull()) { + if (!gUseReadBarrier || referent.IsNull() || other.IsNull()) { return JNI_FALSE; } // Explicitly handle the case in which referent is a from-space pointer. Don't use a diff --git a/runtime/native/jdk_internal_misc_Unsafe.cc b/runtime/native/jdk_internal_misc_Unsafe.cc index 307a2fa8b9..e70873289c 100644 --- a/runtime/native/jdk_internal_misc_Unsafe.cc +++ b/runtime/native/jdk_internal_misc_Unsafe.cc @@ -99,7 +99,7 @@ static jboolean Unsafe_compareAndSetObject(JNIEnv* env, jobject, jobject javaObj ObjPtr<mirror::Object> expectedValue = soa.Decode<mirror::Object>(javaExpectedValue); ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue); // JNI must use non transactional mode. - if (kUseReadBarrier) { + if (gUseReadBarrier) { // Need to make sure the reference stored in the field is a to-space one before attempting the // CAS or the CAS could fail incorrectly. // Note that the read barrier load does NOT need to be volatile. diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc index e9c5af013d..1781a29a27 100644 --- a/runtime/native/sun_misc_Unsafe.cc +++ b/runtime/native/sun_misc_Unsafe.cc @@ -69,7 +69,7 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb ObjPtr<mirror::Object> expectedValue = soa.Decode<mirror::Object>(javaExpectedValue); ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue); // JNI must use non transactional mode. - if (kUseReadBarrier) { + if (gUseReadBarrier) { // Need to make sure the reference stored in the field is a to-space one before attempting the // CAS or the CAS could fail incorrectly. // Note that the read barrier load does NOT need to be volatile. diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index e8358b7468..e0189a9353 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -1815,7 +1815,7 @@ class OatFileBackedByVdex final : public OatFileBase { store.Put(OatHeader::kCompilerFilter, CompilerFilter::NameOfFilter(CompilerFilter::kVerify)); store.Put(OatHeader::kCompilationReasonKey, "vdex"); store.Put(OatHeader::kConcurrentCopying, - kUseReadBarrier ? OatHeader::kTrueValue : OatHeader::kFalseValue); + gUseReadBarrier ? OatHeader::kTrueValue : OatHeader::kFalseValue); oat_header_.reset(OatHeader::Create(kRuntimeISA, isa_features.get(), number_of_dex_files, diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index 454ee796fb..78ab53bbf6 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -545,9 +545,7 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile& // compiled code and are otherwise okay, we should return something like // kOatRelocationOutOfDate. If they don't contain compiled code, the read // barrier state doesn't matter. 
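The comment repeated in the Unsafe and VarHandle hunks above ("the CAS could fail incorrectly") is worth unpacking: under a moving concurrent collector, a field can still hold the from-space copy of an object while the caller's expected value is the to-space copy, so a raw compare-and-swap compares unequal pointers to the same logical object and fails spuriously. Marking the field's referent first restores the to-space invariant. A self-contained illustration, with two plain addresses standing in for the two copies and Mark() standing in for ReadBarrier::Mark():

#include <atomic>
#include <cassert>

// Two addresses standing in for the from-space and to-space copies of one object.
static int from_space_copy;
static int to_space_copy;

// Stand-in for ReadBarrier::Mark(): returns the to-space address.
static int* Mark(int* ref) {
  return ref == &from_space_copy ? &to_space_copy : ref;
}

int main() {
  std::atomic<int*> field{&from_space_copy};  // GC has not updated the field yet
  int* expected = &to_space_copy;             // callers naturally hold to-space refs

  // Raw CAS: fails even though both pointers denote the same logical object.
  int* seen = expected;
  assert(!field.compare_exchange_strong(seen, nullptr));

  // The fix in the hunks above: read-barrier the field first so it holds a
  // to-space ref, then CAS.
  field.store(Mark(field.load()));
  seen = expected;
  assert(field.compare_exchange_strong(seen, nullptr));
  return 0;
}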
- const bool is_cc = file.GetOatHeader().IsConcurrentCopying(); - constexpr bool kRuntimeIsCC = kUseReadBarrier; - if (is_cc != kRuntimeIsCC) { + if (file.GetOatHeader().IsConcurrentCopying() != gUseReadBarrier) { return kOatCannotOpen; } diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h index f028473e83..ff4693f55e 100644 --- a/runtime/read_barrier-inl.h +++ b/runtime/read_barrier-inl.h @@ -35,7 +35,7 @@ template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierO inline MirrorType* ReadBarrier::Barrier( mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) { constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier; - if (kUseReadBarrier && with_read_barrier) { + if (gUseReadBarrier && with_read_barrier) { if (kCheckDebugDisallowReadBarrierCount) { Thread* const self = Thread::Current(); if (self != nullptr) { @@ -93,7 +93,7 @@ inline MirrorType* ReadBarrier::Barrier( UNREACHABLE(); } } else if (kReadBarrierOption == kWithFromSpaceBarrier) { - CHECK(kUseUserfaultfd); + CHECK(gUseUserfaultfd); MirrorType* old = ref_addr->template AsMirrorPtr<kIsVolatile>(); mirror::Object* ref = Runtime::Current()->GetHeap()->MarkCompactCollector()->GetFromSpaceAddrFromBarrier(old); @@ -109,7 +109,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root, GcRootSource* gc_root_source) { MirrorType* ref = *root; const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier; - if (kUseReadBarrier && with_read_barrier) { + if (gUseReadBarrier && with_read_barrier) { if (kCheckDebugDisallowReadBarrierCount) { Thread* const self = Thread::Current(); if (self != nullptr) { @@ -154,7 +154,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<Mirro GcRootSource* gc_root_source) { MirrorType* ref = root->AsMirrorPtr(); const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier; - if (kUseReadBarrier && with_read_barrier) { + if (gUseReadBarrier && with_read_barrier) { if (kCheckDebugDisallowReadBarrierCount) { Thread* const self = Thread::Current(); if (self != nullptr) { @@ -199,7 +199,7 @@ template <typename MirrorType> inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) { // Only read-barrier configurations can have mutators run while // the GC is marking. - if (!kUseReadBarrier) { + if (!gUseReadBarrier) { return ref; } // IsMarked does not handle null, so handle it here. diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h index 3b89377860..be5a9a030a 100644 --- a/runtime/read_barrier.h +++ b/runtime/read_barrier.h @@ -94,7 +94,7 @@ class ReadBarrier { // Without the holder object, and only with the read barrier configuration (no-op otherwise). static void MaybeAssertToSpaceInvariant(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) { - if (kUseReadBarrier) { + if (gUseReadBarrier) { AssertToSpaceInvariant(ref); } } diff --git a/runtime/read_barrier_config.h b/runtime/read_barrier_config.h index 3c5afad25f..e974b04797 100644 --- a/runtime/read_barrier_config.h +++ b/runtime/read_barrier_config.h @@ -62,18 +62,8 @@ static constexpr bool kUseTableLookupReadBarrier = true; static constexpr bool kUseTableLookupReadBarrier = false; #endif -static constexpr bool kUseReadBarrier = kUseBakerReadBarrier || kUseTableLookupReadBarrier; -static constexpr bool kUseUserfaultfd = !kUseReadBarrier; - -// Debugging flag that forces the generation of read barriers, but -// does not trigger the use of the concurrent copying GC. 
-// -// TODO: Remove this flag when the read barriers compiler -// instrumentation is completed. -static constexpr bool kForceReadBarrier = false; -// TODO: Likewise, remove this flag when kForceReadBarrier is removed -// and replace it with kUseReadBarrier. -static constexpr bool kEmitCompilerReadBarrier = kForceReadBarrier || kUseReadBarrier; +extern const bool gUseReadBarrier; +extern const bool gUseUserfaultfd; // Disabled for performance reasons. static constexpr bool kCheckDebugDisallowReadBarrierCount = kIsDebugBuild; diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 1ae44053c9..80d8a3b920 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -201,10 +201,6 @@ static constexpr double kLowMemoryMaxLoadFactor = 0.8; static constexpr double kNormalMinLoadFactor = 0.4; static constexpr double kNormalMaxLoadFactor = 0.7; -// Extra added to the default heap growth multiplier. Used to adjust the GC ergonomics for the read -// barrier config. -static constexpr double kExtraDefaultHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0; - Runtime* Runtime::instance_ = nullptr; struct TraceConfig { @@ -1162,10 +1158,6 @@ void Runtime::InitNonZygoteOrPostFork( } // Create the thread pools. - if (!kUseUserfaultfd) { - // Userfaultfd GC creates the thread-pool on its own. - heap_->CreateThreadPool(); - } // Avoid creating the runtime thread pool for system server since it will not be used and would // waste memory. if (!is_system_server) { @@ -1615,9 +1607,11 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { // If low memory mode, use 1.0 as the multiplier by default. foreground_heap_growth_multiplier = 1.0f; } else { + // Extra added to the default heap growth multiplier for concurrent GC + // compaction algorithms. This is done for historical reasons. + // TODO: remove when we revisit heap configurations. foreground_heap_growth_multiplier = - runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier) + - kExtraDefaultHeapGrowthMultiplier; + runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier) + 1.0f; } XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption); @@ -1645,9 +1639,9 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { image_locations_, instruction_set_, // Override the collector type to CC if the read barrier config. - kUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_, - kUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground) - : runtime_options.GetOrDefault(Opt::BackgroundGc), + gUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_, + gUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground) + : BackgroundGcOption(xgc_option.collector_type_), runtime_options.GetOrDefault(Opt::LargeObjectSpace), runtime_options.GetOrDefault(Opt::LargeObjectThreshold), runtime_options.GetOrDefault(Opt::ParallelGCThreads), @@ -2617,7 +2611,7 @@ ArtMethod* Runtime::CreateCalleeSaveMethod() { } void Runtime::DisallowNewSystemWeaks() { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); monitor_list_->DisallowNewMonitors(); intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites); java_vm_->DisallowNewWeakGlobals(); @@ -2633,7 +2627,7 @@ void Runtime::DisallowNewSystemWeaks() { } void Runtime::AllowNewSystemWeaks() { - CHECK(!kUseReadBarrier); + CHECK(!gUseReadBarrier); monitor_list_->AllowNewMonitors(); intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping. 
java_vm_->AllowNewWeakGlobals(); diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def index 76d16579bb..6721834705 100644 --- a/runtime/runtime_options.def +++ b/runtime/runtime_options.def @@ -80,7 +80,7 @@ RUNTIME_OPTIONS_KEY (Unit, DumpJITInfoOnShutdown) RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint) RUNTIME_OPTIONS_KEY (bool, AlwaysLogExplicitGcs, true) RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode) -RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier)) +RUNTIME_OPTIONS_KEY (bool, UseTLAB, kUseTlab) RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true) RUNTIME_OPTIONS_KEY (bool, UseJitCompilation, true) RUNTIME_OPTIONS_KEY (bool, UseProfiledJitCompilation, false) diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h index 324cd3787a..a3ac1e763c 100644 --- a/runtime/thread-inl.h +++ b/runtime/thread-inl.h @@ -373,7 +373,7 @@ inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) { } inline bool Thread::GetWeakRefAccessEnabled() const { - CHECK(kUseReadBarrier); + CHECK(gUseReadBarrier); DCHECK(this == Thread::Current()); WeakRefAccessState s = tls32_.weak_ref_access_enabled.load(std::memory_order_relaxed); if (LIKELY(s == WeakRefAccessState::kVisiblyEnabled)) { @@ -428,7 +428,7 @@ inline bool Thread::ModifySuspendCount(Thread* self, int delta, AtomicInteger* suspend_barrier, SuspendReason reason) { - if (delta > 0 && ((kUseReadBarrier && this != self) || suspend_barrier != nullptr)) { + if (delta > 0 && ((gUseReadBarrier && this != self) || suspend_barrier != nullptr)) { // When delta > 0 (requesting a suspend), ModifySuspendCountInternal() may fail either if // active_suspend_barriers is full or we are in the middle of a thread flip. Retry in a loop. while (true) { diff --git a/runtime/thread.cc b/runtime/thread.cc index 0561c5b615..5492cc869f 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -169,7 +169,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active); void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) { - CHECK(kUseReadBarrier); + CHECK(gUseReadBarrier); tls32_.is_gc_marking = is_marking; UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking); } @@ -1482,7 +1482,7 @@ bool Thread::ModifySuspendCountInternal(Thread* self, return false; } - if (kUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) { + if (gUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) { // Force retry of a suspend request if it's in the middle of a thread flip to avoid a // deadlock. b/31683379. return false; @@ -2578,7 +2578,7 @@ void Thread::Destroy() { } // Mark-stack revocation must be performed at the very end. No // checkpoint/flip-function or read-barrier should be called after this. - if (kUseReadBarrier) { + if (gUseReadBarrier) { Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this); } } @@ -4716,7 +4716,7 @@ bool Thread::IsAotCompiler() { mirror::Object* Thread::GetPeerFromOtherThread() const { DCHECK(tlsPtr_.jpeer == nullptr); mirror::Object* peer = tlsPtr_.opeer; - if (kUseReadBarrier && Current()->GetIsGcMarking()) { + if (gUseReadBarrier && Current()->GetIsGcMarking()) { // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack // may have not been flipped yet and peer may be a from-space (stale) ref. So explicitly // mark/forward it here. 
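Taken together, the read_barrier_config.h, runtime.cc and runtime_options.def hunks above implement the pattern this CL is named for: the constexpr kUseReadBarrier/kEmitCompilerReadBarrier pair becomes a single extern const bool whose value is computed once, during static initialization, from the build flags and the userfaultfd probe. A minimal sketch of that idiom under illustrative names (art_sketch and ProbeOk are not ART's):

#include <cstdio>

namespace art_sketch {
// The header's shape: declarations only, no longer constexpr.
extern const bool gUseReadBarrier;
extern const bool gUseUserfaultfd;

// In ART the definitions live in mark_compact.cc; this stand-in probe
// substitutes for ShouldUseUserfaultfd().
static bool ProbeOk() { return true; }

const bool gUseReadBarrier = !ProbeOk();  // CC only if the probe fails
const bool gUseUserfaultfd = !gUseReadBarrier;
}  // namespace art_sketch

int main() {
  // Call sites now branch at run time; the compiler can no longer fold the
  // branch away as it could with a constexpr flag.
  if (art_sketch::gUseUserfaultfd) {
    printf("userfaultfd GC selected\n");
  } else {
    printf("concurrent-copying GC selected\n");
  }
  return 0;
}

The cost is that guards such as CHECK(gUseReadBarrier) are evaluated at run time rather than compiled out, which is presumably why the still-constexpr kUseBakerReadBarrier remains alongside gUseReadBarrier in the hot-path conditions above.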
diff --git a/runtime/thread.h b/runtime/thread.h index f40f03f371..60fd076c4d 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -379,11 +379,11 @@ class Thread { void WaitForFlipFunction(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() { - CHECK(kUseReadBarrier); + CHECK(gUseReadBarrier); return tlsPtr_.thread_local_mark_stack; } void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) { - CHECK(kUseReadBarrier); + CHECK(gUseReadBarrier); tlsPtr_.thread_local_mark_stack = stack; } @@ -1045,7 +1045,7 @@ class Thread { } bool GetIsGcMarking() const { - CHECK(kUseReadBarrier); + CHECK(gUseReadBarrier); return tls32_.is_gc_marking; } @@ -1058,7 +1058,7 @@ bool GetWeakRefAccessEnabled() const; // Only safe for current thread. void SetWeakRefAccessEnabled(bool enabled) { - CHECK(kUseReadBarrier); + CHECK(gUseReadBarrier); WeakRefAccessState new_state = enabled ? WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled; tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release); @@ -2236,13 +2236,13 @@ class ScopedTransitioningToRunnable : public ValueObject { explicit ScopedTransitioningToRunnable(Thread* self) : self_(self) { DCHECK_EQ(self, Thread::Current()); - if (kUseReadBarrier) { + if (gUseReadBarrier) { self_->SetIsTransitioningToRunnable(true); } } ~ScopedTransitioningToRunnable() { - if (kUseReadBarrier) { + if (gUseReadBarrier) { self_->SetIsTransitioningToRunnable(false); } } diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index 555d2fde20..6b23c349ef 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -1270,7 +1270,7 @@ void ThreadList::Register(Thread* self) { } CHECK(!Contains(self)); list_.push_back(self); - if (kUseReadBarrier) { + if (gUseReadBarrier) { gc::collector::ConcurrentCopying* const cc = Runtime::Current()->GetHeap()->ConcurrentCopyingCollector(); // Initialize according to the state of the CC collector.
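ScopedTransitioningToRunnable, shown above, is an ordinary RAII guard around the per-thread transitioning flag; with gUseReadBarrier now a runtime value, the guard simply degrades to a runtime no-op when the userfaultfd GC is in use. Its behavior looks like the following standalone mock (MockSelf and the mock flag are illustrative only):

#include <cstdio>

struct MockSelf {
  bool is_transitioning_to_runnable = false;
  void SetIsTransitioningToRunnable(bool v) { is_transitioning_to_runnable = v; }
};

static const bool gUseReadBarrierMock = true;  // stands in for gUseReadBarrier

class ScopedTransitioningToRunnable {
 public:
  explicit ScopedTransitioningToRunnable(MockSelf* self) : self_(self) {
    if (gUseReadBarrierMock) self_->SetIsTransitioningToRunnable(true);
  }
  ~ScopedTransitioningToRunnable() {
    if (gUseReadBarrierMock) self_->SetIsTransitioningToRunnable(false);
  }

 private:
  MockSelf* const self_;
};

int main() {
  MockSelf self;
  {
    ScopedTransitioningToRunnable guard(&self);
    // While the guard is live, the CC GC treats this thread as transitioning
    // to runnable for thread-flip purposes.
    printf("transitioning: %d\n", self.is_transitioning_to_runnable);
  }
  printf("transitioning: %d\n", self.is_transitioning_to_runnable);
  return 0;
}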