Convert kUseReadBarrier from constexpr to static const
This CL compiles both the CC and userfaultfd GCs into the ART library,
enabling us to choose either of the two at boot time, depending on
whether the device has the userfaultfd kernel feature.

The CC GC is still chosen unless ART_USE_READ_BARRIER=false is set at
build time. This behavior will later be changed so that CC is chosen
*only* if ART_USE_READ_BARRIER=true is set explicitly; otherwise, if the
device supports userfaultfd, that GC will be chosen.
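
To make the conversion in the title concrete, the gist is roughly the
following sketch (illustrative only: the flag's actual declaration site
is not part of this diff, and DeviceSupportsUserfaultfd() is a
hypothetical stand-in for the real load-time probe):

  #include <cstdio>

  // Hypothetical stand-in for the real kernel-feature probe.
  static bool DeviceSupportsUserfaultfd() { return true; }

  // Before (sketch): a compile-time constant; the compiler folds it and
  // dead-strips whichever GC sits on the false side, so only one
  // collector can live in the library.
  //   static constexpr bool kUseReadBarrier = true;

  // After (sketch): still immutable, but computed when the library is
  // loaded, so both GC code paths must be compiled in.
  static const bool gUseReadBarrier = !DeviceSupportsUserfaultfd();

  int main() {
    std::printf("GC: %s\n", gUseReadBarrier ? "CC" : "userfaultfd");
    return 0;
  }
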
Bug: 160737021
Bug: 230021033
Test: art/test/testrunner/testrunner.py
Change-Id: I370f1a9f6b8cdff8c2ce3cf7aa936bccd7ed675f
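
For reference, probing for the userfaultfd kernel feature at boot could
look roughly like the sketch below (not the CL's actual detection code,
which is outside this excerpt; KernelSupportsUserfaultfd() is an
illustrative name, and UFFD_USER_MODE_ONLY needs linux-5.11+ UAPI
headers):

  #include <fcntl.h>
  #include <linux/userfaultfd.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  // Returns true if the kernel exposes a usable userfaultfd(2) syscall.
  static bool KernelSupportsUserfaultfd() {
    int fd = syscall(__NR_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY);
    if (fd < 0) {
      return false;
    }
    close(fd);
    return true;
  }
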
diff --git a/build/art.go b/build/art.go
index c39b7e3..56df142 100644
--- a/build/art.go
+++ b/build/art.go
@@ -38,19 +38,13 @@
opt := ctx.Config().GetenvWithDefault("ART_NDEBUG_OPT_FLAG", "-O3")
cflags = append(cflags, opt)
- tlab := false
-
- gcType := ctx.Config().GetenvWithDefault("ART_DEFAULT_GC_TYPE", "CMS")
+ gcType := ctx.Config().GetenvWithDefault("ART_DEFAULT_GC_TYPE", "CMC")
if ctx.Config().IsEnvTrue("ART_TEST_DEBUG_GC") {
gcType = "SS"
- tlab = true
}
cflags = append(cflags, "-DART_DEFAULT_GC_TYPE_IS_"+gcType)
- if tlab {
- cflags = append(cflags, "-DART_USE_TLAB=1")
- }
if ctx.Config().IsEnvTrue("ART_HEAP_POISONING") {
cflags = append(cflags, "-DART_HEAP_POISONING=1")
@@ -70,11 +64,18 @@
asflags = append(asflags,
"-DART_USE_READ_BARRIER=1",
"-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
- }
- if !ctx.Config().IsEnvFalse("ART_USE_GENERATIONAL_CC") {
- cflags = append(cflags, "-DART_USE_GENERATIONAL_CC=1")
+ if !ctx.Config().IsEnvFalse("ART_USE_GENERATIONAL_CC") {
+ cflags = append(cflags, "-DART_USE_GENERATIONAL_CC=1")
+ }
+  // For now, force CC, as we don't want to make the userfaultfd GC the
+  // default. Eventually, force CC only if ART_USE_READ_BARRIER was
+  // explicitly set to true at build time.
+ cflags = append(cflags, "-DART_FORCE_USE_READ_BARRIER=1")
}
+  // The only GC that does not want ART_USE_TLAB set is CMS, which isn't actually used.
+  // When the read barrier is not in use, we use the userfaultfd GC.
+ cflags = append(cflags, "-DART_USE_TLAB=1")
cdexLevel := ctx.Config().GetenvWithDefault("ART_DEFAULT_COMPACT_DEX_LEVEL", "fast")
cflags = append(cflags, "-DART_DEFAULT_COMPACT_DEX_LEVEL="+cdexLevel)
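
On the C++ side, the defines added above would presumably be consumed
along these lines (a sketch under the assumption that a single header
gates the flag; the real header is not shown in this diff):

  // Hypothetical header fragment gating the flag on the build config.
  #ifdef ART_FORCE_USE_READ_BARRIER
  // The build pinned the CC GC, so keep a foldable constant.
  constexpr bool gUseReadBarrier = true;
  #else
  // Both collectors are compiled in; the value is computed at load time,
  // after probing the kernel for userfaultfd support.
  extern const bool gUseReadBarrier;
  #endif
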
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 42072eb..c8be993 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -198,7 +198,7 @@
// Skip this for @CriticalNative because we're not passing a `jclass` to the native method.
std::unique_ptr<JNIMacroLabel> jclass_read_barrier_slow_path;
std::unique_ptr<JNIMacroLabel> jclass_read_barrier_return;
- if (kUseReadBarrier && is_static && LIKELY(!is_critical_native)) {
+ if (gUseReadBarrier && is_static && LIKELY(!is_critical_native)) {
jclass_read_barrier_slow_path = __ CreateLabel();
jclass_read_barrier_return = __ CreateLabel();
@@ -592,7 +592,7 @@
// 8.1. Read barrier slow path for the declaring class in the method for a static call.
// Skip this for @CriticalNative because we're not passing a `jclass` to the native method.
- if (kUseReadBarrier && is_static && !is_critical_native) {
+ if (gUseReadBarrier && is_static && !is_critical_native) {
__ Bind(jclass_read_barrier_slow_path.get());
// Construct slow path for read barrier:
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index af08ddd..b514f9b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1672,7 +1672,7 @@
// When (non-Baker) read barriers are enabled, some instructions
// use a slow path to emit a read barrier, which does not trigger
// GC.
- (kEmitCompilerReadBarrier &&
+ (gUseReadBarrier &&
!kUseBakerReadBarrier &&
(instruction->IsInstanceFieldGet() ||
instruction->IsPredicatedInstanceFieldGet() ||
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b09219a..7b46e13 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -57,8 +57,8 @@
// Maximum value for a primitive long.
static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);
-static constexpr ReadBarrierOption kCompilerReadBarrierOption =
- kEmitCompilerReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
+static const ReadBarrierOption gCompilerReadBarrierOption =
+ gUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
class Assembler;
class CodeGenerator;
@@ -461,7 +461,7 @@
// If the target class is in the boot image, it's non-moveable and it doesn't matter
// if we compare it with a from-space or to-space reference, the result is the same.
// It's OK to traverse a class hierarchy jumping between from-space and to-space.
- return kEmitCompilerReadBarrier && !instance_of->GetTargetClass()->IsInBootImage();
+ return gUseReadBarrier && !instance_of->GetTargetClass()->IsInBootImage();
}
static ReadBarrierOption ReadBarrierOptionForInstanceOf(HInstanceOf* instance_of) {
@@ -476,7 +476,7 @@
case TypeCheckKind::kArrayObjectCheck:
case TypeCheckKind::kInterfaceCheck: {
bool needs_read_barrier =
- kEmitCompilerReadBarrier && !check_cast->GetTargetClass()->IsInBootImage();
+ gUseReadBarrier && !check_cast->GetTargetClass()->IsInBootImage();
// We do not emit read barriers for HCheckCast, so we can get false negatives
// and the slow path shall re-check and simply return if the cast is actually OK.
return !needs_read_barrier;
@@ -679,7 +679,7 @@
return LocationSummary::kCallOnMainOnly;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(!load->NeedsEnvironment());
- return kEmitCompilerReadBarrier
+ return gUseReadBarrier
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
break;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d3031b7..eb95541 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -583,7 +583,7 @@
obj_(obj),
offset_(offset),
index_(index) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// If `obj` is equal to `out` or `ref`, it means the initial object
// has been overwritten by (or after) the heap object reference load
// to be instrumented, e.g.:
@@ -762,7 +762,7 @@
public:
ReadBarrierForRootSlowPathARM64(HInstruction* instruction, Location out, Location root)
: SlowPathCodeARM64(instruction), out_(out), root_(root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
@@ -2058,7 +2058,7 @@
bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
bool object_field_get_with_read_barrier =
- kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_field_get_with_read_barrier
@@ -2114,7 +2114,7 @@
MemOperand field =
HeapOperand(InputRegisterAt(instruction, receiver_input), field_info.GetFieldOffset());
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier &&
+ if (gUseReadBarrier && kUseBakerReadBarrier &&
load_type == DataType::Type::kReference) {
// Object FieldGet with Baker's read barrier case.
// /* HeapReference<Object> */ out = *(base + offset)
@@ -2556,7 +2556,7 @@
void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
- kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_array_get_with_read_barrier
@@ -2612,10 +2612,10 @@
// does not support the HIntermediateAddress instruction.
DCHECK(!((type == DataType::Type::kReference) &&
instruction->GetArray()->IsIntermediateAddress() &&
- kEmitCompilerReadBarrier &&
+ gUseReadBarrier &&
!kUseBakerReadBarrier));
- if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
// Object ArrayGet with Baker's read barrier case.
// Note that a potential implicit null check is handled in the
// CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
@@ -3905,7 +3905,7 @@
// Temp is used for read barrier.
static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
- if (kEmitCompilerReadBarrier &&
+ if (gUseReadBarrier &&
(kUseBakerReadBarrier ||
type_check_kind == TypeCheckKind::kAbstractClassCheck ||
type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -5320,7 +5320,7 @@
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -5334,7 +5334,7 @@
}
locations->SetOut(Location::RequiresRegister());
if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ if (!gUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
@@ -5361,7 +5361,7 @@
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
- : kCompilerReadBarrierOption;
+ : gCompilerReadBarrierOption;
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -5530,7 +5530,7 @@
} else {
locations->SetOut(Location::RequiresRegister());
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ if (!gUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
@@ -5584,7 +5584,7 @@
temp,
/* offset placeholder */ 0u,
ldr_label,
- kCompilerReadBarrierOption);
+ gCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load);
codegen_->AddSlowPath(slow_path);
@@ -5608,7 +5608,7 @@
out.X(),
/* offset= */ 0,
/* fixup_label= */ nullptr,
- kCompilerReadBarrierOption);
+ gCompilerReadBarrierOption);
return;
}
default:
@@ -6469,7 +6469,7 @@
DataType::Type type = DataType::Type::kReference;
Register out_reg = RegisterFrom(out, type);
if (read_barrier_option == kWithReadBarrier) {
- CHECK(kEmitCompilerReadBarrier);
+ CHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
@@ -6510,7 +6510,7 @@
Register out_reg = RegisterFrom(out, type);
Register obj_reg = RegisterFrom(obj, type);
if (read_barrier_option == kWithReadBarrier) {
- CHECK(kEmitCompilerReadBarrier);
+ CHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -6545,7 +6545,7 @@
DCHECK(fixup_label == nullptr || offset == 0u);
Register root_reg = RegisterFrom(root, DataType::Type::kReference);
if (read_barrier_option == kWithReadBarrier) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used.
@@ -6611,7 +6611,7 @@
void CodeGeneratorARM64::GenerateIntrinsicCasMoveWithBakerReadBarrier(
vixl::aarch64::Register marked_old_value,
vixl::aarch64::Register old_value) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
// Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR.
@@ -6633,7 +6633,7 @@
const vixl::aarch64::MemOperand& src,
bool needs_null_check,
bool use_load_acquire) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
@@ -6729,7 +6729,7 @@
uint32_t data_offset,
Location index,
bool needs_null_check) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
static_assert(
@@ -6807,7 +6807,7 @@
void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
// The following condition is a compile-time one, so it does not have a run-time cost.
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) {
+ if (kIsDebugBuild && gUseReadBarrier && kUseBakerReadBarrier) {
// The following condition is a run-time one; it is executed after the
// previous compile-time test, to avoid penalizing non-debug builds.
if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) {
@@ -6836,7 +6836,7 @@
Location obj,
uint32_t offset,
Location index) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// Insert a slow path based read barrier *after* the reference load.
//
@@ -6861,7 +6861,7 @@
Location obj,
uint32_t offset,
Location index) {
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Baker's read barriers shall be handled by the fast path
// (CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier).
DCHECK(!kUseBakerReadBarrier);
@@ -6876,7 +6876,7 @@
void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instruction,
Location out,
Location root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// Insert a slow path based read barrier *after* the GC root load.
//
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f4d652c..c1984e3 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -92,7 +92,11 @@
vixl::aarch64::CPURegList(
tr,
// Reserve X20 as Marking Register when emitting Baker read barriers.
- ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) ? mr : vixl::aarch64::NoCPUReg),
+    // TODO: We don't need to reserve the marking register for the userfaultfd
+    // GC, but that would require some work in the assembler code, as the
+    // right GC is chosen at load time, not at compile time.
+ ((gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier
+ ? mr : vixl::aarch64::NoCPUReg),
kImplicitSuspendCheckRegister,
vixl::aarch64::lr);
@@ -111,7 +115,7 @@
const vixl::aarch64::CPURegList callee_saved_core_registers(
vixl::aarch64::CPURegister::kRegister,
vixl::aarch64::kXRegSize,
- ((kEmitCompilerReadBarrier && kUseBakerReadBarrier)
+ ((gUseReadBarrier && kUseBakerReadBarrier)
? vixl::aarch64::x21.GetCode()
: vixl::aarch64::x20.GetCode()),
vixl::aarch64::x30.GetCode());
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 00a6c83..bf8e896 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -744,7 +744,7 @@
obj_(obj),
offset_(offset),
index_(index) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// If `obj` is equal to `out` or `ref`, it means the initial object
// has been overwritten by (or after) the heap object reference load
// to be instrumented, e.g.:
@@ -922,7 +922,7 @@
public:
ReadBarrierForRootSlowPathARMVIXL(HInstruction* instruction, Location out, Location root)
: SlowPathCodeARMVIXL(instruction), out_(out), root_(root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
@@ -2101,7 +2101,10 @@
blocked_core_registers_[LR] = true;
blocked_core_registers_[PC] = true;
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+  // TODO: We don't need to reserve the marking register for the userfaultfd
+  // GC, but that would require some work in the assembler code, as the
+  // right GC is chosen at load time, not at compile time.
+ if ((gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier) {
// Reserve marking register.
blocked_core_registers_[MR] = true;
}
@@ -5911,7 +5914,7 @@
instruction->IsPredicatedInstanceFieldGet());
bool object_field_get_with_read_barrier =
- kEmitCompilerReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference);
+ gUseReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference);
bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
@@ -6082,7 +6085,7 @@
case DataType::Type::kReference: {
// /* HeapReference<Object> */ out = *(base + offset)
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
Location maybe_temp = (locations->GetTempCount() != 0) ? locations->GetTemp(0) : Location();
// Note that a potential implicit null check is handled in this
// CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call.
@@ -6386,7 +6389,7 @@
void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
- kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_array_get_with_read_barrier
@@ -6534,14 +6537,14 @@
// The read barrier instrumentation of object ArrayGet
// instructions does not support the HIntermediateAddress
// instruction.
- DCHECK(!(has_intermediate_address && kEmitCompilerReadBarrier));
+ DCHECK(!(has_intermediate_address && gUseReadBarrier));
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ out =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier call.
DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
@@ -7459,7 +7462,7 @@
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -7473,7 +7476,7 @@
}
locations->SetOut(Location::RequiresRegister());
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ if (!gUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
@@ -7501,7 +7504,7 @@
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
- : kCompilerReadBarrierOption;
+ : gCompilerReadBarrierOption;
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -7721,7 +7724,7 @@
} else {
locations->SetOut(Location::RequiresRegister());
if (load_kind == HLoadString::LoadKind::kBssEntry) {
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ if (!gUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need, including temps.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
@@ -7760,7 +7763,7 @@
codegen_->EmitMovwMovtPlaceholder(labels, out);
// All aligned loads are implicitly atomic consume operations on ARM.
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /*offset=*/ 0, kCompilerReadBarrierOption);
+ load, out_loc, out, /*offset=*/ 0, gCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
@@ -7781,7 +7784,7 @@
load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /*offset=*/ 0, kCompilerReadBarrierOption);
+ load, out_loc, out, /*offset=*/ 0, gCompilerReadBarrierOption);
return;
}
default:
@@ -7838,7 +7841,7 @@
// Temp is used for read barrier.
static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
- if (kEmitCompilerReadBarrier &&
+ if (gUseReadBarrier &&
(kUseBakerReadBarrier ||
type_check_kind == TypeCheckKind::kAbstractClassCheck ||
type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -8773,7 +8776,7 @@
ReadBarrierOption read_barrier_option) {
vixl32::Register out_reg = RegisterFrom(out);
if (read_barrier_option == kWithReadBarrier) {
- CHECK(kEmitCompilerReadBarrier);
+ CHECK(gUseReadBarrier);
DCHECK(maybe_temp.IsRegister()) << maybe_temp;
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
@@ -8808,7 +8811,7 @@
vixl32::Register out_reg = RegisterFrom(out);
vixl32::Register obj_reg = RegisterFrom(obj);
if (read_barrier_option == kWithReadBarrier) {
- CHECK(kEmitCompilerReadBarrier);
+ CHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
DCHECK(maybe_temp.IsRegister()) << maybe_temp;
// Load with fast path based Baker's read barrier.
@@ -8837,7 +8840,7 @@
ReadBarrierOption read_barrier_option) {
vixl32::Register root_reg = RegisterFrom(root);
if (read_barrier_option == kWithReadBarrier) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used.
@@ -8901,7 +8904,7 @@
void CodeGeneratorARMVIXL::GenerateIntrinsicCasMoveWithBakerReadBarrier(
vixl::aarch32::Register marked_old_value,
vixl::aarch32::Register old_value) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
// Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR.
@@ -8935,7 +8938,7 @@
vixl32::Register obj,
const vixl32::MemOperand& src,
bool needs_null_check) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
@@ -9028,7 +9031,7 @@
Location index,
Location temp,
bool needs_null_check) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
static_assert(
@@ -9094,7 +9097,7 @@
void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
// The following condition is a compile-time one, so it does not have a run-time cost.
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) {
+ if (kIsDebugBuild && gUseReadBarrier && kUseBakerReadBarrier) {
// The following condition is a run-time one; it is executed after the
// previous compile-time test, to avoid penalizing non-debug builds.
if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) {
@@ -9124,7 +9127,7 @@
Location obj,
uint32_t offset,
Location index) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// Insert a slow path based read barrier *after* the reference load.
//
@@ -9150,7 +9153,7 @@
Location obj,
uint32_t offset,
Location index) {
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Baker's read barriers shall be handled by the fast path
// (CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier).
DCHECK(!kUseBakerReadBarrier);
@@ -9165,7 +9168,7 @@
void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruction,
Location out,
Location root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// Insert a slow path based read barrier *after* the GC root load.
//
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 790ad0f..62a4d36 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -84,7 +84,7 @@
vixl::aarch32::r6,
vixl::aarch32::r7),
// Do not consider r8 as a callee-save register with Baker read barriers.
- ((kEmitCompilerReadBarrier && kUseBakerReadBarrier)
+ ((gUseReadBarrier && kUseBakerReadBarrier)
? vixl::aarch32::RegisterList()
: vixl::aarch32::RegisterList(vixl::aarch32::r8)),
vixl::aarch32::RegisterList(vixl::aarch32::r10,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8c4a11c..f4529be 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -503,7 +503,7 @@
: SlowPathCode(instruction),
ref_(ref),
unpoison_ref_before_marking_(unpoison_ref_before_marking) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
}
const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; }
@@ -590,7 +590,7 @@
field_addr_(field_addr),
unpoison_ref_before_marking_(unpoison_ref_before_marking),
temp_(temp) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
}
const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
@@ -744,7 +744,7 @@
obj_(obj),
offset_(offset),
index_(index) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// If `obj` is equal to `out` or `ref`, it means the initial object
// has been overwritten by (or after) the heap object reference load
// to be instrumented, e.g.:
@@ -918,7 +918,7 @@
public:
ReadBarrierForRootSlowPathX86(HInstruction* instruction, Location out, Location root)
: SlowPathCode(instruction), out_(out), root_(root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
@@ -1619,7 +1619,7 @@
__ movsd(dst.AsFpuRegister<XmmRegister>(), src);
break;
case DataType::Type::kReference:
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!gUseReadBarrier);
__ movl(dst.AsRegister<Register>(), src);
__ MaybeUnpoisonHeapReference(dst.AsRegister<Register>());
break;
@@ -5731,11 +5731,11 @@
instruction->IsPredicatedInstanceFieldGet());
bool object_field_get_with_read_barrier =
- kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
- kEmitCompilerReadBarrier
+ gUseReadBarrier
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
@@ -5793,7 +5793,7 @@
if (load_type == DataType::Type::kReference) {
// /* HeapReference<Object> */ out = *(base + offset)
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
@@ -6202,7 +6202,7 @@
void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
- kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_array_get_with_read_barrier
@@ -6244,7 +6244,7 @@
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ out =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
@@ -7057,7 +7057,7 @@
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -7071,7 +7071,7 @@
}
locations->SetOut(Location::RequiresRegister());
if (call_kind == LocationSummary::kCallOnSlowPath && cls->HasPcRelativeLoadKind()) {
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ if (!gUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution and/or initialization to save everything.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
@@ -7109,7 +7109,7 @@
bool generate_null_check = false;
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
- : kCompilerReadBarrierOption;
+ : gCompilerReadBarrierOption;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
@@ -7296,7 +7296,7 @@
} else {
locations->SetOut(Location::RequiresRegister());
if (load_kind == HLoadString::LoadKind::kBssEntry) {
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ if (!gUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString to save everything.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
@@ -7345,7 +7345,7 @@
Address address = Address(method_address, CodeGeneratorX86::kPlaceholder32BitOffset);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::String> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption);
// No need for memory fence, thanks to the x86 memory model.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
@@ -7365,7 +7365,7 @@
Label* fixup_label = codegen_->NewJitRootStringPatch(
load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption);
return;
}
default:
@@ -7416,7 +7416,7 @@
// Temp is used for read barrier.
static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
- if (kEmitCompilerReadBarrier &&
+ if (gUseReadBarrier &&
!kUseBakerReadBarrier &&
(type_check_kind == TypeCheckKind::kAbstractClassCheck ||
type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -8188,7 +8188,7 @@
ReadBarrierOption read_barrier_option) {
Register out_reg = out.AsRegister<Register>();
if (read_barrier_option == kWithReadBarrier) {
- CHECK(kEmitCompilerReadBarrier);
+ CHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
@@ -8222,7 +8222,7 @@
Register out_reg = out.AsRegister<Register>();
Register obj_reg = obj.AsRegister<Register>();
if (read_barrier_option == kWithReadBarrier) {
- CHECK(kEmitCompilerReadBarrier);
+ CHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -8250,7 +8250,7 @@
ReadBarrierOption read_barrier_option) {
Register root_reg = root.AsRegister<Register>();
if (read_barrier_option == kWithReadBarrier) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used:
@@ -8314,7 +8314,7 @@
Register obj,
uint32_t offset,
bool needs_null_check) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
// /* HeapReference<Object> */ ref = *(obj + offset)
@@ -8328,7 +8328,7 @@
uint32_t data_offset,
Location index,
bool needs_null_check) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
static_assert(
@@ -8347,7 +8347,7 @@
bool needs_null_check,
bool always_update_field,
Register* temp) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
// In slow path based read barriers, the read barrier call is
@@ -8428,7 +8428,7 @@
Location obj,
uint32_t offset,
Location index) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// Insert a slow path based read barrier *after* the reference load.
//
@@ -8455,7 +8455,7 @@
Location obj,
uint32_t offset,
Location index) {
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Baker's read barriers shall be handled by the fast path
// (CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier).
DCHECK(!kUseBakerReadBarrier);
@@ -8470,7 +8470,7 @@
void CodeGeneratorX86::GenerateReadBarrierForRootSlow(HInstruction* instruction,
Location out,
Location root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// Insert a slow path based read barrier *after* the GC root load.
//
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5987410..d31a630 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -510,7 +510,7 @@
: SlowPathCode(instruction),
ref_(ref),
unpoison_ref_before_marking_(unpoison_ref_before_marking) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
}
const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86_64"; }
@@ -601,7 +601,7 @@
unpoison_ref_before_marking_(unpoison_ref_before_marking),
temp1_(temp1),
temp2_(temp2) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
}
const char* GetDescription() const override {
@@ -761,7 +761,7 @@
obj_(obj),
offset_(offset),
index_(index) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// If `obj` is equal to `out` or `ref`, it means the initial
// object has been overwritten by (or after) the heap object
// reference load to be instrumented, e.g.:
@@ -937,7 +937,7 @@
public:
ReadBarrierForRootSlowPathX86_64(HInstruction* instruction, Location out, Location root)
: SlowPathCode(instruction), out_(out), root_(root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
@@ -5013,7 +5013,7 @@
instruction->IsPredicatedInstanceFieldGet());
bool object_field_get_with_read_barrier =
- kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
@@ -5064,7 +5064,7 @@
if (load_type == DataType::Type::kReference) {
// /* HeapReference<Object> */ out = *(base + offset)
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
@@ -5513,7 +5513,7 @@
void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
- kEmitCompilerReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_array_get_with_read_barrier
@@ -5551,7 +5551,7 @@
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ out =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
@@ -6352,7 +6352,7 @@
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
+ const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -6366,7 +6366,7 @@
}
locations->SetOut(Location::RequiresRegister());
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ if (!gUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution and/or initialization to save everything.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
@@ -6403,7 +6403,7 @@
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
- : kCompilerReadBarrierOption;
+ : gCompilerReadBarrierOption;
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -6550,7 +6550,7 @@
} else {
locations->SetOut(Location::RequiresRegister());
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
+ if (!gUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString to save everything.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
} else {
@@ -6598,7 +6598,7 @@
/* no_rip= */ false);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption);
// No need for memory fence, thanks to the x86-64 memory model.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
@@ -6619,7 +6619,7 @@
Label* fixup_label = codegen_->NewJitRootStringPatch(
load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+ GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, gCompilerReadBarrierOption);
return;
}
default:
@@ -6672,7 +6672,7 @@
// Temp is used for read barrier.
static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
- if (kEmitCompilerReadBarrier &&
+ if (gUseReadBarrier &&
!kUseBakerReadBarrier &&
(type_check_kind == TypeCheckKind::kAbstractClassCheck ||
type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -7426,7 +7426,7 @@
ReadBarrierOption read_barrier_option) {
CpuRegister out_reg = out.AsRegister<CpuRegister>();
if (read_barrier_option == kWithReadBarrier) {
- CHECK(kEmitCompilerReadBarrier);
+ CHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
@@ -7460,7 +7460,7 @@
CpuRegister out_reg = out.AsRegister<CpuRegister>();
CpuRegister obj_reg = obj.AsRegister<CpuRegister>();
if (read_barrier_option == kWithReadBarrier) {
- CHECK(kEmitCompilerReadBarrier);
+ CHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7488,7 +7488,7 @@
ReadBarrierOption read_barrier_option) {
CpuRegister root_reg = root.AsRegister<CpuRegister>();
if (read_barrier_option == kWithReadBarrier) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used:
@@ -7552,7 +7552,7 @@
CpuRegister obj,
uint32_t offset,
bool needs_null_check) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
// /* HeapReference<Object> */ ref = *(obj + offset)
@@ -7566,7 +7566,7 @@
uint32_t data_offset,
Location index,
bool needs_null_check) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
static_assert(
@@ -7586,7 +7586,7 @@
bool always_update_field,
CpuRegister* temp1,
CpuRegister* temp2) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
// In slow path based read barriers, the read barrier call is
@@ -7668,7 +7668,7 @@
Location obj,
uint32_t offset,
Location index) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// Insert a slow path based read barrier *after* the reference load.
//
@@ -7695,7 +7695,7 @@
Location obj,
uint32_t offset,
Location index) {
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Baker's read barriers shall be handled by the fast path
// (CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier).
DCHECK(!kUseBakerReadBarrier);
@@ -7710,7 +7710,7 @@
void CodeGeneratorX86_64::GenerateReadBarrierForRootSlow(HInstruction* instruction,
Location out,
Location root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
// Insert a slow path based read barrier *after* the GC root load.
//
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index dc60ba6..fb8b01b 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -244,7 +244,7 @@
// The access may require a runtime call or the original array pointer.
return false;
}
- if (kEmitCompilerReadBarrier &&
+ if (gUseReadBarrier &&
!kUseBakerReadBarrier &&
access->IsArrayGet() &&
access->GetType() == DataType::Type::kReference) {
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index f2d2b45..0feb92d 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -392,7 +392,7 @@
}
void IntrinsicVisitor::CreateReferenceRefersToLocations(HInvoke* invoke) {
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
// Unimplemented for non-Baker read barrier.
return;
}
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 646f4f2..0ce082b 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -92,7 +92,7 @@
public:
ReadBarrierSystemArrayCopySlowPathARM64(HInstruction* instruction, Location tmp)
: SlowPathCodeARM64(instruction), tmp_(tmp) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
}
@@ -711,7 +711,7 @@
Location trg_loc = locations->Out();
Register trg = RegisterFrom(trg_loc, type);
- if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
// UnsafeGetObject/UnsafeGetObjectVolatile with Baker's read barrier case.
Register temp = WRegisterFrom(locations->GetTemp(0));
MacroAssembler* masm = codegen->GetVIXLAssembler();
@@ -754,7 +754,7 @@
}
static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
- bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+ bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -1096,7 +1096,7 @@
}
static void CreateUnsafeCASLocations(ArenaAllocator* allocator, HInvoke* invoke) {
- const bool can_call = kEmitCompilerReadBarrier && IsUnsafeCASObject(invoke);
+ const bool can_call = gUseReadBarrier && IsUnsafeCASObject(invoke);
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -1448,7 +1448,7 @@
vixl::aarch64::Label* exit_loop = &exit_loop_label;
vixl::aarch64::Label* cmp_failure = &exit_loop_label;
- if (kEmitCompilerReadBarrier && type == DataType::Type::kReference) {
+ if (gUseReadBarrier && type == DataType::Type::kReference) {
// We need to store the `old_value` in a non-scratch register to make sure
// the read barrier in the slow path does not clobber it.
old_value = WRegisterFrom(locations->GetTemp(0)); // The old value from main path.
@@ -1523,12 +1523,12 @@
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
CreateUnsafeCASLocations(allocator_, invoke);
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// We need two non-scratch temporary registers for read barrier.
LocationSummary* locations = invoke->GetLocations();
if (kUseBakerReadBarrier) {
@@ -1578,7 +1578,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
}
@@ -2814,7 +2814,7 @@
void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -2866,7 +2866,7 @@
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Temporary register IP0, obtained from the VIXL scratch register
// pool, cannot be used in ReadBarrierSystemArrayCopySlowPathARM64
// (because that register is clobbered by ReadBarrierMarkRegX
@@ -2884,7 +2884,7 @@
void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -2991,7 +2991,7 @@
UseScratchRegisterScope temps(masm);
Location temp3_loc; // Used only for Baker read barrier.
Register temp3;
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
temp3_loc = locations->GetTemp(2);
temp3 = WRegisterFrom(temp3_loc);
} else {
@@ -3004,7 +3004,7 @@
// or the destination is Object[]. If none of these checks succeed, we go to the
// slow path.
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
@@ -3165,7 +3165,7 @@
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
temp1_loc,
@@ -3215,7 +3215,7 @@
__ Cbz(WRegisterFrom(length), &done);
}
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// TODO: Also convert this intrinsic to the IsGcMarking strategy?
// SystemArrayCopy implementation for Baker read barriers (see
@@ -3451,7 +3451,7 @@
void IntrinsicLocationsBuilderARM64::VisitReferenceGetReferent(HInvoke* invoke) {
IntrinsicVisitor::CreateReferenceGetReferentLocations(invoke, codegen_);
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && invoke->GetLocations() != nullptr) {
+ if (gUseReadBarrier && kUseBakerReadBarrier && invoke->GetLocations() != nullptr) {
invoke->GetLocations()->AddTemp(Location::RequiresRegister());
}
}
@@ -3466,7 +3466,7 @@
SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Check self->GetWeakRefAccessEnabled().
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireW();
@@ -3493,7 +3493,7 @@
// Load the value from the field.
uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
out,
WRegisterFrom(obj),
@@ -3533,7 +3533,7 @@
__ Cmp(tmp, other);
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
DCHECK(kUseBakerReadBarrier);
vixl::aarch64::Label calculate_result;
@@ -4629,7 +4629,7 @@
method.X(),
ArtField::DeclaringClassOffset().Int32Value(),
/*fixup_label=*/ nullptr,
- kCompilerReadBarrierOption);
+ gCompilerReadBarrierOption);
}
}
} else {
@@ -4683,7 +4683,7 @@
}
// Add a temporary for offset.
- if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
GetExpectedVarHandleCoordinatesCount(invoke) == 0u) { // For static fields.
// To preserve the offset value across the non-Baker read barrier slow path
// for loading the declaring class, use a fixed callee-save register.
@@ -4706,7 +4706,7 @@
return;
}
- if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
invoke->GetType() == DataType::Type::kReference &&
invoke->GetIntrinsic() != Intrinsics::kVarHandleGet &&
invoke->GetIntrinsic() != Intrinsics::kVarHandleGetOpaque) {
@@ -4746,7 +4746,7 @@
DCHECK(use_load_acquire || order == std::memory_order_relaxed);
// Load the value from the target location.
- if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
// Piggy-back on the field load path using introspection for the Baker read barrier.
// The `target.offset` is a temporary, use it for field address.
Register tmp_ptr = target.offset.X();
@@ -4947,7 +4947,7 @@
uint32_t number_of_arguments = invoke->GetNumberOfArguments();
DataType::Type value_type = GetDataTypeFromShorty(invoke, number_of_arguments - 1u);
- if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
value_type == DataType::Type::kReference) {
// Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
// the passed reference and reloads it from the field. This breaks the read barriers
@@ -4961,7 +4961,7 @@
LocationSummary* locations = CreateVarHandleCommonLocations(invoke);
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
// We need callee-save registers for both the class object and offset instead of
// the temporaries reserved in CreateVarHandleCommonLocations().
static_assert(POPCOUNT(kArm64CalleeSaveRefSpills) >= 2u);
@@ -5002,7 +5002,7 @@
locations->AddTemp(Location::RequiresRegister());
}
}
- if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+ if (gUseReadBarrier && value_type == DataType::Type::kReference) {
// Add a temporary for the `old_value_temp` in slow path.
locations->AddTemp(Location::RequiresRegister());
}
@@ -5068,7 +5068,7 @@
// except for references that need the offset for the read barrier.
UseScratchRegisterScope temps(masm);
Register tmp_ptr = target.offset.X();
- if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+ if (gUseReadBarrier && value_type == DataType::Type::kReference) {
tmp_ptr = temps.AcquireX();
}
__ Add(tmp_ptr, target.object.X(), target.offset.X());
@@ -5151,7 +5151,7 @@
vixl::aarch64::Label* exit_loop = &exit_loop_label;
vixl::aarch64::Label* cmp_failure = &exit_loop_label;
- if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+ if (gUseReadBarrier && value_type == DataType::Type::kReference) {
// The `old_value_temp` is used first for the marked `old_value` and then for the unmarked
// reloaded old value for subsequent CAS in the slow path. It cannot be a scratch register.
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
@@ -5296,7 +5296,7 @@
return;
}
- if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
invoke->GetType() == DataType::Type::kReference) {
// Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
// the passed reference and reloads it from the field, thus seeing the new value
@@ -5372,7 +5372,7 @@
// except for references that need the offset for the non-Baker read barrier.
UseScratchRegisterScope temps(masm);
Register tmp_ptr = target.offset.X();
- if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
value_type == DataType::Type::kReference) {
tmp_ptr = temps.AcquireX();
}
@@ -5402,7 +5402,7 @@
// the new value unless it is zero bit pattern (+0.0f or +0.0) and need another one
// in GenerateGetAndUpdate(). We have allocated a normal temporary to handle that.
old_value = CPURegisterFrom(locations->GetTemp(1u), load_store_type);
- } else if ((kEmitCompilerReadBarrier && kUseBakerReadBarrier) &&
+ } else if ((gUseReadBarrier && kUseBakerReadBarrier) &&
value_type == DataType::Type::kReference) {
// Load the old value initially to a scratch register.
// We shall move it to `out` later with a read barrier.
@@ -5450,7 +5450,7 @@
__ Sxtb(out.W(), old_value.W());
} else if (value_type == DataType::Type::kInt16) {
__ Sxth(out.W(), old_value.W());
- } else if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+ } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
if (kUseBakerReadBarrier) {
codegen->GenerateIntrinsicCasMoveWithBakerReadBarrier(out.W(), old_value.W());
} else {
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index d850cad..da47fa6 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -120,7 +120,7 @@
public:
explicit ReadBarrierSystemArrayCopySlowPathARMVIXL(HInstruction* instruction)
: SlowPathCodeARMVIXL(instruction) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
}
@@ -1242,7 +1242,7 @@
void IntrinsicLocationsBuilderARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -1265,7 +1265,7 @@
if (length != nullptr && !assembler_->ShifterOperandCanAlwaysHold(length->GetValue())) {
locations->SetInAt(4, Location::RequiresRegister());
}
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Temporary register IP cannot be used in
// ReadBarrierSystemArrayCopySlowPathARM (because that register
// is clobbered by ReadBarrierMarkRegX entry points). Get an extra
@@ -1339,7 +1339,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
ArmVIXLAssembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -1453,7 +1453,7 @@
// or the destination is Object[]. If none of these checks succeed, we go to the
// slow path.
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
@@ -1584,7 +1584,7 @@
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
@@ -1621,7 +1621,7 @@
__ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target= */ false);
}
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// TODO: Also convert this intrinsic to the IsGcMarking strategy?
// SystemArrayCopy implementation for Baker read barriers (see
@@ -2511,7 +2511,7 @@
SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen_->AddSlowPath(slow_path);
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Check self->GetWeakRefAccessEnabled().
UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
@@ -2539,7 +2539,7 @@
// Load the value from the field.
uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
out,
RegisterFrom(obj),
@@ -2587,7 +2587,7 @@
assembler->MaybeUnpoisonHeapReference(tmp);
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); // `referent` is volatile.
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
DCHECK(kUseBakerReadBarrier);
vixl32::Label calculate_result;
@@ -2613,7 +2613,7 @@
__ Bind(&calculate_result);
} else {
- DCHECK(!kEmitCompilerReadBarrier);
+ DCHECK(!gUseReadBarrier);
__ Sub(out, tmp, other);
}
@@ -2732,7 +2732,7 @@
}
break;
case DataType::Type::kReference:
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Piggy-back on the field load path using introspection for the Baker read barrier.
vixl32::Register temp = RegisterFrom(maybe_temp);
__ Add(temp, base, offset);
@@ -2777,7 +2777,7 @@
codegen->GenerateMemoryBarrier(
seq_cst_barrier ? MemBarrierKind::kAnyAny : MemBarrierKind::kLoadAny);
}
- if (type == DataType::Type::kReference && !(kEmitCompilerReadBarrier && kUseBakerReadBarrier)) {
+ if (type == DataType::Type::kReference && !(gUseReadBarrier && kUseBakerReadBarrier)) {
Location base_loc = LocationFrom(base);
Location index_loc = LocationFrom(offset);
codegen->MaybeGenerateReadBarrierSlow(invoke, out, out, base_loc, /* offset=*/ 0u, index_loc);
@@ -2802,7 +2802,7 @@
CodeGeneratorARMVIXL* codegen,
DataType::Type type,
bool atomic) {
- bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+ bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
@@ -2818,7 +2818,7 @@
locations->SetInAt(2, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(),
(can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
- if ((kEmitCompilerReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
+ if ((gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
(type == DataType::Type::kInt64 && Use64BitExclusiveLoadStore(atomic, codegen))) {
// We need a temporary register for the read barrier marking slow
// path in CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier,
@@ -2837,7 +2837,7 @@
vixl32::Register offset = LowRegisterFrom(locations->InAt(2)); // Long offset, lo part only.
Location out = locations->Out();
Location maybe_temp = Location::NoLocation();
- if ((kEmitCompilerReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
+ if ((gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
(type == DataType::Type::kInt64 && Use64BitExclusiveLoadStore(atomic, codegen))) {
maybe_temp = locations->GetTemp(0);
}
@@ -3470,7 +3470,7 @@
// branch goes to the read barrier slow path that clobbers `success` anyway.
bool init_failure_for_cmp =
success.IsValid() &&
- !(kEmitCompilerReadBarrier && type == DataType::Type::kReference && expected.IsRegister());
+ !(gUseReadBarrier && type == DataType::Type::kReference && expected.IsRegister());
// Instruction scheduling: Loading a constant between LDREX* and using the loaded value
// is essentially free, so prepare the failure value here if we can.
bool init_failure_for_cmp_early =
@@ -3655,7 +3655,7 @@
};
static void CreateUnsafeCASLocations(ArenaAllocator* allocator, HInvoke* invoke) {
- const bool can_call = kEmitCompilerReadBarrier && IsUnsafeCASObject(invoke);
+ const bool can_call = gUseReadBarrier && IsUnsafeCASObject(invoke);
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -3706,7 +3706,7 @@
vixl32::Label* exit_loop = &exit_loop_label;
vixl32::Label* cmp_failure = &exit_loop_label;
- if (kEmitCompilerReadBarrier && type == DataType::Type::kReference) {
+ if (gUseReadBarrier && type == DataType::Type::kReference) {
// If marking, check if the stored reference is a from-space reference to the same
// object as the to-space reference `expected`. If so, perform a custom CAS loop.
ReadBarrierCasSlowPathARMVIXL* slow_path =
@@ -3770,7 +3770,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers (b/173104084).
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -3798,7 +3798,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers (b/173104084).
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
}
@@ -4351,7 +4351,7 @@
LocationFrom(target.object),
method,
ArtField::DeclaringClassOffset().Int32Value(),
- kCompilerReadBarrierOption);
+ gCompilerReadBarrierOption);
}
}
} else {
@@ -4403,7 +4403,7 @@
}
// Add a temporary for offset.
- if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
GetExpectedVarHandleCoordinatesCount(invoke) == 0u) { // For static fields.
// To preserve the offset value across the non-Baker read barrier slow path
// for loading the declaring class, use a fixed callee-save register.
@@ -4428,7 +4428,7 @@
return;
}
- if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
invoke->GetType() == DataType::Type::kReference &&
invoke->GetIntrinsic() != Intrinsics::kVarHandleGet &&
invoke->GetIntrinsic() != Intrinsics::kVarHandleGetOpaque) {
@@ -4476,7 +4476,7 @@
Location maybe_temp = Location::NoLocation();
Location maybe_temp2 = Location::NoLocation();
Location maybe_temp3 = Location::NoLocation();
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) {
+ if (gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) {
// Reuse the offset temporary.
maybe_temp = LocationFrom(target.offset);
} else if (DataType::Is64BitType(type) && Use64BitExclusiveLoadStore(atomic, codegen)) {
@@ -4749,7 +4749,7 @@
uint32_t number_of_arguments = invoke->GetNumberOfArguments();
DataType::Type value_type = GetDataTypeFromShorty(invoke, number_of_arguments - 1u);
- if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
value_type == DataType::Type::kReference) {
// Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
// the passed reference and reloads it from the field. This breaks the read barriers
@@ -4763,7 +4763,7 @@
LocationSummary* locations = CreateVarHandleCommonLocations(invoke);
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
// We need callee-save registers for both the class object and offset instead of
// the temporaries reserved in CreateVarHandleCommonLocations().
static_assert(POPCOUNT(kArmCalleeSaveRefSpills) >= 2u);
@@ -4799,7 +4799,7 @@
locations->AddRegisterTemps(2u);
}
}
- if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+ if (gUseReadBarrier && value_type == DataType::Type::kReference) {
// Add a temporary for store result, also used for the `old_value_temp` in slow path.
locations->AddTemp(Location::RequiresRegister());
}
@@ -4930,7 +4930,7 @@
vixl32::Label* exit_loop = &exit_loop_label;
vixl32::Label* cmp_failure = &exit_loop_label;
- if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+ if (gUseReadBarrier && value_type == DataType::Type::kReference) {
// The `old_value_temp` is used first for the marked `old_value` and then for the unmarked
// reloaded old value for subsequent CAS in the slow path. This must not clobber `old_value`.
vixl32::Register old_value_temp = return_success ? RegisterFrom(out) : store_result;
@@ -5086,7 +5086,7 @@
return;
}
- if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+ if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
invoke->GetType() == DataType::Type::kReference) {
// Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
// the passed reference and reloads it from the field, thus seeing the new value
@@ -5107,7 +5107,7 @@
// Add temps needed to do the GenerateGetAndUpdate() with core registers.
size_t temps_needed = (value_type == DataType::Type::kFloat64) ? 5u : 3u;
locations->AddRegisterTemps(temps_needed - locations->GetTempCount());
- } else if ((kEmitCompilerReadBarrier && !kUseBakerReadBarrier) &&
+ } else if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
value_type == DataType::Type::kReference) {
// We need to preserve the declaring class (if present) and offset for read barrier
// slow paths, so we must use a separate temporary for the exclusive store result.
@@ -5213,7 +5213,7 @@
if (byte_swap) {
GenerateReverseBytes(assembler, DataType::Type::kInt32, arg, arg);
}
- } else if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+ } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
if (kUseBakerReadBarrier) {
// Load the old value initially to a temporary register.
// We shall move it to `out` later with a read barrier.
@@ -5296,7 +5296,7 @@
} else {
__ Vmov(SRegisterFrom(out), RegisterFrom(old_value));
}
- } else if (kEmitCompilerReadBarrier && value_type == DataType::Type::kReference) {
+ } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
if (kUseBakerReadBarrier) {
codegen->GenerateIntrinsicCasMoveWithBakerReadBarrier(RegisterFrom(out),
RegisterFrom(old_value));
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 7d90aae..0f6eb86 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -75,7 +75,7 @@
public:
explicit ReadBarrierSystemArrayCopySlowPathX86(HInstruction* instruction)
: SlowPathCode(instruction) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
}
@@ -1699,7 +1699,7 @@
case DataType::Type::kReference: {
Register output = output_loc.AsRegister<Register>();
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
if (kUseBakerReadBarrier) {
Address src(base, offset, ScaleFactor::TIMES_1, 0);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -1757,7 +1757,7 @@
HInvoke* invoke,
DataType::Type type,
bool is_volatile) {
- bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+ bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -2103,7 +2103,7 @@
static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
- const bool can_call = kEmitCompilerReadBarrier &&
+ const bool can_call = gUseReadBarrier &&
kUseBakerReadBarrier &&
IsUnsafeCASObject(invoke);
LocationSummary* locations =
@@ -2175,7 +2175,7 @@
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -2304,7 +2304,7 @@
DCHECK_EQ(expected, EAX);
DCHECK_NE(temp, temp2);
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Need to make sure the reference stored in the field is a to-space
// one before attempting the CAS or the CAS could fail incorrectly.
codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -2391,7 +2391,7 @@
if (type == DataType::Type::kReference) {
// The only read barrier implementation supporting the
// UnsafeCASObject intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register temp2 = locations->GetTemp(1).AsRegister<Register>();
@@ -2413,7 +2413,7 @@
void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
// UnsafeCASObject intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -2443,7 +2443,7 @@
void IntrinsicCodeGeneratorX86::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -2843,7 +2843,7 @@
void IntrinsicLocationsBuilderX86::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -2875,7 +2875,7 @@
void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -2995,7 +2995,7 @@
// slow path.
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -3022,7 +3022,7 @@
__ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
if (length.Equals(Location::RegisterLocation(temp3))) {
// When Baker read barriers are enabled, register `temp3`,
// which in the present case contains the `length` parameter,
@@ -3120,7 +3120,7 @@
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -3151,7 +3151,7 @@
// Compute the base source address in `temp1`.
GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// If it is needed (in the case of the fast-path loop), the base
// destination address is computed later, as `temp2` is used for
// intermediate computations.
@@ -3377,7 +3377,7 @@
SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Check self->GetWeakRefAccessEnabled().
ThreadOffset32 offset = Thread::WeakRefAccessEnabledOffset<kX86PointerSize>();
__ fs()->cmpl(Address::Absolute(offset),
@@ -3400,7 +3400,7 @@
// Load the value from the field.
uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
out,
obj.AsRegister<Register>(),
@@ -3442,7 +3442,7 @@
NearLabel end, return_true, return_false;
__ cmpl(out, other);
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
DCHECK(kUseBakerReadBarrier);
__ j(kEqual, &return_true);
@@ -3781,7 +3781,7 @@
Location::RegisterLocation(temp),
Address(temp, declaring_class_offset),
/* fixup_label= */ nullptr,
- kCompilerReadBarrierOption);
+ gCompilerReadBarrierOption);
return temp;
}
@@ -3794,7 +3794,7 @@
static void CreateVarHandleGetLocations(HInvoke* invoke) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -3836,7 +3836,7 @@
static void GenerateVarHandleGet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -3860,7 +3860,7 @@
Address field_addr(ref, offset, TIMES_1, 0);
// Load the value from the field
- if (type == DataType::Type::kReference && kCompilerReadBarrierOption == kWithReadBarrier) {
+ if (type == DataType::Type::kReference && gCompilerReadBarrierOption == kWithReadBarrier) {
codegen->GenerateReferenceLoadWithBakerReadBarrier(
invoke, out, ref, field_addr, /* needs_null_check= */ false);
} else if (type == DataType::Type::kInt64 &&
@@ -3917,7 +3917,7 @@
static void CreateVarHandleSetLocations(HInvoke* invoke) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -3990,7 +3990,7 @@
static void GenerateVarHandleSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4087,7 +4087,7 @@
static void CreateVarHandleGetAndSetLocations(HInvoke* invoke) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -4135,7 +4135,7 @@
static void GenerateVarHandleGetAndSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4194,7 +4194,7 @@
__ movd(locations->Out().AsFpuRegister<XmmRegister>(), EAX);
break;
case DataType::Type::kReference: {
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Need to make sure the reference stored in the field is a to-space
// one before attempting the CAS or the CAS could fail incorrectly.
codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -4258,7 +4258,7 @@
static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -4322,7 +4322,7 @@
static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4441,7 +4441,7 @@
static void CreateVarHandleGetAndAddLocations(HInvoke* invoke) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -4490,7 +4490,7 @@
static void GenerateVarHandleGetAndAdd(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4591,7 +4591,7 @@
static void CreateVarHandleGetAndBitwiseOpLocations(HInvoke* invoke) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -4659,7 +4659,7 @@
static void GenerateVarHandleGetAndBitwiseOp(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 3c31374..9921d90 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -71,7 +71,7 @@
public:
explicit ReadBarrierSystemArrayCopySlowPathX86_64(HInstruction* instruction)
: SlowPathCode(instruction) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
}
@@ -836,7 +836,7 @@
void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -887,7 +887,7 @@
void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -1002,7 +1002,7 @@
// slow path.
bool did_unpoison = false;
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
@@ -1034,7 +1034,7 @@
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non primitive array.
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ TMP = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
@@ -1055,7 +1055,7 @@
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// Bail out if the source is not a non primitive array.
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// For the same reason given earlier, `temp1` is not trashed by the
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ TMP = temp2->component_type_
@@ -1081,7 +1081,7 @@
if (optimizations.GetDestinationIsTypedObjectArray()) {
NearLabel do_copy;
__ j(kEqual, &do_copy);
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
@@ -1109,7 +1109,7 @@
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -1141,7 +1141,7 @@
GenSystemArrayCopyAddresses(
GetAssembler(), type, src, src_pos, dest, dest_pos, length, temp1, temp2, temp3);
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// SystemArrayCopy implementation for Baker read barriers (see
// also CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier):
//
@@ -1888,7 +1888,7 @@
break;
case DataType::Type::kReference: {
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
if (kUseBakerReadBarrier) {
Address src(base, offset, ScaleFactor::TIMES_1, 0);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -1930,7 +1930,7 @@
}
static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
- bool can_call = kEmitCompilerReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+ bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -2230,7 +2230,7 @@
static void CreateUnsafeCASLocations(ArenaAllocator* allocator,
DataType::Type type,
HInvoke* invoke) {
- const bool can_call = kEmitCompilerReadBarrier &&
+ const bool can_call = gUseReadBarrier &&
kUseBakerReadBarrier &&
IsUnsafeCASObject(invoke);
LocationSummary* locations =
@@ -2253,7 +2253,7 @@
// Need two temporaries for MarkGCCard.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too.
locations->AddTemp(Location::RequiresRegister());
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Need three temporaries for GenerateReferenceLoadWithBakerReadBarrier.
DCHECK(kUseBakerReadBarrier);
locations->AddTemp(Location::RequiresRegister());
@@ -2298,7 +2298,7 @@
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return;
}
@@ -2438,7 +2438,7 @@
CpuRegister temp3,
bool is_cmpxchg) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
@@ -2447,7 +2447,7 @@
codegen->MarkGCCard(temp1, temp2, base, value, value_can_be_null);
Address field_addr(base, offset, TIMES_1, 0);
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// Need to make sure the reference stored in the field is a to-space
// one before attempting the CAS or the CAS could fail incorrectly.
codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -2556,7 +2556,7 @@
CpuRegister new_value_reg = new_value.AsRegister<CpuRegister>();
CpuRegister temp1 = locations->GetTemp(temp1_index).AsRegister<CpuRegister>();
CpuRegister temp2 = locations->GetTemp(temp2_index).AsRegister<CpuRegister>();
- CpuRegister temp3 = kEmitCompilerReadBarrier
+ CpuRegister temp3 = gUseReadBarrier
? locations->GetTemp(temp3_index).AsRegister<CpuRegister>()
: CpuRegister(kNoRegister);
DCHECK(RegsAreAllDifferent({base, offset, temp1, temp2, temp3}));
@@ -2624,7 +2624,7 @@
void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -3128,7 +3128,7 @@
SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Check self->GetWeakRefAccessEnabled().
ThreadOffset64 offset = Thread::WeakRefAccessEnabledOffset<kX86_64PointerSize>();
__ gs()->cmpl(Address::Absolute(offset, /* no_rip= */ true),
@@ -3150,7 +3150,7 @@
// Load the value from the field.
uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
out,
obj.AsRegister<CpuRegister>(),
@@ -3191,7 +3191,7 @@
__ cmpl(out, other);
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
DCHECK(kUseBakerReadBarrier);
NearLabel calculate_result;
@@ -3771,7 +3771,7 @@
Location::RegisterLocation(target.object),
Address(method, ArtField::DeclaringClassOffset()),
/*fixup_label=*/ nullptr,
- kCompilerReadBarrierOption);
+ gCompilerReadBarrierOption);
}
}
} else {
@@ -3790,7 +3790,7 @@
static bool HasVarHandleIntrinsicImplementation(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
return false;
}
@@ -3876,7 +3876,7 @@
Location out = locations->Out();
if (type == DataType::Type::kReference) {
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
DCHECK(kUseBakerReadBarrier);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
invoke, out, CpuRegister(target.object), src, /* needs_null_check= */ false);
@@ -4070,7 +4070,7 @@
// Need two temporaries for MarkGCCard.
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Need three temporaries for GenerateReferenceLoadWithBakerReadBarrier.
DCHECK(kUseBakerReadBarrier);
locations->AddTemp(Location::RequiresRegister());
@@ -4085,7 +4085,7 @@
CodeGeneratorX86_64* codegen,
bool is_cmpxchg,
bool byte_swap = false) {
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4218,7 +4218,7 @@
// Need two temporaries for MarkGCCard.
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
- if (kEmitCompilerReadBarrier) {
+ if (gUseReadBarrier) {
// Need a third temporary for GenerateReferenceLoadWithBakerReadBarrier.
DCHECK(kUseBakerReadBarrier);
locations->AddTemp(Location::RequiresRegister());
@@ -4267,7 +4267,7 @@
CpuRegister temp2 = locations->GetTemp(temp_count - 2).AsRegister<CpuRegister>();
CpuRegister valreg = value.AsRegister<CpuRegister>();
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
codegen->GenerateReferenceLoadWithBakerReadBarrier(
invoke,
locations->GetTemp(temp_count - 3),
@@ -4647,7 +4647,7 @@
bool need_any_store_barrier,
bool need_any_any_barrier,
bool byte_swap = false) {
- DCHECK_IMPLIES(kEmitCompilerReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
X86_64Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 965e1bd..25dd104 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -669,7 +669,7 @@
}
case DataType::Type::kReference: {
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
last_visited_latency_ = kArmLoadWithBakerReadBarrierLatency;
} else {
if (index->IsConstant()) {
@@ -937,7 +937,7 @@
break;
case DataType::Type::kReference:
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
last_visited_internal_latency_ = kArmMemoryLoadLatency + kArmIntegerOpLatency;
last_visited_latency_ = kArmMemoryLoadLatency;
} else {
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 77f5d70..0271db9 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -82,7 +82,7 @@
void ArmVIXLAssembler::GenerateMarkingRegisterCheck(vixl32::Register temp, int code) {
// The Marking Register is only used in the Baker read barrier configuration.
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
vixl32::Label mr_is_ok;
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index a4fddbc..035e13d 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -155,7 +155,7 @@
// Pop LR to PC unless we need to emit some read barrier code just before returning.
bool emit_code_before_return =
- (kEmitCompilerReadBarrier && kUseBakerReadBarrier) &&
+ (gUseReadBarrier && kUseBakerReadBarrier) &&
(may_suspend || (kIsDebugBuild && emit_run_time_checks_in_debug_mode_));
if ((core_spill_mask & (1u << lr.GetCode())) != 0u && !emit_code_before_return) {
DCHECK_EQ(core_spill_mask & (1u << pc.GetCode()), 0u);
@@ -215,7 +215,9 @@
}
}
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // Emit marking register refresh even with uffd-GC as we are still using the
+ // register due to nterp's dependency.
+ if ((gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier) {
if (may_suspend) {
// The method may be suspended; refresh the Marking Register.
___ Ldr(mr, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
@@ -1172,7 +1174,7 @@
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
vixl32::Register test_reg;
DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
- DCHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// TestGcMarking() is used in the JNI stub entry when the marking register is up to date.
if (kIsDebugBuild && emit_run_time_checks_in_debug_mode_) {
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 6100ed9..df05838 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -188,7 +188,7 @@
void Arm64Assembler::GenerateMarkingRegisterCheck(Register temp, int code) {
// The Marking Register is only used in the Baker read barrier configuration.
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(kUseBakerReadBarrier);
vixl::aarch64::Register mr = reg_x(MR); // Marking Register.
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index 0f6e5eb..4043a33 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -992,7 +992,7 @@
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
Register test_reg;
DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
- DCHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// TestGcMarking() is used in the JNI stub entry when the marking register is up to date.
if (kIsDebugBuild && emit_run_time_checks_in_debug_mode_) {
@@ -1118,7 +1118,9 @@
asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size);
asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // Emit marking register refresh even with uffd-GC as we are still using the
+ // register due to nterp's dependency.
+ if ((gUseReadBarrier || gUseUserfaultfd) && kUseBakerReadBarrier) {
vixl::aarch64::Register mr = reg_x(MR); // Marking Register.
vixl::aarch64::Register tr = reg_x(TR); // Thread Register.
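
A sketch, not part of the patch, of what the marking-register refresh above amounts to; this is illustrative C++ rather than the emitted assembly.

  #include <cstdint>

  struct Thread {
    int32_t is_gc_marking;  // Toggled by the GC around concurrent marking.
  };

  // MR caches Thread::is_gc_marking so compiled code and nterp can test it
  // cheaply. After any point where the thread may have suspended, the cached
  // value can be stale, so it is reloaded from the thread object, roughly:
  //   ldr mr, [tr, #IsGcMarkingOffset]
  // The refresh is kept even for the userfaultfd GC because nterp still
  // reads this register.
  inline int32_t RefreshMarkingRegister(const Thread* self) {
    return self->is_gc_marking;
  }
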
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 27bae65..a25fd1d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -962,7 +962,7 @@
compiler_options_->GetNativeDebuggable());
key_value_store_->Put(OatHeader::kCompilerFilter,
CompilerFilter::NameOfFilter(compiler_options_->GetCompilerFilter()));
- key_value_store_->Put(OatHeader::kConcurrentCopying, kUseReadBarrier);
+ key_value_store_->Put(OatHeader::kConcurrentCopying, gUseReadBarrier);
if (invocation_file_.get() != -1) {
std::ostringstream oss;
for (int i = 0; i < argc; ++i) {
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64.cc b/dex2oat/linker/arm64/relative_patcher_arm64.cc
index 4028f75..5794040 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64.cc
@@ -251,7 +251,7 @@
} else {
if ((insn & 0xfffffc00) == 0x91000000) {
// ADD immediate, 64-bit with imm12 == 0 (unset).
- if (!kEmitCompilerReadBarrier) {
+ if (!gUseReadBarrier) {
DCHECK(patch.GetType() == LinkerPatch::Type::kIntrinsicReference ||
patch.GetType() == LinkerPatch::Type::kMethodRelative ||
patch.GetType() == LinkerPatch::Type::kTypeRelative ||
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index fc19cd1..e0c37d8 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -50,7 +50,6 @@
#if defined(__linux__)
#include <linux/unistd.h>
#include <sys/syscall.h>
-#include <sys/utsname.h>
#endif
#if defined(_WIN32)
@@ -158,6 +157,17 @@
#endif
+#if defined(__linux__)
+bool IsKernelVersionAtLeast(int reqd_major, int reqd_minor) {
+ struct utsname uts;
+ int major, minor;
+ CHECK_EQ(uname(&uts), 0);
+ CHECK_EQ(strcmp(uts.sysname, "Linux"), 0);
+ CHECK_EQ(sscanf(uts.release, "%d.%d", &major, &minor), 2);
+ return major > reqd_major || (major == reqd_major && minor >= reqd_minor);
+}
+#endif
+
bool CacheOperationsMaySegFault() {
#if defined(__linux__) && defined(__aarch64__)
// Avoid issue on older ARM64 kernels where data cache operations could be classified as writes
@@ -167,18 +177,10 @@
//
// This behaviour means we should avoid the dual view JIT on the device. This is just
// an issue when running tests on devices that have an old kernel.
- static constexpr int kRequiredMajor = 3;
- static constexpr int kRequiredMinor = 12;
- struct utsname uts;
- int major, minor;
- if (uname(&uts) != 0 ||
- strcmp(uts.sysname, "Linux") != 0 ||
- sscanf(uts.release, "%d.%d", &major, &minor) != 2 ||
- (major < kRequiredMajor || (major == kRequiredMajor && minor < kRequiredMinor))) {
- return true;
- }
-#endif
+ return !IsKernelVersionAtLeast(3, 12);
+#else
return false;
+#endif
}
uint32_t GetTid() {
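
A standalone usage sketch for the new helper, not part of the patch, Linux-only; the release-string format shown is typical (e.g. "5.15.0-76-generic").

  #include <cstdio>
  #include <sys/utsname.h>

  int main() {
    struct utsname uts;
    int major = 0, minor = 0;
    if (uname(&uts) == 0 && sscanf(uts.release, "%d.%d", &major, &minor) == 2) {
      // Mirrors IsKernelVersionAtLeast(3, 12) from the hunk above.
      bool at_least = major > 3 || (major == 3 && minor >= 12);
      printf("kernel %d.%d -> at least 3.12: %s\n",
             major, minor, at_least ? "yes" : "no");
    }
    return 0;
  }
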
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 0e8231a..90eb2da 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -31,6 +31,10 @@
#include "globals.h"
#include "macros.h"
+#if defined(__linux__)
+#include <sys/utsname.h>
+#endif
+
namespace art {
static inline uint32_t PointerToLowMemUInt32(const void* p) {
@@ -125,6 +129,10 @@
// Flush CPU caches. Returns true on success, false if flush failed.
WARN_UNUSED bool FlushCpuCaches(void* begin, void* end);
+#if defined(__linux__)
+bool IsKernelVersionAtLeast(int reqd_major, int reqd_minor);
+#endif
+
// On some old kernels, a cache operation may segfault.
WARN_UNUSED bool CacheOperationsMaySegFault();
diff --git a/openjdkjvmti/jvmti_weak_table-inl.h b/openjdkjvmti/jvmti_weak_table-inl.h
index 5b28e45..17578d2 100644
--- a/openjdkjvmti/jvmti_weak_table-inl.h
+++ b/openjdkjvmti/jvmti_weak_table-inl.h
@@ -114,7 +114,7 @@
return true;
}
- if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+ if (art::gUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
// Under concurrent GC, there is a window between moving objects and sweeping of system
// weaks in which mutators are active. We may receive a to-space object pointer in obj,
// but still have from-space pointers in the table. Explicitly update the table once.
@@ -156,7 +156,7 @@
return true;
}
- if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+ if (art::gUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
// Under concurrent GC, there is a window between moving objects and sweeping of system
// weaks in which mutators are active. We may receive a to-space object pointer in obj,
// but still have from-space pointers in the table. Explicitly update the table once.
diff --git a/openjdkjvmti/jvmti_weak_table.h b/openjdkjvmti/jvmti_weak_table.h
index ea0d023..afa2d1d 100644
--- a/openjdkjvmti/jvmti_weak_table.h
+++ b/openjdkjvmti/jvmti_weak_table.h
@@ -152,7 +152,7 @@
// Performance optimization: To avoid multiple table updates, ensure that during GC we
// only update once. See the comment on the implementation of GetTagSlowPath.
- if (art::kUseReadBarrier &&
+ if (art::gUseReadBarrier &&
self != nullptr &&
self->GetIsGcMarking() &&
!update_since_last_sweep_) {
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index b0b0064..555babe 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -91,7 +91,7 @@
qpoints->SetReadBarrierMarkReg10(is_active ? art_quick_read_barrier_mark_reg10 : nullptr);
qpoints->SetReadBarrierMarkReg11(is_active ? art_quick_read_barrier_mark_reg11 : nullptr);
- if (kUseReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// For the alignment check, strip the Thumb mode bit.
DCHECK_ALIGNED(reinterpret_cast<intptr_t>(art_quick_read_barrier_mark_introspection) - 1u,
256u);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 8921577..ccf4ff2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2116,7 +2116,7 @@
const bool tracing_enabled = Trace::IsTracingEnabled();
Thread* const self = Thread::Current();
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// We do not track new roots for CC.
DCHECK_EQ(0, flags & (kVisitRootFlagNewRoots |
kVisitRootFlagClearRootLog |
@@ -2152,7 +2152,7 @@
root.VisitRoot(visitor, RootInfo(kRootVMInternal));
}
}
- } else if (!kUseReadBarrier && (flags & kVisitRootFlagNewRoots) != 0) {
+ } else if (!gUseReadBarrier && (flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_class_roots_) {
ObjPtr<mirror::Class> old_ref = root.Read<kWithoutReadBarrier>();
root.VisitRoot(visitor, RootInfo(kRootStickyClass));
@@ -2173,13 +2173,13 @@
}
}
}
- if (!kUseReadBarrier && (flags & kVisitRootFlagClearRootLog) != 0) {
+ if (!gUseReadBarrier && (flags & kVisitRootFlagClearRootLog) != 0) {
new_class_roots_.clear();
new_bss_roots_boot_oat_files_.clear();
}
- if (!kUseReadBarrier && (flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
+ if (!gUseReadBarrier && (flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
log_new_roots_ = true;
- } else if (!kUseReadBarrier && (flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
+ } else if (!gUseReadBarrier && (flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
log_new_roots_ = false;
}
// We deliberately ignore the class roots in the image since we
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 9fa9c5d..e136073 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -305,7 +305,7 @@
}
#define TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS() \
- if (!kEmitCompilerReadBarrier || !kUseBakerReadBarrier) { \
+ if (!gUseReadBarrier || !kUseBakerReadBarrier) { \
printf("WARNING: TEST DISABLED FOR GC WITHOUT BAKER READ BARRIER\n"); \
return; \
}
@@ -317,7 +317,7 @@
}
#define TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING_WITHOUT_READ_BARRIERS() \
- if (kRunningOnMemoryTool && kPoisonHeapReferences && !kEmitCompilerReadBarrier) { \
+ if (kRunningOnMemoryTool && kPoisonHeapReferences && !gUseReadBarrier) { \
printf("WARNING: TEST DISABLED FOR MEMORY TOOL WITH HEAP POISONING WITHOUT READ BARRIERS\n"); \
return; \
}
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 0bc9138..03fd423 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -448,7 +448,7 @@
}
static bool IsValidReadBarrierImplicitCheck(uintptr_t addr) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
uint32_t monitor_offset = mirror::Object::MonitorOffset().Uint32Value();
if (kUseBakerReadBarrier &&
(kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64)) {
@@ -483,7 +483,7 @@
}
case Instruction::IGET_OBJECT:
- if (kEmitCompilerReadBarrier && IsValidReadBarrierImplicitCheck(addr)) {
+ if (gUseReadBarrier && IsValidReadBarrierImplicitCheck(addr)) {
return true;
}
FALLTHROUGH_INTENDED;
@@ -507,7 +507,7 @@
}
case Instruction::AGET_OBJECT:
- if (kEmitCompilerReadBarrier && IsValidReadBarrierImplicitCheck(addr)) {
+ if (gUseReadBarrier && IsValidReadBarrierImplicitCheck(addr)) {
return true;
}
FALLTHROUGH_INTENDED;
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index e7b3623..81a9e21 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -435,7 +435,7 @@
}
extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
return ReadBarrier::Mark(obj);
}
@@ -443,14 +443,12 @@
mirror::Object* obj,
uint32_t offset) {
// Used only in connection with non-volatile loads.
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset;
mirror::HeapReference<mirror::Object>* ref_addr =
reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr);
- constexpr ReadBarrierOption kReadBarrierOption =
- kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
mirror::Object* result =
- ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kReadBarrierOption>(
+ ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kWithReadBarrier>(
obj,
MemberOffset(offset),
ref_addr);
@@ -458,7 +456,7 @@
}
extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
return root->Read();
}
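
A sketch, not part of the patch, of why the constexpr selector had to go in the hunk above: `gUseReadBarrier` is now a runtime constant, so it can no longer feed a template argument, and this slow path is only reached when read barriers are active anyway. The names below are illustrative, not ART's declarations.

  enum ReadBarrierOption { kWithReadBarrier, kWithoutReadBarrier };

  template <ReadBarrierOption kOption>
  void* LoadRef(void** slot) { return *slot; }  // Real version applies the barrier.

  void* SlowPathLoad(void** slot) {
    // DCHECK(gUseReadBarrier) holds here, so the option is unconditionally
    // kWithReadBarrier; a runtime bool could not select it at compile time.
    return LoadRef<kWithReadBarrier>(slot);
  }
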
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 8569124..fafa3c7 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -47,7 +47,7 @@
static_assert(std::is_trivial<IRTSegmentState>::value, "IRTSegmentState not trivial");
extern "C" void artJniReadBarrier(ArtMethod* method) {
- DCHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
mirror::CompressedReference<mirror::Object>* declaring_class =
method->GetDeclaringClassAddressWithoutBarrier();
if (kUseBakerReadBarrier) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 91d252d..9e2100738 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1941,7 +1941,7 @@
// The declaring class must be marked.
auto* declaring_class = reinterpret_cast<mirror::CompressedReference<mirror::Class>*>(
method->GetDeclaringClassAddressWithoutBarrier());
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
artJniReadBarrier(method);
}
sm_.AdvancePointer(declaring_class);
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 561eae7..9586e9d 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -141,13 +141,13 @@
}
void AllocRecordObjectMap::AllowNewAllocationRecords() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
allow_new_record_ = true;
new_record_condition_.Broadcast(Thread::Current());
}
void AllocRecordObjectMap::DisallowNewAllocationRecords() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
allow_new_record_ = false;
}
@@ -240,8 +240,8 @@
// Since nobody seemed to really notice or care it might not be worth the trouble.
// Wait for GC's sweeping to complete and allow new records.
- while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ while (UNLIKELY((!gUseReadBarrier && !allow_new_record_) ||
+ (gUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 2820ae0..0701ceb 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -18,6 +18,7 @@
#include "base/quasi_atomic.h"
#include "base/systrace.h"
+#include "base/utils.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
@@ -25,6 +26,7 @@
#include "gc/verification-inl.h"
#include "jit/jit_code_cache.h"
#include "mirror/object-refvisitor-inl.h"
+#include "read_barrier_config.h"
#include "scoped_thread_state_change-inl.h"
#include "sigchain.h"
#include "thread_list.h"
@@ -37,10 +39,6 @@
#include <fstream>
#include <numeric>
-namespace art {
-namespace gc {
-namespace collector {
-
#ifndef __BIONIC__
#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4
@@ -59,7 +57,52 @@
#endif
#endif // __NR_userfaultfd
#endif // __BIONIC__
-// Turn of kCheckLocks when profiling the GC as it slows down the GC
+
+namespace art {
+
+#ifndef ART_FORCE_USE_READ_BARRIER
+static bool ShouldUseUserfaultfd() {
+#if !defined(__linux__)
+ return false;
+#elif !defined(ART_TARGET)
+ // We require the MREMAP_DONTUNMAP functionality of the mremap syscall, which
+ // was introduced in kernel version 5.13. Check for that on host. The check is
+ // not required on target as MREMAP_DONTUNMAP and userfaultfd were enabled
+ // together.
+ if (!IsKernelVersionAtLeast(5, 13)) {
+ return false;
+ }
+#endif
+ int fd = syscall(__NR_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY);
+#ifndef ART_TARGET
+ // On host we may not have the kernel patches that restrict userfaultfd to
+ // user mode. But that is not a security concern as we are on host.
+ // Therefore, attempt one more time without UFFD_USER_MODE_ONLY.
+ if (fd == -1 && errno == EINVAL) {
+ fd = syscall(__NR_userfaultfd, O_CLOEXEC);
+ }
+#endif
+ if (fd >= 0) {
+ close(fd);
+ return true;
+ } else {
+ return false;
+ }
+}
+#endif
+
+#ifdef ART_FORCE_USE_READ_BARRIER
+const bool gUseReadBarrier = kUseBakerReadBarrier || kUseTableLookupReadBarrier;
+#else
+const bool gUseReadBarrier = (kUseBakerReadBarrier || kUseTableLookupReadBarrier)
+ && !ShouldUseUserfaultfd();
+#endif
+const bool gUseUserfaultfd = !gUseReadBarrier;
+
+namespace gc {
+namespace collector {
+
+// Turn off kCheckLocks when profiling the GC as it slows down the GC
// significantly.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;
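
A minimal standalone version of the boot-time probe added above, not part of the patch, assuming Linux headers; the fallback #define mirrors the one in the hunk.

  #include <fcntl.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  #ifndef UFFD_USER_MODE_ONLY
  #define UFFD_USER_MODE_ONLY 1  // Fallback, as in the hunk above.
  #endif

  // Opening a userfaultfd both verifies kernel support and honours the
  // restriction to user-mode faults; the fd is closed immediately because
  // only the yes/no answer matters at boot.
  static bool KernelSupportsUserfaultfd() {
    int fd = syscall(__NR_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY);
    if (fd >= 0) {
      close(fd);
      return true;
    }
    return false;
  }
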
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 992a0ba..8fdb524 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -65,11 +65,11 @@
std::ostream& operator<<(std::ostream& os, CollectorType collector_type);
static constexpr CollectorType kCollectorTypeDefault =
-#if ART_DEFAULT_GC_TYPE_IS_CMS
- kCollectorTypeCMS
+#if ART_DEFAULT_GC_TYPE_IS_CMC
+ kCollectorTypeCMC
#elif ART_DEFAULT_GC_TYPE_IS_SS
kCollectorTypeSS
-#else
+#elif ART_DEFAULT_GC_TYPE_IS_CMS
kCollectorTypeCMS
+#else
#error "ART default GC type must be set"
#endif
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 9e1524e..9c76060 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -209,10 +209,8 @@
}
// IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for the
// BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
- // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant
- // since the allocator_type should be constant propagated.
- if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()
- && UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) {
+ // optimized out.
+ if (IsGcConcurrent() && UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) {
need_gc = true;
}
GetMetrics()->TotalBytesAllocated()->Add(bytes_tl_bulk_allocated);
@@ -442,7 +440,7 @@
return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}
-inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
+inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type ATTRIBUTE_UNUSED,
size_t alloc_size,
bool grow) {
size_t old_target = target_footprint_.load(std::memory_order_relaxed);
@@ -457,7 +455,7 @@
return true;
}
// We are between target_footprint_ and growth_limit_ .
- if (AllocatorMayHaveConcurrentGC(allocator_type) && IsGcConcurrent()) {
+ if (IsGcConcurrent()) {
return false;
} else {
if (grow) {
diff --git a/runtime/gc/heap-visit-objects-inl.h b/runtime/gc/heap-visit-objects-inl.h
index e20d981..a235c44 100644
--- a/runtime/gc/heap-visit-objects-inl.h
+++ b/runtime/gc/heap-visit-objects-inl.h
@@ -118,7 +118,7 @@
// For speed reasons, only perform it when Rosalloc could possibly be used.
// (Disabled for read barriers because it never uses Rosalloc).
// (See the DCHECK in RosAllocSpace constructor).
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
// Rosalloc has a race in allocation. Objects can be written into the allocation
// stack before their header writes are visible to this thread.
// See b/28790624 for more details.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 6937d1c..a8195a3 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -417,7 +417,7 @@
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
} else if (background_collector_type_ != gc::kCollectorTypeHomogeneousSpaceCompact) {
@@ -999,7 +999,7 @@
}
void Heap::EnsureObjectUserfaulted(ObjPtr<mirror::Object> obj) {
- if (kUseUserfaultfd) {
+ if (gUseUserfaultfd) {
// Use volatile to ensure that the compiler loads from memory to trigger userfaults, if required.
volatile uint8_t volatile_sum;
volatile uint8_t* start = reinterpret_cast<volatile uint8_t*>(obj.Ptr());
@@ -1533,7 +1533,7 @@
VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
}
} else if (desired_collector_type == kCollectorTypeCCBackground) {
- DCHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
if (!CareAboutPauseTimes()) {
// Invoke CC full compaction.
CollectGarbageInternal(collector::kGcTypeFull,
@@ -4251,7 +4251,7 @@
}
void Heap::AllowNewAllocationRecords() const {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
AllocRecordObjectMap* allocation_records = GetAllocationRecords();
if (allocation_records != nullptr) {
@@ -4260,7 +4260,7 @@
}
void Heap::DisallowNewAllocationRecords() const {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
AllocRecordObjectMap* allocation_records = GetAllocationRecords();
if (allocation_records != nullptr) {
@@ -4637,7 +4637,7 @@
uint64_t last_adj_time = NanoTime();
next_gc_type_ = NonStickyGcType(); // Always start with a full gc.
- if (kUseUserfaultfd) {
+ if (gUseUserfaultfd) {
DCHECK_NE(mark_compact_, nullptr);
mark_compact_->CreateUserfaultfd(/*post_fork*/true);
}
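EnsureObjectUserfaulted above leans on volatile loads so the compiler cannot elide the reads; any page of the object still registered with userfaultfd then faults in before the caller proceeds. A reduced sketch of that touch loop (page-granular stride here for brevity; the runtime's exact stride may differ):

    #include <cstddef>
    #include <cstdint>

    // Touch every page in [begin, begin + size) with a real load. The
    // volatile qualifier forces the reads to be emitted, which is what
    // triggers the userfaults while the range is registered with the CMC
    // collector's userfaultfd.
    uint8_t TouchRange(const uint8_t* begin, size_t size) {
      volatile uint8_t sum = 0;
      const volatile uint8_t* p = begin;
      for (size_t i = 0; i < size; i += 4096) {
        sum = sum + p[i];
      }
      return sum;
    }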
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 0fc04a9..044999d 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -1034,15 +1034,6 @@
allocator_type != kAllocatorTypeTLAB &&
allocator_type != kAllocatorTypeRegion;
}
- static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
- if (kUseUserfaultfd || kUseReadBarrier) {
- // May have the TLAB allocator but is always concurrent. TODO: clean this up.
- return true;
- }
- return
- allocator_type != kAllocatorTypeTLAB &&
- allocator_type != kAllocatorTypeBumpPointer;
- }
static bool IsMovingGc(CollectorType collector_type) {
return
collector_type == kCollectorTypeCC ||
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 5e41ee4..772174f 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -90,7 +90,7 @@
ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
ObjPtr<mirror::Reference> reference) {
auto slow_path_required = [this, self]() REQUIRES_SHARED(Locks::mutator_lock_) {
- return kUseReadBarrier ? !self->GetWeakRefAccessEnabled() : SlowPathEnabled();
+ return gUseReadBarrier ? !self->GetWeakRefAccessEnabled() : SlowPathEnabled();
};
if (!slow_path_required()) {
return reference->GetReferent();
@@ -118,10 +118,10 @@
// Keeping reference_processor_lock_ blocks the broadcast when we try to reenable the fast path.
while (slow_path_required()) {
DCHECK(collector_ != nullptr);
- constexpr bool kOtherReadBarrier = kUseReadBarrier && !kUseBakerReadBarrier;
+ const bool other_read_barrier = !kUseBakerReadBarrier && gUseReadBarrier;
if (UNLIKELY(reference->IsFinalizerReferenceInstance()
|| rp_state_ == RpState::kStarting /* too early to determine mark state */
- || (kOtherReadBarrier && reference->IsPhantomReferenceInstance()))) {
+ || (other_read_barrier && reference->IsPhantomReferenceInstance()))) {
// Odd cases in which it doesn't hurt to just wait, or the wait is likely to be very brief.
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
@@ -210,7 +210,7 @@
}
{
MutexLock mu(self, *Locks::reference_processor_lock_);
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
CHECK_EQ(SlowPathEnabled(), concurrent_) << "Slow path must be enabled iff concurrent";
} else {
// Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent_ == false).
@@ -305,7 +305,7 @@
// could result in a stale is_marked_callback_ being called before the reference processing
// starts since there is a small window of time where slow_path_enabled_ is enabled but the
// callback isn't yet set.
- if (!kUseReadBarrier && concurrent_) {
+ if (!gUseReadBarrier && concurrent_) {
// Done processing, disable the slow path and broadcast to the waiters.
DisableSlowPath(self);
}
@@ -418,8 +418,8 @@
void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
// Wait until we are done processing reference.
- while ((!kUseReadBarrier && SlowPathEnabled()) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
+ while ((!gUseReadBarrier && SlowPathEnabled()) ||
+ (gUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
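One detail worth noting in the other_read_barrier rewrite in GetReferent above: the operands are reordered so the compile-time constant kUseBakerReadBarrier is evaluated first. In the common Baker build the expression folds to false and the runtime load of gUseReadBarrier is never emitted. The general shape, with hypothetical stand-in names:

    // With the constant first, the compiler folds the whole expression away
    // in builds where kCompileTimeBarrier is true; the runtime flag is only
    // consulted in the remaining configuration.
    constexpr bool kCompileTimeBarrier = true;  // stand-in for kUseBakerReadBarrier
    const bool gRuntimeFlag = false;            // stand-in for gUseReadBarrier

    bool OtherBarrierSelected() {
      return !kCompileTimeBarrier && gRuntimeFlag;
    }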
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index ef85b39..77b9548 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -48,7 +48,7 @@
void Allow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
MutexLock mu(Thread::Current(), allow_disallow_lock_);
allow_new_system_weak_ = true;
new_weak_condition_.Broadcast(Thread::Current());
@@ -57,7 +57,7 @@
void Disallow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
MutexLock mu(Thread::Current(), allow_disallow_lock_);
allow_new_system_weak_ = false;
}
@@ -78,8 +78,8 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_) {
// Wait for GC's sweeping to complete and allow new records
- while (UNLIKELY((!kUseReadBarrier && !allow_new_system_weak_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ while (UNLIKELY((!gUseReadBarrier && !allow_new_system_weak_) ||
+ (gUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpointFromWeakRefAccess(&allow_disallow_lock_);
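The Wait helper above follows the pattern every weak-access site in this CL uses: re-test the guard in a loop, give pending empty checkpoints a chance to run first (so a thread blocked here cannot stall a checkpoint), then sleep on the condition variable. Stripped of the ART checkpoint hook, which has no standard-library analogue, the skeleton is the ordinary condition-wait idiom:

    #include <condition_variable>
    #include <mutex>

    std::mutex gLock;
    std::condition_variable gCond;
    bool gAccessAllowed = false;

    // Re-check the guard on every wakeup, exactly as the loop above does.
    void WaitUntilAccessAllowed() {
      std::unique_lock<std::mutex> lock(gLock);
      while (!gAccessAllowed) {
        gCond.wait(lock);
      }
    }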
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 4973355..10b2d65 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -190,8 +190,8 @@
{
ScopedThreadSuspension sts(self, ThreadState::kWaitingWeakGcRootRead);
MutexLock mu(self, *Locks::intern_table_lock_);
- while ((!kUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
+ while ((!gUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
+ (gUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
weak_intern_condition_.Wait(self);
}
}
@@ -218,7 +218,7 @@
if (strong != nullptr) {
return strong;
}
- if (kUseReadBarrier ? self->GetWeakRefAccessEnabled()
+ if (gUseReadBarrier ? self->GetWeakRefAccessEnabled()
: weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
break;
}
@@ -230,7 +230,7 @@
auto h = hs.NewHandleWrapper(&s);
WaitUntilAccessible(self);
}
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
} else {
CHECK(self->GetWeakRefAccessEnabled());
@@ -429,7 +429,7 @@
}
void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
weak_root_state_ = new_state;
if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
weak_intern_condition_.Broadcast(Thread::Current());
diff --git a/runtime/interpreter/interpreter_cache-inl.h b/runtime/interpreter/interpreter_cache-inl.h
index cea8157..1dda78b 100644
--- a/runtime/interpreter/interpreter_cache-inl.h
+++ b/runtime/interpreter/interpreter_cache-inl.h
@@ -39,7 +39,7 @@
// For simplicity, only update the cache if weak ref accesses are enabled. If
// they are disabled, this means the GC is processing the cache, and is
// reading it concurrently.
- if (kUseReadBarrier && self->GetWeakRefAccessEnabled()) {
+ if (gUseReadBarrier && self->GetWeakRefAccessEnabled()) {
data_[IndexOf(key)] = Entry{key, value};
}
}
diff --git a/runtime/interpreter/mterp/nterp.cc b/runtime/interpreter/mterp/nterp.cc
index 5922efd..3938d54 100644
--- a/runtime/interpreter/mterp/nterp.cc
+++ b/runtime/interpreter/mterp/nterp.cc
@@ -34,7 +34,7 @@
namespace interpreter {
bool IsNterpSupported() {
- return !kPoisonHeapReferences && kUseReadBarrier;
+ return !kPoisonHeapReferences && gUseReadBarrier;
}
bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index cec6f1d..4d3e030 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1557,7 +1557,7 @@
mirror::Object* new_value = shadow_frame->GetVRegReference(arg_offset + 5);
// Must use non transactional mode.
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// Need to make sure the reference stored in the field is a to-space one before attempting the
// CAS or the CAS could fail incorrectly.
mirror::HeapReference<mirror::Object>* field_addr =
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 199a00b..ad8cc9a 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -559,7 +559,7 @@
}
bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
- return kUseReadBarrier
+ return gUseReadBarrier
? self->GetWeakRefAccessEnabled()
: is_weak_access_enabled_.load(std::memory_order_seq_cst);
}
@@ -582,13 +582,13 @@
}
void JitCodeCache::AllowInlineCacheAccess() {
- DCHECK(!kUseReadBarrier);
+ DCHECK(!gUseReadBarrier);
is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
BroadcastForInlineCacheAccess();
}
void JitCodeCache::DisallowInlineCacheAccess() {
- DCHECK(!kUseReadBarrier);
+ DCHECK(!gUseReadBarrier);
is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
}
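IsWeakAccessEnabled now picks between two protocols at runtime: under read barriers, weak-ref access is tracked per thread, while the non-read-barrier collectors gate it with a single global seq-cst flag flipped by Allow/DisallowInlineCacheAccess. Reduced to stand-ins:

    #include <atomic>

    std::atomic<bool> gWeakAccessEnabled{true};  // stand-in for is_weak_access_enabled_

    // thread_flag stands in for self->GetWeakRefAccessEnabled().
    bool IsWeakAccessEnabled(bool use_read_barrier, bool thread_flag) {
      return use_read_barrier
                 ? thread_flag
                 : gWeakAccessEnabled.load(std::memory_order_seq_cst);
    }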
diff --git a/runtime/jni/java_vm_ext-inl.h b/runtime/jni/java_vm_ext-inl.h
index 29cdf1b..c98a553 100644
--- a/runtime/jni/java_vm_ext-inl.h
+++ b/runtime/jni/java_vm_ext-inl.h
@@ -26,7 +26,7 @@
inline bool JavaVMExt::MayAccessWeakGlobals(Thread* self) const {
DCHECK(self != nullptr);
- return kUseReadBarrier
+ return gUseReadBarrier
? self->GetWeakRefAccessEnabled()
: allow_accessing_weak_globals_.load(std::memory_order_seq_cst);
}
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index f41b6c0..39d5729 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -729,8 +729,8 @@
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
// CMS needs this to block for concurrent reference processing because an object allocated during
// the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak
- // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
- if (!kUseReadBarrier) {
+ // ref. But CC (gUseReadBarrier == true) doesn't because of the to-space invariant.
+ if (!gUseReadBarrier) {
WaitForWeakGlobalsAccess(self);
}
std::string error_msg;
@@ -809,7 +809,7 @@
}
void JavaVMExt::DisallowNewWeakGlobals() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
Thread* const self = Thread::Current();
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
// DisallowNewWeakGlobals is only called by CMS during the pause. It is required to have the
@@ -820,7 +820,7 @@
}
void JavaVMExt::AllowNewWeakGlobals() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
allow_accessing_weak_globals_.store(true, std::memory_order_seq_cst);
@@ -876,7 +876,7 @@
return DecodeWeakGlobal(self, ref);
}
// self can be null during a runtime shutdown. ~Runtime()->~ClassLinker()->DecodeWeakGlobal().
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
DCHECK(allow_accessing_weak_globals_.load(std::memory_order_seq_cst));
}
return weak_globals_.SynchronizedGet(ref);
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 84f45c2..599a599 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -183,8 +183,7 @@
LockState GetState() const {
CheckReadBarrierState();
- if ((!kUseReadBarrier && UNLIKELY(value_ == 0)) ||
- (kUseReadBarrier && UNLIKELY((value_ & kGCStateMaskShiftedToggled) == 0))) {
+ if (UNLIKELY((value_ & kGCStateMaskShiftedToggled) == 0)) {
return kUnlocked;
} else {
uint32_t internal_state = (value_ >> kStateShift) & kStateMask;
@@ -288,7 +287,7 @@
void CheckReadBarrierState() const {
if (kIsDebugBuild && ((value_ >> kStateShift) & kStateMask) != kStateForwardingAddress) {
uint32_t rb_state = ReadBarrierState();
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
DCHECK_EQ(rb_state, 0U);
} else {
DCHECK(rb_state == ReadBarrier::NonGrayState() ||
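The GetState change above is subtle: rather than branching on the now-runtime flag, the unlocked test masks out the GC-state bits unconditionally. That subsumes the old value_ == 0 check, since a non-read-barrier unlocked word has those bits clear anyway. In miniature, with an illustrative bit layout (the real constants live in lock_word.h):

    #include <cstdint>

    // Illustrative: assume a 2-bit GC-state field at bits 27-28.
    constexpr uint32_t kGCStateMaskShifted = 0x3u << 27;
    constexpr uint32_t kGCStateMaskShiftedToggled = ~kGCStateMaskShifted;

    bool IsUnlocked(uint32_t lock_word) {
      // Unlocked iff everything outside the GC-state field is zero; holds
      // for both the read-barrier and userfaultfd configurations.
      return (lock_word & kGCStateMaskShiftedToggled) == 0;
    }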
diff --git a/runtime/mirror/class-refvisitor-inl.h b/runtime/mirror/class-refvisitor-inl.h
index 9bcfd03..ee5c11f 100644
--- a/runtime/mirror/class-refvisitor-inl.h
+++ b/runtime/mirror/class-refvisitor-inl.h
@@ -55,7 +55,7 @@
void Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
VisitFields<kReadBarrierOption>([&](ArtField* field) REQUIRES_SHARED(art::Locks::mutator_lock_) {
field->VisitRoots(visitor);
- if (kIsDebugBuild && !kUseUserfaultfd && IsResolved()) {
+ if (kIsDebugBuild && !gUseUserfaultfd && IsResolved()) {
CHECK_EQ(field->GetDeclaringClass<kReadBarrierOption>(), this)
<< GetStatus() << field->GetDeclaringClass()->PrettyClass() << " != " << PrettyClass();
}
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 74f9ccb..b937c2c 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -60,7 +60,7 @@
return nullptr;
}
mirror::DexCache* dex_cache = this;
- if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
+ if (gUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
// Several code paths use DexCache without read-barrier for performance.
// We have to check the "to-space" object here to avoid allocating twice.
dex_cache = reinterpret_cast<DexCache*>(ReadBarrier::Mark(dex_cache));
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 0ac71bf..318a811 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -104,7 +104,7 @@
}
inline uint32_t Object::GetMarkBit() {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
return GetLockWord(false).MarkBitState();
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index ede1c66..bb9e85d 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -115,7 +115,7 @@
}
}
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// We need a RB here. After copying the whole object above, copy reference fields one by one
// again with a RB to make sure there are no from space refs. TODO: Optimize this later?
CopyReferenceFieldsWithReadBarrierVisitor visitor(dest);
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 1bd0185..87f24eb 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -121,7 +121,7 @@
if (copy_forward) {
// Forward copy.
bool baker_non_gray_case = false;
- if (kUseReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
uintptr_t fake_address_dependency;
if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
baker_non_gray_case = true;
@@ -146,7 +146,7 @@
} else {
// Backward copy.
bool baker_non_gray_case = false;
- if (kUseReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
uintptr_t fake_address_dependency;
if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
baker_non_gray_case = true;
@@ -196,7 +196,7 @@
// We can't use memmove since it does not handle read barriers and may copy byte by byte.
// See b/32012820.
bool baker_non_gray_case = false;
- if (kUseReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
uintptr_t fake_address_dependency;
if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
baker_non_gray_case = true;
@@ -244,7 +244,7 @@
ObjPtr<T> o = nullptr;
int i = 0;
bool baker_non_gray_case = false;
- if (kUseReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
uintptr_t fake_address_dependency;
if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
baker_non_gray_case = true;
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index d36a2ab..68d329d 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -205,7 +205,7 @@
// Method to insert a read barrier for accessors to reference fields.
inline void ReadBarrierForVarHandleAccess(ObjPtr<Object> obj, MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// We need to ensure that the reference stored in the field is a to-space one before attempting
// the CompareAndSet/CompareAndExchange/Exchange operation otherwise it will fail incorrectly
// if obj is in the process of being moved.
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 0cad79b..4e64c95 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1139,7 +1139,7 @@
lock_word.GCState()));
// Only this thread pays attention to the count. Thus there is no need for stronger
// than relaxed memory ordering.
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
h_obj->SetLockWord(thin_locked, /* as_volatile= */ false);
AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
return h_obj.Get(); // Success!
@@ -1239,7 +1239,7 @@
} else {
new_lw = LockWord::FromDefault(lock_word.GCState());
}
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
DCHECK_EQ(new_lw.ReadBarrierState(), 0U);
// TODO: This really only needs memory_order_release, but we currently have
// no way to specify that. In fact there seem to be no legitimate uses of SetLockWord
@@ -1409,7 +1409,7 @@
{
ObjPtr<mirror::Object> lock_object = thread->GetMonitorEnterObject();
if (lock_object != nullptr) {
- if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
+ if (gUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
// We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
// may not have been flipped yet and "pretty_object" may be a from-space (stale) ref, in
// which case the GetLockOwnerThreadId() call below will crash. So explicitly mark/forward
@@ -1613,13 +1613,13 @@
}
void MonitorList::DisallowNewMonitors() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
MutexLock mu(Thread::Current(), monitor_list_lock_);
allow_new_monitors_ = false;
}
void MonitorList::AllowNewMonitors() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, monitor_list_lock_);
allow_new_monitors_ = true;
@@ -1637,8 +1637,8 @@
MutexLock mu(self, monitor_list_lock_);
// CMS needs this to block for concurrent reference processing because an object allocated during
// the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak
- // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
- while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
+ // ref. But CC (gUseReadBarrier == true) doesn't because of the to-space invariant.
+ while (!gUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpointFromWeakRefAccess(&monitor_list_lock_);
diff --git a/runtime/monitor_objects_stack_visitor.cc b/runtime/monitor_objects_stack_visitor.cc
index 2e75e37..524c0ec 100644
--- a/runtime/monitor_objects_stack_visitor.cc
+++ b/runtime/monitor_objects_stack_visitor.cc
@@ -90,7 +90,7 @@
void MonitorObjectsStackVisitor::VisitLockedObject(ObjPtr<mirror::Object> o, void* context) {
MonitorObjectsStackVisitor* self = reinterpret_cast<MonitorObjectsStackVisitor*>(context);
if (o != nullptr) {
- if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
+ if (gUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
// We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
// may not have been flipped yet and "o" may be a from-space (stale) ref, in which case the
// IdentityHashCode call below will crash. So explicitly mark/forward it here.
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index f23010b..8b5635d 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -37,7 +37,7 @@
}
static jboolean Reference_refersTo0(JNIEnv* env, jobject javaThis, jobject o) {
- if (kUseReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
// Fall back to naive implementation that may block and needlessly preserve javaThis.
return env->IsSameObject(Reference_getReferent(env, javaThis), o);
}
@@ -48,7 +48,7 @@
if (referent == other) {
return JNI_TRUE;
}
- if (!kUseReadBarrier || referent.IsNull() || other.IsNull()) {
+ if (!gUseReadBarrier || referent.IsNull() || other.IsNull()) {
return JNI_FALSE;
}
// Explicitly handle the case in which referent is a from-space pointer. Don't use a
diff --git a/runtime/native/jdk_internal_misc_Unsafe.cc b/runtime/native/jdk_internal_misc_Unsafe.cc
index 307a2fa..e708732 100644
--- a/runtime/native/jdk_internal_misc_Unsafe.cc
+++ b/runtime/native/jdk_internal_misc_Unsafe.cc
@@ -99,7 +99,7 @@
ObjPtr<mirror::Object> expectedValue = soa.Decode<mirror::Object>(javaExpectedValue);
ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
// JNI must use non transactional mode.
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// Need to make sure the reference stored in the field is a to-space one before attempting the
// CAS or the CAS could fail incorrectly.
// Note that the read barrier load does NOT need to be volatile.
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index e9c5af0..1781a29 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -69,7 +69,7 @@
ObjPtr<mirror::Object> expectedValue = soa.Decode<mirror::Object>(javaExpectedValue);
ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
// JNI must use non transactional mode.
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// Need to make sure the reference stored in the field is a to-space one before attempting the
// CAS or the CAS could fail incorrectly.
// Note that the read barrier load does NOT need to be volatile.
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index e8358b7..e0189a9 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -1815,7 +1815,7 @@
store.Put(OatHeader::kCompilerFilter, CompilerFilter::NameOfFilter(CompilerFilter::kVerify));
store.Put(OatHeader::kCompilationReasonKey, "vdex");
store.Put(OatHeader::kConcurrentCopying,
- kUseReadBarrier ? OatHeader::kTrueValue : OatHeader::kFalseValue);
+ gUseReadBarrier ? OatHeader::kTrueValue : OatHeader::kFalseValue);
oat_header_.reset(OatHeader::Create(kRuntimeISA,
isa_features.get(),
number_of_dex_files,
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 454ee79..78ab53b 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -545,9 +545,7 @@
// compiled code and are otherwise okay, we should return something like
// kOatRelocationOutOfDate. If they don't contain compiled code, the read
// barrier state doesn't matter.
- const bool is_cc = file.GetOatHeader().IsConcurrentCopying();
- constexpr bool kRuntimeIsCC = kUseReadBarrier;
- if (is_cc != kRuntimeIsCC) {
+ if (file.GetOatHeader().IsConcurrentCopying() != gUseReadBarrier) {
return kOatCannotOpen;
}
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index f028473..ff4693f 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -35,7 +35,7 @@
inline MirrorType* ReadBarrier::Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
- if (kUseReadBarrier && with_read_barrier) {
+ if (gUseReadBarrier && with_read_barrier) {
if (kCheckDebugDisallowReadBarrierCount) {
Thread* const self = Thread::Current();
if (self != nullptr) {
@@ -93,7 +93,7 @@
UNREACHABLE();
}
} else if (kReadBarrierOption == kWithFromSpaceBarrier) {
- CHECK(kUseUserfaultfd);
+ CHECK(gUseUserfaultfd);
MirrorType* old = ref_addr->template AsMirrorPtr<kIsVolatile>();
mirror::Object* ref =
Runtime::Current()->GetHeap()->MarkCompactCollector()->GetFromSpaceAddrFromBarrier(old);
@@ -109,7 +109,7 @@
GcRootSource* gc_root_source) {
MirrorType* ref = *root;
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
- if (kUseReadBarrier && with_read_barrier) {
+ if (gUseReadBarrier && with_read_barrier) {
if (kCheckDebugDisallowReadBarrierCount) {
Thread* const self = Thread::Current();
if (self != nullptr) {
@@ -154,7 +154,7 @@
GcRootSource* gc_root_source) {
MirrorType* ref = root->AsMirrorPtr();
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
- if (kUseReadBarrier && with_read_barrier) {
+ if (gUseReadBarrier && with_read_barrier) {
if (kCheckDebugDisallowReadBarrierCount) {
Thread* const self = Thread::Current();
if (self != nullptr) {
@@ -199,7 +199,7 @@
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
// Only read-barrier configurations can have mutators run while
// the GC is marking.
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
return ref;
}
// IsMarked does not handle null, so handle it here.
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 3b89377..be5a9a0 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -94,7 +94,7 @@
// Without the holder object, and only with the read barrier configuration (no-op otherwise).
static void MaybeAssertToSpaceInvariant(mirror::Object* ref)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
AssertToSpaceInvariant(ref);
}
}
diff --git a/runtime/read_barrier_config.h b/runtime/read_barrier_config.h
index 3c5afad..e974b04 100644
--- a/runtime/read_barrier_config.h
+++ b/runtime/read_barrier_config.h
@@ -62,18 +62,8 @@
static constexpr bool kUseTableLookupReadBarrier = false;
#endif
-static constexpr bool kUseReadBarrier = kUseBakerReadBarrier || kUseTableLookupReadBarrier;
-static constexpr bool kUseUserfaultfd = !kUseReadBarrier;
-
-// Debugging flag that forces the generation of read barriers, but
-// does not trigger the use of the concurrent copying GC.
-//
-// TODO: Remove this flag when the read barriers compiler
-// instrumentation is completed.
-static constexpr bool kForceReadBarrier = false;
-// TODO: Likewise, remove this flag when kForceReadBarrier is removed
-// and replace it with kUseReadBarrier.
-static constexpr bool kEmitCompilerReadBarrier = kForceReadBarrier || kUseReadBarrier;
+extern const bool gUseReadBarrier;
+extern const bool gUseUserfaultfd;
// Disabled for performance reasons.
static constexpr bool kCheckDebugDisallowReadBarrierCount = kIsDebugBuild;
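This hunk is the heart of the CL: kUseReadBarrier stops being a constexpr the compiler can fold and becomes a const global initialized at load time, so every if (gUseReadBarrier) keeps both arms in the binary and both collectors can ship in one libart. A toy model of the difference, with hypothetical names:

    #include <cstdio>

    // Stand-ins for the two collectors; with a runtime flag both must
    // compile and link, whereas a constexpr flag would let the dead one be
    // discarded at build time.
    void StartConcurrentCopying() { std::puts("CC"); }
    void StartMarkCompact() { std::puts("CMC"); }

    // In ART the initializer probes the kernel (ShouldUseUserfaultfd)
    // unless the build forces read barriers; here it is a placeholder.
    extern const bool gFlag;
    const bool gFlag = true;

    int main() {
      if (gFlag) {  // decided once per process, not once per build
        StartConcurrentCopying();
      } else {
        StartMarkCompact();
      }
      return 0;
    }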
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 1ae4405..80d8a3b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -201,10 +201,6 @@
static constexpr double kNormalMinLoadFactor = 0.4;
static constexpr double kNormalMaxLoadFactor = 0.7;
-// Extra added to the default heap growth multiplier. Used to adjust the GC ergonomics for the read
-// barrier config.
-static constexpr double kExtraDefaultHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
-
Runtime* Runtime::instance_ = nullptr;
struct TraceConfig {
@@ -1162,10 +1158,6 @@
}
// Create the thread pools.
- if (!kUseUserfaultfd) {
- // Userfaultfd GC creates the thread-pool on its own.
- heap_->CreateThreadPool();
- }
// Avoid creating the runtime thread pool for system server since it will not be used and would
// waste memory.
if (!is_system_server) {
@@ -1615,9 +1607,11 @@
// If low memory mode, use 1.0 as the multiplier by default.
foreground_heap_growth_multiplier = 1.0f;
} else {
+ // An extra 1.0 is added to the default heap growth multiplier for the
+ // concurrent compacting GCs. This is done for historical reasons.
+ // TODO: remove when we revisit heap configurations.
foreground_heap_growth_multiplier =
- runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier) +
- kExtraDefaultHeapGrowthMultiplier;
+ runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier) + 1.0f;
}
XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
@@ -1645,9 +1639,9 @@
image_locations_,
instruction_set_,
// Override the collector type to CC if using the read-barrier config.
- kUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
- kUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
- : runtime_options.GetOrDefault(Opt::BackgroundGc),
+ gUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
+ gUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
+ : BackgroundGcOption(xgc_option.collector_type_),
runtime_options.GetOrDefault(Opt::LargeObjectSpace),
runtime_options.GetOrDefault(Opt::LargeObjectThreshold),
runtime_options.GetOrDefault(Opt::ParallelGCThreads),
@@ -2617,7 +2611,7 @@
}
void Runtime::DisallowNewSystemWeaks() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
monitor_list_->DisallowNewMonitors();
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
java_vm_->DisallowNewWeakGlobals();
@@ -2633,7 +2627,7 @@
}
void Runtime::AllowNewSystemWeaks() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
monitor_list_->AllowNewMonitors();
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping.
java_vm_->AllowNewWeakGlobals();
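The Heap constructor arguments above encode the new fallback: read-barrier builds still pin foreground and background to CC, and otherwise both default to whatever -Xgc selected, so the pair cannot diverge. As a sketch, assuming an enum with only the collectors that matter here:

    enum CollectorType {
      kCollectorTypeCC,
      kCollectorTypeCCBackground,
      kCollectorTypeCMC,
    };

    struct GcConfig {
      CollectorType foreground;
      CollectorType background;
    };

    // requested stands in for xgc_option.collector_type_.
    GcConfig ChooseCollectors(bool use_read_barrier, CollectorType requested) {
      if (use_read_barrier) {
        return {kCollectorTypeCC, kCollectorTypeCCBackground};
      }
      return {requested, requested};
    }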
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 76d1657..6721834 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -80,7 +80,7 @@
RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (bool, AlwaysLogExplicitGcs, true)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
-RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier))
+RUNTIME_OPTIONS_KEY (bool, UseTLAB, kUseTlab)
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
RUNTIME_OPTIONS_KEY (bool, UseJitCompilation, true)
RUNTIME_OPTIONS_KEY (bool, UseProfiledJitCompilation, false)
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 324cd37..a3ac1e7 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -373,7 +373,7 @@
}
inline bool Thread::GetWeakRefAccessEnabled() const {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
DCHECK(this == Thread::Current());
WeakRefAccessState s = tls32_.weak_ref_access_enabled.load(std::memory_order_relaxed);
if (LIKELY(s == WeakRefAccessState::kVisiblyEnabled)) {
@@ -428,7 +428,7 @@
int delta,
AtomicInteger* suspend_barrier,
SuspendReason reason) {
- if (delta > 0 && ((kUseReadBarrier && this != self) || suspend_barrier != nullptr)) {
+ if (delta > 0 && ((gUseReadBarrier && this != self) || suspend_barrier != nullptr)) {
// When delta > 0 (requesting a suspend), ModifySuspendCountInternal() may fail either if
// active_suspend_barriers is full or we are in the middle of a thread flip. Retry in a loop.
while (true) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 0561c5b..5492cc8 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -169,7 +169,7 @@
void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);
void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
tls32_.is_gc_marking = is_marking;
UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
}
@@ -1482,7 +1482,7 @@
return false;
}
- if (kUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
+ if (gUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
// Force retry of a suspend request if it's in the middle of a thread flip to avoid a
// deadlock. b/31683379.
return false;
@@ -2578,7 +2578,7 @@
}
// Mark-stack revocation must be performed at the very end. No
// checkpoint/flip-function or read-barrier should be called after this.
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
}
}
@@ -4716,7 +4716,7 @@
mirror::Object* Thread::GetPeerFromOtherThread() const {
DCHECK(tlsPtr_.jpeer == nullptr);
mirror::Object* peer = tlsPtr_.opeer;
- if (kUseReadBarrier && Current()->GetIsGcMarking()) {
+ if (gUseReadBarrier && Current()->GetIsGcMarking()) {
// We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
// may not have been flipped yet and peer may be a from-space (stale) ref. So explicitly
// mark/forward it here.
diff --git a/runtime/thread.h b/runtime/thread.h
index f40f03f..60fd076 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -379,11 +379,11 @@
void WaitForFlipFunction(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
return tlsPtr_.thread_local_mark_stack;
}
void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
tlsPtr_.thread_local_mark_stack = stack;
}
@@ -1045,7 +1045,7 @@
}
bool GetIsGcMarking() const {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
return tls32_.is_gc_marking;
}
@@ -1058,7 +1058,7 @@
bool GetWeakRefAccessEnabled() const; // Only safe for current thread.
void SetWeakRefAccessEnabled(bool enabled) {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
WeakRefAccessState new_state = enabled ?
WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled;
tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release);
@@ -2236,13 +2236,13 @@
explicit ScopedTransitioningToRunnable(Thread* self)
: self_(self) {
DCHECK_EQ(self, Thread::Current());
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
self_->SetIsTransitioningToRunnable(true);
}
}
~ScopedTransitioningToRunnable() {
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
self_->SetIsTransitioningToRunnable(false);
}
}
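ScopedTransitioningToRunnable keeps the pattern used throughout this CL: an RAII guard whose enter and exit paths are both predicated on the same runtime flag, which stays consistent because gUseReadBarrier never changes after startup. A generic sketch of the shape:

    // Flag-gated RAII guard; because the flag is immutable after
    // initialization, the constructor and destructor always agree.
    class ScopedFlagGated {
     public:
      explicit ScopedFlagGated(bool enabled) : enabled_(enabled) {
        if (enabled_) {
          // e.g. self_->SetIsTransitioningToRunnable(true);
        }
      }
      ~ScopedFlagGated() {
        if (enabled_) {
          // e.g. self_->SetIsTransitioningToRunnable(false);
        }
      }
     private:
      const bool enabled_;
    };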
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 555d2fd..6b23c34 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1270,7 +1270,7 @@
}
CHECK(!Contains(self));
list_.push_back(self);
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
gc::collector::ConcurrentCopying* const cc =
Runtime::Current()->GetHeap()->ConcurrentCopyingCollector();
// Initialize according to the state of the CC collector.