Replace `gUseReadBarrier` with a compiler option in the compiler.
Leave a few `gUseReadBarrier` uses in the JNI macro assemblers;
we shall deal with these later.
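
The new pattern in one sketch (member and file names are taken from
this change; surrounding code is simplified for illustration):

    // compiler/driver/compiler_options.h: the new per-compilation flag.
    bool EmitReadBarrier() const { return emit_read_barrier_; }

    // compiler/optimizing/code_generator.cc: codegen helpers wrap it.
    bool CodeGenerator::EmitBakerReadBarrier() const {
      return kUseBakerReadBarrier && GetCompilerOptions().EmitReadBarrier();
    }

    // compiler/jit/jit_compiler.cc: the JIT seeds the option from the
    // runtime flag, so JIT behavior is unchanged.
    compiler_options_->emit_read_barrier_ = gUseReadBarrier;

Codegen sites then query the codegen object instead of the global,
e.g. `codegen_->EmitBakerReadBarrier()` replaces
`gUseReadBarrier && kUseBakerReadBarrier`.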
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: run-gtests.sh
Test: testrunner.py --target --optimizing
Bug: 289805127
Change-Id: I9d2aa245cee4c650129f169a82beda7dc0dd6a35
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index a37f516..44ea3a5 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -187,6 +187,7 @@
std::unique_ptr<CompilerOptions> CommonCompilerTestImpl::CreateCompilerOptions(
InstructionSet instruction_set, const std::string& variant) {
std::unique_ptr<CompilerOptions> compiler_options = std::make_unique<CompilerOptions>();
+ compiler_options->emit_read_barrier_ = gUseReadBarrier;
compiler_options->instruction_set_ = instruction_set;
std::string error_msg;
compiler_options->instruction_set_features_ =
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index d0770e9..e1245c7 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -52,6 +52,7 @@
image_type_(ImageType::kNone),
multi_image_(false),
compile_art_test_(false),
+ emit_read_barrier_(false),
baseline_(false),
debuggable_(false),
generate_debug_info_(kDefaultGenerateDebugInfo),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index a5b3ae1..7369b61 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -150,6 +150,10 @@
return top_k_profile_threshold_;
}
+ bool EmitReadBarrier() const {
+ return emit_read_barrier_;
+ }
+
bool GetDebuggable() const {
return debuggable_;
}
@@ -421,6 +425,7 @@
ImageType image_type_;
bool multi_image_;
bool compile_art_test_;
+ bool emit_read_barrier_;
bool baseline_;
bool debuggable_;
bool generate_debug_info_;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 86c0f80..1d9cd57 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -66,6 +66,9 @@
// JIT is never PIC, no matter what the runtime compiler options specify.
compiler_options_->SetNonPic();
+ // Set the appropriate read barrier option.
+ compiler_options_->emit_read_barrier_ = gUseReadBarrier;
+
// If the options don't provide whether we generate debuggable code, set
// debuggability based on the runtime value.
if (!compiler_options_->GetDebuggable()) {
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 9349d2c..b125d2e 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -109,6 +109,7 @@
// i.e. if the method was annotated with @CriticalNative
const bool is_critical_native = (access_flags & kAccCriticalNative) != 0u;
+ bool emit_read_barrier = compiler_options.EmitReadBarrier();
bool is_debuggable = compiler_options.GetDebuggable();
bool needs_entry_exit_hooks = is_debuggable && compiler_options.IsJitCompiler();
// We don't support JITing stubs for critical native methods in debuggable runtimes yet.
@@ -208,7 +209,7 @@
// Skip this for @CriticalNative because we're not passing a `jclass` to the native method.
std::unique_ptr<JNIMacroLabel> jclass_read_barrier_slow_path;
std::unique_ptr<JNIMacroLabel> jclass_read_barrier_return;
- if (gUseReadBarrier && is_static && LIKELY(!is_critical_native)) {
+ if (emit_read_barrier && is_static && LIKELY(!is_critical_native)) {
jclass_read_barrier_slow_path = __ CreateLabel();
jclass_read_barrier_return = __ CreateLabel();
@@ -601,7 +602,7 @@
// 8.1. Read barrier slow path for the declaring class in the method for a static call.
// Skip this for @CriticalNative because we're not passing a `jclass` to the native method.
- if (gUseReadBarrier && is_static && !is_critical_native) {
+ if (emit_read_barrier && is_static && !is_critical_native) {
__ Bind(jclass_read_barrier_slow_path.get());
// Construct slow path for read barrier:
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 404a427..80c0f84 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -142,6 +142,22 @@
return true;
}
+bool CodeGenerator::EmitReadBarrier() const {
+ return GetCompilerOptions().EmitReadBarrier();
+}
+
+bool CodeGenerator::EmitBakerReadBarrier() const {
+ return kUseBakerReadBarrier && GetCompilerOptions().EmitReadBarrier();
+}
+
+bool CodeGenerator::EmitNonBakerReadBarrier() const {
+ return !kUseBakerReadBarrier && GetCompilerOptions().EmitReadBarrier();
+}
+
+ReadBarrierOption CodeGenerator::GetCompilerReadBarrierOption() const {
+ return EmitReadBarrier() ? kWithReadBarrier : kWithoutReadBarrier;
+}
+
ScopedArenaAllocator* CodeGenerator::GetScopedAllocator() {
DCHECK(code_generation_data_ != nullptr);
return code_generation_data_->GetScopedAllocator();
@@ -1624,8 +1640,7 @@
// When (non-Baker) read barriers are enabled, some instructions
// use a slow path to emit a read barrier, which does not trigger
// GC.
- (gUseReadBarrier &&
- !kUseBakerReadBarrier &&
+ (EmitNonBakerReadBarrier() &&
(instruction->IsInstanceFieldGet() ||
instruction->IsPredicatedInstanceFieldGet() ||
instruction->IsStaticFieldGet() ||
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 7e46966..ee2653e 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -59,13 +59,6 @@
// Maximum value for a primitive long.
static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);
-// Depending on configuration, `gUseReadBarrier` can be a static const variable.
-// Static variable initialization order across different compilation units is not defined,
-// so function is used instead of static variable `gCompilerReadBarrierOption`.
-inline ReadBarrierOption GetCompilerReadBarrierOption() {
- return gUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
-}
-
constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
constexpr size_t status_byte_offset =
mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
@@ -275,8 +268,6 @@
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
virtual InstructionSet GetInstructionSet() const = 0;
- const CompilerOptions& GetCompilerOptions() const { return compiler_options_; }
-
// Saves the register in the stack. Returns the size taken on stack.
virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
// Restores the register from the stack. Returns the size taken on stack.
@@ -383,6 +374,12 @@
// TODO: Replace with a catch-entering instruction that records the environment.
void RecordCatchBlockInfo();
+ const CompilerOptions& GetCompilerOptions() const { return compiler_options_; }
+ bool EmitReadBarrier() const;
+ bool EmitBakerReadBarrier() const;
+ bool EmitNonBakerReadBarrier() const;
+ ReadBarrierOption GetCompilerReadBarrierOption() const;
+
// Get the ScopedArenaAllocator used for codegen memory allocation.
ScopedArenaAllocator* GetScopedAllocator();
@@ -454,7 +451,7 @@
Location to2,
DataType::Type type2);
- static bool InstanceOfNeedsReadBarrier(HInstanceOf* instance_of) {
+ bool InstanceOfNeedsReadBarrier(HInstanceOf* instance_of) {
// Used only for kExactCheck, kAbstractClassCheck, kClassHierarchyCheck and kArrayObjectCheck.
DCHECK(instance_of->GetTypeCheckKind() == TypeCheckKind::kExactCheck ||
instance_of->GetTypeCheckKind() == TypeCheckKind::kAbstractClassCheck ||
@@ -464,14 +461,14 @@
// If the target class is in the boot image, it's non-moveable and it doesn't matter
// if we compare it with a from-space or to-space reference, the result is the same.
// It's OK to traverse a class hierarchy jumping between from-space and to-space.
- return gUseReadBarrier && !instance_of->GetTargetClass()->IsInBootImage();
+ return EmitReadBarrier() && !instance_of->GetTargetClass()->IsInBootImage();
}
- static ReadBarrierOption ReadBarrierOptionForInstanceOf(HInstanceOf* instance_of) {
+ ReadBarrierOption ReadBarrierOptionForInstanceOf(HInstanceOf* instance_of) {
return InstanceOfNeedsReadBarrier(instance_of) ? kWithReadBarrier : kWithoutReadBarrier;
}
- static bool IsTypeCheckSlowPathFatal(HCheckCast* check_cast) {
+ bool IsTypeCheckSlowPathFatal(HCheckCast* check_cast) {
switch (check_cast->GetTypeCheckKind()) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
@@ -479,7 +476,7 @@
case TypeCheckKind::kArrayObjectCheck:
case TypeCheckKind::kInterfaceCheck: {
bool needs_read_barrier =
- gUseReadBarrier && !check_cast->GetTargetClass()->IsInBootImage();
+ EmitReadBarrier() && !check_cast->GetTargetClass()->IsInBootImage();
// We do not emit read barriers for HCheckCast, so we can get false negatives
// and the slow path shall re-check and simply return if the cast is actually OK.
return !needs_read_barrier;
@@ -494,7 +491,7 @@
UNREACHABLE();
}
- static LocationSummary::CallKind GetCheckCastCallKind(HCheckCast* check_cast) {
+ LocationSummary::CallKind GetCheckCastCallKind(HCheckCast* check_cast) {
return (IsTypeCheckSlowPathFatal(check_cast) && !check_cast->CanThrowIntoCatchBlock())
? LocationSummary::kNoCall // In fact, call on a fatal (non-returning) slow path.
: LocationSummary::kCallOnSlowPath;
@@ -672,7 +669,7 @@
virtual HLoadClass::LoadKind GetSupportedLoadClassKind(
HLoadClass::LoadKind desired_class_load_kind) = 0;
- static LocationSummary::CallKind GetLoadStringCallKind(HLoadString* load) {
+ LocationSummary::CallKind GetLoadStringCallKind(HLoadString* load) {
switch (load->GetLoadKind()) {
case HLoadString::LoadKind::kBssEntry:
DCHECK(load->NeedsEnvironment());
@@ -682,7 +679,7 @@
return LocationSummary::kCallOnMainOnly;
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(!load->NeedsEnvironment());
- return gUseReadBarrier
+ return EmitReadBarrier()
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
break;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index cf5e9d2..bf8db1d 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -587,7 +587,6 @@
obj_(obj),
offset_(offset),
index_(index) {
- DCHECK(gUseReadBarrier);
// If `obj` is equal to `out` or `ref`, it means the initial object
// has been overwritten by (or after) the heap object reference load
// to be instrumented, e.g.:
@@ -602,6 +601,7 @@
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
@@ -766,10 +766,10 @@
public:
ReadBarrierForRootSlowPathARM64(HInstruction* instruction, Location out, Location root)
: SlowPathCodeARM64(instruction), out_(out), root_(root) {
- DCHECK(gUseReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
DCHECK(locations->CanCall());
@@ -2176,7 +2176,7 @@
bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
bool object_field_get_with_read_barrier =
- gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ (instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_field_get_with_read_barrier
@@ -2232,8 +2232,7 @@
MemOperand field =
HeapOperand(InputRegisterAt(instruction, receiver_input), field_info.GetFieldOffset());
- if (gUseReadBarrier && kUseBakerReadBarrier &&
- load_type == DataType::Type::kReference) {
+ if (load_type == DataType::Type::kReference && codegen_->EmitBakerReadBarrier()) {
// Object FieldGet with Baker's read barrier case.
// /* HeapReference<Object> */ out = *(base + offset)
Register base = RegisterFrom(base_loc, DataType::Type::kReference);
@@ -2680,7 +2679,7 @@
void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
- gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ (instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_array_get_with_read_barrier
@@ -2736,10 +2735,9 @@
// does not support the HIntermediateAddress instruction.
DCHECK(!((type == DataType::Type::kReference) &&
instruction->GetArray()->IsIntermediateAddress() &&
- gUseReadBarrier &&
- !kUseBakerReadBarrier));
+ codegen_->EmitNonBakerReadBarrier()));
- if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
+ if (type == DataType::Type::kReference && codegen_->EmitBakerReadBarrier()) {
// Object ArrayGet with Baker's read barrier case.
// Note that a potential implicit null check is handled in the
// CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
@@ -4066,8 +4064,8 @@
}
// Temp is used for read barrier.
-static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
- if (gUseReadBarrier &&
+static size_t NumberOfInstanceOfTemps(bool emit_read_barrier, TypeCheckKind type_check_kind) {
+ if (emit_read_barrier &&
(kUseBakerReadBarrier ||
type_check_kind == TypeCheckKind::kAbstractClassCheck ||
type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -4080,11 +4078,11 @@
// Interface case has 3 temps, one for holding the number of interfaces, one for the current
// interface pointer, one for loading the current interface.
// The other checks have one temp for loading the object's class.
-static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+static size_t NumberOfCheckCastTemps(bool emit_read_barrier, TypeCheckKind type_check_kind) {
if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
return 3;
}
- return 1 + NumberOfInstanceOfTemps(type_check_kind);
+ return 1 + NumberOfInstanceOfTemps(emit_read_barrier, type_check_kind);
}
void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -4096,7 +4094,7 @@
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck: {
- bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ bool needs_read_barrier = codegen_->InstanceOfNeedsReadBarrier(instruction);
call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
@@ -4127,7 +4125,8 @@
// Note that TypeCheckSlowPathARM64 uses this register too.
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
// Add temps if necessary for read barriers.
- locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
+ locations->AddRegisterTemps(
+ NumberOfInstanceOfTemps(codegen_->EmitReadBarrier(), type_check_kind));
}
void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -4140,7 +4139,7 @@
: InputRegisterAt(instruction, 1);
Location out_loc = locations->Out();
Register out = OutputRegister(instruction);
- const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+ const size_t num_temps = NumberOfInstanceOfTemps(codegen_->EmitReadBarrier(), type_check_kind);
DCHECK_LE(num_temps, 1u);
Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -4160,7 +4159,7 @@
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -4178,7 +4177,7 @@
case TypeCheckKind::kAbstractClassCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -4209,7 +4208,7 @@
case TypeCheckKind::kClassHierarchyCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -4241,7 +4240,7 @@
case TypeCheckKind::kArrayObjectCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -4358,7 +4357,7 @@
void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
+ LocationSummary::CallKind call_kind = codegen_->GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -4369,7 +4368,7 @@
} else {
locations->SetInAt(1, Location::RequiresRegister());
}
- locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
+ locations->AddRegisterTemps(NumberOfCheckCastTemps(codegen_->EmitReadBarrier(), type_check_kind));
}
void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
@@ -4380,7 +4379,7 @@
Register cls = (type_check_kind == TypeCheckKind::kBitstringCheck)
? Register()
: InputRegisterAt(instruction, 1);
- const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+ const size_t num_temps = NumberOfCheckCastTemps(codegen_->EmitReadBarrier(), type_check_kind);
DCHECK_GE(num_temps, 1u);
DCHECK_LE(num_temps, 3u);
Location temp_loc = locations->GetTemp(0);
@@ -4396,7 +4395,7 @@
const uint32_t object_array_data_offset =
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
+ bool is_type_check_slow_path_fatal = codegen_->IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeARM64* type_check_slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARM64(
instruction, is_type_check_slow_path_fatal);
@@ -5445,7 +5444,7 @@
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage();
+ const bool requires_read_barrier = !cls->IsInBootImage() && codegen_->EmitReadBarrier();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -5461,11 +5460,11 @@
if (load_kind == HLoadClass::LoadKind::kBssEntry ||
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage) {
- if (!gUseReadBarrier || kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ } else {
// Rely on the type resolution or initialization and marking to save everything we need.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
- } else {
- // For non-Baker read barrier we have a temp-clobbering call.
}
}
}
@@ -5487,7 +5486,7 @@
Register out = OutputRegister(cls);
const ReadBarrierOption read_barrier_option =
- cls->IsInBootImage() ? kWithoutReadBarrier : GetCompilerReadBarrierOption();
+ cls->IsInBootImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -5648,7 +5647,7 @@
}
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
+ LocationSummary::CallKind call_kind = codegen_->GetLoadStringCallKind(load);
LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) {
InvokeRuntimeCallingConvention calling_convention;
@@ -5656,11 +5655,11 @@
} else {
locations->SetOut(Location::RequiresRegister());
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
- if (!gUseReadBarrier || kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ } else {
// Rely on the pResolveString and marking to save everything we need.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
- } else {
- // For non-Baker read barrier we have a temp-clobbering call.
}
}
}
@@ -5710,7 +5709,7 @@
temp,
/* offset placeholder */ 0u,
ldr_label,
- GetCompilerReadBarrierOption());
+ codegen_->GetCompilerReadBarrierOption());
SlowPathCodeARM64* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load);
codegen_->AddSlowPath(slow_path);
@@ -5734,7 +5733,7 @@
out.X(),
/* offset= */ 0,
/* fixup_label= */ nullptr,
- GetCompilerReadBarrierOption());
+ codegen_->GetCompilerReadBarrierOption());
return;
}
default:
@@ -6600,7 +6599,7 @@
DataType::Type type = DataType::Type::kReference;
Register out_reg = RegisterFrom(out, type);
if (read_barrier_option == kWithReadBarrier) {
- CHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
@@ -6641,7 +6640,7 @@
Register out_reg = RegisterFrom(out, type);
Register obj_reg = RegisterFrom(obj, type);
if (read_barrier_option == kWithReadBarrier) {
- CHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -6676,7 +6675,7 @@
DCHECK(fixup_label == nullptr || offset == 0u);
Register root_reg = RegisterFrom(root, DataType::Type::kReference);
if (read_barrier_option == kWithReadBarrier) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used.
@@ -6742,8 +6741,7 @@
void CodeGeneratorARM64::GenerateIntrinsicCasMoveWithBakerReadBarrier(
vixl::aarch64::Register marked_old_value,
vixl::aarch64::Register old_value) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
// Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR.
uint32_t custom_data = EncodeBakerReadBarrierGcRootData(marked_old_value.GetCode());
@@ -6764,8 +6762,7 @@
const vixl::aarch64::MemOperand& src,
bool needs_null_check,
bool use_load_acquire) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
// Marking Register) to decide whether we need to enter the slow
@@ -6860,8 +6857,7 @@
uint32_t data_offset,
Location index,
bool needs_null_check) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
@@ -6938,7 +6934,7 @@
void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
// The following condition is a compile-time one, so it does not have a run-time cost.
- if (kIsDebugBuild && gUseReadBarrier && kUseBakerReadBarrier) {
+ if (kIsDebugBuild && EmitBakerReadBarrier()) {
// The following condition is a run-time one; it is executed after the
// previous compile-time test, to avoid penalizing non-debug builds.
if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) {
@@ -6967,7 +6963,7 @@
Location obj,
uint32_t offset,
Location index) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
// Insert a slow path based read barrier *after* the reference load.
//
@@ -6992,7 +6988,7 @@
Location obj,
uint32_t offset,
Location index) {
- if (gUseReadBarrier) {
+ if (EmitReadBarrier()) {
// Baker's read barriers shall be handled by the fast path
// (CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier).
DCHECK(!kUseBakerReadBarrier);
@@ -7007,7 +7003,7 @@
void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instruction,
Location out,
Location root) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
// Insert a slow path based read barrier *after* the GC root load.
//
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 76ed335..f7fa54b 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -745,7 +745,6 @@
obj_(obj),
offset_(offset),
index_(index) {
- DCHECK(gUseReadBarrier);
// If `obj` is equal to `out` or `ref`, it means the initial object
// has been overwritten by (or after) the heap object reference load
// to be instrumented, e.g.:
@@ -760,6 +759,7 @@
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
vixl32::Register reg_out = RegisterFrom(out_);
@@ -923,10 +923,10 @@
public:
ReadBarrierForRootSlowPathARMVIXL(HInstruction* instruction, Location out, Location root)
: SlowPathCodeARMVIXL(instruction), out_(out), root_(root) {
- DCHECK(gUseReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
LocationSummary* locations = instruction_->GetLocations();
vixl32::Register reg_out = RegisterFrom(out_);
DCHECK(locations->CanCall());
@@ -6114,7 +6114,7 @@
instruction->IsPredicatedInstanceFieldGet());
bool object_field_get_with_read_barrier =
- gUseReadBarrier && (field_info.GetFieldType() == DataType::Type::kReference);
+ (field_info.GetFieldType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
@@ -6284,7 +6284,7 @@
case DataType::Type::kReference: {
// /* HeapReference<Object> */ out = *(base + offset)
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
Location maybe_temp = (locations->GetTempCount() != 0) ? locations->GetTemp(0) : Location();
// Note that a potential implicit null check is handled in this
// CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier call.
@@ -6594,7 +6594,7 @@
void LocationsBuilderARMVIXL::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
- gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ (instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_array_get_with_read_barrier
@@ -6742,14 +6742,14 @@
// The read barrier instrumentation of object ArrayGet
// instructions does not support the HIntermediateAddress
// instruction.
- DCHECK(!(has_intermediate_address && gUseReadBarrier));
+ DCHECK(!(has_intermediate_address && codegen_->EmitReadBarrier()));
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ out =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier call.
DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
@@ -7673,7 +7673,7 @@
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage();
+ const bool requires_read_barrier = !cls->IsInBootImage() && codegen_->EmitReadBarrier();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -7689,11 +7689,11 @@
if (load_kind == HLoadClass::LoadKind::kBssEntry ||
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage) {
- if (!gUseReadBarrier || kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ } else {
// Rely on the type resolution or initialization and marking to save everything we need.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
- } else {
- // For non-Baker read barrier we have a temp-clobbering call.
}
}
}
@@ -7716,7 +7716,7 @@
vixl32::Register out = OutputRegister(cls);
const ReadBarrierOption read_barrier_option =
- cls->IsInBootImage() ? kWithoutReadBarrier : GetCompilerReadBarrierOption();
+ cls->IsInBootImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -7923,7 +7923,7 @@
}
void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
+ LocationSummary::CallKind call_kind = codegen_->GetLoadStringCallKind(load);
LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
@@ -7931,11 +7931,11 @@
} else {
locations->SetOut(Location::RequiresRegister());
if (load_kind == HLoadString::LoadKind::kBssEntry) {
- if (!gUseReadBarrier || kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ } else {
// Rely on the pResolveString and marking to save everything we need, including temps.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
- } else {
- // For non-Baker read barrier we have a temp-clobbering call.
}
}
}
@@ -7970,7 +7970,7 @@
codegen_->EmitMovwMovtPlaceholder(labels, out);
// All aligned loads are implicitly atomic consume operations on ARM.
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /*offset=*/0, GetCompilerReadBarrierOption());
+ load, out_loc, out, /*offset=*/0, codegen_->GetCompilerReadBarrierOption());
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
@@ -7991,7 +7991,7 @@
load->GetString()));
// /* GcRoot<mirror::String> */ out = *out
codegen_->GenerateGcRootFieldLoad(
- load, out_loc, out, /*offset=*/0, GetCompilerReadBarrierOption());
+ load, out_loc, out, /*offset=*/0, codegen_->GetCompilerReadBarrierOption());
return;
}
default:
@@ -8046,8 +8046,8 @@
}
// Temp is used for read barrier.
-static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
- if (gUseReadBarrier &&
+static size_t NumberOfInstanceOfTemps(bool emit_read_barrier, TypeCheckKind type_check_kind) {
+ if (emit_read_barrier &&
(kUseBakerReadBarrier ||
type_check_kind == TypeCheckKind::kAbstractClassCheck ||
type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -8060,11 +8060,11 @@
// Interface case has 3 temps, one for holding the number of interfaces, one for the current
// interface pointer, one for loading the current interface.
// The other checks have one temp for loading the object's class.
-static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+static size_t NumberOfCheckCastTemps(bool emit_read_barrier, TypeCheckKind type_check_kind) {
if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
return 3;
}
- return 1 + NumberOfInstanceOfTemps(type_check_kind);
+ return 1 + NumberOfInstanceOfTemps(emit_read_barrier, type_check_kind);
}
void LocationsBuilderARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
@@ -8076,7 +8076,7 @@
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck: {
- bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ bool needs_read_barrier = codegen_->InstanceOfNeedsReadBarrier(instruction);
call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
@@ -8106,7 +8106,8 @@
// The "out" register is used as a temporary, so it overlaps with the inputs.
// Note that TypeCheckSlowPathARM uses this register too.
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
- locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
+ locations->AddRegisterTemps(
+ NumberOfInstanceOfTemps(codegen_->EmitReadBarrier(), type_check_kind));
}
void InstructionCodeGeneratorARMVIXL::VisitInstanceOf(HInstanceOf* instruction) {
@@ -8119,7 +8120,7 @@
: InputRegisterAt(instruction, 1);
Location out_loc = locations->Out();
vixl32::Register out = OutputRegister(instruction);
- const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+ const size_t num_temps = NumberOfInstanceOfTemps(codegen_->EmitReadBarrier(), type_check_kind);
DCHECK_LE(num_temps, 1u);
Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -8141,7 +8142,7 @@
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -8176,7 +8177,7 @@
case TypeCheckKind::kAbstractClassCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -8204,7 +8205,7 @@
case TypeCheckKind::kClassHierarchyCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -8260,7 +8261,7 @@
case TypeCheckKind::kArrayObjectCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -8389,7 +8390,7 @@
void LocationsBuilderARMVIXL::VisitCheckCast(HCheckCast* instruction) {
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
+ LocationSummary::CallKind call_kind = codegen_->GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -8400,7 +8401,8 @@
} else {
locations->SetInAt(1, Location::RequiresRegister());
}
- locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
+ locations->AddRegisterTemps(
+ NumberOfCheckCastTemps(codegen_->EmitReadBarrier(), type_check_kind));
}
void InstructionCodeGeneratorARMVIXL::VisitCheckCast(HCheckCast* instruction) {
@@ -8413,7 +8415,7 @@
: InputRegisterAt(instruction, 1);
Location temp_loc = locations->GetTemp(0);
vixl32::Register temp = RegisterFrom(temp_loc);
- const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+ const size_t num_temps = NumberOfCheckCastTemps(codegen_->EmitReadBarrier(), type_check_kind);
DCHECK_LE(num_temps, 3u);
Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
Location maybe_temp3_loc = (num_temps >= 3) ? locations->GetTemp(2) : Location::NoLocation();
@@ -8426,7 +8428,7 @@
const uint32_t object_array_data_offset =
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
+ bool is_type_check_slow_path_fatal = codegen_->IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeARMVIXL* type_check_slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathARMVIXL(
instruction, is_type_check_slow_path_fatal);
@@ -8981,7 +8983,7 @@
ReadBarrierOption read_barrier_option) {
vixl32::Register out_reg = RegisterFrom(out);
if (read_barrier_option == kWithReadBarrier) {
- CHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
DCHECK(maybe_temp.IsRegister()) << maybe_temp;
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
@@ -9016,7 +9018,7 @@
vixl32::Register out_reg = RegisterFrom(out);
vixl32::Register obj_reg = RegisterFrom(obj);
if (read_barrier_option == kWithReadBarrier) {
- CHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
DCHECK(maybe_temp.IsRegister()) << maybe_temp;
// Load with fast path based Baker's read barrier.
@@ -9045,7 +9047,7 @@
ReadBarrierOption read_barrier_option) {
vixl32::Register root_reg = RegisterFrom(root);
if (read_barrier_option == kWithReadBarrier) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used.
@@ -9109,8 +9111,7 @@
void CodeGeneratorARMVIXL::GenerateIntrinsicCasMoveWithBakerReadBarrier(
vixl::aarch32::Register marked_old_value,
vixl::aarch32::Register old_value) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
// Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR.
// For low registers, we can reuse the GC root narrow entrypoint, for high registers
@@ -9143,8 +9144,7 @@
vixl32::Register obj,
const vixl32::MemOperand& src,
bool needs_null_check) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
// Marking Register) to decide whether we need to enter the slow
@@ -9236,8 +9236,7 @@
Location index,
Location temp,
bool needs_null_check) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
@@ -9302,7 +9301,7 @@
void CodeGeneratorARMVIXL::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
// The following condition is a compile-time one, so it does not have a run-time cost.
- if (kIsDebugBuild && gUseReadBarrier && kUseBakerReadBarrier) {
+ if (kIsDebugBuild && EmitBakerReadBarrier()) {
// The following condition is a run-time one; it is executed after the
// previous compile-time test, to avoid penalizing non-debug builds.
if (GetCompilerOptions().EmitRunTimeChecksInDebugMode()) {
@@ -9332,7 +9331,7 @@
Location obj,
uint32_t offset,
Location index) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
// Insert a slow path based read barrier *after* the reference load.
//
@@ -9358,7 +9357,7 @@
Location obj,
uint32_t offset,
Location index) {
- if (gUseReadBarrier) {
+ if (EmitReadBarrier()) {
// Baker's read barriers shall be handled by the fast path
// (CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier).
DCHECK(!kUseBakerReadBarrier);
@@ -9373,7 +9372,7 @@
void CodeGeneratorARMVIXL::GenerateReadBarrierForRootSlow(HInstruction* instruction,
Location out,
Location root) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
// Insert a slow path based read barrier *after* the GC root load.
//
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index 14e284b..370d048 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -487,10 +487,10 @@
public:
ReadBarrierForRootSlowPathRISCV64(HInstruction* instruction, Location out, Location root)
: SlowPathCodeRISCV64(instruction), out_(out), root_(root) {
- DCHECK(gUseReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
LocationSummary* locations = instruction_->GetLocations();
DataType::Type type = DataType::Type::kReference;
XRegister reg_out = out_.AsRegister<XRegister>();
@@ -679,13 +679,13 @@
public:
ReadBarrierMarkSlowPathRISCV64(HInstruction* instruction, Location ref, Location entrypoint)
: SlowPathCodeRISCV64(instruction), ref_(ref), entrypoint_(entrypoint) {
- DCHECK(gUseReadBarrier);
DCHECK(entrypoint.IsRegister());
}
const char* GetDescription() const override { return "ReadBarrierMarkSlowPathRISCV64"; }
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
LocationSummary* locations = instruction_->GetLocations();
XRegister ref_reg = ref_.AsRegister<XRegister>();
DCHECK(locations->CanCall());
@@ -1190,7 +1190,7 @@
ReadBarrierOption read_barrier_option) {
XRegister out_reg = out.AsRegister<XRegister>();
if (read_barrier_option == kWithReadBarrier) {
- CHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
@@ -1228,7 +1228,7 @@
XRegister out_reg = out.AsRegister<XRegister>();
XRegister obj_reg = obj.AsRegister<XRegister>();
if (read_barrier_option == kWithReadBarrier) {
- CHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -1261,7 +1261,7 @@
DCHECK_IMPLIES(label_low != nullptr, offset == kLinkTimeOffsetPlaceholderLow) << offset;
XRegister root_reg = root.AsRegister<XRegister>();
if (read_barrier_option == kWithReadBarrier) {
- DCHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Note that we do not actually check the value of `GetIsGcMarking()`
// to decide whether to mark the loaded GC root or not. Instead, we
@@ -1877,6 +1877,7 @@
// TODO(riscv64): Implement checking if the holder is black.
UNUSED(temp);
+ DCHECK(EmitBakerReadBarrier());
XRegister reg = ref.AsRegister<XRegister>();
if (index.IsValid()) {
DCHECK(!needs_null_check);
@@ -1937,7 +1938,7 @@
Location obj,
uint32_t offset,
Location index) {
- if (gUseReadBarrier) {
+ if (EmitReadBarrier()) {
// Baker's read barriers shall be handled by the fast path
// (CodeGeneratorRISCV64::GenerateReferenceLoadWithBakerReadBarrier).
DCHECK(!kUseBakerReadBarrier);
@@ -1952,7 +1953,7 @@
void CodeGeneratorRISCV64::GenerateReadBarrierForRootSlow(HInstruction* instruction,
Location out,
Location root) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
// Insert a slow path based read barrier *after* the GC root load.
//
@@ -2480,7 +2481,7 @@
bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
bool object_field_get_with_read_barrier =
- gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ (instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
instruction,
object_field_get_with_read_barrier
@@ -2537,7 +2538,7 @@
codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
}
- if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
+ if (type == DataType::Type::kReference && codegen_->EmitBakerReadBarrier()) {
// /* HeapReference<Object> */ dst = *(obj + offset)
Location temp_loc = locations->GetTemp(0);
// Note that a potential implicit null check is handled in this
@@ -2557,7 +2558,7 @@
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
}
- if (type == DataType::Type::kReference && !(gUseReadBarrier && kUseBakerReadBarrier)) {
+ if (type == DataType::Type::kReference && !codegen_->EmitBakerReadBarrier()) {
// If read barriers are enabled, emit read barriers other than
// Baker's using a slow path (and also unpoison the loaded
// reference, if heap poisoning is enabled).
@@ -2724,7 +2725,8 @@
void LocationsBuilderRISCV64::VisitArrayGet(HArrayGet* instruction) {
DataType::Type type = instruction->GetType();
- bool object_array_get_with_read_barrier = gUseReadBarrier && (type == DataType::Type::kReference);
+ bool object_array_get_with_read_barrier =
+ (type == DataType::Type::kReference) && codegen_->EmitReadBarrier();
LocationSummary* locations = new (GetGraph()->GetAllocator())
LocationSummary(instruction,
object_array_get_with_read_barrier ? LocationSummary::kCallOnSlowPath :
@@ -2787,7 +2789,7 @@
__ Bind(&uncompressed_load);
}
- if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
+ if (type == DataType::Type::kReference && codegen_->EmitBakerReadBarrier()) {
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
@@ -2825,7 +2827,7 @@
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
if (type == DataType::Type::kReference) {
- DCHECK(!(gUseReadBarrier && kUseBakerReadBarrier));
+ DCHECK(!codegen_->EmitBakerReadBarrier());
// If read barriers are enabled, emit read barriers other than Baker's using
// a slow path (and also unpoison the loaded reference, if heap poisoning is enabled).
codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
@@ -2839,7 +2841,7 @@
codegen_->MaybeRecordImplicitNullCheck(instruction);
}
if (type == DataType::Type::kReference) {
- DCHECK(!(gUseReadBarrier && kUseBakerReadBarrier));
+ DCHECK(!codegen_->EmitBakerReadBarrier());
// If read barriers are enabled, emit read barriers other than Baker's using
// a slow path (and also unpoison the loaded reference, if heap poisoning is enabled).
codegen_->MaybeGenerateReadBarrierSlow(
@@ -3109,8 +3111,8 @@
}
// Temp is used for read barrier.
-static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
- if (gUseReadBarrier &&
+static size_t NumberOfInstanceOfTemps(bool emit_read_barrier, TypeCheckKind type_check_kind) {
+ if (emit_read_barrier &&
(kUseBakerReadBarrier ||
type_check_kind == TypeCheckKind::kAbstractClassCheck ||
type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -3123,16 +3125,16 @@
// Interface case has 3 temps, one for holding the number of interfaces, one for the current
// interface pointer, one for loading the current interface.
// The other checks have one temp for loading the object's class and maybe a temp for read barrier.
-static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
- if (type_check_kind == TypeCheckKind::kInterfaceCheck)
+static size_t NumberOfCheckCastTemps(bool emit_read_barrier, TypeCheckKind type_check_kind) {
+ if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
return 3;
-
- return 1 + NumberOfInstanceOfTemps(type_check_kind);
+ }
+ return 1 + NumberOfInstanceOfTemps(emit_read_barrier, type_check_kind);
}
void LocationsBuilderRISCV64::VisitCheckCast(HCheckCast* instruction) {
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
+ LocationSummary::CallKind call_kind = codegen_->GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -3143,7 +3145,7 @@
} else {
locations->SetInAt(1, Location::RequiresRegister());
}
- locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
+ locations->AddRegisterTemps(NumberOfCheckCastTemps(codegen_->EmitReadBarrier(), type_check_kind));
}
void InstructionCodeGeneratorRISCV64::VisitCheckCast(HCheckCast* instruction) {
@@ -3156,7 +3158,7 @@
: locations->InAt(1);
Location temp_loc = locations->GetTemp(0);
XRegister temp = temp_loc.AsRegister<XRegister>();
- const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+ const size_t num_temps = NumberOfCheckCastTemps(codegen_->EmitReadBarrier(), type_check_kind);
DCHECK_GE(num_temps, 1u);
DCHECK_LE(num_temps, 3u);
Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
@@ -3171,7 +3173,7 @@
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
Riscv64Label done;
- bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
+ bool is_type_check_slow_path_fatal = codegen_->IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeRISCV64* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathRISCV64(
instruction, is_type_check_slow_path_fatal);
@@ -3735,7 +3737,7 @@
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck: {
- bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ bool needs_read_barrier = codegen_->InstanceOfNeedsReadBarrier(instruction);
call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
@@ -3765,7 +3767,8 @@
// The output does overlap inputs.
// Note that TypeCheckSlowPathRISCV64 uses this register too.
locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
- locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
+ locations->AddRegisterTemps(
+ NumberOfInstanceOfTemps(codegen_->EmitReadBarrier(), type_check_kind));
}
void InstructionCodeGeneratorRISCV64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -3778,7 +3781,7 @@
: locations->InAt(1);
Location out_loc = locations->Out();
XRegister out = out_loc.AsRegister<XRegister>();
- const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+ const size_t num_temps = NumberOfInstanceOfTemps(codegen_->EmitReadBarrier(), type_check_kind);
DCHECK_LE(num_temps, 1u);
Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -3798,7 +3801,7 @@
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(
instruction, out_loc, obj_loc, class_offset, maybe_temp_loc, read_barrier_option);
@@ -3810,7 +3813,7 @@
case TypeCheckKind::kAbstractClassCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(
instruction, out_loc, obj_loc, class_offset, maybe_temp_loc, read_barrier_option);
@@ -3830,7 +3833,7 @@
case TypeCheckKind::kClassHierarchyCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(
instruction, out_loc, obj_loc, class_offset, maybe_temp_loc, read_barrier_option);
@@ -3851,7 +3854,7 @@
case TypeCheckKind::kArrayObjectCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// FIXME(riscv64): We currently have marking entrypoints for 29 registers.
// We need to either store entrypoint for register `N` in entry `N-A` where
// `A` can be up to 5 (Zero, RA, SP, GP, TP are not valid registers for
@@ -4144,7 +4147,7 @@
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = gUseReadBarrier && !instruction->IsInBootImage();
+ const bool requires_read_barrier = !instruction->IsInBootImage() && codegen_->EmitReadBarrier();
LocationSummary::CallKind call_kind = (instruction->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -4160,11 +4163,11 @@
if (load_kind == HLoadClass::LoadKind::kBssEntry ||
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage) {
- if (!gUseReadBarrier || kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
+ // For non-Baker read barriers we have a temp-clobbering call.
+ } else {
// Rely on the type resolution or initialization and marking to save everything we need.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
- } else {
- // For non-Baker read barriers we have a temp-clobbering call.
}
}
}
@@ -4186,7 +4189,7 @@
Location out_loc = locations->Out();
XRegister out = out_loc.AsRegister<XRegister>();
const ReadBarrierOption read_barrier_option =
- instruction->IsInBootImage() ? kWithoutReadBarrier : GetCompilerReadBarrierOption();
+ instruction->IsInBootImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -4305,7 +4308,7 @@
void LocationsBuilderRISCV64::VisitLoadString(HLoadString* instruction) {
HLoadString::LoadKind load_kind = instruction->GetLoadKind();
- LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(instruction);
+ LocationSummary::CallKind call_kind = codegen_->GetLoadStringCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
@@ -4315,11 +4318,11 @@
} else {
locations->SetOut(Location::RequiresRegister());
if (load_kind == HLoadString::LoadKind::kBssEntry) {
- if (!gUseReadBarrier || kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
+ // For non-Baker read barriers we have a temp-clobbering call.
+ } else {
// Rely on the pResolveString and marking to save everything we need.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
- } else {
- // For non-Baker read barriers we have a temp-clobbering call.
}
}
}
@@ -4362,7 +4365,7 @@
out_loc,
out,
/* offset= */ kLinkTimeOffsetPlaceholderLow,
- GetCompilerReadBarrierOption(),
+ codegen_->GetCompilerReadBarrierOption(),
&info_low->label);
SlowPathCodeRISCV64* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathRISCV64(instruction);
@@ -4382,7 +4385,8 @@
out,
codegen_->DeduplicateJitStringLiteral(
instruction->GetDexFile(), instruction->GetStringIndex(), instruction->GetString()));
- GenerateGcRootFieldLoad(instruction, out_loc, out, 0, GetCompilerReadBarrierOption());
+ GenerateGcRootFieldLoad(
+ instruction, out_loc, out, 0, codegen_->GetCompilerReadBarrierOption());
return;
default:
break;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a727e6d..041336a 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -505,12 +505,12 @@
: SlowPathCode(instruction),
ref_(ref),
unpoison_ref_before_marking_(unpoison_ref_before_marking) {
- DCHECK(gUseReadBarrier);
}
const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86"; }
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
LocationSummary* locations = instruction_->GetLocations();
Register ref_reg = ref_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -592,12 +592,12 @@
field_addr_(field_addr),
unpoison_ref_before_marking_(unpoison_ref_before_marking),
temp_(temp) {
- DCHECK(gUseReadBarrier);
}
const char* GetDescription() const override { return "ReadBarrierMarkAndUpdateFieldSlowPathX86"; }
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
LocationSummary* locations = instruction_->GetLocations();
Register ref_reg = ref_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -746,7 +746,6 @@
obj_(obj),
offset_(offset),
index_(index) {
- DCHECK(gUseReadBarrier);
// If `obj` is equal to `out` or `ref`, it means the initial object
// has been overwritten by (or after) the heap object reference load
// to be instrumented, e.g.:
@@ -761,6 +760,7 @@
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
@@ -921,10 +921,10 @@
public:
ReadBarrierForRootSlowPathX86(HInstruction* instruction, Location out, Location root)
: SlowPathCode(instruction), out_(out), root_(root) {
- DCHECK(gUseReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
LocationSummary* locations = instruction_->GetLocations();
Register reg_out = out_.AsRegister<Register>();
DCHECK(locations->CanCall());
@@ -1756,7 +1756,7 @@
__ movsd(dst.AsFpuRegister<XmmRegister>(), src);
break;
case DataType::Type::kReference:
- DCHECK(!gUseReadBarrier);
+ DCHECK(!EmitReadBarrier());
__ movl(dst.AsRegister<Register>(), src);
__ MaybeUnpoisonHeapReference(dst.AsRegister<Register>());
break;
@@ -5898,11 +5898,11 @@
instruction->IsPredicatedInstanceFieldGet());
bool object_field_get_with_read_barrier =
- gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ (instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
- gUseReadBarrier
+ codegen_->EmitReadBarrier()
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall);
if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
@@ -5960,7 +5960,7 @@
if (load_type == DataType::Type::kReference) {
// /* HeapReference<Object> */ out = *(base + offset)
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
@@ -6388,7 +6388,7 @@
void LocationsBuilderX86::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
- gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ (instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_array_get_with_read_barrier
@@ -6430,7 +6430,7 @@
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ out =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
@@ -7252,7 +7252,7 @@
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage();
+ const bool requires_read_barrier = !cls->IsInBootImage() && codegen_->EmitReadBarrier();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -7266,11 +7266,11 @@
}
locations->SetOut(Location::RequiresRegister());
if (call_kind == LocationSummary::kCallOnSlowPath && cls->HasPcRelativeLoadKind()) {
- if (!gUseReadBarrier || kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ } else {
// Rely on the type resolution and/or initialization to save everything.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
- } else {
- // For non-Baker read barrier we have a temp-clobbering call.
}
}
}
@@ -7303,7 +7303,7 @@
bool generate_null_check = false;
const ReadBarrierOption read_barrier_option =
- cls->IsInBootImage() ? kWithoutReadBarrier : GetCompilerReadBarrierOption();
+ cls->IsInBootImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
@@ -7471,7 +7471,7 @@
}
void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
+ LocationSummary::CallKind call_kind = codegen_->GetLoadStringCallKind(load);
LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
HLoadString::LoadKind load_kind = load->GetLoadKind();
if (load_kind == HLoadString::LoadKind::kBootImageLinkTimePcRelative ||
@@ -7484,11 +7484,11 @@
} else {
locations->SetOut(Location::RequiresRegister());
if (load_kind == HLoadString::LoadKind::kBssEntry) {
- if (!gUseReadBarrier || kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ } else {
// Rely on the pResolveString to save everything.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
- } else {
- // For non-Baker read barrier we have a temp-clobbering call.
}
}
}
@@ -7533,7 +7533,8 @@
Address address = Address(method_address, CodeGeneratorX86::kPlaceholder32BitOffset);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::String> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, GetCompilerReadBarrierOption());
+ GenerateGcRootFieldLoad(
+ load, out_loc, address, fixup_label, codegen_->GetCompilerReadBarrierOption());
// No need for memory fence, thanks to the x86 memory model.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
@@ -7553,7 +7554,8 @@
Label* fixup_label = codegen_->NewJitRootStringPatch(
load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, GetCompilerReadBarrierOption());
+ GenerateGcRootFieldLoad(
+ load, out_loc, address, fixup_label, codegen_->GetCompilerReadBarrierOption());
return;
}
default:
@@ -7602,8 +7604,8 @@
}
// Temp is used for read barrier.
-static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
- if (gUseReadBarrier &&
+static size_t NumberOfInstanceOfTemps(bool emit_read_barrier, TypeCheckKind type_check_kind) {
+ if (emit_read_barrier &&
!kUseBakerReadBarrier &&
(type_check_kind == TypeCheckKind::kAbstractClassCheck ||
type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -7616,11 +7618,11 @@
// Interface case has 2 temps, one for holding the number of interfaces, one for the current
// interface pointer, the current interface is compared in memory.
// The other checks have one temp for loading the object's class.
-static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+static size_t NumberOfCheckCastTemps(bool emit_read_barrier, TypeCheckKind type_check_kind) {
if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
return 2;
}
- return 1 + NumberOfInstanceOfTemps(type_check_kind);
+ return 1 + NumberOfInstanceOfTemps(emit_read_barrier, type_check_kind);
}
void LocationsBuilderX86::VisitInstanceOf(HInstanceOf* instruction) {
@@ -7632,7 +7634,7 @@
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck: {
- bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ bool needs_read_barrier = codegen_->InstanceOfNeedsReadBarrier(instruction);
call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
@@ -7662,7 +7664,8 @@
// Note that TypeCheckSlowPathX86 uses this "out" register too.
locations->SetOut(Location::RequiresRegister());
// When read barriers are enabled, we need a temporary register for some cases.
- locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
+ locations->AddRegisterTemps(
+ NumberOfInstanceOfTemps(codegen_->EmitReadBarrier(), type_check_kind));
}
void InstructionCodeGeneratorX86::VisitInstanceOf(HInstanceOf* instruction) {
@@ -7673,7 +7676,7 @@
Location cls = locations->InAt(1);
Location out_loc = locations->Out();
Register out = out_loc.AsRegister<Register>();
- const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+ const size_t num_temps = NumberOfInstanceOfTemps(codegen_->EmitReadBarrier(), type_check_kind);
DCHECK_LE(num_temps, 1u);
Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -7693,7 +7696,7 @@
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -7716,7 +7719,7 @@
case TypeCheckKind::kAbstractClassCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -7752,7 +7755,7 @@
case TypeCheckKind::kClassHierarchyCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -7789,7 +7792,7 @@
case TypeCheckKind::kArrayObjectCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -7912,7 +7915,7 @@
void LocationsBuilderX86::VisitCheckCast(HCheckCast* instruction) {
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
+ LocationSummary::CallKind call_kind = codegen_->GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -7927,7 +7930,7 @@
} else {
locations->SetInAt(1, Location::Any());
}
- locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
+ locations->AddRegisterTemps(NumberOfCheckCastTemps(codegen_->EmitReadBarrier(), type_check_kind));
}
void InstructionCodeGeneratorX86::VisitCheckCast(HCheckCast* instruction) {
@@ -7938,7 +7941,7 @@
Location cls = locations->InAt(1);
Location temp_loc = locations->GetTemp(0);
Register temp = temp_loc.AsRegister<Register>();
- const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+ const size_t num_temps = NumberOfCheckCastTemps(codegen_->EmitReadBarrier(), type_check_kind);
DCHECK_GE(num_temps, 1u);
DCHECK_LE(num_temps, 2u);
Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
@@ -7951,7 +7954,7 @@
const uint32_t object_array_data_offset =
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
+ bool is_type_check_slow_path_fatal = codegen_->IsTypeCheckSlowPathFatal(instruction);
SlowPathCode* type_check_slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86(
instruction, is_type_check_slow_path_fatal);
@@ -8374,7 +8377,7 @@
ReadBarrierOption read_barrier_option) {
Register out_reg = out.AsRegister<Register>();
if (read_barrier_option == kWithReadBarrier) {
- CHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
@@ -8408,7 +8411,7 @@
Register out_reg = out.AsRegister<Register>();
Register obj_reg = obj.AsRegister<Register>();
if (read_barrier_option == kWithReadBarrier) {
- CHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -8436,7 +8439,7 @@
ReadBarrierOption read_barrier_option) {
Register root_reg = root.AsRegister<Register>();
if (read_barrier_option == kWithReadBarrier) {
- DCHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barriers are used:
@@ -8500,8 +8503,7 @@
Register obj,
uint32_t offset,
bool needs_null_check) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
// /* HeapReference<Object> */ ref = *(obj + offset)
Address src(obj, offset);
@@ -8514,8 +8516,7 @@
uint32_t data_offset,
Location index,
bool needs_null_check) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
@@ -8533,8 +8534,7 @@
bool needs_null_check,
bool always_update_field,
Register* temp) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
// In slow path based read barriers, the read barrier call is
// inserted after the original load. However, in fast path based
@@ -8614,7 +8614,7 @@
Location obj,
uint32_t offset,
Location index) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
// Insert a slow path based read barrier *after* the reference load.
//
@@ -8641,7 +8641,7 @@
Location obj,
uint32_t offset,
Location index) {
- if (gUseReadBarrier) {
+ if (EmitReadBarrier()) {
// Baker's read barriers shall be handled by the fast path
// (CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier).
DCHECK(!kUseBakerReadBarrier);
@@ -8656,7 +8656,7 @@
void CodeGeneratorX86::GenerateReadBarrierForRootSlow(HInstruction* instruction,
Location out,
Location root) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
// Insert a slow path based read barrier *after* the GC root load.
//
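
Several hunks in this file (and the RISC-V ones above) invert the old guard: previously the save-everything branch was taken when `!gUseReadBarrier || kUseBakerReadBarrier`; now the code tests EmitNonBakerReadBarrier() first and falls into the save-everything else-branch otherwise. A standalone check, assuming the per-codegen flag mirrors the old global, that both forms select the same branch for all inputs:

// Illustration only -- not part of this change.
// Old guard: the save-everything branch ran when `!use_rb || baker`.
constexpr bool OldSavesEverything(bool use_rb, bool baker) {
  return !use_rb || baker;
}

// New guard: EmitNonBakerReadBarrier() (i.e. `use_rb && !baker`) takes the
// temp-clobbering-call branch; the else-branch saves everything.
constexpr bool NewSavesEverything(bool use_rb, bool baker) {
  return !(use_rb && !baker);
}

static_assert(OldSavesEverything(false, false) == NewSavesEverything(false, false), "");
static_assert(OldSavesEverything(false, true) == NewSavesEverything(false, true), "");
static_assert(OldSavesEverything(true, false) == NewSavesEverything(true, false), "");
static_assert(OldSavesEverything(true, true) == NewSavesEverything(true, true), "");

int main() { return 0; }
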
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 58d7393..a27cc32 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -512,12 +512,12 @@
: SlowPathCode(instruction),
ref_(ref),
unpoison_ref_before_marking_(unpoison_ref_before_marking) {
- DCHECK(gUseReadBarrier);
}
const char* GetDescription() const override { return "ReadBarrierMarkSlowPathX86_64"; }
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
LocationSummary* locations = instruction_->GetLocations();
CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
Register ref_reg = ref_cpu_reg.AsRegister();
@@ -603,7 +603,6 @@
unpoison_ref_before_marking_(unpoison_ref_before_marking),
temp1_(temp1),
temp2_(temp2) {
- DCHECK(gUseReadBarrier);
}
const char* GetDescription() const override {
@@ -611,6 +610,7 @@
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
LocationSummary* locations = instruction_->GetLocations();
CpuRegister ref_cpu_reg = ref_.AsRegister<CpuRegister>();
Register ref_reg = ref_cpu_reg.AsRegister();
@@ -763,7 +763,6 @@
obj_(obj),
offset_(offset),
index_(index) {
- DCHECK(gUseReadBarrier);
// If `obj` is equal to `out` or `ref`, it means the initial
// object has been overwritten by (or after) the heap object
// reference load to be instrumented, e.g.:
@@ -778,6 +777,7 @@
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
CpuRegister reg_out = out_.AsRegister<CpuRegister>();
@@ -940,10 +940,10 @@
public:
ReadBarrierForRootSlowPathX86_64(HInstruction* instruction, Location out, Location root)
: SlowPathCode(instruction), out_(out), root_(root) {
- DCHECK(gUseReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitReadBarrier());
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
@@ -5182,7 +5182,7 @@
instruction->IsPredicatedInstanceFieldGet());
bool object_field_get_with_read_barrier =
- gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ (instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
bool is_predicated = instruction->IsPredicatedInstanceFieldGet();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
@@ -5233,7 +5233,7 @@
if (load_type == DataType::Type::kReference) {
// /* HeapReference<Object> */ out = *(base + offset)
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86_64::GenerateFieldLoadWithBakerReadBarrier call.
codegen_->GenerateFieldLoadWithBakerReadBarrier(
@@ -5701,7 +5701,7 @@
void LocationsBuilderX86_64::VisitArrayGet(HArrayGet* instruction) {
bool object_array_get_with_read_barrier =
- gUseReadBarrier && (instruction->GetType() == DataType::Type::kReference);
+ (instruction->GetType() == DataType::Type::kReference) && codegen_->EmitReadBarrier();
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction,
object_array_get_with_read_barrier
@@ -5739,7 +5739,7 @@
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
// /* HeapReference<Object> */ out =
// *(obj + data_offset + index * sizeof(HeapReference<Object>))
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// Note that a potential implicit null check is handled in this
// CodeGeneratorX86_64::GenerateArrayLoadWithBakerReadBarrier call.
codegen_->GenerateArrayLoadWithBakerReadBarrier(
@@ -6543,7 +6543,7 @@
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage);
- const bool requires_read_barrier = gUseReadBarrier && !cls->IsInBootImage();
+ const bool requires_read_barrier = !cls->IsInBootImage() && codegen_->EmitReadBarrier();
LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
@@ -6559,11 +6559,11 @@
if (load_kind == HLoadClass::LoadKind::kBssEntry ||
load_kind == HLoadClass::LoadKind::kBssEntryPublic ||
load_kind == HLoadClass::LoadKind::kBssEntryPackage) {
- if (!gUseReadBarrier || kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ } else {
// Rely on the type resolution and/or initialization to save everything.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
- } else {
- // For non-Baker read barrier we have a temp-clobbering call.
}
}
}
@@ -6595,7 +6595,7 @@
CpuRegister out = out_loc.AsRegister<CpuRegister>();
const ReadBarrierOption read_barrier_option =
- cls->IsInBootImage() ? kWithoutReadBarrier : GetCompilerReadBarrierOption();
+ cls->IsInBootImage() ? kWithoutReadBarrier : codegen_->GetCompilerReadBarrierOption();
bool generate_null_check = false;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
@@ -6735,18 +6735,18 @@
}
void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
- LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
+ LocationSummary::CallKind call_kind = codegen_->GetLoadStringCallKind(load);
LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
if (load->GetLoadKind() == HLoadString::LoadKind::kRuntimeCall) {
locations->SetOut(Location::RegisterLocation(RAX));
} else {
locations->SetOut(Location::RequiresRegister());
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
- if (!gUseReadBarrier || kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
+ // For non-Baker read barrier we have a temp-clobbering call.
+ } else {
// Rely on the pResolveString to save everything.
locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
- } else {
- // For non-Baker read barrier we have a temp-clobbering call.
}
}
}
@@ -6790,7 +6790,8 @@
/* no_rip= */ false);
Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
// /* GcRoot<mirror::Class> */ out = *address /* PC-relative */
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, GetCompilerReadBarrierOption());
+ GenerateGcRootFieldLoad(
+ load, out_loc, address, fixup_label, codegen_->GetCompilerReadBarrierOption());
// No need for memory fence, thanks to the x86-64 memory model.
SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
@@ -6811,7 +6812,8 @@
Label* fixup_label = codegen_->NewJitRootStringPatch(
load->GetDexFile(), load->GetStringIndex(), load->GetString());
// /* GcRoot<mirror::String> */ out = *address
- GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, GetCompilerReadBarrierOption());
+ GenerateGcRootFieldLoad(
+ load, out_loc, address, fixup_label, codegen_->GetCompilerReadBarrierOption());
return;
}
default:
@@ -6862,8 +6864,8 @@
}
// Temp is used for read barrier.
-static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
- if (gUseReadBarrier &&
+static size_t NumberOfInstanceOfTemps(bool emit_read_barrier, TypeCheckKind type_check_kind) {
+ if (emit_read_barrier &&
!kUseBakerReadBarrier &&
(type_check_kind == TypeCheckKind::kAbstractClassCheck ||
type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
@@ -6876,11 +6878,11 @@
// Interface case has 2 temps, one for holding the number of interfaces, one for the current
// interface pointer, the current interface is compared in memory.
// The other checks have one temp for loading the object's class.
-static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
+static size_t NumberOfCheckCastTemps(bool emit_read_barrier, TypeCheckKind type_check_kind) {
if (type_check_kind == TypeCheckKind::kInterfaceCheck) {
return 2;
}
- return 1 + NumberOfInstanceOfTemps(type_check_kind);
+ return 1 + NumberOfInstanceOfTemps(emit_read_barrier, type_check_kind);
}
void LocationsBuilderX86_64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -6892,7 +6894,7 @@
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
case TypeCheckKind::kArrayObjectCheck: {
- bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ bool needs_read_barrier = codegen_->InstanceOfNeedsReadBarrier(instruction);
call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
@@ -6921,7 +6923,8 @@
}
// Note that TypeCheckSlowPathX86_64 uses this "out" register too.
locations->SetOut(Location::RequiresRegister());
- locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
+ locations->AddRegisterTemps(
+ NumberOfInstanceOfTemps(codegen_->EmitReadBarrier(), type_check_kind));
}
void InstructionCodeGeneratorX86_64::VisitInstanceOf(HInstanceOf* instruction) {
@@ -6932,7 +6935,7 @@
Location cls = locations->InAt(1);
Location out_loc = locations->Out();
CpuRegister out = out_loc.AsRegister<CpuRegister>();
- const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
+ const size_t num_temps = NumberOfInstanceOfTemps(codegen_->EmitReadBarrier(), type_check_kind);
DCHECK_LE(num_temps, 1u);
Location maybe_temp_loc = (num_temps >= 1u) ? locations->GetTemp(0) : Location::NoLocation();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
@@ -6952,7 +6955,7 @@
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -6980,7 +6983,7 @@
case TypeCheckKind::kAbstractClassCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -7016,7 +7019,7 @@
case TypeCheckKind::kClassHierarchyCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -7053,7 +7056,7 @@
case TypeCheckKind::kArrayObjectCheck: {
ReadBarrierOption read_barrier_option =
- CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
+ codegen_->ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
@@ -7182,7 +7185,7 @@
void LocationsBuilderX86_64::VisitCheckCast(HCheckCast* instruction) {
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
+ LocationSummary::CallKind call_kind = codegen_->GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -7197,7 +7200,7 @@
} else {
locations->SetInAt(1, Location::Any());
}
- locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
+ locations->AddRegisterTemps(NumberOfCheckCastTemps(codegen_->EmitReadBarrier(), type_check_kind));
}
void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) {
@@ -7208,7 +7211,7 @@
Location cls = locations->InAt(1);
Location temp_loc = locations->GetTemp(0);
CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
- const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
+ const size_t num_temps = NumberOfCheckCastTemps(codegen_->EmitReadBarrier(), type_check_kind);
DCHECK_GE(num_temps, 1u);
DCHECK_LE(num_temps, 2u);
Location maybe_temp2_loc = (num_temps >= 2u) ? locations->GetTemp(1) : Location::NoLocation();
@@ -7221,7 +7224,7 @@
const uint32_t object_array_data_offset =
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
- bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
+ bool is_type_check_slow_path_fatal = codegen_->IsTypeCheckSlowPathFatal(instruction);
SlowPathCode* type_check_slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathX86_64(
instruction, is_type_check_slow_path_fatal);
@@ -7616,7 +7619,7 @@
ReadBarrierOption read_barrier_option) {
CpuRegister out_reg = out.AsRegister<CpuRegister>();
if (read_barrier_option == kWithReadBarrier) {
- CHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(out + offset)
@@ -7650,7 +7653,7 @@
CpuRegister out_reg = out.AsRegister<CpuRegister>();
CpuRegister obj_reg = obj.AsRegister<CpuRegister>();
if (read_barrier_option == kWithReadBarrier) {
- CHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Load with fast path based Baker's read barrier.
// /* HeapReference<Object> */ out = *(obj + offset)
@@ -7678,7 +7681,7 @@
ReadBarrierOption read_barrier_option) {
CpuRegister root_reg = root.AsRegister<CpuRegister>();
if (read_barrier_option == kWithReadBarrier) {
- DCHECK(gUseReadBarrier);
+ DCHECK(codegen_->EmitReadBarrier());
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barriers are used:
@@ -7742,8 +7745,7 @@
CpuRegister obj,
uint32_t offset,
bool needs_null_check) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
// /* HeapReference<Object> */ ref = *(obj + offset)
Address src(obj, offset);
@@ -7756,8 +7758,7 @@
uint32_t data_offset,
Location index,
bool needs_null_check) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
static_assert(
sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
@@ -7776,8 +7777,7 @@
bool always_update_field,
CpuRegister* temp1,
CpuRegister* temp2) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(EmitBakerReadBarrier());
// In slow path based read barriers, the read barrier call is
// inserted after the original load. However, in fast path based
@@ -7858,7 +7858,7 @@
Location obj,
uint32_t offset,
Location index) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
// Insert a slow path based read barrier *after* the reference load.
//
@@ -7885,7 +7885,7 @@
Location obj,
uint32_t offset,
Location index) {
- if (gUseReadBarrier) {
+ if (EmitReadBarrier()) {
// Baker's read barriers shall be handled by the fast path
// (CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier).
DCHECK(!kUseBakerReadBarrier);
@@ -7900,7 +7900,7 @@
void CodeGeneratorX86_64::GenerateReadBarrierForRootSlow(HInstruction* instruction,
Location out,
Location root) {
- DCHECK(gUseReadBarrier);
+ DCHECK(EmitReadBarrier());
// Insert a slow path based read barrier *after* the GC root load.
//
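
One pattern repeats across both x86 files: the `DCHECK(gUseReadBarrier)` in each read-barrier slow-path constructor moves into EmitNativeCode(). The constructor has no CodeGenerator to query once the global is gone, while EmitNativeCode() receives one as a parameter. A minimal sketch of that shape, with assert standing in for DCHECK; the real slow paths carry registers, locations and labels omitted here.

// Illustration only -- not part of this change.
#include <cassert>

class CodeGenerator {
 public:
  bool EmitReadBarrier() const { return true; }  // Stub for the sketch.
};

class SlowPathCode {
 public:
  virtual ~SlowPathCode() {}
  virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
};

class ReadBarrierMarkSlowPath final : public SlowPathCode {
 public:
  // No read-barrier check here anymore: no CodeGenerator is in scope
  // at construction time.
  ReadBarrierMarkSlowPath() {}

  void EmitNativeCode(CodeGenerator* codegen) override {
    assert(codegen->EmitReadBarrier());  // Stands in for DCHECK(codegen->EmitReadBarrier()).
    // ... emit the marking routine ...
  }
};

int main() {
  ReadBarrierMarkSlowPath slow_path;
  CodeGenerator codegen;
  slow_path.EmitNativeCode(&codegen);
  return 0;
}
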
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index 05a518d..be4371f 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -33,8 +33,9 @@
class InstructionSimplifierArmVisitor final : public HGraphVisitor {
public:
- InstructionSimplifierArmVisitor(HGraph* graph, OptimizingCompilerStats* stats)
- : HGraphVisitor(graph), stats_(stats) {}
+ InstructionSimplifierArmVisitor(
+ HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph), codegen_(codegen), stats_(stats) {}
private:
void RecordSimplification() {
@@ -78,6 +79,7 @@
void VisitTypeConversion(HTypeConversion* instruction) override;
void VisitUShr(HUShr* instruction) override;
+ CodeGenerator* codegen_;
OptimizingCompilerStats* stats_;
};
@@ -217,7 +219,8 @@
return;
}
- if (TryExtractArrayAccessAddress(instruction,
+ if (TryExtractArrayAccessAddress(codegen_,
+ instruction,
instruction->GetArray(),
instruction->GetIndex(),
data_offset)) {
@@ -238,7 +241,8 @@
return;
}
- if (TryExtractArrayAccessAddress(instruction,
+ if (TryExtractArrayAccessAddress(codegen_,
+ instruction,
instruction->GetArray(),
instruction->GetIndex(),
data_offset)) {
@@ -300,7 +304,7 @@
}
bool InstructionSimplifierArm::Run() {
- InstructionSimplifierArmVisitor visitor(graph_, stats_);
+ InstructionSimplifierArmVisitor visitor(graph_, codegen_, stats_);
visitor.VisitReversePostOrder();
return true;
}
diff --git a/compiler/optimizing/instruction_simplifier_arm.h b/compiler/optimizing/instruction_simplifier_arm.h
index 0517e4f..25cea7c 100644
--- a/compiler/optimizing/instruction_simplifier_arm.h
+++ b/compiler/optimizing/instruction_simplifier_arm.h
@@ -22,16 +22,23 @@
#include "optimization.h"
namespace art HIDDEN {
+
+class CodeGenerator;
+
namespace arm {
class InstructionSimplifierArm : public HOptimization {
public:
- InstructionSimplifierArm(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, kInstructionSimplifierArmPassName, stats) {}
+ InstructionSimplifierArm(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kInstructionSimplifierArmPassName, stats),
+ codegen_(codegen) {}
static constexpr const char* kInstructionSimplifierArmPassName = "instruction_simplifier_arm";
bool Run() override;
+
+ private:
+ CodeGenerator* codegen_;
};
} // namespace arm
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 671900b..2c191dc 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -33,8 +33,9 @@
class InstructionSimplifierArm64Visitor final : public HGraphVisitor {
public:
- InstructionSimplifierArm64Visitor(HGraph* graph, OptimizingCompilerStats* stats)
- : HGraphVisitor(graph), stats_(stats) {}
+ InstructionSimplifierArm64Visitor(
+ HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HGraphVisitor(graph), codegen_(codegen), stats_(stats) {}
private:
void RecordSimplification() {
@@ -84,6 +85,7 @@
void VisitVecLoad(HVecLoad* instruction) override;
void VisitVecStore(HVecStore* instruction) override;
+ CodeGenerator* codegen_;
OptimizingCompilerStats* stats_;
};
@@ -198,7 +200,8 @@
void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
size_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
- if (TryExtractArrayAccessAddress(instruction,
+ if (TryExtractArrayAccessAddress(codegen_,
+ instruction,
instruction->GetArray(),
instruction->GetIndex(),
data_offset)) {
@@ -209,7 +212,8 @@
void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
size_t access_size = DataType::Size(instruction->GetComponentType());
size_t data_offset = mirror::Array::DataOffset(access_size).Uint32Value();
- if (TryExtractArrayAccessAddress(instruction,
+ if (TryExtractArrayAccessAddress(codegen_,
+ instruction,
instruction->GetArray(),
instruction->GetIndex(),
data_offset)) {
@@ -284,7 +288,7 @@
size_t size = DataType::Size(instruction->GetPackedType());
size_t offset = mirror::Array::DataOffset(size).Uint32Value();
if (TryExtractArrayAccessAddress(
- instruction, instruction->GetArray(), instruction->GetIndex(), offset)) {
+ codegen_, instruction, instruction->GetArray(), instruction->GetIndex(), offset)) {
RecordSimplification();
}
}
@@ -298,14 +302,14 @@
size_t size = DataType::Size(instruction->GetPackedType());
size_t offset = mirror::Array::DataOffset(size).Uint32Value();
if (TryExtractArrayAccessAddress(
- instruction, instruction->GetArray(), instruction->GetIndex(), offset)) {
+ codegen_, instruction, instruction->GetArray(), instruction->GetIndex(), offset)) {
RecordSimplification();
}
}
}
bool InstructionSimplifierArm64::Run() {
- InstructionSimplifierArm64Visitor visitor(graph_, stats_);
+ InstructionSimplifierArm64Visitor visitor(graph_, codegen_, stats_);
visitor.VisitReversePostOrder();
return true;
}
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 374638a..5c57484 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -22,16 +22,23 @@
#include "optimization.h"
namespace art HIDDEN {
+
+class CodeGenerator;
+
namespace arm64 {
class InstructionSimplifierArm64 : public HOptimization {
public:
- InstructionSimplifierArm64(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, kInstructionSimplifierArm64PassName, stats) {}
+ InstructionSimplifierArm64(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
+ : HOptimization(graph, kInstructionSimplifierArm64PassName, stats),
+ codegen_(codegen) {}
static constexpr const char* kInstructionSimplifierArm64PassName = "instruction_simplifier_arm64";
bool Run() override;
+
+ private:
+ CodeGenerator* codegen_;
};
} // namespace arm64
diff --git a/compiler/optimizing/instruction_simplifier_shared.cc b/compiler/optimizing/instruction_simplifier_shared.cc
index 34daae2..50ea2b9 100644
--- a/compiler/optimizing/instruction_simplifier_shared.cc
+++ b/compiler/optimizing/instruction_simplifier_shared.cc
@@ -16,6 +16,7 @@
#include "instruction_simplifier_shared.h"
+#include "code_generator.h"
#include "mirror/array-inl.h"
namespace art HIDDEN {
@@ -229,7 +230,8 @@
}
-bool TryExtractArrayAccessAddress(HInstruction* access,
+bool TryExtractArrayAccessAddress(CodeGenerator* codegen,
+ HInstruction* access,
HInstruction* array,
HInstruction* index,
size_t data_offset) {
@@ -244,8 +246,7 @@
// The access may require a runtime call or the original array pointer.
return false;
}
- if (gUseReadBarrier &&
- !kUseBakerReadBarrier &&
+ if (codegen->EmitNonBakerReadBarrier() &&
access->IsArrayGet() &&
access->GetType() == DataType::Type::kReference) {
// For object arrays, the non-Baker read barrier instrumentation requires
diff --git a/compiler/optimizing/instruction_simplifier_shared.h b/compiler/optimizing/instruction_simplifier_shared.h
index 01489f8..68148cf 100644
--- a/compiler/optimizing/instruction_simplifier_shared.h
+++ b/compiler/optimizing/instruction_simplifier_shared.h
@@ -22,6 +22,8 @@
namespace art HIDDEN {
+class CodeGenerator;
+
namespace helpers {
inline bool CanFitInShifterOperand(HInstruction* instruction) {
@@ -64,7 +66,8 @@
// a negated bitwise instruction.
bool TryMergeNegatedInput(HBinaryOperation* op);
-bool TryExtractArrayAccessAddress(HInstruction* access,
+bool TryExtractArrayAccessAddress(CodeGenerator* codegen,
+ HInstruction* access,
HInstruction* array,
HInstruction* index,
size_t data_offset);
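
The simplifier hunks above all make one dependency-injection move: the arch-specific passes and the shared helper gain a CodeGenerator* so that TryExtractArrayAccessAddress() can ask EmitNonBakerReadBarrier() instead of reading the global. A compilable mock of the new calling shape follows; the types are stand-ins, and the real helper additionally checks that the access is a reference ArrayGet before bailing out.

// Illustration only -- not part of this change.
#include <cstddef>

struct HInstruction {};

class CodeGenerator {
 public:
  explicit CodeGenerator(bool emit_non_baker_rb) : emit_non_baker_rb_(emit_non_baker_rb) {}
  bool EmitNonBakerReadBarrier() const { return emit_non_baker_rb_; }
 private:
  bool emit_non_baker_rb_;
};

// Mirrors the new signature: the codegen is now the first parameter.
bool TryExtractArrayAccessAddress(CodeGenerator* codegen,
                                  HInstruction* /*access*/,
                                  HInstruction* /*array*/,
                                  HInstruction* /*index*/,
                                  size_t /*data_offset*/) {
  if (codegen->EmitNonBakerReadBarrier()) {
    // The non-Baker instrumentation needs the original array pointer for
    // reference loads, so address extraction is skipped in that mode.
    return false;
  }
  return true;  // The real helper performs the actual extraction here.
}

int main() {
  CodeGenerator codegen(/*emit_non_baker_rb=*/ false);
  HInstruction access, array, index;
  return TryExtractArrayAccessAddress(&codegen, &access, &array, &index, 12u) ? 0 : 1;
}
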
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index d84e1cb..b3615e6 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -391,8 +391,8 @@
locations->SetOut(Location::RequiresRegister());
}
-void IntrinsicVisitor::CreateReferenceRefersToLocations(HInvoke* invoke) {
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+void IntrinsicVisitor::CreateReferenceRefersToLocations(HInvoke* invoke, CodeGenerator* codegen) {
+ if (codegen->EmitNonBakerReadBarrier()) {
// Unimplemented for non-Baker read barrier.
return;
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index a16b93d..177eedb 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -136,7 +136,7 @@
static MemberOffset GetReferenceDisableIntrinsicOffset();
static MemberOffset GetReferenceSlowPathEnabledOffset();
static void CreateReferenceGetReferentLocations(HInvoke* invoke, CodeGenerator* codegen);
- static void CreateReferenceRefersToLocations(HInvoke* invoke);
+ static void CreateReferenceRefersToLocations(HInvoke* invoke, CodeGenerator* codegen);
protected:
IntrinsicVisitor() {}
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index f5c6340..33ca1b3 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -91,11 +91,10 @@
public:
ReadBarrierSystemArrayCopySlowPathARM64(HInstruction* instruction, Location tmp)
: SlowPathCodeARM64(instruction), tmp_(tmp) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen_in) override {
+ DCHECK(codegen_in->EmitBakerReadBarrier());
CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
@@ -710,7 +709,7 @@
Location trg_loc = locations->Out();
Register trg = RegisterFrom(trg_loc, type);
- if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
+ if (type == DataType::Type::kReference && codegen->EmitBakerReadBarrier()) {
// UnsafeGetObject/UnsafeGetObjectVolatile with Baker's read barrier case.
Register temp = WRegisterFrom(locations->GetTemp(0));
MacroAssembler* masm = codegen->GetVIXLAssembler();
@@ -752,8 +751,11 @@
return false;
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
- bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorARM64* codegen) {
+ bool can_call =
+ codegen->EmitReadBarrier() && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -793,31 +795,31 @@
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeGetAcquire(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeGetLongAcquire(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeGetObjectAcquire(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitUnsafeGet(HInvoke* invoke) {
@@ -1094,8 +1096,10 @@
codegen_);
}
-static void CreateUnsafeCASLocations(ArenaAllocator* allocator, HInvoke* invoke) {
- const bool can_call = gUseReadBarrier && IsUnsafeCASObject(invoke);
+static void CreateUnsafeCASLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorARM64* codegen) {
+ const bool can_call = codegen->EmitReadBarrier() && IsUnsafeCASObject(invoke);
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -1447,7 +1451,7 @@
vixl::aarch64::Label* exit_loop = &exit_loop_label;
vixl::aarch64::Label* cmp_failure = &exit_loop_label;
- if (gUseReadBarrier && type == DataType::Type::kReference) {
+ if (type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
// We need to store the `old_value` in a non-scratch register to make sure
// the read barrier in the slow path does not clobber it.
old_value = WRegisterFrom(locations->GetTemp(0)); // The old value from main path.
@@ -1515,19 +1519,19 @@
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeCompareAndSetInt(HInvoke* invoke) {
- CreateUnsafeCASLocations(allocator_, invoke);
+ CreateUnsafeCASLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeCompareAndSetLong(HInvoke* invoke) {
- CreateUnsafeCASLocations(allocator_, invoke);
+ CreateUnsafeCASLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARM64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
return;
}
- CreateUnsafeCASLocations(allocator_, invoke);
- if (gUseReadBarrier) {
+ CreateUnsafeCASLocations(allocator_, invoke, codegen_);
+ if (codegen_->EmitReadBarrier()) {
// We need two non-scratch temporary registers for read barrier.
LocationSummary* locations = invoke->GetLocations();
if (kUseBakerReadBarrier) {
@@ -1577,7 +1581,7 @@
}
void IntrinsicCodeGeneratorARM64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen_->EmitReadBarrier(), kUseBakerReadBarrier);
GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
}
@@ -2897,7 +2901,7 @@
void IntrinsicLocationsBuilderARM64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
return;
}
@@ -2949,7 +2953,7 @@
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// Temporary register IP0, obtained from the VIXL scratch register
// pool, cannot be used in ReadBarrierSystemArrayCopySlowPathARM64
// (because that register is clobbered by ReadBarrierMarkRegX
@@ -2967,7 +2971,7 @@
void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen_->EmitReadBarrier(), kUseBakerReadBarrier);
MacroAssembler* masm = GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -3074,7 +3078,7 @@
UseScratchRegisterScope temps(masm);
Location temp3_loc; // Used only for Baker read barrier.
Register temp3;
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
temp3_loc = locations->GetTemp(2);
temp3 = WRegisterFrom(temp3_loc);
} else {
@@ -3087,7 +3091,7 @@
// or the destination is Object[]. If none of these checks succeed, we go to the
// slow path.
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
@@ -3248,7 +3252,7 @@
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non primitive array.
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
temp1_loc,
@@ -3298,7 +3302,7 @@
__ Cbz(WRegisterFrom(length), &done);
}
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// TODO: Also convert this intrinsic to the IsGcMarking strategy?
// SystemArrayCopy implementation for Baker read barriers (see
@@ -3534,7 +3538,7 @@
void IntrinsicLocationsBuilderARM64::VisitReferenceGetReferent(HInvoke* invoke) {
IntrinsicVisitor::CreateReferenceGetReferentLocations(invoke, codegen_);
- if (gUseReadBarrier && kUseBakerReadBarrier && invoke->GetLocations() != nullptr) {
+ if (codegen_->EmitBakerReadBarrier() && invoke->GetLocations() != nullptr) {
invoke->GetLocations()->AddTemp(Location::RequiresRegister());
}
}
@@ -3549,7 +3553,7 @@
SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
codegen_->AddSlowPath(slow_path);
- if (gUseReadBarrier) {
+ if (codegen_->EmitReadBarrier()) {
// Check self->GetWeakRefAccessEnabled().
UseScratchRegisterScope temps(masm);
Register temp = temps.AcquireW();
@@ -3576,7 +3580,7 @@
// Load the value from the field.
uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
out,
WRegisterFrom(obj),
@@ -3594,7 +3598,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitReferenceRefersTo(HInvoke* invoke) {
- IntrinsicVisitor::CreateReferenceRefersToLocations(invoke);
+ IntrinsicVisitor::CreateReferenceRefersToLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitReferenceRefersTo(HInvoke* invoke) {
@@ -3616,7 +3620,7 @@
__ Cmp(tmp, other);
- if (gUseReadBarrier) {
+ if (codegen_->EmitReadBarrier()) {
DCHECK(kUseBakerReadBarrier);
vixl::aarch64::Label calculate_result;
@@ -4712,7 +4716,7 @@
field.X(),
ArtField::DeclaringClassOffset().Int32Value(),
/*fixup_label=*/nullptr,
- GetCompilerReadBarrierOption());
+ codegen->GetCompilerReadBarrierOption());
}
}
} else {
@@ -4732,7 +4736,8 @@
}
}
-static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke) {
+static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke,
+ CodeGeneratorARM64* codegen) {
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
DataType::Type return_type = invoke->GetType();
@@ -4766,7 +4771,7 @@
}
// Add a temporary for offset.
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
+ if (codegen->EmitNonBakerReadBarrier() &&
GetExpectedVarHandleCoordinatesCount(invoke) == 0u) { // For static fields.
// To preserve the offset value across the non-Baker read barrier slow path
// for loading the declaring class, use a fixed callee-save register.
@@ -4783,13 +4788,13 @@
return locations;
}
-static void CreateVarHandleGetLocations(HInvoke* invoke) {
+static void CreateVarHandleGetLocations(HInvoke* invoke, CodeGeneratorARM64* codegen) {
VarHandleOptimizations optimizations(invoke);
if (optimizations.GetDoNotIntrinsify()) {
return;
}
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
+ if (codegen->EmitNonBakerReadBarrier() &&
invoke->GetType() == DataType::Type::kReference &&
invoke->GetIntrinsic() != Intrinsics::kVarHandleGet &&
invoke->GetIntrinsic() != Intrinsics::kVarHandleGetOpaque) {
@@ -4799,7 +4804,7 @@
return;
}
- CreateVarHandleCommonLocations(invoke);
+ CreateVarHandleCommonLocations(invoke, codegen);
}
static void GenerateVarHandleGet(HInvoke* invoke,
@@ -4829,7 +4834,7 @@
DCHECK(use_load_acquire || order == std::memory_order_relaxed);
// Load the value from the target location.
- if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
+ if (type == DataType::Type::kReference && codegen->EmitBakerReadBarrier()) {
// Piggy-back on the field load path using introspection for the Baker read barrier.
// The `target.offset` is a temporary, use it for field address.
Register tmp_ptr = target.offset.X();
@@ -4882,7 +4887,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGet(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGet(HInvoke* invoke) {
@@ -4890,7 +4895,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetOpaque(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetOpaque(HInvoke* invoke) {
@@ -4898,7 +4903,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAcquire(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAcquire(HInvoke* invoke) {
@@ -4906,20 +4911,20 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetVolatile(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetVolatile(HInvoke* invoke) {
GenerateVarHandleGet(invoke, codegen_, std::memory_order_seq_cst);
}
-static void CreateVarHandleSetLocations(HInvoke* invoke) {
+static void CreateVarHandleSetLocations(HInvoke* invoke, CodeGeneratorARM64* codegen) {
VarHandleOptimizations optimizations(invoke);
if (optimizations.GetDoNotIntrinsify()) {
return;
}
- CreateVarHandleCommonLocations(invoke);
+ CreateVarHandleCommonLocations(invoke, codegen);
}
static void GenerateVarHandleSet(HInvoke* invoke,
@@ -4991,7 +4996,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleSet(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleSet(HInvoke* invoke) {
@@ -4999,7 +5004,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleSetOpaque(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleSetOpaque(HInvoke* invoke) {
@@ -5007,7 +5012,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleSetRelease(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleSetRelease(HInvoke* invoke) {
@@ -5015,14 +5020,16 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleSetVolatile(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleSetVolatile(HInvoke* invoke) {
GenerateVarHandleSet(invoke, codegen_, std::memory_order_seq_cst);
}
-static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, bool return_success) {
+static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke,
+ CodeGeneratorARM64* codegen,
+ bool return_success) {
VarHandleOptimizations optimizations(invoke);
if (optimizations.GetDoNotIntrinsify()) {
return;
@@ -5030,8 +5037,7 @@
uint32_t number_of_arguments = invoke->GetNumberOfArguments();
DataType::Type value_type = GetDataTypeFromShorty(invoke, number_of_arguments - 1u);
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
- value_type == DataType::Type::kReference) {
+ if (value_type == DataType::Type::kReference && codegen->EmitNonBakerReadBarrier()) {
// Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
// the passed reference and reloads it from the field. This breaks the read barriers
// in slow path in different ways. The marked old value may not actually be a to-space
@@ -5042,9 +5048,9 @@
return;
}
- LocationSummary* locations = CreateVarHandleCommonLocations(invoke);
+ LocationSummary* locations = CreateVarHandleCommonLocations(invoke, codegen);
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen->EmitNonBakerReadBarrier()) {
// We need callee-save registers for both the class object and offset instead of
// the temporaries reserved in CreateVarHandleCommonLocations().
static_assert(POPCOUNT(kArm64CalleeSaveRefSpills) >= 2u);
@@ -5085,7 +5091,7 @@
locations->AddTemp(Location::RequiresRegister());
}
}
- if (gUseReadBarrier && value_type == DataType::Type::kReference) {
+ if (value_type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
// Add a temporary for the `old_value_temp` in slow path.
locations->AddTemp(Location::RequiresRegister());
}
@@ -5151,7 +5157,7 @@
// except for references that need the offset for the read barrier.
UseScratchRegisterScope temps(masm);
Register tmp_ptr = target.offset.X();
- if (gUseReadBarrier && value_type == DataType::Type::kReference) {
+ if (value_type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
tmp_ptr = temps.AcquireX();
}
__ Add(tmp_ptr, target.object.X(), target.offset.X());
@@ -5234,7 +5240,7 @@
vixl::aarch64::Label* exit_loop = &exit_loop_label;
vixl::aarch64::Label* cmp_failure = &exit_loop_label;
- if (gUseReadBarrier && value_type == DataType::Type::kReference) {
+ if (value_type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
// The `old_value_temp` is used first for the marked `old_value` and then for the unmarked
// reloaded old value for subsequent CAS in the slow path. It cannot be a scratch register.
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
@@ -5301,7 +5307,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleCompareAndExchange(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ false);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ false);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleCompareAndExchange(HInvoke* invoke) {
@@ -5310,7 +5316,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleCompareAndExchangeAcquire(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ false);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ false);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleCompareAndExchangeAcquire(HInvoke* invoke) {
@@ -5319,7 +5325,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleCompareAndExchangeRelease(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ false);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ false);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleCompareAndExchangeRelease(HInvoke* invoke) {
@@ -5328,7 +5334,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleCompareAndSet(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ true);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ true);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleCompareAndSet(HInvoke* invoke) {
@@ -5337,7 +5343,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleWeakCompareAndSet(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ true);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ true);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleWeakCompareAndSet(HInvoke* invoke) {
@@ -5346,7 +5352,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleWeakCompareAndSetAcquire(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ true);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ true);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleWeakCompareAndSetAcquire(HInvoke* invoke) {
@@ -5355,7 +5361,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleWeakCompareAndSetPlain(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ true);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ true);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleWeakCompareAndSetPlain(HInvoke* invoke) {
@@ -5364,7 +5370,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleWeakCompareAndSetRelease(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ true);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ true);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleWeakCompareAndSetRelease(HInvoke* invoke) {
@@ -5373,21 +5379,21 @@
}
static void CreateVarHandleGetAndUpdateLocations(HInvoke* invoke,
+ CodeGeneratorARM64* codegen,
GetAndUpdateOp get_and_update_op) {
VarHandleOptimizations optimizations(invoke);
if (optimizations.GetDoNotIntrinsify()) {
return;
}
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
- invoke->GetType() == DataType::Type::kReference) {
+ if (invoke->GetType() == DataType::Type::kReference && codegen->EmitNonBakerReadBarrier()) {
// Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
// the passed reference and reloads it from the field, thus seeing the new value
// that we have just stored. (And it also gets the memory visibility wrong.) b/173104084
return;
}
- LocationSummary* locations = CreateVarHandleCommonLocations(invoke);
+ LocationSummary* locations = CreateVarHandleCommonLocations(invoke, codegen);
size_t old_temp_count = locations->GetTempCount();
DCHECK_EQ(old_temp_count, (GetExpectedVarHandleCoordinatesCount(invoke) == 0) ? 2u : 1u);
@@ -5455,8 +5461,7 @@
// except for references that need the offset for the non-Baker read barrier.
UseScratchRegisterScope temps(masm);
Register tmp_ptr = target.offset.X();
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
- value_type == DataType::Type::kReference) {
+ if (value_type == DataType::Type::kReference && codegen->EmitNonBakerReadBarrier()) {
tmp_ptr = temps.AcquireX();
}
__ Add(tmp_ptr, target.object.X(), target.offset.X());
@@ -5485,8 +5490,7 @@
// the new value unless it is zero bit pattern (+0.0f or +0.0) and need another one
// in GenerateGetAndUpdate(). We have allocated a normal temporary to handle that.
old_value = CPURegisterFrom(locations->GetTemp(1u), load_store_type);
- } else if ((gUseReadBarrier && kUseBakerReadBarrier) &&
- value_type == DataType::Type::kReference) {
+ } else if (value_type == DataType::Type::kReference && codegen->EmitBakerReadBarrier()) {
// Load the old value initially to a scratch register.
// We shall move it to `out` later with a read barrier.
old_value = temps.AcquireW();
@@ -5533,7 +5537,7 @@
__ Sxtb(out.W(), old_value.W());
} else if (value_type == DataType::Type::kInt16) {
__ Sxth(out.W(), old_value.W());
- } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
+ } else if (value_type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
if (kUseBakerReadBarrier) {
codegen->GenerateIntrinsicCasMoveWithBakerReadBarrier(out.W(), old_value.W());
} else {
@@ -5554,7 +5558,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndSet(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kSet);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kSet);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndSet(HInvoke* invoke) {
@@ -5562,7 +5566,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndSetAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kSet);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kSet);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndSetAcquire(HInvoke* invoke) {
@@ -5570,7 +5574,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndSetRelease(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kSet);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kSet);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndSetRelease(HInvoke* invoke) {
@@ -5578,7 +5582,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndAdd(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAdd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAdd);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndAdd(HInvoke* invoke) {
@@ -5586,7 +5590,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndAddAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAdd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAdd);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndAddAcquire(HInvoke* invoke) {
@@ -5594,7 +5598,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndAddRelease(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAdd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAdd);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndAddRelease(HInvoke* invoke) {
@@ -5602,7 +5606,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndBitwiseAnd(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAnd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAnd);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndBitwiseAnd(HInvoke* invoke) {
@@ -5610,7 +5614,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndBitwiseAndAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAnd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAnd);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndBitwiseAndAcquire(HInvoke* invoke) {
@@ -5618,7 +5622,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndBitwiseAndRelease(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAnd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAnd);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndBitwiseAndRelease(HInvoke* invoke) {
@@ -5626,7 +5630,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndBitwiseOr(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kOr);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kOr);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndBitwiseOr(HInvoke* invoke) {
@@ -5634,7 +5638,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndBitwiseOrAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kOr);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kOr);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndBitwiseOrAcquire(HInvoke* invoke) {
@@ -5642,7 +5646,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndBitwiseOrRelease(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kOr);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kOr);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndBitwiseOrRelease(HInvoke* invoke) {
@@ -5650,7 +5654,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndBitwiseXor(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kXor);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kXor);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndBitwiseXor(HInvoke* invoke) {
@@ -5658,7 +5662,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndBitwiseXorAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kXor);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kXor);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndBitwiseXorAcquire(HInvoke* invoke) {
@@ -5666,7 +5670,7 @@
}
void IntrinsicLocationsBuilderARM64::VisitVarHandleGetAndBitwiseXorRelease(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kXor);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kXor);
}
void IntrinsicCodeGeneratorARM64::VisitVarHandleGetAndBitwiseXorRelease(HInvoke* invoke) {
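Note: the codegen queries used throughout these hunks are presumably thin
wrappers combining the new CompilerOptions flag with the compile-time
kUseBakerReadBarrier constant. A minimal sketch, assuming they sit on the
common CodeGenerator base class (which already exposes GetCompilerOptions()):

    bool CodeGenerator::EmitReadBarrier() const {
      // Per-compilation replacement for the global gUseReadBarrier.
      return GetCompilerOptions().EmitReadBarrier();
    }

    bool CodeGenerator::EmitBakerReadBarrier() const {
      // Stands in for the old (gUseReadBarrier && kUseBakerReadBarrier).
      return kUseBakerReadBarrier && EmitReadBarrier();
    }

    bool CodeGenerator::EmitNonBakerReadBarrier() const {
      // Stands in for the old (gUseReadBarrier && !kUseBakerReadBarrier).
      return !kUseBakerReadBarrier && EmitReadBarrier();
    }

Under that assumption, each rewrite in this file preserves the old boolean
exactly; only the source of truth moves from the global to the codegen.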
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index a63b32a..0cec278 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -120,11 +120,10 @@
public:
explicit ReadBarrierSystemArrayCopySlowPathARMVIXL(HInstruction* instruction)
: SlowPathCodeARMVIXL(instruction) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitBakerReadBarrier());
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
ArmVIXLAssembler* assembler = arm_codegen->GetAssembler();
LocationSummary* locations = instruction_->GetLocations();
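Note: dropping the constructor DCHECKs is not just cleanup. The slow path is
constructed with only the HInstruction, while the read barrier setting is now
per-compilation state reachable through the codegen handed to EmitNativeCode,
so emission time is the earliest point where the invariant can be asserted.
A sketch of the call-site shape, reusing helpers that appear elsewhere in this
file (the exact allocation and branch lines are an assumption):

    SlowPathCodeARMVIXL* read_barrier_slow_path =
        new (codegen_->GetScopedAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke);
    codegen_->AddSlowPath(read_barrier_slow_path);
    // Taken only while the GC is marking, i.e. only under Baker read barriers,
    // which is exactly what the relocated DCHECK enforces at emission time.
    __ CompareAndBranchIfNonZero(temp, read_barrier_slow_path->GetEntryLabel());
    __ Bind(read_barrier_slow_path->GetExitLabel());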
@@ -1242,7 +1241,7 @@
void IntrinsicLocationsBuilderARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
return;
}
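Note: the early return above is the recurring contract in these files. When the
locations builder declines a configuration, no LocationSummary is created, the
invoke is not intrinsified, and the call falls back to the runtime stub; the
code generator therefore only asserts the invariant. In miniature, with a
hypothetical intrinsic name:

    void IntrinsicLocationsBuilderARMVIXL::VisitFoo(HInvoke* invoke) {
      if (codegen_->EmitNonBakerReadBarrier()) {
        return;  // Not intrinsified; handled by the runtime instead.
      }
      CreateFooLocations(invoke, codegen_);  // Hypothetical helper.
    }

    void IntrinsicCodeGeneratorARMVIXL::VisitFoo(HInvoke* invoke) {
      // Reached only for configurations the builder accepted above.
      DCHECK_IMPLIES(codegen_->EmitReadBarrier(), kUseBakerReadBarrier);
      GenFoo(invoke, codegen_);  // Hypothetical helper.
    }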
@@ -1265,7 +1264,7 @@
if (length != nullptr && !assembler_->ShifterOperandCanAlwaysHold(length->GetValue())) {
locations->SetInAt(4, Location::RequiresRegister());
}
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// Temporary register IP cannot be used in
// ReadBarrierSystemArrayCopySlowPathARM (because that register
// is clobbered by ReadBarrierMarkRegX entry points). Get an extra
@@ -1339,7 +1338,7 @@
void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen_->EmitReadBarrier(), kUseBakerReadBarrier);
ArmVIXLAssembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -1453,7 +1452,7 @@
// or the destination is Object[]. If none of these checks succeed, we go to the
// slow path.
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
@@ -1584,7 +1583,7 @@
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non-primitive array.
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, src, class_offset, temp2_loc, /* needs_null_check= */ false);
@@ -1621,7 +1620,7 @@
__ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target= */ false);
}
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// TODO: Also convert this intrinsic to the IsGcMarking strategy?
// SystemArrayCopy implementation for Baker read barriers (see
@@ -2511,7 +2510,7 @@
SlowPathCodeARMVIXL* slow_path = new (GetAllocator()) IntrinsicSlowPathARMVIXL(invoke);
codegen_->AddSlowPath(slow_path);
- if (gUseReadBarrier) {
+ if (codegen_->EmitReadBarrier()) {
// Check self->GetWeakRefAccessEnabled().
UseScratchRegisterScope temps(assembler->GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
@@ -2539,7 +2538,7 @@
// Load the value from the field.
uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
out,
RegisterFrom(obj),
@@ -2560,7 +2559,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitReferenceRefersTo(HInvoke* invoke) {
- IntrinsicVisitor::CreateReferenceRefersToLocations(invoke);
+ IntrinsicVisitor::CreateReferenceRefersToLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitReferenceRefersTo(HInvoke* invoke) {
@@ -2587,7 +2586,7 @@
assembler->MaybeUnpoisonHeapReference(tmp);
codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); // `referent` is volatile.
- if (gUseReadBarrier) {
+ if (codegen_->EmitReadBarrier()) {
DCHECK(kUseBakerReadBarrier);
vixl32::Label calculate_result;
@@ -2613,7 +2612,7 @@
__ Bind(&calculate_result);
} else {
- DCHECK(!gUseReadBarrier);
+ DCHECK(!codegen_->EmitReadBarrier());
__ Sub(out, tmp, other);
}
@@ -2732,7 +2731,7 @@
}
break;
case DataType::Type::kReference:
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen->EmitBakerReadBarrier()) {
// Piggy-back on the field load path using introspection for the Baker read barrier.
vixl32::Register temp = RegisterFrom(maybe_temp);
__ Add(temp, base, offset);
@@ -2777,7 +2776,7 @@
codegen->GenerateMemoryBarrier(
seq_cst_barrier ? MemBarrierKind::kAnyAny : MemBarrierKind::kLoadAny);
}
- if (type == DataType::Type::kReference && !(gUseReadBarrier && kUseBakerReadBarrier)) {
+ if (type == DataType::Type::kReference && !codegen->EmitBakerReadBarrier()) {
Location base_loc = LocationFrom(base);
Location index_loc = LocationFrom(offset);
codegen->MaybeGenerateReadBarrierSlow(invoke, out, out, base_loc, /* offset=*/ 0u, index_loc);
@@ -2802,7 +2801,8 @@
CodeGeneratorARMVIXL* codegen,
DataType::Type type,
bool atomic) {
- bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+ bool can_call =
+ codegen->EmitReadBarrier() && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
@@ -2818,7 +2818,7 @@
locations->SetInAt(2, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister(),
(can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
- if ((gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
+ if ((type == DataType::Type::kReference && codegen->EmitBakerReadBarrier()) ||
(type == DataType::Type::kInt64 && Use64BitExclusiveLoadStore(atomic, codegen))) {
// We need a temporary register for the read barrier marking slow
// path in CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier,
@@ -2837,7 +2837,7 @@
vixl32::Register offset = LowRegisterFrom(locations->InAt(2)); // Long offset, lo part only.
Location out = locations->Out();
Location maybe_temp = Location::NoLocation();
- if ((gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) ||
+ if ((type == DataType::Type::kReference && codegen->EmitBakerReadBarrier()) ||
(type == DataType::Type::kInt64 && Use64BitExclusiveLoadStore(atomic, codegen))) {
maybe_temp = locations->GetTemp(0);
}
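Note on the `can_call` computation in the hunk above: when a reference load may
call into the runtime for marking, the LocationSummary must be built as a
potential call so the register allocator handles caller-save registers
correctly. The constructor calls are truncated by the hunk boundaries here; a
condensed sketch of the full pattern (the caller-saves line is an assumption):

    ArenaAllocator* allocator = invoke->GetBlock()->GetGraph()->GetAllocator();
    bool can_call =
        codegen->EmitReadBarrier() && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
    LocationSummary* locations = new (allocator) LocationSummary(
        invoke,
        can_call ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall,
        kIntrinsified);
    if (can_call && kUseBakerReadBarrier) {
      // The Baker slow path preserves the registers it clobbers itself,
      // so no caller-save spilling is needed around the intrinsic.
      locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());
    }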
@@ -3470,7 +3470,7 @@
// branch goes to the read barrier slow path that clobbers `success` anyway.
bool init_failure_for_cmp =
success.IsValid() &&
- !(gUseReadBarrier && type == DataType::Type::kReference && expected.IsRegister());
+ !(type == DataType::Type::kReference && codegen->EmitReadBarrier() && expected.IsRegister());
// Instruction scheduling: Loading a constant between LDREX* and using the loaded value
// is essentially free, so prepare the failure value here if we can.
bool init_failure_for_cmp_early =
@@ -3654,8 +3654,10 @@
SlowPathCodeARMVIXL* update_old_value_slow_path_;
};
-static void CreateUnsafeCASLocations(ArenaAllocator* allocator, HInvoke* invoke) {
- const bool can_call = gUseReadBarrier && IsUnsafeCASObject(invoke);
+static void CreateUnsafeCASLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorARMVIXL* codegen) {
+ const bool can_call = codegen->EmitReadBarrier() && IsUnsafeCASObject(invoke);
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -3706,7 +3708,7 @@
vixl32::Label* exit_loop = &exit_loop_label;
vixl32::Label* cmp_failure = &exit_loop_label;
- if (gUseReadBarrier && type == DataType::Type::kReference) {
+ if (type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
// If marking, check if the stored reference is a from-space reference to the same
// object as the to-space reference `expected`. If so, perform a custom CAS loop.
ReadBarrierCasSlowPathARMVIXL* slow_path =
@@ -3766,15 +3768,15 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitJdkUnsafeCompareAndSetInt(HInvoke* invoke) {
- CreateUnsafeCASLocations(allocator_, invoke);
+ CreateUnsafeCASLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderARMVIXL::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers (b/173104084).
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
return;
}
- CreateUnsafeCASLocations(allocator_, invoke);
+ CreateUnsafeCASLocations(allocator_, invoke, codegen_);
}
void IntrinsicCodeGeneratorARMVIXL::VisitUnsafeCASInt(HInvoke* invoke) {
@@ -3798,7 +3800,7 @@
}
void IntrinsicCodeGeneratorARMVIXL::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers (b/173104084).
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen_->EmitReadBarrier(), kUseBakerReadBarrier);
GenUnsafeCas(invoke, DataType::Type::kReference, codegen_);
}
@@ -4351,7 +4353,7 @@
LocationFrom(target.object),
field,
ArtField::DeclaringClassOffset().Int32Value(),
- GetCompilerReadBarrierOption());
+ codegen->GetCompilerReadBarrierOption());
}
}
} else {
@@ -4371,7 +4373,8 @@
}
}
-static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke) {
+static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke,
+ CodeGeneratorARMVIXL* codegen) {
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
DataType::Type return_type = invoke->GetType();
@@ -4403,7 +4406,7 @@
}
// Add a temporary for offset.
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
+ if (codegen->EmitNonBakerReadBarrier() &&
GetExpectedVarHandleCoordinatesCount(invoke) == 0u) { // For static fields.
// To preserve the offset value across the non-Baker read barrier slow path
// for loading the declaring class, use a fixed callee-save register.
@@ -4428,7 +4431,7 @@
return;
}
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
+ if (codegen->EmitNonBakerReadBarrier() &&
invoke->GetType() == DataType::Type::kReference &&
invoke->GetIntrinsic() != Intrinsics::kVarHandleGet &&
invoke->GetIntrinsic() != Intrinsics::kVarHandleGetOpaque) {
@@ -4438,7 +4441,7 @@
return;
}
- LocationSummary* locations = CreateVarHandleCommonLocations(invoke);
+ LocationSummary* locations = CreateVarHandleCommonLocations(invoke, codegen);
DataType::Type type = invoke->GetType();
if (type == DataType::Type::kFloat64 && Use64BitExclusiveLoadStore(atomic, codegen)) {
@@ -4476,7 +4479,7 @@
Location maybe_temp = Location::NoLocation();
Location maybe_temp2 = Location::NoLocation();
Location maybe_temp3 = Location::NoLocation();
- if (gUseReadBarrier && kUseBakerReadBarrier && type == DataType::Type::kReference) {
+ if (type == DataType::Type::kReference && codegen->EmitBakerReadBarrier()) {
// Reuse the offset temporary.
maybe_temp = LocationFrom(target.offset);
} else if (DataType::Is64BitType(type) && Use64BitExclusiveLoadStore(atomic, codegen)) {
@@ -4580,7 +4583,7 @@
return;
}
- LocationSummary* locations = CreateVarHandleCommonLocations(invoke);
+ LocationSummary* locations = CreateVarHandleCommonLocations(invoke, codegen);
uint32_t number_of_arguments = invoke->GetNumberOfArguments();
DataType::Type value_type = GetDataTypeFromShorty(invoke, number_of_arguments - 1u);
@@ -4741,7 +4744,9 @@
GenerateVarHandleSet(invoke, codegen_, std::memory_order_seq_cst, /*atomic=*/ true);
}
-static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke, bool return_success) {
+static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke,
+ CodeGeneratorARMVIXL* codegen,
+ bool return_success) {
VarHandleOptimizations optimizations(invoke);
if (optimizations.GetDoNotIntrinsify()) {
return;
@@ -4749,8 +4754,7 @@
uint32_t number_of_arguments = invoke->GetNumberOfArguments();
DataType::Type value_type = GetDataTypeFromShorty(invoke, number_of_arguments - 1u);
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
- value_type == DataType::Type::kReference) {
+ if (value_type == DataType::Type::kReference && codegen->EmitNonBakerReadBarrier()) {
// Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
// the passed reference and reloads it from the field. This breaks the read barriers
// in the slow path in different ways. The marked old value may not actually be a to-space
@@ -4761,9 +4765,9 @@
return;
}
- LocationSummary* locations = CreateVarHandleCommonLocations(invoke);
+ LocationSummary* locations = CreateVarHandleCommonLocations(invoke, codegen);
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen->EmitNonBakerReadBarrier()) {
// We need callee-save registers for both the class object and offset instead of
// the temporaries reserved in CreateVarHandleCommonLocations().
static_assert(POPCOUNT(kArmCalleeSaveRefSpills) >= 2u);
@@ -4799,7 +4803,7 @@
locations->AddRegisterTemps(2u);
}
}
- if (gUseReadBarrier && value_type == DataType::Type::kReference) {
+ if (value_type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
// Add a temporary for the store result, also used for the `old_value_temp` in the slow path.
locations->AddTemp(Location::RequiresRegister());
}
@@ -4930,7 +4934,7 @@
vixl32::Label* exit_loop = &exit_loop_label;
vixl32::Label* cmp_failure = &exit_loop_label;
- if (gUseReadBarrier && value_type == DataType::Type::kReference) {
+ if (value_type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
// The `old_value_temp` is used first for the marked `old_value` and then for the unmarked
// reloaded old value for subsequent CAS in the slow path. This must not clobber `old_value`.
vixl32::Register old_value_temp = return_success ? RegisterFrom(out) : store_result;
@@ -5008,7 +5012,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleCompareAndExchange(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ false);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ false);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleCompareAndExchange(HInvoke* invoke) {
@@ -5017,7 +5021,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleCompareAndExchangeAcquire(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ false);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ false);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleCompareAndExchangeAcquire(HInvoke* invoke) {
@@ -5026,7 +5030,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleCompareAndExchangeRelease(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ false);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ false);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleCompareAndExchangeRelease(HInvoke* invoke) {
@@ -5035,7 +5039,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleCompareAndSet(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ true);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ true);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleCompareAndSet(HInvoke* invoke) {
@@ -5044,7 +5048,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleWeakCompareAndSet(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ true);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ true);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleWeakCompareAndSet(HInvoke* invoke) {
@@ -5053,7 +5057,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleWeakCompareAndSetAcquire(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ true);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ true);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleWeakCompareAndSetAcquire(HInvoke* invoke) {
@@ -5062,7 +5066,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleWeakCompareAndSetPlain(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ true);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ true);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleWeakCompareAndSetPlain(HInvoke* invoke) {
@@ -5071,7 +5075,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleWeakCompareAndSetRelease(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke, /*return_success=*/ true);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_, /*return_success=*/ true);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleWeakCompareAndSetRelease(HInvoke* invoke) {
@@ -5080,21 +5084,21 @@
}
static void CreateVarHandleGetAndUpdateLocations(HInvoke* invoke,
+ CodeGeneratorARMVIXL* codegen,
GetAndUpdateOp get_and_update_op) {
VarHandleOptimizations optimizations(invoke);
if (optimizations.GetDoNotIntrinsify()) {
return;
}
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
- invoke->GetType() == DataType::Type::kReference) {
+ if (invoke->GetType() == DataType::Type::kReference && codegen->EmitNonBakerReadBarrier()) {
// Unsupported for non-Baker read barrier because the artReadBarrierSlow() ignores
// the passed reference and reloads it from the field, thus seeing the new value
// that we have just stored. (And it also gets the memory visibility wrong.) b/173104084
return;
}
- LocationSummary* locations = CreateVarHandleCommonLocations(invoke);
+ LocationSummary* locations = CreateVarHandleCommonLocations(invoke, codegen);
// We can reuse the declaring class (if present) and offset temporary, except for
// non-Baker read barriers that need them for the slow path.
@@ -5107,8 +5111,7 @@
// Add temps needed to do the GenerateGetAndUpdate() with core registers.
size_t temps_needed = (value_type == DataType::Type::kFloat64) ? 5u : 3u;
locations->AddRegisterTemps(temps_needed - locations->GetTempCount());
- } else if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
- value_type == DataType::Type::kReference) {
+ } else if (value_type == DataType::Type::kReference && codegen->EmitNonBakerReadBarrier()) {
// We need to preserve the declaring class (if present) and offset for read barrier
// slow paths, so we must use a separate temporary for the exclusive store result.
locations->AddTemp(Location::RequiresRegister());
@@ -5213,7 +5216,7 @@
if (byte_swap) {
GenerateReverseBytes(assembler, DataType::Type::kInt32, arg, arg);
}
- } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
+ } else if (value_type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
if (kUseBakerReadBarrier) {
// Load the old value initially to a temporary register.
// We shall move it to `out` later with a read barrier.
@@ -5296,7 +5299,7 @@
} else {
__ Vmov(SRegisterFrom(out), RegisterFrom(old_value));
}
- } else if (gUseReadBarrier && value_type == DataType::Type::kReference) {
+ } else if (value_type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
if (kUseBakerReadBarrier) {
codegen->GenerateIntrinsicCasMoveWithBakerReadBarrier(RegisterFrom(out),
RegisterFrom(old_value));
@@ -5327,7 +5330,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndSet(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kSet);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kSet);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndSet(HInvoke* invoke) {
@@ -5335,7 +5338,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndSetAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kSet);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kSet);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndSetAcquire(HInvoke* invoke) {
@@ -5343,7 +5346,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndSetRelease(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kSet);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kSet);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndSetRelease(HInvoke* invoke) {
@@ -5351,7 +5354,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndAdd(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAdd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAdd);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndAdd(HInvoke* invoke) {
@@ -5359,7 +5362,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndAddAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAdd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAdd);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndAddAcquire(HInvoke* invoke) {
@@ -5367,7 +5370,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndAddRelease(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAdd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAdd);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndAddRelease(HInvoke* invoke) {
@@ -5375,7 +5378,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndBitwiseAnd(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAnd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAnd);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndBitwiseAnd(HInvoke* invoke) {
@@ -5383,7 +5386,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndBitwiseAndAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAnd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAnd);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndBitwiseAndAcquire(HInvoke* invoke) {
@@ -5391,7 +5394,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndBitwiseAndRelease(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kAnd);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kAnd);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndBitwiseAndRelease(HInvoke* invoke) {
@@ -5399,7 +5402,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndBitwiseOr(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kOr);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kOr);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndBitwiseOr(HInvoke* invoke) {
@@ -5407,7 +5410,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndBitwiseOrAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kOr);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kOr);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndBitwiseOrAcquire(HInvoke* invoke) {
@@ -5415,7 +5418,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndBitwiseOrRelease(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kOr);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kOr);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndBitwiseOrRelease(HInvoke* invoke) {
@@ -5423,7 +5426,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndBitwiseXor(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kXor);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kXor);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndBitwiseXor(HInvoke* invoke) {
@@ -5431,7 +5434,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndBitwiseXorAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kXor);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kXor);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndBitwiseXorAcquire(HInvoke* invoke) {
@@ -5439,7 +5442,7 @@
}
void IntrinsicLocationsBuilderARMVIXL::VisitVarHandleGetAndBitwiseXorRelease(HInvoke* invoke) {
- CreateVarHandleGetAndUpdateLocations(invoke, GetAndUpdateOp::kXor);
+ CreateVarHandleGetAndUpdateLocations(invoke, codegen_, GetAndUpdateOp::kXor);
}
void IntrinsicCodeGeneratorARMVIXL::VisitVarHandleGetAndBitwiseXorRelease(HInvoke* invoke) {
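Note before the RISC-V file: the point of routing everything through
CompilerOptions is that two compilations in one process can now disagree on
read barrier emission, which a process-wide global could never express. A
hypothetical illustration, assuming direct (friend) access to the field as at
its existing call sites; these exact lines appear nowhere in the change:

    #include <memory>

    std::unique_ptr<CompilerOptions> jit_options = std::make_unique<CompilerOptions>();
    jit_options->emit_read_barrier_ = true;   // e.g. a runtime using the CC collector.

    std::unique_ptr<CompilerOptions> aot_options = std::make_unique<CompilerOptions>();
    aot_options->emit_read_barrier_ = false;  // e.g. a collector with no read barriers.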
diff --git a/compiler/optimizing/intrinsics_riscv64.cc b/compiler/optimizing/intrinsics_riscv64.cc
index ba5a3cd..794c30b 100644
--- a/compiler/optimizing/intrinsics_riscv64.cc
+++ b/compiler/optimizing/intrinsics_riscv64.cc
@@ -904,7 +904,7 @@
Location::RegisterLocation(target.object),
field,
ArtField::DeclaringClassOffset().Int32Value(),
- GetCompilerReadBarrierOption());
+ codegen->GetCompilerReadBarrierOption());
}
}
} else {
@@ -919,7 +919,8 @@
}
}
-static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke) {
+static LocationSummary* CreateVarHandleCommonLocations(HInvoke* invoke,
+ CodeGeneratorRISCV64* codegen) {
size_t expected_coordinates_count = GetExpectedVarHandleCoordinatesCount(invoke);
DataType::Type return_type = invoke->GetType();
@@ -953,7 +954,7 @@
}
// Add a temporary for offset.
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
+ if (codegen->EmitNonBakerReadBarrier() &&
GetExpectedVarHandleCoordinatesCount(invoke) == 0u) { // For static fields.
// To preserve the offset value across the non-Baker read barrier slow path
// for loading the declaring class, use a fixed callee-save register.
@@ -970,13 +971,13 @@
return locations;
}
-static void CreateVarHandleGetLocations(HInvoke* invoke) {
+static void CreateVarHandleGetLocations(HInvoke* invoke, CodeGeneratorRISCV64* codegen) {
VarHandleOptimizations optimizations(invoke);
if (optimizations.GetDoNotIntrinsify()) {
return;
}
- if ((gUseReadBarrier && !kUseBakerReadBarrier) &&
+ if (codegen->EmitNonBakerReadBarrier() &&
invoke->GetType() == DataType::Type::kReference &&
invoke->GetIntrinsic() != Intrinsics::kVarHandleGet &&
invoke->GetIntrinsic() != Intrinsics::kVarHandleGetOpaque) {
@@ -986,7 +987,7 @@
return;
}
- CreateVarHandleCommonLocations(invoke);
+ CreateVarHandleCommonLocations(invoke, codegen);
}
static void GenerateVarHandleGet(HInvoke* invoke,
@@ -1019,7 +1020,7 @@
}
// Load the value from the target location.
- if (type == DataType::Type::kReference && gUseReadBarrier && kUseBakerReadBarrier) {
+ if (type == DataType::Type::kReference && codegen->EmitBakerReadBarrier()) {
// TODO(riscv64): Revisit when we add a check for whether the holder is black.
Location index_and_temp_loc = Location::RegisterLocation(target.offset);
codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
@@ -1064,7 +1065,7 @@
}
void IntrinsicLocationsBuilderRISCV64::VisitVarHandleGet(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorRISCV64::VisitVarHandleGet(HInvoke* invoke) {
@@ -1072,7 +1073,7 @@
}
void IntrinsicLocationsBuilderRISCV64::VisitVarHandleGetOpaque(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorRISCV64::VisitVarHandleGetOpaque(HInvoke* invoke) {
@@ -1080,7 +1081,7 @@
}
void IntrinsicLocationsBuilderRISCV64::VisitVarHandleGetAcquire(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorRISCV64::VisitVarHandleGetAcquire(HInvoke* invoke) {
@@ -1088,20 +1089,20 @@
}
void IntrinsicLocationsBuilderRISCV64::VisitVarHandleGetVolatile(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorRISCV64::VisitVarHandleGetVolatile(HInvoke* invoke) {
GenerateVarHandleGet(invoke, codegen_, std::memory_order_seq_cst);
}
-static void CreateVarHandleSetLocations(HInvoke* invoke) {
+static void CreateVarHandleSetLocations(HInvoke* invoke, CodeGeneratorRISCV64* codegen) {
VarHandleOptimizations optimizations(invoke);
if (optimizations.GetDoNotIntrinsify()) {
return;
}
- CreateVarHandleCommonLocations(invoke);
+ CreateVarHandleCommonLocations(invoke, codegen);
}
static void GenerateVarHandleSet(HInvoke* invoke,
@@ -1166,7 +1167,7 @@
}
void IntrinsicLocationsBuilderRISCV64::VisitVarHandleSet(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorRISCV64::VisitVarHandleSet(HInvoke* invoke) {
@@ -1174,7 +1175,7 @@
}
void IntrinsicLocationsBuilderRISCV64::VisitVarHandleSetOpaque(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorRISCV64::VisitVarHandleSetOpaque(HInvoke* invoke) {
@@ -1182,7 +1183,7 @@
}
void IntrinsicLocationsBuilderRISCV64::VisitVarHandleSetRelease(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorRISCV64::VisitVarHandleSetRelease(HInvoke* invoke) {
@@ -1190,7 +1191,7 @@
}
void IntrinsicLocationsBuilderRISCV64::VisitVarHandleSetVolatile(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorRISCV64::VisitVarHandleSetVolatile(HInvoke* invoke) {
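Note: GetCompilerReadBarrierOption() also changes from a free function keyed
off the global to a codegen member, here and in the other backends. Presumably
it derives the enum from the same per-compilation bit; a sketch under that
assumption:

    ReadBarrierOption CodeGenerator::GetCompilerReadBarrierOption() const {
      // kWithReadBarrier / kWithoutReadBarrier, as consumed by the
      // GenerateGcRootFieldLoad-style reference load helpers.
      return EmitReadBarrier() ? kWithReadBarrier : kWithoutReadBarrier;
    }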
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index b269f45..0e32315 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -75,11 +75,10 @@
public:
explicit ReadBarrierSystemArrayCopySlowPathX86(HInstruction* instruction)
: SlowPathCode(instruction) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitBakerReadBarrier());
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
@@ -1698,7 +1697,7 @@
case DataType::Type::kReference: {
Register output = output_loc.AsRegister<Register>();
- if (gUseReadBarrier) {
+ if (codegen->EmitReadBarrier()) {
if (kUseBakerReadBarrier) {
Address src(base, offset, ScaleFactor::TIMES_1, 0);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -1754,9 +1753,11 @@
static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
HInvoke* invoke,
+ CodeGeneratorX86* codegen,
DataType::Type type,
bool is_volatile) {
- bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+ bool can_call =
+ codegen->EmitReadBarrier() && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -1825,35 +1826,39 @@
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeGet(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kInt32, /*is_volatile=*/ false);
+ allocator_, invoke, codegen_, DataType::Type::kInt32, /*is_volatile=*/ false);
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /*is_volatile=*/ true);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, codegen_, DataType::Type::kInt32, /*is_volatile=*/ true);
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeGetAcquire(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32, /*is_volatile=*/ true);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, codegen_, DataType::Type::kInt32, /*is_volatile=*/ true);
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeGetLong(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kInt64, /*is_volatile=*/ false);
+ allocator_, invoke, codegen_, DataType::Type::kInt64, /*is_volatile=*/ false);
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /*is_volatile=*/ true);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, codegen_, DataType::Type::kInt64, /*is_volatile=*/ true);
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeGetLongAcquire(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64, /*is_volatile=*/ true);
+ CreateIntIntIntToIntLocations(
+ allocator_, invoke, codegen_, DataType::Type::kInt64, /*is_volatile=*/ true);
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeGetObject(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kReference, /*is_volatile=*/ false);
+ allocator_, invoke, codegen_, DataType::Type::kReference, /*is_volatile=*/ false);
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeGetObjectVolatile(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kReference, /*is_volatile=*/ true);
+ allocator_, invoke, codegen_, DataType::Type::kReference, /*is_volatile=*/ true);
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeGetObjectAcquire(HInvoke* invoke) {
CreateIntIntIntToIntLocations(
- allocator_, invoke, DataType::Type::kReference, /*is_volatile=*/ true);
+ allocator_, invoke, codegen_, DataType::Type::kReference, /*is_volatile=*/ true);
}
void IntrinsicCodeGeneratorX86::VisitJdkUnsafeGet(HInvoke* invoke) {
@@ -2100,11 +2105,10 @@
}
static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
+ CodeGeneratorX86* codegen,
DataType::Type type,
HInvoke* invoke) {
- const bool can_call = gUseReadBarrier &&
- kUseBakerReadBarrier &&
- IsUnsafeCASObject(invoke);
+ const bool can_call = codegen->EmitBakerReadBarrier() && IsUnsafeCASObject(invoke);
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -2165,20 +2169,20 @@
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeCompareAndSetInt(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt32, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, codegen_, DataType::Type::kInt32, invoke);
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeCompareAndSetLong(HInvoke* invoke) {
- CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kInt64, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, codegen_, DataType::Type::kInt64, invoke);
}
void IntrinsicLocationsBuilderX86::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
return;
}
- CreateIntIntIntIntIntToInt(allocator_, DataType::Type::kReference, invoke);
+ CreateIntIntIntIntIntToInt(allocator_, codegen_, DataType::Type::kReference, invoke);
}
static void GenPrimitiveLockedCmpxchg(DataType::Type type,
@@ -2303,7 +2307,7 @@
DCHECK_EQ(expected, EAX);
DCHECK_NE(temp, temp2);
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen->EmitBakerReadBarrier()) {
// Need to make sure the reference stored in the field is a to-space
// one before attempting the CAS or the CAS could fail incorrectly.
codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -2390,7 +2394,7 @@
if (type == DataType::Type::kReference) {
// The only read barrier implementation supporting the
// UnsafeCASObject intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen->EmitReadBarrier(), kUseBakerReadBarrier);
Register temp = locations->GetTemp(0).AsRegister<Register>();
Register temp2 = locations->GetTemp(1).AsRegister<Register>();
@@ -2412,7 +2416,7 @@
void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
// The only read barrier implementation supporting the
// UnsafeCASObject intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen_->EmitReadBarrier(), kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -2442,7 +2446,7 @@
void IntrinsicCodeGeneratorX86::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barriers.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen_->EmitReadBarrier(), kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -2842,7 +2846,7 @@
void IntrinsicLocationsBuilderX86::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
return;
}
@@ -2874,7 +2878,7 @@
void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
// SystemArrayCopy intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen_->EmitReadBarrier(), kUseBakerReadBarrier);
X86Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -2994,7 +2998,7 @@
// slow path.
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -3021,7 +3025,7 @@
__ j(kNotEqual, intrinsic_slow_path->GetEntryLabel());
}
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
if (length.Equals(Location::RegisterLocation(temp3))) {
// When Baker read barriers are enabled, register `temp3`,
// which in the present case contains the `length` parameter,
@@ -3119,7 +3123,7 @@
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non-primitive array.
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -3150,7 +3154,7 @@
// Compute the base source address in `temp1`.
GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1);
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// If it is needed (in the case of the fast-path loop), the base
// destination address is computed later, as `temp2` is used for
// intermediate computations.
@@ -3376,7 +3380,7 @@
SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
codegen_->AddSlowPath(slow_path);
- if (gUseReadBarrier) {
+ if (codegen_->EmitReadBarrier()) {
// Check self->GetWeakRefAccessEnabled().
ThreadOffset32 offset = Thread::WeakRefAccessEnabledOffset<kX86PointerSize>();
__ fs()->cmpl(Address::Absolute(offset),
@@ -3399,7 +3403,7 @@
// Load the value from the field.
uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
out,
obj.AsRegister<Register>(),
@@ -3418,7 +3422,7 @@
}
void IntrinsicLocationsBuilderX86::VisitReferenceRefersTo(HInvoke* invoke) {
- IntrinsicVisitor::CreateReferenceRefersToLocations(invoke);
+ IntrinsicVisitor::CreateReferenceRefersToLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitReferenceRefersTo(HInvoke* invoke) {
@@ -3441,7 +3445,7 @@
NearLabel end, return_true, return_false;
__ cmpl(out, other);
- if (gUseReadBarrier) {
+ if (codegen_->EmitReadBarrier()) {
DCHECK(kUseBakerReadBarrier);
__ j(kEqual, &return_true);
@@ -3780,7 +3784,7 @@
Location::RegisterLocation(temp),
Address(temp, declaring_class_offset),
/* fixup_label= */ nullptr,
- GetCompilerReadBarrierOption());
+ codegen->GetCompilerReadBarrierOption());
return temp;
}
@@ -3790,10 +3794,10 @@
return locations->InAt(1).AsRegister<Register>();
}
-static void CreateVarHandleGetLocations(HInvoke* invoke) {
+static void CreateVarHandleGetLocations(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen->EmitNonBakerReadBarrier()) {
return;
}
@@ -3835,7 +3839,7 @@
static void GenerateVarHandleGet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGet intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen->EmitReadBarrier(), kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -3859,7 +3863,7 @@
Address field_addr(ref, offset, TIMES_1, 0);
// Load the value from the field
- if (type == DataType::Type::kReference && GetCompilerReadBarrierOption() == kWithReadBarrier) {
+ if (type == DataType::Type::kReference && codegen->EmitReadBarrier()) {
codegen->GenerateReferenceLoadWithBakerReadBarrier(
invoke, out, ref, field_addr, /* needs_null_check= */ false);
} else if (type == DataType::Type::kInt64 &&
@@ -3882,7 +3886,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGet(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGet(HInvoke* invoke) {
@@ -3890,7 +3894,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetVolatile(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetVolatile(HInvoke* invoke) {
@@ -3898,7 +3902,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAcquire(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAcquire(HInvoke* invoke) {
@@ -3906,17 +3910,17 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetOpaque(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetOpaque(HInvoke* invoke) {
GenerateVarHandleGet(invoke, codegen_);
}
-static void CreateVarHandleSetLocations(HInvoke* invoke) {
+static void CreateVarHandleSetLocations(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleSet intrinsic is the Baker-style read barriers.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen->EmitNonBakerReadBarrier()) {
return;
}
@@ -3989,7 +3993,7 @@
static void GenerateVarHandleSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleSet intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen->EmitReadBarrier(), kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4055,7 +4059,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleSet(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleSet(HInvoke* invoke) {
@@ -4063,7 +4067,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleSetVolatile(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleSetVolatile(HInvoke* invoke) {
@@ -4071,7 +4075,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleSetRelease(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleSetRelease(HInvoke* invoke) {
@@ -4079,17 +4083,17 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleSetOpaque(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleSetOpaque(HInvoke* invoke) {
GenerateVarHandleSet(invoke, codegen_);
}
-static void CreateVarHandleGetAndSetLocations(HInvoke* invoke) {
+static void CreateVarHandleGetAndSetLocations(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGetAndSet intrinsic is the Baker-style read barriers.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen->EmitNonBakerReadBarrier()) {
return;
}
@@ -4137,7 +4141,7 @@
static void GenerateVarHandleGetAndSet(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGetAndSet intrinsic is the Baker-style read barriers.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen->EmitReadBarrier(), kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4196,7 +4200,7 @@
__ movd(locations->Out().AsFpuRegister<XmmRegister>(), EAX);
break;
case DataType::Type::kReference: {
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen->EmitBakerReadBarrier()) {
// Need to make sure the reference stored in the field is a to-space
// one before attempting the CAS or the CAS could fail incorrectly.
codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -4234,7 +4238,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndSet(HInvoke* invoke) {
- CreateVarHandleGetAndSetLocations(invoke);
+ CreateVarHandleGetAndSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndSet(HInvoke* invoke) {
@@ -4242,7 +4246,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndSetAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndSetLocations(invoke);
+ CreateVarHandleGetAndSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndSetAcquire(HInvoke* invoke) {
@@ -4250,17 +4254,18 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndSetRelease(HInvoke* invoke) {
- CreateVarHandleGetAndSetLocations(invoke);
+ CreateVarHandleGetAndSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndSetRelease(HInvoke* invoke) {
GenerateVarHandleGetAndSet(invoke, codegen_);
}
-static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke) {
+static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke,
+ CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandle compare-and-set/exchange intrinsics is the Baker-style read barrier.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen->EmitNonBakerReadBarrier()) {
return;
}
@@ -4324,7 +4329,7 @@
static void GenerateVarHandleCompareAndSetOrExchange(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandle compare-and-set/exchange intrinsics is the Baker-style read barrier.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen->EmitReadBarrier(), kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4377,7 +4382,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleCompareAndSet(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleCompareAndSet(HInvoke* invoke) {
@@ -4385,7 +4390,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleWeakCompareAndSet(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleWeakCompareAndSet(HInvoke* invoke) {
@@ -4393,7 +4398,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleWeakCompareAndSetPlain(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleWeakCompareAndSetPlain(HInvoke* invoke) {
@@ -4401,7 +4406,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleWeakCompareAndSetAcquire(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleWeakCompareAndSetAcquire(HInvoke* invoke) {
@@ -4409,7 +4414,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleWeakCompareAndSetRelease(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleWeakCompareAndSetRelease(HInvoke* invoke) {
@@ -4417,7 +4422,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleCompareAndExchange(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleCompareAndExchange(HInvoke* invoke) {
@@ -4425,7 +4430,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleCompareAndExchangeAcquire(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleCompareAndExchangeAcquire(HInvoke* invoke) {
@@ -4433,17 +4438,17 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleCompareAndExchangeRelease(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleCompareAndExchangeRelease(HInvoke* invoke) {
GenerateVarHandleCompareAndSetOrExchange(invoke, codegen_);
}
-static void CreateVarHandleGetAndAddLocations(HInvoke* invoke) {
+static void CreateVarHandleGetAndAddLocations(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGetAndAdd intrinsic is the Baker-style read barrier.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen->EmitNonBakerReadBarrier()) {
return;
}
@@ -4492,7 +4497,7 @@
static void GenerateVarHandleGetAndAdd(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandleGetAndAdd intrinsic is the Baker-style read barrier.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen->EmitReadBarrier(), kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4567,7 +4572,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndAdd(HInvoke* invoke) {
- CreateVarHandleGetAndAddLocations(invoke);
+ CreateVarHandleGetAndAddLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndAdd(HInvoke* invoke) {
@@ -4575,7 +4580,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndAddAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndAddLocations(invoke);
+ CreateVarHandleGetAndAddLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndAddAcquire(HInvoke* invoke) {
@@ -4583,17 +4588,17 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndAddRelease(HInvoke* invoke) {
- CreateVarHandleGetAndAddLocations(invoke);
+ CreateVarHandleGetAndAddLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndAddRelease(HInvoke* invoke) {
GenerateVarHandleGetAndAdd(invoke, codegen_);
}
-static void CreateVarHandleGetAndBitwiseOpLocations(HInvoke* invoke) {
+static void CreateVarHandleGetAndBitwiseOpLocations(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandle get-and-bitwise-op intrinsics is the Baker-style read barrier.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen->EmitNonBakerReadBarrier()) {
return;
}
@@ -4661,7 +4666,7 @@
static void GenerateVarHandleGetAndBitwiseOp(HInvoke* invoke, CodeGeneratorX86* codegen) {
// The only read barrier implementation supporting the
// VarHandle get-and-bitwise-op intrinsics is the Baker-style read barrier.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen->EmitReadBarrier(), kUseBakerReadBarrier);
X86Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4722,7 +4727,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndBitwiseOr(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndBitwiseOr(HInvoke* invoke) {
@@ -4730,7 +4735,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndBitwiseOrAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndBitwiseOrAcquire(HInvoke* invoke) {
@@ -4738,7 +4743,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndBitwiseOrRelease(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndBitwiseOrRelease(HInvoke* invoke) {
@@ -4746,7 +4751,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndBitwiseXor(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndBitwiseXor(HInvoke* invoke) {
@@ -4754,7 +4759,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndBitwiseXorAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndBitwiseXorAcquire(HInvoke* invoke) {
@@ -4762,7 +4767,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndBitwiseXorRelease(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndBitwiseXorRelease(HInvoke* invoke) {
@@ -4770,7 +4775,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndBitwiseAnd(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndBitwiseAnd(HInvoke* invoke) {
@@ -4778,7 +4783,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndBitwiseAndAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndBitwiseAndAcquire(HInvoke* invoke) {
@@ -4786,7 +4791,7 @@
}
void IntrinsicLocationsBuilderX86::VisitVarHandleGetAndBitwiseAndRelease(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitVarHandleGetAndBitwiseAndRelease(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 8b4b05d..404fd3b 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -71,11 +71,10 @@
public:
explicit ReadBarrierSystemArrayCopySlowPathX86_64(HInstruction* instruction)
: SlowPathCode(instruction) {
- DCHECK(gUseReadBarrier);
- DCHECK(kUseBakerReadBarrier);
}
void EmitNativeCode(CodeGenerator* codegen) override {
+ DCHECK(codegen->EmitBakerReadBarrier());
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(locations->CanCall());
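Note why the two DCHECKs move: the slow path is constructed while building locations, where no code generator is in scope, and the read-barrier decision is now a per-compilation CompilerOptions setting rather than the global gUseReadBarrier. EmitNativeCode receives the codegen, so the assertion belongs there. The same relocation, sketched on a hypothetical slow path (names are illustrative, not from the sources):

class ExampleReadBarrierSlowPath : public SlowPathCode {
 public:
  explicit ExampleReadBarrierSlowPath(HInstruction* instruction)
      : SlowPathCode(instruction) {}  // No configuration DCHECKs at construction time.

  void EmitNativeCode(CodeGenerator* codegen) override {
    // Checked at emission time, where the per-compilation options are visible.
    DCHECK(codegen->EmitBakerReadBarrier());
  }
};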
@@ -836,7 +835,7 @@
void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
- SystemArrayCopy intrinsic is the Baker-style read barrier.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
return;
}
@@ -887,7 +886,7 @@
void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) {
// The only read barrier implementation supporting the
- SystemArrayCopy intrinsic is the Baker-style read barrier.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen_->EmitReadBarrier(), kUseBakerReadBarrier);
X86_64Assembler* assembler = GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -1002,7 +1001,7 @@
// slow path.
bool did_unpoison = false;
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// /* HeapReference<Class> */ temp1 = dest->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, dest, class_offset, /* needs_null_check= */ false);
@@ -1034,7 +1033,7 @@
if (!optimizations.GetDestinationIsNonPrimitiveArray()) {
// Bail out if the destination is not a non-primitive array.
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// /* HeapReference<Class> */ TMP = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, TMP_loc, temp1, component_offset, /* needs_null_check= */ false);
@@ -1055,7 +1054,7 @@
if (!optimizations.GetSourceIsNonPrimitiveArray()) {
// Bail out if the source is not a non-primitive array.
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// For the same reason given earlier, `temp1` is not trashed by the
// read barrier emitted by GenerateFieldLoadWithBakerReadBarrier below.
// /* HeapReference<Class> */ TMP = temp2->component_type_
@@ -1081,7 +1080,7 @@
if (optimizations.GetDestinationIsTypedObjectArray()) {
NearLabel do_copy;
__ j(kEqual, &do_copy);
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// /* HeapReference<Class> */ temp1 = temp1->component_type_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, temp1, component_offset, /* needs_null_check= */ false);
@@ -1109,7 +1108,7 @@
} else if (!optimizations.GetSourceIsNonPrimitiveArray()) {
DCHECK(optimizations.GetDestinationIsNonPrimitiveArray());
// Bail out if the source is not a non-primitive array.
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// /* HeapReference<Class> */ temp1 = src->klass_
codegen_->GenerateFieldLoadWithBakerReadBarrier(
invoke, temp1_loc, src, class_offset, /* needs_null_check= */ false);
@@ -1141,7 +1140,7 @@
GenSystemArrayCopyAddresses(
GetAssembler(), type, src, src_pos, dest, dest_pos, length, temp1, temp2, temp3);
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
// SystemArrayCopy implementation for Baker read barriers (see
// also CodeGeneratorX86_64::GenerateReferenceLoadWithBakerReadBarrier):
//
@@ -1888,7 +1887,7 @@
break;
case DataType::Type::kReference: {
- if (gUseReadBarrier) {
+ if (codegen->EmitReadBarrier()) {
if (kUseBakerReadBarrier) {
Address src(base, offset, ScaleFactor::TIMES_1, 0);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -1929,8 +1928,11 @@
return false;
}
-static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
- bool can_call = gUseReadBarrier && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
+static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
+ HInvoke* invoke,
+ CodeGeneratorX86_64* codegen) {
+ bool can_call =
+ codegen->EmitReadBarrier() && UnsafeGetIntrinsicOnCallList(invoke->GetIntrinsic());
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
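The hunk is cut off mid-expression by the diff context; presumably the LocationSummary construction continues in the usual intrinsic pattern, with can_call selecting a slow-path call kind:

// Presumed continuation (standard intrinsic pattern, not shown in the hunk):
LocationSummary* locations =
    new (allocator) LocationSummary(invoke,
                                    can_call
                                        ? LocationSummary::kCallOnSlowPath
                                        : LocationSummary::kNoCall,
                                    kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
  // The Baker read barrier marking slow path does not clobber caller-saves.
  locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());
}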
@@ -1967,31 +1969,31 @@
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeGet(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeGetVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeGetAcquire(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeGetLong(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeGetLongVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeGetLongAcquire(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeGetObject(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeGetObjectVolatile(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeGetObjectAcquire(HInvoke* invoke) {
- CreateIntIntIntToIntLocations(allocator_, invoke);
+ CreateIntIntIntToIntLocations(allocator_, invoke, codegen_);
}
@@ -2228,11 +2230,10 @@
}
static void CreateUnsafeCASLocations(ArenaAllocator* allocator,
- DataType::Type type,
- HInvoke* invoke) {
- const bool can_call = gUseReadBarrier &&
- kUseBakerReadBarrier &&
- IsUnsafeCASObject(invoke);
+ HInvoke* invoke,
+ CodeGeneratorX86_64* codegen,
+ DataType::Type type) {
+ const bool can_call = codegen->EmitBakerReadBarrier() && IsUnsafeCASObject(invoke);
LocationSummary* locations =
new (allocator) LocationSummary(invoke,
can_call
@@ -2253,7 +2254,7 @@
// Need two temporaries for MarkGCCard.
locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too.
locations->AddTemp(Location::RequiresRegister());
- if (gUseReadBarrier) {
+ if (codegen->EmitReadBarrier()) {
// Need three temporaries for GenerateReferenceLoadWithBakerReadBarrier.
DCHECK(kUseBakerReadBarrier);
locations->AddTemp(Location::RequiresRegister());
@@ -2289,20 +2290,20 @@
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeCompareAndSetInt(HInvoke* invoke) {
- CreateUnsafeCASLocations(allocator_, DataType::Type::kInt32, invoke);
+ CreateUnsafeCASLocations(allocator_, invoke, codegen_, DataType::Type::kInt32);
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeCompareAndSetLong(HInvoke* invoke) {
- CreateUnsafeCASLocations(allocator_, DataType::Type::kInt64, invoke);
+ CreateUnsafeCASLocations(allocator_, invoke, codegen_, DataType::Type::kInt64);
}
void IntrinsicLocationsBuilderX86_64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barrier.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen_->EmitNonBakerReadBarrier()) {
return;
}
- CreateUnsafeCASLocations(allocator_, DataType::Type::kReference, invoke);
+ CreateUnsafeCASLocations(allocator_, invoke, codegen_, DataType::Type::kReference);
}
// Convert ZF into the Boolean result.
@@ -2438,7 +2439,7 @@
CpuRegister temp3,
bool is_cmpxchg) {
// The only supported read barrier implementation is the Baker-style read barrier.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen->EmitReadBarrier(), kUseBakerReadBarrier);
X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
@@ -2447,7 +2448,7 @@
codegen->MarkGCCard(temp1, temp2, base, value, value_can_be_null);
Address field_addr(base, offset, TIMES_1, 0);
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen->EmitBakerReadBarrier()) {
// Need to make sure the reference stored in the field is a to-space
// one before attempting the CAS or the CAS could fail incorrectly.
codegen->GenerateReferenceLoadWithBakerReadBarrier(
@@ -2556,7 +2557,7 @@
CpuRegister new_value_reg = new_value.AsRegister<CpuRegister>();
CpuRegister temp1 = locations->GetTemp(temp1_index).AsRegister<CpuRegister>();
CpuRegister temp2 = locations->GetTemp(temp2_index).AsRegister<CpuRegister>();
- CpuRegister temp3 = gUseReadBarrier
+ CpuRegister temp3 = codegen->EmitReadBarrier()
? locations->GetTemp(temp3_index).AsRegister<CpuRegister>()
: CpuRegister(kNoRegister);
DCHECK(RegsAreAllDifferent({base, offset, temp1, temp2, temp3}));
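The conditional temp3 must stay in sync with CreateUnsafeCASLocations, which only adds the extra temporaries when read barriers are emitted; otherwise GetTemp(temp3_index) would read past the allocated temps. A hedged restatement of that invariant (not code from this change):

if (codegen->EmitReadBarrier()) {
  // The extra temporaries were added by CreateUnsafeCASLocations.
  DCHECK_LT(temp3_index, locations->GetTempCount());
}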
@@ -2624,7 +2625,7 @@
void IntrinsicCodeGeneratorX86_64::VisitJdkUnsafeCompareAndSetObject(HInvoke* invoke) {
// The only supported read barrier implementation is the Baker-style read barrier.
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen_->EmitReadBarrier(), kUseBakerReadBarrier);
GenCAS(DataType::Type::kReference, invoke, codegen_);
}
@@ -3128,7 +3129,7 @@
SlowPathCode* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
codegen_->AddSlowPath(slow_path);
- if (gUseReadBarrier) {
+ if (codegen_->EmitReadBarrier()) {
// Check self->GetWeakRefAccessEnabled().
ThreadOffset64 offset = Thread::WeakRefAccessEnabledOffset<kX86_64PointerSize>();
__ gs()->cmpl(Address::Absolute(offset, /* no_rip= */ true),
@@ -3150,7 +3151,7 @@
// Load the value from the field.
uint32_t referent_offset = mirror::Reference::ReferentOffset().Uint32Value();
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
codegen_->GenerateFieldLoadWithBakerReadBarrier(invoke,
out,
obj.AsRegister<CpuRegister>(),
@@ -3169,7 +3170,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitReferenceRefersTo(HInvoke* invoke) {
- IntrinsicVisitor::CreateReferenceRefersToLocations(invoke);
+ IntrinsicVisitor::CreateReferenceRefersToLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitReferenceRefersTo(HInvoke* invoke) {
@@ -3191,7 +3192,7 @@
__ cmpl(out, other);
- if (gUseReadBarrier) {
+ if (codegen_->EmitReadBarrier()) {
DCHECK(kUseBakerReadBarrier);
NearLabel calculate_result;
@@ -3771,7 +3772,7 @@
Location::RegisterLocation(target.object),
Address(field, ArtField::DeclaringClassOffset()),
/*fixup_label=*/nullptr,
- GetCompilerReadBarrierOption());
+ codegen->GetCompilerReadBarrierOption());
}
}
} else {
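GetCompilerReadBarrierOption() changes from a free function keyed off the global flag to a method on the code generator; presumably it reduces to:

// Presumed definition (an assumption; the actual method is outside this excerpt):
ReadBarrierOption CodeGenerator::GetCompilerReadBarrierOption() const {
  return EmitReadBarrier() ? kWithReadBarrier : kWithoutReadBarrier;
}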
@@ -3788,9 +3789,9 @@
}
}
-static bool HasVarHandleIntrinsicImplementation(HInvoke* invoke) {
+static bool HasVarHandleIntrinsicImplementation(HInvoke* invoke, CodeGeneratorX86_64* codegen) {
// The only supported read barrier implementation is the Baker-style read barrier.
- if (gUseReadBarrier && !kUseBakerReadBarrier) {
+ if (codegen->EmitNonBakerReadBarrier()) {
return false;
}
@@ -3839,8 +3840,8 @@
return locations;
}
-static void CreateVarHandleGetLocations(HInvoke* invoke) {
- if (!HasVarHandleIntrinsicImplementation(invoke)) {
+static void CreateVarHandleGetLocations(HInvoke* invoke, CodeGeneratorX86_64* codegen) {
+ if (!HasVarHandleIntrinsicImplementation(invoke, codegen)) {
return;
}
@@ -3876,7 +3877,7 @@
Location out = locations->Out();
if (type == DataType::Type::kReference) {
- if (gUseReadBarrier) {
+ if (codegen->EmitReadBarrier()) {
DCHECK(kUseBakerReadBarrier);
codegen->GenerateReferenceLoadWithBakerReadBarrier(
invoke, out, CpuRegister(target.object), src, /* needs_null_check= */ false);
@@ -3900,7 +3901,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGet(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGet(HInvoke* invoke) {
@@ -3908,7 +3909,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAcquire(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAcquire(HInvoke* invoke) {
@@ -3917,7 +3918,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetOpaque(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetOpaque(HInvoke* invoke) {
@@ -3926,7 +3927,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetVolatile(HInvoke* invoke) {
- CreateVarHandleGetLocations(invoke);
+ CreateVarHandleGetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetVolatile(HInvoke* invoke) {
@@ -3934,8 +3935,8 @@
GenerateVarHandleGet(invoke, codegen_);
}
-static void CreateVarHandleSetLocations(HInvoke* invoke) {
- if (!HasVarHandleIntrinsicImplementation(invoke)) {
+static void CreateVarHandleSetLocations(HInvoke* invoke, CodeGeneratorX86_64* codegen) {
+ if (!HasVarHandleIntrinsicImplementation(invoke, codegen)) {
return;
}
@@ -4008,7 +4009,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleSet(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleSet(HInvoke* invoke) {
@@ -4016,7 +4017,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleSetOpaque(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleSetOpaque(HInvoke* invoke) {
@@ -4024,7 +4025,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleSetRelease(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleSetRelease(HInvoke* invoke) {
@@ -4032,15 +4033,16 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleSetVolatile(HInvoke* invoke) {
- CreateVarHandleSetLocations(invoke);
+ CreateVarHandleSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleSetVolatile(HInvoke* invoke) {
GenerateVarHandleSet(invoke, codegen_, /*is_volatile=*/ true, /*is_atomic=*/ true);
}
-static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke) {
- if (!HasVarHandleIntrinsicImplementation(invoke)) {
+static void CreateVarHandleCompareAndSetOrExchangeLocations(HInvoke* invoke,
+ CodeGeneratorX86_64* codegen) {
+ if (!HasVarHandleIntrinsicImplementation(invoke, codegen)) {
return;
}
@@ -4073,7 +4075,7 @@
// Need two temporaries for MarkGCCard.
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
- if (gUseReadBarrier) {
+ if (codegen->EmitReadBarrier()) {
// Need three temporaries for GenerateReferenceLoadWithBakerReadBarrier.
DCHECK(kUseBakerReadBarrier);
locations->AddTemp(Location::RequiresRegister());
@@ -4088,7 +4090,7 @@
CodeGeneratorX86_64* codegen,
bool is_cmpxchg,
bool byte_swap = false) {
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen->EmitReadBarrier(), kUseBakerReadBarrier);
X86_64Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4133,7 +4135,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleCompareAndSet(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleCompareAndSet(HInvoke* invoke) {
@@ -4141,7 +4143,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleWeakCompareAndSet(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleWeakCompareAndSet(HInvoke* invoke) {
@@ -4149,7 +4151,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleWeakCompareAndSetPlain(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleWeakCompareAndSetPlain(HInvoke* invoke) {
@@ -4157,7 +4159,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleWeakCompareAndSetAcquire(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleWeakCompareAndSetAcquire(HInvoke* invoke) {
@@ -4165,7 +4167,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleWeakCompareAndSetRelease(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleWeakCompareAndSetRelease(HInvoke* invoke) {
@@ -4173,7 +4175,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleCompareAndExchange(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleCompareAndExchange(HInvoke* invoke) {
@@ -4181,7 +4183,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleCompareAndExchangeAcquire(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleCompareAndExchangeAcquire(HInvoke* invoke) {
@@ -4189,15 +4191,15 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleCompareAndExchangeRelease(HInvoke* invoke) {
- CreateVarHandleCompareAndSetOrExchangeLocations(invoke);
+ CreateVarHandleCompareAndSetOrExchangeLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleCompareAndExchangeRelease(HInvoke* invoke) {
GenerateVarHandleCompareAndSetOrExchange(invoke, codegen_, /*is_cmpxchg=*/ true);
}
-static void CreateVarHandleGetAndSetLocations(HInvoke* invoke) {
- if (!HasVarHandleIntrinsicImplementation(invoke)) {
+static void CreateVarHandleGetAndSetLocations(HInvoke* invoke, CodeGeneratorX86_64* codegen) {
+ if (!HasVarHandleIntrinsicImplementation(invoke, codegen)) {
return;
}
@@ -4221,7 +4223,7 @@
// Need two temporaries for MarkGCCard.
locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
- if (gUseReadBarrier) {
+ if (codegen->EmitReadBarrier()) {
// Need a third temporary for GenerateReferenceLoadWithBakerReadBarrier.
DCHECK(kUseBakerReadBarrier);
locations->AddTemp(Location::RequiresRegister());
@@ -4270,7 +4272,7 @@
CpuRegister temp2 = locations->GetTemp(temp_count - 2).AsRegister<CpuRegister>();
CpuRegister valreg = value.AsRegister<CpuRegister>();
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen->EmitBakerReadBarrier()) {
codegen->GenerateReferenceLoadWithBakerReadBarrier(
invoke,
locations->GetTemp(temp_count - 3),
@@ -4339,8 +4341,8 @@
}
}
-static void CreateVarHandleGetAndBitwiseOpLocations(HInvoke* invoke) {
- if (!HasVarHandleIntrinsicImplementation(invoke)) {
+static void CreateVarHandleGetAndBitwiseOpLocations(HInvoke* invoke, CodeGeneratorX86_64* codegen) {
+ if (!HasVarHandleIntrinsicImplementation(invoke, codegen)) {
return;
}
@@ -4478,8 +4480,8 @@
}
}
-static void CreateVarHandleGetAndAddLocations(HInvoke* invoke) {
- if (!HasVarHandleIntrinsicImplementation(invoke)) {
+static void CreateVarHandleGetAndAddLocations(HInvoke* invoke, CodeGeneratorX86_64* codegen) {
+ if (!HasVarHandleIntrinsicImplementation(invoke, codegen)) {
return;
}
@@ -4650,7 +4652,7 @@
bool need_any_store_barrier,
bool need_any_any_barrier,
bool byte_swap = false) {
- DCHECK_IMPLIES(gUseReadBarrier, kUseBakerReadBarrier);
+ DCHECK_IMPLIES(codegen->EmitReadBarrier(), kUseBakerReadBarrier);
X86_64Assembler* assembler = codegen->GetAssembler();
LocationSummary* locations = invoke->GetLocations();
@@ -4705,7 +4707,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndSet(HInvoke* invoke) {
- CreateVarHandleGetAndSetLocations(invoke);
+ CreateVarHandleGetAndSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndSet(HInvoke* invoke) {
@@ -4718,7 +4720,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndSetAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndSetLocations(invoke);
+ CreateVarHandleGetAndSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndSetAcquire(HInvoke* invoke) {
@@ -4731,7 +4733,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndSetRelease(HInvoke* invoke) {
- CreateVarHandleGetAndSetLocations(invoke);
+ CreateVarHandleGetAndSetLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndSetRelease(HInvoke* invoke) {
@@ -4744,7 +4746,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndAdd(HInvoke* invoke) {
- CreateVarHandleGetAndAddLocations(invoke);
+ CreateVarHandleGetAndAddLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndAdd(HInvoke* invoke) {
@@ -4757,7 +4759,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndAddAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndAddLocations(invoke);
+ CreateVarHandleGetAndAddLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndAddAcquire(HInvoke* invoke) {
@@ -4770,7 +4772,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndAddRelease(HInvoke* invoke) {
- CreateVarHandleGetAndAddLocations(invoke);
+ CreateVarHandleGetAndAddLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndAddRelease(HInvoke* invoke) {
@@ -4783,7 +4785,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndBitwiseAnd(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndBitwiseAnd(HInvoke* invoke) {
@@ -4796,7 +4798,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndBitwiseAndAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndBitwiseAndAcquire(HInvoke* invoke) {
@@ -4809,7 +4811,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndBitwiseAndRelease(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndBitwiseAndRelease(HInvoke* invoke) {
@@ -4822,7 +4824,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndBitwiseOr(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndBitwiseOr(HInvoke* invoke) {
@@ -4835,7 +4837,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndBitwiseOrAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndBitwiseOrAcquire(HInvoke* invoke) {
@@ -4848,7 +4850,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndBitwiseOrRelease(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndBitwiseOrRelease(HInvoke* invoke) {
@@ -4861,7 +4863,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndBitwiseXor(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndBitwiseXor(HInvoke* invoke) {
@@ -4874,7 +4876,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndBitwiseXorAcquire(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndBitwiseXorAcquire(HInvoke* invoke) {
@@ -4887,7 +4889,7 @@
}
void IntrinsicLocationsBuilderX86_64::VisitVarHandleGetAndBitwiseXorRelease(HInvoke* invoke) {
- CreateVarHandleGetAndBitwiseOpLocations(invoke);
+ CreateVarHandleGetAndBitwiseOpLocations(invoke, codegen_);
}
void IntrinsicCodeGeneratorX86_64::VisitVarHandleGetAndBitwiseXorRelease(HInvoke* invoke) {
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 21b3c85..16045d4 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -300,7 +300,7 @@
#ifdef ART_ENABLE_CODEGEN_arm
case OptimizationPass::kInstructionSimplifierArm:
DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
- opt = new (allocator) arm::InstructionSimplifierArm(graph, stats);
+ opt = new (allocator) arm::InstructionSimplifierArm(graph, codegen, stats);
break;
case OptimizationPass::kCriticalNativeAbiFixupArm:
DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
@@ -310,7 +310,7 @@
#ifdef ART_ENABLE_CODEGEN_arm64
case OptimizationPass::kInstructionSimplifierArm64:
DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
- opt = new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
+ opt = new (allocator) arm64::InstructionSimplifierArm64(graph, codegen, stats);
break;
#endif
#ifdef ART_ENABLE_CODEGEN_riscv64
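Both arch-specific simplifier passes now take the code generator so they can query the read-barrier configuration instead of the global. The matching constructor declarations (outside this excerpt) presumably become:

// Presumed updated declarations; the header changes are not part of this excerpt.
InstructionSimplifierArm(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats);
InstructionSimplifierArm64(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats);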
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index e1cbbe5..0069a20 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -415,7 +415,7 @@
std::string("isa:") + GetInstructionSetString(features->GetInstructionSet());
std::string features_string = "isa_features:" + features->GetFeatureString();
std::string read_barrier_type = "none";
- if (gUseReadBarrier) {
+ if (compiler_options.EmitReadBarrier()) {
if (art::kUseBakerReadBarrier)
read_barrier_type = "baker";
else if (art::kUseTableLookupReadBarrier)
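The else-branch is truncated by the diff context; the chain presumably completes as in the pre-existing code:

// Presumed full selection logic (tail truncated by the hunk):
std::string read_barrier_type = "none";
if (compiler_options.EmitReadBarrier()) {
  if (art::kUseBakerReadBarrier)
    read_barrier_type = "baker";
  else if (art::kUseTableLookupReadBarrier)
    read_barrier_type = "tablelookup";
}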
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 53ad2b1..ba6e109 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -669,7 +669,7 @@
}
case DataType::Type::kReference: {
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
last_visited_latency_ = kArmLoadWithBakerReadBarrierLatency;
} else {
if (index->IsConstant()) {
@@ -937,7 +937,7 @@
break;
case DataType::Type::kReference:
- if (gUseReadBarrier && kUseBakerReadBarrier) {
+ if (codegen_->EmitBakerReadBarrier()) {
last_visited_internal_latency_ = kArmMemoryLoadLatency + kArmIntegerOpLatency;
last_visited_latency_ = kArmMemoryLoadLatency;
} else {