Revert "ARM/ARM64: Use introspection marking for JITted code."
This reverts commit 450f1d0fa0c40198e63c3e016f02e40ac854b0cb.

Reason for revert: breaks the heap poisoning configuration.
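
For context, the reverted change had the JIT keep a per-method map from the
32-bit Baker read barrier "custom data" to a shared slow-path entry label
(jit_baker_read_barrier_slow_paths_): EmitBakerReadBarrierCbnz() /
EmitBakerReadBarrierBne() branched to the label for that encoding, and
Finalize() bound each label once and compiled the thunk there via
CompileBakerReadBarrierThunk(), while AOT kept using link-time patched
placeholder branches. The standalone C++ sketch below only illustrates that
dedup-and-emit-at-finalize idea; the names (Label, JitThunkMap, BranchTarget,
Finalize) are made up for illustration and this is not ART code.

  // Standalone sketch, not ART code: models per-method slow path sharing.
  #include <cstdint>
  #include <cstdio>
  #include <map>

  struct Label { int bound_at = -1; };  // Code offset once bound, -1 if unbound.

  class JitThunkMap {
   public:
    // Branches with the same encoded custom data reuse one entry label,
    // so each thunk is emitted at most once per method.
    Label* BranchTarget(uint32_t custom_data) { return &slow_paths_[custom_data]; }

    // At Finalize() time, bind every shared label and emit its thunk code.
    void Finalize(int* code_offset) {
      for (auto& entry : slow_paths_) {
        entry.second.bound_at = *code_offset;
        *code_offset += 16;  // Pretend each thunk is 16 bytes of code.
        std::printf("thunk 0x%x at %d\n",
                    static_cast<unsigned>(entry.first), entry.second.bound_at);
      }
    }

   private:
    std::map<uint32_t, Label> slow_paths_;  // custom data -> shared entry label.
  };

  int main() {
    JitThunkMap thunks;
    int code_offset = 12;           // Three 4-byte branch sites emitted so far.
    thunks.BranchTarget(0x10u);     // First use of encoding 0x10: new label.
    thunks.BranchTarget(0x10u);     // Reused: same label, no extra thunk.
    thunks.BranchTarget(0x20u);     // Different encoding: its own label.
    thunks.Finalize(&code_offset);  // Emits exactly two thunks.
    return 0;
  }
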
Bug: 36141117
Change-Id: I198c20ca1db6d7d7602aa5318616e2b149de8772
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 8a5cbca..760b1dd 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1403,9 +1403,7 @@
jit_string_patches_(StringReferenceValueComparator(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
- jit_baker_read_barrier_slow_paths_(std::less<uint32_t>(),
- graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Save the link register (containing the return address) to mimic Quick.
AddAllocatedRegister(LocationFrom(lr));
}
@@ -1420,16 +1418,6 @@
void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
EmitJumpTables();
-
- // Emit JIT baker read barrier slow paths.
- DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
- for (auto& entry : jit_baker_read_barrier_slow_paths_) {
- uint32_t encoded_data = entry.first;
- vixl::aarch64::Label* slow_path_entry = &entry.second.label;
- __ Bind(slow_path_entry);
- CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
- }
-
// Ensure we emit the literal pool.
__ FinalizeCode();
@@ -4746,18 +4734,9 @@
return NewPcRelativePatch(&dex_file, string_index.index_, adrp_label, &string_bss_entry_patches_);
}
-void CodeGeneratorARM64::EmitBakerReadBarrierCbnz(uint32_t custom_data) {
- ExactAssemblyScope guard(GetVIXLAssembler(), 1 * vixl::aarch64::kInstructionSize);
- if (Runtime::Current()->UseJitCompilation()) {
- auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
- vixl::aarch64::Label* slow_path_entry = &it->second.label;
- __ cbnz(mr, slow_path_entry);
- } else {
- baker_read_barrier_patches_.emplace_back(custom_data);
- vixl::aarch64::Label* cbnz_label = &baker_read_barrier_patches_.back().label;
- __ bind(cbnz_label);
- __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
- }
+vixl::aarch64::Label* CodeGeneratorARM64::NewBakerReadBarrierPatch(uint32_t custom_data) {
+ baker_read_barrier_patches_.emplace_back(custom_data);
+ return &baker_read_barrier_patches_.back().label;
}
vixl::aarch64::Label* CodeGeneratorARM64::NewPcRelativePatch(
@@ -6276,14 +6255,14 @@
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used.
- if (kBakerReadBarrierLinkTimeThunksEnableForGcRoots) {
+ if (kBakerReadBarrierLinkTimeThunksEnableForGcRoots &&
+ !Runtime::Current()->UseJitCompilation()) {
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in
// the Marking Register) to decide whether we need to enter
// the slow path to mark the GC root.
//
- // We use shared thunks for the slow path; shared within the method
- // for JIT, across methods for AOT. That thunk checks the reference
- // and jumps to the entrypoint if needed.
+ // We use link-time generated thunks for the slow path. That thunk
+ // checks the reference and jumps to the entrypoint if needed.
//
// lr = &return_address;
// GcRoot<mirror::Object> root = *(obj+offset); // Original reference load.
@@ -6297,18 +6276,20 @@
DCHECK(temps.IsAvailable(ip1));
temps.Exclude(ip0, ip1);
uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode());
+ vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
- ExactAssemblyScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize);
+ EmissionCheckScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize);
vixl::aarch64::Label return_address;
__ adr(lr, &return_address);
if (fixup_label != nullptr) {
- __ bind(fixup_label);
+ __ Bind(fixup_label);
}
static_assert(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET == -8,
"GC root LDR must be 2 instruction (8B) before the return address label.");
__ ldr(root_reg, MemOperand(obj.X(), offset));
- EmitBakerReadBarrierCbnz(custom_data);
- __ bind(&return_address);
+ __ Bind(cbnz_label);
+ __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
+ __ Bind(&return_address);
} else {
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in
// the Marking Register) to decide whether we need to enter
@@ -6380,17 +6361,18 @@
DCHECK(kEmitCompilerReadBarrier);
DCHECK(kUseBakerReadBarrier);
- if (kBakerReadBarrierLinkTimeThunksEnableForFields && !use_load_acquire) {
+ if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
+ !use_load_acquire &&
+ !Runtime::Current()->UseJitCompilation()) {
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
// Marking Register) to decide whether we need to enter the slow
// path to mark the reference. Then, in the slow path, check the
// gray bit in the lock word of the reference's holder (`obj`) to
// decide whether to mark `ref` or not.
//
- // We use shared thunks for the slow path; shared within the method
- // for JIT, across methods for AOT. That thunk checks the holder
- // and jumps to the entrypoint if needed. If the holder is not gray,
- // it creates a fake dependency and returns to the LDR instruction.
+ // We use link-time generated thunks for the slow path. That thunk checks
+ // the holder and jumps to the entrypoint if needed. If the holder is not
+ // gray, it creates a fake dependency and returns to the LDR instruction.
//
// lr = &gray_return_address;
// if (mr) { // Thread::Current()->GetIsGcMarking()
@@ -6416,13 +6398,15 @@
DCHECK(temps.IsAvailable(ip1));
temps.Exclude(ip0, ip1);
uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode());
+ vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
{
- ExactAssemblyScope guard(GetVIXLAssembler(),
+ EmissionCheckScope guard(GetVIXLAssembler(),
(kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
vixl::aarch64::Label return_address;
__ adr(lr, &return_address);
- EmitBakerReadBarrierCbnz(custom_data);
+ __ Bind(cbnz_label);
+ __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
static_assert(BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
"Field LDR must be 1 instruction (4B) before the return address label; "
" 2 instructions (8B) for heap poisoning.");
@@ -6432,7 +6416,7 @@
MaybeRecordImplicitNullCheck(instruction);
}
GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
- __ bind(&return_address);
+ __ Bind(&return_address);
}
MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
return;
@@ -6468,17 +6452,17 @@
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
size_t scale_factor = DataType::SizeShift(DataType::Type::kReference);
- if (kBakerReadBarrierLinkTimeThunksEnableForArrays) {
+ if (kBakerReadBarrierLinkTimeThunksEnableForArrays &&
+ !Runtime::Current()->UseJitCompilation()) {
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
// Marking Register) to decide whether we need to enter the slow
// path to mark the reference. Then, in the slow path, check the
// gray bit in the lock word of the reference's holder (`obj`) to
// decide whether to mark `ref` or not.
//
- // We use shared thunks for the slow path; shared within the method
- // for JIT, across methods for AOT. That thunk checks the holder
- // and jumps to the entrypoint if needed. If the holder is not gray,
- // it creates a fake dependency and returns to the LDR instruction.
+ // We use link-time generated thunks for the slow path. That thunk checks
+ // the holder and jumps to the entrypoint if needed. If the holder is not
+ // gray, it creates a fake dependency and returns to the LDR instruction.
//
// lr = &gray_return_address;
// if (mr) { // Thread::Current()->GetIsGcMarking()
@@ -6499,21 +6483,23 @@
DCHECK(temps.IsAvailable(ip1));
temps.Exclude(ip0, ip1);
uint32_t custom_data = EncodeBakerReadBarrierArrayData(temp.GetCode());
+ vixl::aarch64::Label* cbnz_label = NewBakerReadBarrierPatch(custom_data);
__ Add(temp.X(), obj.X(), Operand(data_offset));
{
- ExactAssemblyScope guard(GetVIXLAssembler(),
+ EmissionCheckScope guard(GetVIXLAssembler(),
(kPoisonHeapReferences ? 4u : 3u) * vixl::aarch64::kInstructionSize);
vixl::aarch64::Label return_address;
__ adr(lr, &return_address);
- EmitBakerReadBarrierCbnz(custom_data);
+ __ Bind(cbnz_label);
+ __ cbnz(mr, static_cast<int64_t>(0)); // Placeholder, patched at link-time.
static_assert(BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET == (kPoisonHeapReferences ? -8 : -4),
"Array LDR must be 1 instruction (4B) before the return address label; "
" 2 instructions (8B) for heap poisoning.");
__ ldr(ref_reg, MemOperand(temp.X(), index_reg.X(), LSL, scale_factor));
DCHECK(!needs_null_check); // The thunk cannot handle the null check.
GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
- __ bind(&return_address);
+ __ Bind(&return_address);
}
MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
return;
@@ -7002,12 +6988,7 @@
UNREACHABLE();
}
- // For JIT, the slow path is considered part of the compiled method,
- // so JIT should pass null as `debug_name`. Tests may not have a runtime.
- DCHECK(Runtime::Current() == nullptr ||
- !Runtime::Current()->UseJitCompilation() ||
- debug_name == nullptr);
- if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
+ if (GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
switch (kind) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index c07d1ea..93bab31 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -619,9 +619,9 @@
dex::StringIndex string_index,
vixl::aarch64::Label* adrp_label = nullptr);
- // Emit the CBNZ instruction for baker read barrier and record
- // the associated patch for AOT or slow path for JIT.
- void EmitBakerReadBarrierCbnz(uint32_t custom_data);
+ // Add a new baker read barrier patch and return the label to be bound
+ // before the CBNZ instruction.
+ vixl::aarch64::Label* NewBakerReadBarrierPatch(uint32_t custom_data);
vixl::aarch64::Literal<uint32_t>* DeduplicateBootImageAddressLiteral(uint64_t address);
vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
@@ -928,19 +928,6 @@
// Patches for class literals in JIT compiled code.
TypeToLiteralMap jit_class_patches_;
- // Baker read barrier slow paths, mapping custom data (uint32_t) to label.
- // Wrap the label to work around vixl::aarch64::Label being non-copyable
- // and non-moveable and as such unusable in ArenaSafeMap<>.
- struct LabelWrapper {
- LabelWrapper(const LabelWrapper& src)
- : label() {
- DCHECK(!src.label.IsLinked() && !src.label.IsBound());
- }
- LabelWrapper() = default;
- vixl::aarch64::Label label;
- };
- ArenaSafeMap<uint32_t, LabelWrapper> jit_baker_read_barrier_slow_paths_;
-
friend class linker::Arm64RelativePatcherTest;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
};
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 836a989..6d6d1a2 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -108,6 +108,14 @@
// Marker that code is yet to be, and must, be implemented.
#define TODO_VIXL32(level) LOG(level) << __PRETTY_FUNCTION__ << " unimplemented "
+static inline void EmitPlaceholderBne(CodeGeneratorARMVIXL* codegen, vixl32::Label* patch_label) {
+ ExactAssemblyScope eas(codegen->GetVIXLAssembler(), kMaxInstructionSizeInBytes);
+ __ bind(patch_label);
+ vixl32::Label placeholder_label;
+ __ b(ne, EncodingSize(Wide), &placeholder_label); // Placeholder, patched at link-time.
+ __ bind(&placeholder_label);
+}
+
static inline bool CanEmitNarrowLdr(vixl32::Register rt, vixl32::Register rn, uint32_t offset) {
return rt.IsLow() && rn.IsLow() && offset < 32u;
}
@@ -2344,9 +2352,7 @@
jit_string_patches_(StringReferenceValueComparator(),
graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
jit_class_patches_(TypeReferenceValueComparator(),
- graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
- jit_baker_read_barrier_slow_paths_(std::less<uint32_t>(),
- graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
+ graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
// Always save the LR register to mimic Quick.
AddAllocatedRegister(Location::RegisterLocation(LR));
// Give D30 and D31 as scratch register to VIXL. The register allocator only works on
@@ -2402,16 +2408,6 @@
void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
FixJumpTables();
-
- // Emit JIT baker read barrier slow paths.
- DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
- for (auto& entry : jit_baker_read_barrier_slow_paths_) {
- uint32_t encoded_data = entry.first;
- vixl::aarch32::Label* slow_path_entry = &entry.second.label;
- __ Bind(slow_path_entry);
- CompileBakerReadBarrierThunk(*GetAssembler(), encoded_data, /* debug_name */ nullptr);
- }
-
GetAssembler()->FinalizeCode();
CodeGenerator::Finalize(allocator);
@@ -8796,14 +8792,14 @@
if (kUseBakerReadBarrier) {
// Fast path implementation of art::ReadBarrier::BarrierForRoot when
// Baker's read barrier are used.
- if (kBakerReadBarrierLinkTimeThunksEnableForGcRoots) {
+ if (kBakerReadBarrierLinkTimeThunksEnableForGcRoots &&
+ !Runtime::Current()->UseJitCompilation()) {
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in
// the Marking Register) to decide whether we need to enter
// the slow path to mark the GC root.
//
- // We use shared thunks for the slow path; shared within the method
- // for JIT, across methods for AOT. That thunk checks the reference
- // and jumps to the entrypoint if needed.
+ // We use link-time generated thunks for the slow path. That thunk
+ // checks the reference and jumps to the entrypoint if needed.
//
// lr = &return_address;
// GcRoot<mirror::Object> root = *(obj+offset); // Original reference load.
@@ -8816,6 +8812,7 @@
temps.Exclude(ip);
bool narrow = CanEmitNarrowLdr(root_reg, obj, offset);
uint32_t custom_data = EncodeBakerReadBarrierGcRootData(root_reg.GetCode(), narrow);
+ vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
vixl::EmissionCheckScope guard(GetVIXLAssembler(), 4 * vixl32::kMaxInstructionSizeInBytes);
vixl32::Label return_address;
@@ -8826,7 +8823,7 @@
DCHECK_LT(offset, kReferenceLoadMinFarOffset);
ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
__ ldr(EncodingSize(narrow ? Narrow : Wide), root_reg, MemOperand(obj, offset));
- EmitBakerReadBarrierBne(custom_data);
+ EmitPlaceholderBne(this, bne_label);
__ Bind(&return_address);
DCHECK_EQ(old_offset - GetVIXLAssembler()->GetBuffer()->GetCursorOffset(),
narrow ? BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET
@@ -8889,17 +8886,17 @@
DCHECK(kEmitCompilerReadBarrier);
DCHECK(kUseBakerReadBarrier);
- if (kBakerReadBarrierLinkTimeThunksEnableForFields) {
+ if (kBakerReadBarrierLinkTimeThunksEnableForFields &&
+ !Runtime::Current()->UseJitCompilation()) {
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
// Marking Register) to decide whether we need to enter the slow
// path to mark the reference. Then, in the slow path, check the
// gray bit in the lock word of the reference's holder (`obj`) to
// decide whether to mark `ref` or not.
//
- // We use shared thunks for the slow path; shared within the method
- // for JIT, across methods for AOT. That thunk checks the holder
- // and jumps to the entrypoint if needed. If the holder is not gray,
- // it creates a fake dependency and returns to the LDR instruction.
+ // We use link-time generated thunks for the slow path. That thunk checks
+ // the holder and jumps to the entrypoint if needed. If the holder is not
+ // gray, it creates a fake dependency and returns to the LDR instruction.
//
// lr = &gray_return_address;
// if (mr) { // Thread::Current()->GetIsGcMarking()
@@ -8928,6 +8925,7 @@
UseScratchRegisterScope temps(GetVIXLAssembler());
temps.Exclude(ip);
uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode(), narrow);
+ vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
{
vixl::EmissionCheckScope guard(
@@ -8936,7 +8934,7 @@
vixl32::Label return_address;
EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
__ cmp(mr, Operand(0));
- EmitBakerReadBarrierBne(custom_data);
+ EmitPlaceholderBne(this, bne_label);
ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
__ ldr(EncodingSize(narrow ? Narrow : Wide), ref_reg, MemOperand(base, offset));
if (needs_null_check) {
@@ -8982,17 +8980,17 @@
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
ScaleFactor scale_factor = TIMES_4;
- if (kBakerReadBarrierLinkTimeThunksEnableForArrays) {
+ if (kBakerReadBarrierLinkTimeThunksEnableForArrays &&
+ !Runtime::Current()->UseJitCompilation()) {
// Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
// Marking Register) to decide whether we need to enter the slow
// path to mark the reference. Then, in the slow path, check the
// gray bit in the lock word of the reference's holder (`obj`) to
// decide whether to mark `ref` or not.
//
- // We use shared thunks for the slow path; shared within the method
- // for JIT, across methods for AOT. That thunk checks the holder
- // and jumps to the entrypoint if needed. If the holder is not gray,
- // it creates a fake dependency and returns to the LDR instruction.
+ // We use link-time generated thunks for the slow path. That thunk checks
+ // the holder and jumps to the entrypoint if needed. If the holder is not
+ // gray, it creates a fake dependency and returns to the LDR instruction.
//
// lr = &gray_return_address;
// if (mr) { // Thread::Current()->GetIsGcMarking()
@@ -9012,6 +9010,7 @@
UseScratchRegisterScope temps(GetVIXLAssembler());
temps.Exclude(ip);
uint32_t custom_data = EncodeBakerReadBarrierArrayData(data_reg.GetCode());
+ vixl32::Label* bne_label = NewBakerReadBarrierPatch(custom_data);
__ Add(data_reg, obj, Operand(data_offset));
{
@@ -9021,7 +9020,7 @@
vixl32::Label return_address;
EmitAdrCode adr(GetVIXLAssembler(), lr, &return_address);
__ cmp(mr, Operand(0));
- EmitBakerReadBarrierBne(custom_data);
+ EmitPlaceholderBne(this, bne_label);
ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
__ ldr(ref_reg, MemOperand(data_reg, index_reg, vixl32::LSL, scale_factor));
DCHECK(!needs_null_check); // The thunk cannot handle the null check.
@@ -9492,20 +9491,9 @@
return &patches->back();
}
-void CodeGeneratorARMVIXL::EmitBakerReadBarrierBne(uint32_t custom_data) {
- ExactAssemblyScope eas(GetVIXLAssembler(), 1 * k32BitT32InstructionSizeInBytes);
- if (Runtime::Current()->UseJitCompilation()) {
- auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
- vixl::aarch32::Label* slow_path_entry = &it->second.label;
- __ b(ne, EncodingSize(Wide), slow_path_entry);
- } else {
- baker_read_barrier_patches_.emplace_back(custom_data);
- vixl::aarch32::Label* patch_label = &baker_read_barrier_patches_.back().label;
- __ bind(patch_label);
- vixl32::Label placeholder_label;
- __ b(ne, EncodingSize(Wide), &placeholder_label); // Placeholder, patched at link-time.
- __ bind(&placeholder_label);
- }
+vixl32::Label* CodeGeneratorARMVIXL::NewBakerReadBarrierPatch(uint32_t custom_data) {
+ baker_read_barrier_patches_.emplace_back(custom_data);
+ return &baker_read_barrier_patches_.back().label;
}
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateBootImageAddressLiteral(uint32_t address) {
@@ -10097,12 +10085,7 @@
UNREACHABLE();
}
- // For JIT, the slow path is considered part of the compiled method,
- // so JIT should pass null as `debug_name`. Tests may not have a runtime.
- DCHECK(Runtime::Current() == nullptr ||
- !Runtime::Current()->UseJitCompilation() ||
- debug_name == nullptr);
- if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
+ if (GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
switch (kind) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index ef82f2e..fc8cf98 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -589,9 +589,9 @@
PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
dex::StringIndex string_index);
- // Emit the BNE instruction for baker read barrier and record
- // the associated patch for AOT or slow path for JIT.
- void EmitBakerReadBarrierBne(uint32_t custom_data);
+ // Add a new baker read barrier patch and return the label to be bound
+ // before the BNE instruction.
+ vixl::aarch32::Label* NewBakerReadBarrierPatch(uint32_t custom_data);
VIXLUInt32Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
VIXLUInt32Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
@@ -916,19 +916,6 @@
// Patches for class literals in JIT compiled code.
TypeToLiteralMap jit_class_patches_;
- // Baker read barrier slow paths, mapping custom data (uint32_t) to label.
- // Wrap the label to work around vixl::aarch32::Label being non-copyable
- // and non-moveable and as such unusable in ArenaSafeMap<>.
- struct LabelWrapper {
- LabelWrapper(const LabelWrapper& src)
- : label() {
- DCHECK(!src.label.IsReferenced() && !src.label.IsBound());
- }
- LabelWrapper() = default;
- vixl::aarch32::Label label;
- };
- ArenaSafeMap<uint32_t, LabelWrapper> jit_baker_read_barrier_slow_paths_;
-
friend class linker::Thumb2RelativePatcherTest;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARMVIXL);
};