Diffstat (limited to 'compiler')
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc    | 759
-rw-r--r--  compiler/optimizing/code_generator_arm64.h     |  67
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 261
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.h  |  22
-rw-r--r--  compiler/optimizing/code_generator_mips.cc     |  15
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc   |  15
-rw-r--r--  compiler/optimizing/code_generator_x86.cc      |  16
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc   |  16
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc        | 207
-rw-r--r--  compiler/optimizing/intrinsics_arm_vixl.cc     |   7
-rw-r--r--  compiler/optimizing/stack_map_stream.cc        |   8
-rw-r--r--  compiler/optimizing/stack_map_stream.h         |   2
-rw-r--r--  compiler/optimizing/stack_map_test.cc          |   6
13 files changed, 355 insertions, 1046 deletions
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 7aaa7bf65e..15e3d274a5 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -93,16 +93,6 @@ static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
// the offset explicitly.
constexpr uint32_t kReferenceLoadMinFarOffset = 16 * KB;
-// Some instructions have special requirements for a temporary, for example
-// LoadClass/kBssEntry and LoadString/kBssEntry for Baker read barrier require
-// a temp that's not R0 (to avoid an extra move) and Baker read barrier field
-// loads with large offsets need a fixed register to limit the number of link-time
-// thunks we generate. For these and similar cases, we want to reserve a specific
-// register that's neither callee-save nor an argument register. We choose x15.
-inline Location FixedTempLocation() {
- return Location::RegisterLocation(x15.GetCode());
-}
-
inline Condition ARM64Condition(IfCondition cond) {
switch (cond) {
case kCondEQ: return eq;
@@ -609,459 +599,6 @@ void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) {
}
}
-// Abstract base class for read barrier slow paths marking a reference
-// `ref`.
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class ReadBarrierMarkSlowPathBaseARM64 : public SlowPathCodeARM64 {
- protected:
- ReadBarrierMarkSlowPathBaseARM64(HInstruction* instruction, Location ref, Location entrypoint)
- : SlowPathCodeARM64(instruction), ref_(ref), entrypoint_(entrypoint) {
- DCHECK(kEmitCompilerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathBaseARM64"; }
-
- // Generate assembly code calling the read barrier marking runtime
- // entry point (ReadBarrierMarkRegX).
- void GenerateReadBarrierMarkRuntimeCall(CodeGenerator* codegen) {
- // No need to save live registers; it's taken care of by the
- // entrypoint. Also, there is no need to update the stack mask,
- // as this runtime call will not trigger a garbage collection.
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- DCHECK_NE(ref_.reg(), LR);
- DCHECK_NE(ref_.reg(), WSP);
- DCHECK_NE(ref_.reg(), WZR);
- // IP0 is used internally by the ReadBarrierMarkRegX entry point
- // as a temporary, it cannot be the entry point's input/output.
- DCHECK_NE(ref_.reg(), IP0);
- DCHECK(0 <= ref_.reg() && ref_.reg() < kNumberOfWRegisters) << ref_.reg();
- // "Compact" slow path, saving two moves.
- //
- // Instead of using the standard runtime calling convention (input
- // and output in W0):
- //
- // W0 <- ref
- // W0 <- ReadBarrierMark(W0)
- // ref <- W0
- //
- // we just use rX (the register containing `ref`) as input and output
- // of a dedicated entrypoint:
- //
- // rX <- ReadBarrierMarkRegX(rX)
- //
- if (entrypoint_.IsValid()) {
- arm64_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
- __ Blr(XRegisterFrom(entrypoint_));
- } else {
- // Entrypoint is not already loaded, load from the thread.
- int32_t entry_point_offset =
- Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(ref_.reg());
- // This runtime call does not require a stack map.
- arm64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset, instruction_, this);
- }
- }
-
- // The location (register) of the marked object reference.
- const Location ref_;
-
- // The location of the entrypoint if it is already loaded.
- const Location entrypoint_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathBaseARM64);
-};
-
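Aside: the per-register entrypoints that make this "compact" convention possible are looked up by register number. A minimal sketch of that lookup in plain C++, assuming the mark entrypoints are laid out contiguously in the Thread object (the offset constant below is hypothetical, not ART's real value):

#include <cstddef>
#include <cstdint>

// Illustrative stand-in for
// Thread::ReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(reg): one
// entrypoint per register (ReadBarrierMarkReg0, ReadBarrierMarkReg1, ...),
// so the thread-local offset is a linear function of the register number.
constexpr size_t kArm64PointerSize = 8;
constexpr size_t kFirstMarkRegEntryPointOffset = 0x600;  // hypothetical value

constexpr int32_t ReadBarrierMarkEntryPointOffset(int reg) {
  return static_cast<int32_t>(kFirstMarkRegEntryPointOffset +
                              static_cast<size_t>(reg) * kArm64PointerSize);
}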
-// Slow path loading `obj`'s lock word, loading a reference from
-// object `*(obj + offset + (index << scale_factor))` into `ref`, and
-// marking `ref` if `obj` is gray according to the lock word (Baker
-// read barrier). The field `obj.field` in the object `obj` holding
-// this reference does not get updated by this slow path after marking
-// (see LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64
-// below for that).
-//
-// This means that after the execution of this slow path, `ref` will
-// always be up-to-date, but `obj.field` may not; i.e., after the
-// flip, `ref` will be a to-space reference, but `obj.field` will
-// probably still be a from-space reference (unless it gets updated by
-// another thread, or if another thread installed another object
-// reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class LoadReferenceWithBakerReadBarrierSlowPathARM64 : public ReadBarrierMarkSlowPathBaseARM64 {
- public:
- LoadReferenceWithBakerReadBarrierSlowPathARM64(HInstruction* instruction,
- Location ref,
- Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- bool needs_null_check,
- bool use_load_acquire,
- Register temp,
- Location entrypoint = Location::NoLocation())
- : ReadBarrierMarkSlowPathBaseARM64(instruction, ref, entrypoint),
- obj_(obj),
- offset_(offset),
- index_(index),
- scale_factor_(scale_factor),
- needs_null_check_(needs_null_check),
- use_load_acquire_(use_load_acquire),
- temp_(temp) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE {
- return "LoadReferenceWithBakerReadBarrierSlowPathARM64";
- }
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- DCHECK(locations->CanCall());
- DCHECK(ref_.IsRegister()) << ref_;
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
- DCHECK(obj_.IsW());
- DCHECK_NE(ref_.reg(), LocationFrom(temp_).reg());
- DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsStaticFieldGet() ||
- instruction_->IsArrayGet() ||
- instruction_->IsArraySet() ||
- instruction_->IsInstanceOf() ||
- instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
- (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
- << "Unexpected instruction in read barrier marking slow path: "
- << instruction_->DebugName();
- // The read barrier instrumentation of object ArrayGet
- // instructions does not support the HIntermediateAddress
- // instruction.
- DCHECK(!(instruction_->IsArrayGet() &&
- instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
-
- // Temporary register `temp_`, used to store the lock word, must
- // not be IP0 nor IP1, as we may use them to emit the reference
- // load (in the call to GenerateRawReferenceLoad below), and we
- // need the lock word to still be in `temp_` after the reference
- // load.
- DCHECK_NE(LocationFrom(temp_).reg(), IP0);
- DCHECK_NE(LocationFrom(temp_).reg(), IP1);
-
- __ Bind(GetEntryLabel());
-
- // When using MaybeGenerateReadBarrierSlow, the read barrier call is
- // inserted after the original load. However, in fast path based
- // Baker's read barriers, we need to perform the load of
- // mirror::Object::monitor_ *before* the original reference load.
- // This load-load ordering is required by the read barrier.
- // The slow path (for Baker's algorithm) should look like:
- //
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // }
- //
- // Note: the original implementation in ReadBarrier::Barrier is
- // slightly more complex as it performs additional checks that we do
- // not do here for performance reasons.
-
- // /* int32_t */ monitor = obj->monitor_
- uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
- __ Ldr(temp_, HeapOperand(obj_, monitor_offset));
- if (needs_null_check_) {
- codegen->MaybeRecordImplicitNullCheck(instruction_);
- }
- // /* LockWord */ lock_word = LockWord(monitor)
- static_assert(sizeof(LockWord) == sizeof(int32_t),
- "art::LockWord and int32_t have different sizes.");
-
- // Introduce a dependency on the lock_word including rb_state,
- // to prevent load-load reordering, and without using
- // a memory barrier (which would be more expensive).
- // `obj` is unchanged by this operation, but its value now depends
- // on `temp`.
- __ Add(obj_.X(), obj_.X(), Operand(temp_.X(), LSR, 32));
-
- // The actual reference load.
- // A possible implicit null check has already been handled above.
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- arm64_codegen->GenerateRawReferenceLoad(instruction_,
- ref_,
- obj_,
- offset_,
- index_,
- scale_factor_,
- /* needs_null_check */ false,
- use_load_acquire_);
-
- // Mark the object `ref` when `obj` is gray.
- //
- // if (rb_state == ReadBarrier::GrayState())
- // ref = ReadBarrier::Mark(ref);
- //
- // Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Tbz(temp_, LockWord::kReadBarrierStateShift, GetExitLabel());
- GenerateReadBarrierMarkRuntimeCall(codegen);
-
- __ B(GetExitLabel());
- }
-
- private:
- // The register containing the object holding the marked object reference field.
- Register obj_;
- // The offset, index and scale factor to access the reference in `obj_`.
- uint32_t offset_;
- Location index_;
- size_t scale_factor_;
- // Is a null check required?
- bool needs_null_check_;
- // Should this reference load use Load-Acquire semantics?
- bool use_load_acquire_;
- // A temporary register used to hold the lock word of `obj_`.
- Register temp_;
-
- DISALLOW_COPY_AND_ASSIGN(LoadReferenceWithBakerReadBarrierSlowPathARM64);
-};
-
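Two tricks in the slow path above are worth spelling out: the artificial address dependency that replaces a load fence, and the single-bit gray check. A plain C++ sketch (not ART code; a real compiler could fold the addition away, which is why the generated code pins it as an ADD with an LSR #32 operand):

#include <cstdint>

// The lock word sits in the low 32 bits of `temp`, so (temp >> 32) is zero:
// the ADD leaves `obj` numerically unchanged, but the address of the
// following reference load now depends on the lock-word load, which orders
// the two loads without a memory barrier.
inline uint64_t AddLockWordDependency(uint64_t obj, uint64_t temp) {
  return obj + (temp >> 32);
}

// With WhiteState() == 0 and GrayState() == 1, testing one bit of the lock
// word (what the TBZ does) decides gray vs. white.
inline bool IsGray(uint32_t lock_word, uint32_t read_barrier_state_shift) {
  return ((lock_word >> read_barrier_state_shift) & 1u) != 0;
}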
-// Slow path loading `obj`'s lock word, loading a reference from
-// object `*(obj + offset + (index << scale_factor))` into `ref`, and
-// marking `ref` if `obj` is gray according to the lock word (Baker
-// read barrier). If needed, this slow path also atomically updates
-// the field `obj.field` in the object `obj` holding this reference
-// after marking (contrary to
-// LoadReferenceWithBakerReadBarrierSlowPathARM64 above, which never
-// tries to update `obj.field`).
-//
-// This means that after the execution of this slow path, both `ref`
-// and `obj.field` will be up-to-date; i.e., after the flip, both will
-// hold the same to-space reference (unless another thread installed
-// another object reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64
- : public ReadBarrierMarkSlowPathBaseARM64 {
- public:
- LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
- HInstruction* instruction,
- Location ref,
- Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- bool needs_null_check,
- bool use_load_acquire,
- Register temp,
- Location entrypoint = Location::NoLocation())
- : ReadBarrierMarkSlowPathBaseARM64(instruction, ref, entrypoint),
- obj_(obj),
- offset_(offset),
- index_(index),
- scale_factor_(scale_factor),
- needs_null_check_(needs_null_check),
- use_load_acquire_(use_load_acquire),
- temp_(temp) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE {
- return "LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64";
- }
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- Register ref_reg = WRegisterFrom(ref_);
- DCHECK(locations->CanCall());
- DCHECK(ref_.IsRegister()) << ref_;
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_.reg())) << ref_.reg();
- DCHECK(obj_.IsW());
- DCHECK_NE(ref_.reg(), LocationFrom(temp_).reg());
-
- // This slow path is only used by the UnsafeCASObject intrinsic at the moment.
- DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
- << "Unexpected instruction in read barrier marking and field updating slow path: "
- << instruction_->DebugName();
- DCHECK(instruction_->GetLocations()->Intrinsified());
- DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
- DCHECK_EQ(offset_, 0u);
- DCHECK_EQ(scale_factor_, 0u);
- DCHECK_EQ(use_load_acquire_, false);
- // The location of the offset of the marked reference field within `obj_`.
- Location field_offset = index_;
- DCHECK(field_offset.IsRegister()) << field_offset;
-
- // Temporary register `temp_`, used to store the lock word, must
- // not be IP0 nor IP1, as we may use them to emit the reference
- // load (in the call to GenerateRawReferenceLoad below), and we
- // need the lock word to still be in `temp_` after the reference
- // load.
- DCHECK_NE(LocationFrom(temp_).reg(), IP0);
- DCHECK_NE(LocationFrom(temp_).reg(), IP1);
-
- __ Bind(GetEntryLabel());
-
- // The implementation is similar to LoadReferenceWithBakerReadBarrierSlowPathARM64's:
- //
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // old_ref = ref;
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // compareAndSwapObject(obj, field_offset, old_ref, ref);
- // }
-
- // /* int32_t */ monitor = obj->monitor_
- uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
- __ Ldr(temp_, HeapOperand(obj_, monitor_offset));
- if (needs_null_check_) {
- codegen->MaybeRecordImplicitNullCheck(instruction_);
- }
- // /* LockWord */ lock_word = LockWord(monitor)
- static_assert(sizeof(LockWord) == sizeof(int32_t),
- "art::LockWord and int32_t have different sizes.");
-
- // Introduce a dependency on the lock_word including rb_state,
- // to prevent load-load reordering, and without using
- // a memory barrier (which would be more expensive).
- // `obj` is unchanged by this operation, but its value now depends
- // on `temp`.
- __ Add(obj_.X(), obj_.X(), Operand(temp_.X(), LSR, 32));
-
- // The actual reference load.
- // A possible implicit null check has already been handled above.
- CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- arm64_codegen->GenerateRawReferenceLoad(instruction_,
- ref_,
- obj_,
- offset_,
- index_,
- scale_factor_,
- /* needs_null_check */ false,
- use_load_acquire_);
-
- // Mark the object `ref` when `obj` is gray.
- //
- // if (rb_state == ReadBarrier::GrayState())
- // ref = ReadBarrier::Mark(ref);
- //
- // Given the numeric representation, it's enough to check the low bit of the rb_state.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Tbz(temp_, LockWord::kReadBarrierStateShift, GetExitLabel());
-
- // Save the old value of the reference before marking it.
- // Note that we cannot use IP to save the old reference, as IP is
- // used internally by the ReadBarrierMarkRegX entry point, and we
- // need the old reference after the call to that entry point.
- DCHECK_NE(LocationFrom(temp_).reg(), IP0);
- __ Mov(temp_.W(), ref_reg);
-
- GenerateReadBarrierMarkRuntimeCall(codegen);
-
- // If the new reference is different from the old reference,
- // update the field in the holder (`*(obj_ + field_offset)`).
- //
- // Note that this field could also hold a different object, if
- // another thread had concurrently changed it. In that case, the
- // LDXR/CMP/BNE sequence of instructions in the compare-and-set
- // (CAS) operation below would abort the CAS, leaving the field
- // as-is.
- __ Cmp(temp_.W(), ref_reg);
- __ B(eq, GetExitLabel());
-
-    // Update the holder's field atomically. This may fail if the
-    // mutator updates it before us, but that's OK. This is achieved
- // using a strong compare-and-set (CAS) operation with relaxed
- // memory synchronization ordering, where the expected value is
- // the old reference and the desired value is the new reference.
-
- MacroAssembler* masm = arm64_codegen->GetVIXLAssembler();
- UseScratchRegisterScope temps(masm);
-
- // Convenience aliases.
- Register base = obj_.W();
- Register offset = XRegisterFrom(field_offset);
- Register expected = temp_.W();
- Register value = ref_reg;
- Register tmp_ptr = temps.AcquireX(); // Pointer to actual memory.
- Register tmp_value = temps.AcquireW(); // Value in memory.
-
- __ Add(tmp_ptr, base.X(), Operand(offset));
-
- if (kPoisonHeapReferences) {
- arm64_codegen->GetAssembler()->PoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not poison `value`, as it is the same register as
- // `expected`, which has just been poisoned.
- } else {
- arm64_codegen->GetAssembler()->PoisonHeapReference(value);
- }
- }
-
- // do {
- // tmp_value = [tmp_ptr] - expected;
- // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
-
- vixl::aarch64::Label loop_head, comparison_failed, exit_loop;
- __ Bind(&loop_head);
- __ Ldxr(tmp_value, MemOperand(tmp_ptr));
- __ Cmp(tmp_value, expected);
- __ B(&comparison_failed, ne);
- __ Stxr(tmp_value, value, MemOperand(tmp_ptr));
- __ Cbnz(tmp_value, &loop_head);
- __ B(&exit_loop);
- __ Bind(&comparison_failed);
- __ Clrex();
- __ Bind(&exit_loop);
-
- if (kPoisonHeapReferences) {
- arm64_codegen->GetAssembler()->UnpoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not unpoison `value`, as it is the same register as
- // `expected`, which has just been unpoisoned.
- } else {
- arm64_codegen->GetAssembler()->UnpoisonHeapReference(value);
- }
- }
-
- __ B(GetExitLabel());
- }
-
- private:
- // The register containing the object holding the marked object reference field.
- const Register obj_;
- // The offset, index and scale factor to access the reference in `obj_`.
- uint32_t offset_;
- Location index_;
- size_t scale_factor_;
- // Is a null check required?
- bool needs_null_check_;
- // Should this reference load use Load-Acquire semantics?
- bool use_load_acquire_;
- // A temporary register used to hold the lock word of `obj_`; and
- // also to hold the original reference value, when the reference is
- // marked.
- const Register temp_;
-
- DISALLOW_COPY_AND_ASSIGN(LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64);
-};
-
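The CMP/B.eq fast exit plus the LDXR/STXR loop in the slow path above amount to a strong compare-and-set with relaxed ordering. A sketch of the equivalent operation with std::atomic (HeapRef is an illustrative stand-in for the 32-bit compressed reference):

#include <atomic>
#include <cstdint>

using HeapRef = uint32_t;  // stand-in for a compressed heap reference

void UpdateFieldAfterMark(std::atomic<HeapRef>* field,
                          HeapRef old_ref,
                          HeapRef marked_ref) {
  if (old_ref == marked_ref) {
    return;  // Reference was already up-to-date; mirrors the CMP/B.eq exit.
  }
  HeapRef expected = old_ref;
  // If another thread changed the field concurrently, the CAS fails and the
  // field is left as-is, exactly as described in the comments above.
  field->compare_exchange_strong(expected, marked_ref,
                                 std::memory_order_relaxed);
}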
// Slow path generating a read barrier for a heap reference.
class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
public:
@@ -1447,9 +984,12 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
case BakerReadBarrierKind::kGcRoot: {
DCHECK_GE(literal_offset, 4u);
uint32_t prev_insn = GetInsn(literal_offset - 4u);
- // LDR (immediate) with correct root_reg.
const uint32_t root_reg = BakerReadBarrierFirstRegField::Decode(encoded_data);
- CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg);
+ // Usually LDR (immediate) with correct root_reg but
+ // we may have a "MOV marked, old_value" for UnsafeCASObject.
+ if ((prev_insn & 0xffe0ffff) != (0x2a0003e0 | root_reg)) { // MOV?
+ CHECK_EQ(prev_insn & 0xffc0001fu, 0xb9400000u | root_reg); // LDR?
+ }
break;
}
default:
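For reference, the two encodings accepted by the check above, with the masks copied from the CHECKs (a sketch: 0x2a0003e0 is the 32-bit "MOV Wd, Wm" alias of ORR Wd, WZR, Wm, and 0xb9400000 is "LDR Wt, [Xn, #imm]"):

#include <cstdint>

bool IsExpectedGcRootInsn(uint32_t insn, uint32_t root_reg) {
  // MOV marked, old_value (emitted for UnsafeCASObject): all bits fixed
  // except Rm; Rd must be root_reg.
  bool is_mov = (insn & 0xffe0ffffu) == (0x2a0003e0u | root_reg);
  // LDR (immediate): opcode bits fixed; Rt must be root_reg.
  bool is_ldr = (insn & 0xffc0001fu) == (0xb9400000u | root_reg);
  return is_mov || is_ldr;
}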
@@ -1634,8 +1174,24 @@ void CodeGeneratorARM64::MarkGCCard(Register object, Register value, bool value_
if (value_can_be_null) {
__ Cbz(value, &done);
}
+ // Load the address of the card table into `card`.
__ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64PointerSize>().Int32Value()));
+ // Calculate the offset (in the card table) of the card corresponding to
+ // `object`.
__ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the STRB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + object >> kCardShift`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ Strb(card, MemOperand(card, temp.X()));
if (value_can_be_null) {
__ Bind(&done);
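The dual use of `card` condenses into a few lines of plain C++ (a minimal sketch, assuming a 1 KiB card size as in the comments; `biased_begin` stands for the biased card table base whose low byte equals kCardDirty):

#include <cstdint>

constexpr uintptr_t kCardShift = 10;  // 1 KiB cards (illustrative)

void MarkCard(uint8_t* biased_begin, uintptr_t object_addr) {
  // STRB card, [card, object >> kCardShift]: the low byte of the table
  // address (== kCardDirty by construction) is stored into the object's card.
  uint8_t dirty = static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_begin));
  biased_begin[object_addr >> kCardShift] = dirty;
}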
@@ -4710,7 +4266,7 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewStringBssEntryPatch(
}
void CodeGeneratorARM64::EmitBakerReadBarrierCbnz(uint32_t custom_data) {
- ExactAssemblyScope guard(GetVIXLAssembler(), 1 * vixl::aarch64::kInstructionSize);
+ DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
if (Runtime::Current()->UseJitCompilation()) {
auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
vixl::aarch64::Label* slow_path_entry = &it->second.label;
@@ -6256,7 +5812,7 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad(
__ bind(fixup_label);
}
static_assert(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET == -8,
- "GC root LDR must be 2 instruction (8B) before the return address label.");
+ "GC root LDR must be 2 instructions (8B) before the return address label.");
__ ldr(root_reg, MemOperand(obj.X(), offset));
EmitBakerReadBarrierCbnz(custom_data);
__ bind(&return_address);
@@ -6286,11 +5842,29 @@ void CodeGeneratorARM64::GenerateGcRootFieldLoad(
MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
}
+void CodeGeneratorARM64::GenerateUnsafeCasOldValueMovWithBakerReadBarrier(
+ vixl::aarch64::Register marked,
+ vixl::aarch64::Register old_value) {
+ DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(kUseBakerReadBarrier);
+
+ // Similar to the Baker RB path in GenerateGcRootFieldLoad(), with a MOV instead of LDR.
+ uint32_t custom_data = EncodeBakerReadBarrierGcRootData(marked.GetCode());
+
+ ExactAssemblyScope guard(GetVIXLAssembler(), 3 * vixl::aarch64::kInstructionSize);
+ vixl::aarch64::Label return_address;
+ __ adr(lr, &return_address);
+ static_assert(BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET == -8,
+ "GC root LDR must be 2 instructions (8B) before the return address label.");
+ __ mov(marked, old_value);
+ EmitBakerReadBarrierCbnz(custom_data);
+ __ bind(&return_address);
+}
+
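Conceptually, the three-instruction sequence emitted above behaves like the following sketch (`Object` and `Mark` are illustrative stand-ins; the real thunk is reached through the CBNZ on the marking register and returns via the ADR-initialized LR):

struct Object;
Object* Mark(Object* ref);  // stand-in for the runtime mark entrypoint

Object* MovOldValueWithReadBarrier(Object* old_value, bool gc_is_marking) {
  Object* marked = old_value;   // mov marked, old_value
  if (gc_is_marking) {          // cbnz mr, <introspection thunk>
    marked = Mark(marked);      // thunk treats `marked` like a loaded GC root
  }
  return marked;                // return_address:
}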
void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
- Register obj,
- uint32_t offset,
- Location maybe_temp,
+ vixl::aarch64::Register obj,
+ const vixl::aarch64::MemOperand& src,
bool needs_null_check,
bool use_load_acquire) {
DCHECK(kEmitCompilerReadBarrier);
@@ -6317,27 +5891,16 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
// HeapReference<mirror::Object> reference = *(obj+offset);
// gray_return_address:
- DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
- Register base = obj;
- if (use_load_acquire) {
- DCHECK(maybe_temp.IsRegister());
- base = WRegisterFrom(maybe_temp);
- __ Add(base, obj, offset);
- offset = 0u;
- } else if (offset >= kReferenceLoadMinFarOffset) {
- DCHECK(maybe_temp.IsRegister());
- base = WRegisterFrom(maybe_temp);
- static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2.");
- __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u)));
- offset &= (kReferenceLoadMinFarOffset - 1u);
- }
+ DCHECK(src.GetAddrMode() == vixl::aarch64::Offset);
+ DCHECK_ALIGNED(src.GetOffset(), sizeof(mirror::HeapReference<mirror::Object>));
+
UseScratchRegisterScope temps(GetVIXLAssembler());
DCHECK(temps.IsAvailable(ip0));
DCHECK(temps.IsAvailable(ip1));
temps.Exclude(ip0, ip1);
uint32_t custom_data = use_load_acquire
- ? EncodeBakerReadBarrierAcquireData(base.GetCode(), obj.GetCode())
- : EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode());
+ ? EncodeBakerReadBarrierAcquireData(src.GetBaseRegister().GetCode(), obj.GetCode())
+ : EncodeBakerReadBarrierFieldData(src.GetBaseRegister().GetCode(), obj.GetCode());
{
ExactAssemblyScope guard(GetVIXLAssembler(),
@@ -6350,10 +5913,10 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
" 2 instructions (8B) for heap poisoning.");
Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
if (use_load_acquire) {
- DCHECK_EQ(offset, 0u);
- __ ldar(ref_reg, MemOperand(base.X()));
+ DCHECK_EQ(src.GetOffset(), 0);
+ __ ldar(ref_reg, src);
} else {
- __ ldr(ref_reg, MemOperand(base.X(), offset));
+ __ ldr(ref_reg, src);
}
if (needs_null_check) {
MaybeRecordImplicitNullCheck(instruction);
@@ -6368,6 +5931,32 @@ void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* ins
MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
}
+void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ Register obj,
+ uint32_t offset,
+ Location maybe_temp,
+ bool needs_null_check,
+ bool use_load_acquire) {
+ DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
+ Register base = obj;
+ if (use_load_acquire) {
+ DCHECK(maybe_temp.IsRegister());
+ base = WRegisterFrom(maybe_temp);
+ __ Add(base, obj, offset);
+ offset = 0u;
+ } else if (offset >= kReferenceLoadMinFarOffset) {
+ DCHECK(maybe_temp.IsRegister());
+ base = WRegisterFrom(maybe_temp);
+ static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2.");
+ __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u)));
+ offset &= (kReferenceLoadMinFarOffset - 1u);
+ }
+ MemOperand src(base.X(), offset);
+ GenerateFieldLoadWithBakerReadBarrier(
+ instruction, ref, obj, src, needs_null_check, use_load_acquire);
+}
+
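The offset split performed by this overload keeps the LDR immediate below kReferenceLoadMinFarOffset, bounding the number of distinct link-time thunks. A sketch of the arithmetic (e.g. offset 0x4123 becomes base adjustment 0x4000 plus LDR offset 0x123):

#include <cstdint>

constexpr uint32_t kReferenceLoadMinFarOffset = 16 * 1024;  // 16 KB, as above

void SplitFarOffset(uint32_t offset, uint32_t* base_adjust, uint32_t* ldr_offset) {
  static_assert((kReferenceLoadMinFarOffset & (kReferenceLoadMinFarOffset - 1u)) == 0,
                "Expecting a power of 2.");
  *base_adjust = offset & ~(kReferenceLoadMinFarOffset - 1u);  // folded into ADD
  *ldr_offset = offset & (kReferenceLoadMinFarOffset - 1u);    // used by LDR
}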
void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
Register obj,
uint32_t data_offset,
@@ -6435,198 +6024,6 @@ void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(Location ref,
MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__, /* temp_loc */ LocationFrom(ip1));
}
-void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- Register temp,
- bool needs_null_check,
- bool use_load_acquire) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- // If we are emitting an array load, we should not be using a
- // Load Acquire instruction. In other words:
- // `instruction->IsArrayGet()` => `!use_load_acquire`.
- DCHECK(!instruction->IsArrayGet() || !use_load_acquire);
-
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
- // Marking Register) to decide whether we need to enter the slow
- // path to mark the reference. Then, in the slow path, check the
- // gray bit in the lock word of the reference's holder (`obj`) to
- // decide whether to mark `ref` or not.
- //
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // // Slow path.
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // }
- // } else {
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // }
-
- // Slow path marking the object `ref` when the GC is marking. The
- // entrypoint will be loaded by the slow path code.
- SlowPathCodeARM64* slow_path =
- new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARM64(
- instruction,
- ref,
- obj,
- offset,
- index,
- scale_factor,
- needs_null_check,
- use_load_acquire,
- temp);
- AddSlowPath(slow_path);
-
- __ Cbnz(mr, slow_path->GetEntryLabel());
- // Fast path: the GC is not marking: just load the reference.
- GenerateRawReferenceLoad(
- instruction, ref, obj, offset, index, scale_factor, needs_null_check, use_load_acquire);
- __ Bind(slow_path->GetExitLabel());
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
-}
-
-void CodeGeneratorARM64::UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- Register obj,
- Location field_offset,
- Register temp,
- bool needs_null_check,
- bool use_load_acquire) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- // If we are emitting an array load, we should not be using a
- // Load Acquire instruction. In other words:
- // `instruction->IsArrayGet()` => `!use_load_acquire`.
- DCHECK(!instruction->IsArrayGet() || !use_load_acquire);
-
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
- // Marking Register) to decide whether we need to enter the slow
- // path to update the reference field within `obj`. Then, in the
- // slow path, check the gray bit in the lock word of the reference's
- // holder (`obj`) to decide whether to mark `ref` and update the
- // field or not.
- //
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // // Slow path.
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *(obj + field_offset); // Reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // old_ref = ref;
- // entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // compareAndSwapObject(obj, field_offset, old_ref, ref);
- // }
- // }
-
- // Slow path updating the object reference at address `obj + field_offset`
- // when the GC is marking. The entrypoint will be loaded by the slow path code.
- SlowPathCodeARM64* slow_path =
- new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARM64(
- instruction,
- ref,
- obj,
- /* offset */ 0u,
- /* index */ field_offset,
- /* scale_factor */ 0u /* "times 1" */,
- needs_null_check,
- use_load_acquire,
- temp);
- AddSlowPath(slow_path);
-
- __ Cbnz(mr, slow_path->GetEntryLabel());
- // Fast path: the GC is not marking: nothing to do (the field is
- // up-to-date, and we don't need to load the reference).
- __ Bind(slow_path->GetExitLabel());
- MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
-}
-
-void CodeGeneratorARM64::GenerateRawReferenceLoad(HInstruction* instruction,
- Location ref,
- Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- bool needs_null_check,
- bool use_load_acquire) {
- DCHECK(obj.IsW());
- DataType::Type type = DataType::Type::kReference;
- Register ref_reg = RegisterFrom(ref, type);
-
- // If needed, vixl::EmissionCheckScope guards are used to ensure
- // that no pools are emitted between the load (macro) instruction
- // and MaybeRecordImplicitNullCheck.
-
- if (index.IsValid()) {
- // Load types involving an "index": ArrayGet,
- // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
- // intrinsics.
- if (use_load_acquire) {
- // UnsafeGetObjectVolatile intrinsic case.
- // Register `index` is not an index in an object array, but an
- // offset to an object reference field within object `obj`.
- DCHECK(instruction->IsInvoke()) << instruction->DebugName();
- DCHECK(instruction->GetLocations()->Intrinsified());
- DCHECK(instruction->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile)
- << instruction->AsInvoke()->GetIntrinsic();
- DCHECK_EQ(offset, 0u);
- DCHECK_EQ(scale_factor, 0u);
- DCHECK_EQ(needs_null_check, false);
- // /* HeapReference<mirror::Object> */ ref = *(obj + index)
- MemOperand field = HeapOperand(obj, XRegisterFrom(index));
- LoadAcquire(instruction, ref_reg, field, /* needs_null_check */ false);
- } else {
- // ArrayGet and UnsafeGetObject and UnsafeCASObject intrinsics cases.
- // /* HeapReference<mirror::Object> */ ref = *(obj + offset + (index << scale_factor))
- if (index.IsConstant()) {
- uint32_t computed_offset = offset + (Int64FromLocation(index) << scale_factor);
- EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- Load(type, ref_reg, HeapOperand(obj, computed_offset));
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- } else {
- UseScratchRegisterScope temps(GetVIXLAssembler());
- Register temp = temps.AcquireW();
- __ Add(temp, obj, offset);
- {
- EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- Load(type, ref_reg, HeapOperand(temp, XRegisterFrom(index), LSL, scale_factor));
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- }
- }
- }
- } else {
- // /* HeapReference<mirror::Object> */ ref = *(obj + offset)
- MemOperand field = HeapOperand(obj, offset);
- if (use_load_acquire) {
- // Implicit null checks are handled by CodeGeneratorARM64::LoadAcquire.
- LoadAcquire(instruction, ref_reg, field, needs_null_check);
- } else {
- EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
- Load(type, ref_reg, field);
- if (needs_null_check) {
- MaybeRecordImplicitNullCheck(instruction);
- }
- }
- }
-
- // Object* ref = ref_addr->AsMirrorPtr()
- GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
-}
-
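For orientation, the address computed by the removed raw reference load has the same shape across all of its cases (a trivial sketch; `scale_factor` is the log2 of the element size):

#include <cstddef>
#include <cstdint>

// ref = *(obj + offset + (index << scale_factor))
uintptr_t RawReferenceAddress(uintptr_t obj, uint32_t offset,
                              uintptr_t index, size_t scale_factor) {
  return obj + offset + (index << scale_factor);
}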
void CodeGeneratorARM64::MaybeGenerateMarkingRegisterCheck(int code, Location temp_loc) {
// The following condition is a compile-time one, so it does not have a run-time cost.
if (kEmitCompilerReadBarrier && kUseBakerReadBarrier && kIsDebugBuild) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 6a358a4306..4f6a44fe4d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -92,6 +92,16 @@ const vixl::aarch64::CPURegList runtime_reserved_core_registers =
((kEmitCompilerReadBarrier && kUseBakerReadBarrier) ? mr : vixl::aarch64::NoCPUReg),
vixl::aarch64::lr);
+// Some instructions have special requirements for a temporary, for example
+// LoadClass/kBssEntry and LoadString/kBssEntry for Baker read barrier require
+// a temp that's not R0 (to avoid an extra move) and Baker read barrier field
+// loads with large offsets need a fixed register to limit the number of link-time
+// thunks we generate. For these and similar cases, we want to reserve a specific
+// register that's neither callee-save nor an argument register. We choose x15.
+inline Location FixedTempLocation() {
+ return Location::RegisterLocation(vixl::aarch64::x15.GetCode());
+}
+
// Callee-save registers AAPCS64, without x19 (Thread Register) (nor
// x20 (Marking Register) when emitting Baker read barriers).
const vixl::aarch64::CPURegList callee_saved_core_registers(
@@ -661,6 +671,18 @@ class CodeGeneratorARM64 : public CodeGenerator {
uint32_t offset,
vixl::aarch64::Label* fixup_label,
ReadBarrierOption read_barrier_option);
+ // Generate MOV for the `old_value` in UnsafeCASObject and mark it with Baker read barrier.
+ void GenerateUnsafeCasOldValueMovWithBakerReadBarrier(vixl::aarch64::Register marked,
+ vixl::aarch64::Register old_value);
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference field load when Baker's read barriers are used.
+ // Overload suitable for Unsafe.getObject/-Volatile() intrinsic.
+ void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ vixl::aarch64::Register obj,
+ const vixl::aarch64::MemOperand& src,
+ bool needs_null_check,
+ bool use_load_acquire);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -678,51 +700,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
Location index,
vixl::aarch64::Register temp,
bool needs_null_check);
- // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
- // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
- //
- // Load the object reference located at the address
- // `obj + offset + (index << scale_factor)`, held by object `obj`, into
- // `ref`, and mark it if needed.
- void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- vixl::aarch64::Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- vixl::aarch64::Register temp,
- bool needs_null_check,
- bool use_load_acquire);
-
-  // Generate code checking whether the reference field at the
- // address `obj + field_offset`, held by object `obj`, needs to be
- // marked, and if so, marking it and updating the field within `obj`
- // with the marked value.
- //
- // This routine is used for the implementation of the
- // UnsafeCASObject intrinsic with Baker read barriers.
- //
- // This method has a structure similar to
- // GenerateReferenceLoadWithBakerReadBarrier, but note that argument
-  // `ref` is only used as a temporary here, and thus its value should not
- // be used afterwards.
- void UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- vixl::aarch64::Register obj,
- Location field_offset,
- vixl::aarch64::Register temp,
- bool needs_null_check,
- bool use_load_acquire);
-
- // Generate a heap reference load (with no read barrier).
- void GenerateRawReferenceLoad(HInstruction* instruction,
- Location ref,
- vixl::aarch64::Register obj,
- uint32_t offset,
- Location index,
- size_t scale_factor,
- bool needs_null_check,
- bool use_load_acquire);
// Emit code checking the status of the Marking Register, and
// aborting the program if MR does not match the value stored in the
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 3e63c2674c..f62421645e 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -786,160 +786,9 @@ class ReadBarrierMarkSlowPathBaseARMVIXL : public SlowPathCodeARMVIXL {
// Slow path loading `obj`'s lock word, loading a reference from
// object `*(obj + offset + (index << scale_factor))` into `ref`, and
// marking `ref` if `obj` is gray according to the lock word (Baker
-// read barrier). The field `obj.field` in the object `obj` holding
-// this reference does not get updated by this slow path after marking
-// (see LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL
-// below for that).
-//
-// This means that after the execution of this slow path, `ref` will
-// always be up-to-date, but `obj.field` may not; i.e., after the
-// flip, `ref` will be a to-space reference, but `obj.field` will
-// probably still be a from-space reference (unless it gets updated by
-// another thread, or if another thread installed another object
-// reference (different from `ref`) in `obj.field`).
-//
-// Argument `entrypoint` must be a register location holding the read
-// barrier marking runtime entry point to be invoked or an empty
-// location; in the latter case, the read barrier marking runtime
-// entry point will be loaded by the slow path code itself.
-class LoadReferenceWithBakerReadBarrierSlowPathARMVIXL : public ReadBarrierMarkSlowPathBaseARMVIXL {
- public:
- LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(HInstruction* instruction,
- Location ref,
- vixl32::Register obj,
- uint32_t offset,
- Location index,
- ScaleFactor scale_factor,
- bool needs_null_check,
- vixl32::Register temp,
- Location entrypoint = Location::NoLocation())
- : ReadBarrierMarkSlowPathBaseARMVIXL(instruction, ref, entrypoint),
- obj_(obj),
- offset_(offset),
- index_(index),
- scale_factor_(scale_factor),
- needs_null_check_(needs_null_check),
- temp_(temp) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
- }
-
- const char* GetDescription() const OVERRIDE {
- return "LoadReferenceWithBakerReadBarrierSlowPathARMVIXL";
- }
-
- void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
- LocationSummary* locations = instruction_->GetLocations();
- vixl32::Register ref_reg = RegisterFrom(ref_);
- DCHECK(locations->CanCall());
- DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg.GetCode())) << ref_reg;
- DCHECK(instruction_->IsInstanceFieldGet() ||
- instruction_->IsStaticFieldGet() ||
- instruction_->IsArrayGet() ||
- instruction_->IsArraySet() ||
- instruction_->IsInstanceOf() ||
- instruction_->IsCheckCast() ||
- (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
- (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
- << "Unexpected instruction in read barrier marking slow path: "
- << instruction_->DebugName();
- // The read barrier instrumentation of object ArrayGet
- // instructions does not support the HIntermediateAddress
- // instruction.
- DCHECK(!(instruction_->IsArrayGet() &&
- instruction_->AsArrayGet()->GetArray()->IsIntermediateAddress()));
-
- // Temporary register `temp_`, used to store the lock word, must
- // not be IP, as we may use it to emit the reference load (in the
- // call to GenerateRawReferenceLoad below), and we need the lock
- // word to still be in `temp_` after the reference load.
- DCHECK(!temp_.Is(ip));
-
- __ Bind(GetEntryLabel());
-
- // When using MaybeGenerateReadBarrierSlow, the read barrier call is
- // inserted after the original load. However, in fast path based
- // Baker's read barriers, we need to perform the load of
- // mirror::Object::monitor_ *before* the original reference load.
- // This load-load ordering is required by the read barrier.
- // The slow path (for Baker's algorithm) should look like:
- //
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // }
- //
- // Note: the original implementation in ReadBarrier::Barrier is
- // slightly more complex as it performs additional checks that we do
- // not do here for performance reasons.
-
- CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
-
- // /* int32_t */ monitor = obj->monitor_
- uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
- arm_codegen->GetAssembler()->LoadFromOffset(kLoadWord, temp_, obj_, monitor_offset);
- if (needs_null_check_) {
- codegen->MaybeRecordImplicitNullCheck(instruction_);
- }
- // /* LockWord */ lock_word = LockWord(monitor)
- static_assert(sizeof(LockWord) == sizeof(int32_t),
- "art::LockWord and int32_t have different sizes.");
-
- // Introduce a dependency on the lock_word including the rb_state,
- // which shall prevent load-load reordering without using
- // a memory barrier (which would be more expensive).
- // `obj` is unchanged by this operation, but its value now depends
- // on `temp`.
- __ Add(obj_, obj_, Operand(temp_, ShiftType::LSR, 32));
-
- // The actual reference load.
- // A possible implicit null check has already been handled above.
- arm_codegen->GenerateRawReferenceLoad(
- instruction_, ref_, obj_, offset_, index_, scale_factor_, /* needs_null_check */ false);
-
- // Mark the object `ref` when `obj` is gray.
- //
- // if (rb_state == ReadBarrier::GrayState())
- // ref = ReadBarrier::Mark(ref);
- //
- // Given the numeric representation, it's enough to check the low bit of the
- // rb_state. We do that by shifting the bit out of the lock word with LSRS
- // which can be a 16-bit instruction unlike the TST immediate.
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0");
- static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
- __ Lsrs(temp_, temp_, LockWord::kReadBarrierStateShift + 1);
- __ B(cc, GetExitLabel()); // Carry flag is the last bit shifted out by LSRS.
- GenerateReadBarrierMarkRuntimeCall(codegen);
-
- __ B(GetExitLabel());
- }
-
- private:
- // The register containing the object holding the marked object reference field.
- vixl32::Register obj_;
- // The offset, index and scale factor to access the reference in `obj_`.
- uint32_t offset_;
- Location index_;
- ScaleFactor scale_factor_;
- // Is a null check required?
- bool needs_null_check_;
- // A temporary register used to hold the lock word of `obj_`.
- vixl32::Register temp_;
-
- DISALLOW_COPY_AND_ASSIGN(LoadReferenceWithBakerReadBarrierSlowPathARMVIXL);
-};
-
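The ARM32 variant of the gray check above replaces the ARM64 TBZ with an LSRS whose carry flag carries the answer, trading a wide TST immediate for a 16-bit instruction. A sketch of the bit it extracts (`shift` stands for LockWord::kReadBarrierStateShift):

#include <cstdint>

bool GrayBitViaLsrsCarry(uint32_t lock_word, uint32_t shift) {
  // After "LSRS r, r, #(shift + 1)" the carry flag holds the last bit
  // shifted out, i.e. bit `shift` of the original lock word.
  return ((lock_word >> shift) & 1u) != 0;
}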
-// Slow path loading `obj`'s lock word, loading a reference from
-// object `*(obj + offset + (index << scale_factor))` into `ref`, and
-// marking `ref` if `obj` is gray according to the lock word (Baker
// read barrier). If needed, this slow path also atomically updates
// the field `obj.field` in the object `obj` holding this reference
-// after marking (contrary to
-// LoadReferenceWithBakerReadBarrierSlowPathARMVIXL above, which never
-// tries to update `obj.field`).
+// after marking.
//
// This means that after the execution of this slow path, both `ref`
// and `obj.field` will be up-to-date; i.e., after the flip, both will
@@ -1006,7 +855,7 @@ class LoadReferenceWithBakerReadBarrierAndUpdateFieldSlowPathARMVIXL
__ Bind(GetEntryLabel());
- // The implementation is similar to LoadReferenceWithBakerReadBarrierSlowPathARMVIXL's:
+ // The implementation is:
//
// uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
// lfence; // Load fence or artificial data dependency to prevent load-load reordering
@@ -6995,9 +6844,25 @@ void CodeGeneratorARMVIXL::MarkGCCard(vixl32::Register temp,
if (can_be_null) {
__ CompareAndBranchIfZero(value, &is_null);
}
+ // Load the address of the card table into `card`.
GetAssembler()->LoadFromOffset(
kLoadWord, card, tr, Thread::CardTableOffset<kArmPointerSize>().Int32Value());
+ // Calculate the offset (in the card table) of the card corresponding to
+ // `object`.
__ Lsr(temp, object, Operand::From(gc::accounting::CardTable::kCardShift));
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the STRB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + object >> kCardShift`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ Strb(card, MemOperand(card, temp));
if (can_be_null) {
__ Bind(&is_null);
@@ -8796,8 +8661,7 @@ void CodeGeneratorARMVIXL::GenerateGcRootFieldLoad(
void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
vixl32::Register obj,
- uint32_t offset,
- Location temp,
+ const vixl32::MemOperand& src,
bool needs_null_check) {
DCHECK(kEmitCompilerReadBarrier);
DCHECK(kUseBakerReadBarrier);
@@ -8823,23 +8687,15 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i
// HeapReference<mirror::Object> reference = *(obj+offset);
// gray_return_address:
- DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
+ DCHECK(src.GetAddrMode() == vixl32::Offset);
+ DCHECK_ALIGNED(src.GetOffsetImmediate(), sizeof(mirror::HeapReference<mirror::Object>));
vixl32::Register ref_reg = RegisterFrom(ref, DataType::Type::kReference);
- bool narrow = CanEmitNarrowLdr(ref_reg, obj, offset);
- vixl32::Register base = obj;
- if (offset >= kReferenceLoadMinFarOffset) {
- base = RegisterFrom(temp);
- static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2.");
- __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u)));
- offset &= (kReferenceLoadMinFarOffset - 1u);
- // Use narrow LDR only for small offsets. Generating narrow encoding LDR for the large
- // offsets with `(offset & (kReferenceLoadMinFarOffset - 1u)) < 32u` would most likely
- // increase the overall code size when taking the generated thunks into account.
- DCHECK(!narrow);
- }
+ bool narrow = CanEmitNarrowLdr(ref_reg, src.GetBaseRegister(), src.GetOffsetImmediate());
+
UseScratchRegisterScope temps(GetVIXLAssembler());
temps.Exclude(ip);
- uint32_t custom_data = EncodeBakerReadBarrierFieldData(base.GetCode(), obj.GetCode(), narrow);
+ uint32_t custom_data =
+ EncodeBakerReadBarrierFieldData(src.GetBaseRegister().GetCode(), obj.GetCode(), narrow);
{
vixl::EmissionCheckScope guard(
@@ -8850,7 +8706,7 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i
__ cmp(mr, Operand(0));
EmitBakerReadBarrierBne(custom_data);
ptrdiff_t old_offset = GetVIXLAssembler()->GetBuffer()->GetCursorOffset();
- __ ldr(EncodingSize(narrow ? Narrow : Wide), ref_reg, MemOperand(base, offset));
+ __ ldr(EncodingSize(narrow ? Narrow : Wide), ref_reg, src);
if (needs_null_check) {
MaybeRecordImplicitNullCheck(instruction);
}
@@ -8871,6 +8727,24 @@ void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* i
MaybeGenerateMarkingRegisterCheck(/* code */ 20, /* temp_loc */ LocationFrom(ip));
}
+void CodeGeneratorARMVIXL::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ vixl32::Register obj,
+ uint32_t offset,
+ Location temp,
+ bool needs_null_check) {
+ DCHECK_ALIGNED(offset, sizeof(mirror::HeapReference<mirror::Object>));
+ vixl32::Register base = obj;
+ if (offset >= kReferenceLoadMinFarOffset) {
+ base = RegisterFrom(temp);
+ static_assert(IsPowerOfTwo(kReferenceLoadMinFarOffset), "Expecting a power of 2.");
+ __ Add(base, obj, Operand(offset & ~(kReferenceLoadMinFarOffset - 1u)));
+ offset &= (kReferenceLoadMinFarOffset - 1u);
+ }
+ GenerateFieldLoadWithBakerReadBarrier(
+ instruction, ref, obj, MemOperand(base, offset), needs_null_check);
+}
+
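The `narrow` flag computed by this overload comes from CanEmitNarrowLdr. A sketch of a plausible eligibility test under the Thumb-2 16-bit LDR (immediate) constraints — both registers in r0-r7 and a word-aligned offset of at most 124 bytes — which this author assumes is roughly what that helper checks:

#include <cstdint>

bool CanUseNarrowLdr(uint32_t rt, uint32_t rn, uint32_t offset) {
  return rt < 8u && rn < 8u && (offset & 3u) == 0u && offset <= 124u;
}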
void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref,
vixl32::Register obj,
uint32_t data_offset,
@@ -8938,53 +8812,6 @@ void CodeGeneratorARMVIXL::GenerateArrayLoadWithBakerReadBarrier(Location ref,
MaybeGenerateMarkingRegisterCheck(/* code */ 21, /* temp_loc */ LocationFrom(ip));
}
-void CodeGeneratorARMVIXL::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- vixl32::Register obj,
- uint32_t offset,
- Location index,
- ScaleFactor scale_factor,
- Location temp,
- bool needs_null_check) {
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
-
- // Query `art::Thread::Current()->GetIsGcMarking()` (stored in the
- // Marking Register) to decide whether we need to enter the slow
- // path to mark the reference. Then, in the slow path, check the
- // gray bit in the lock word of the reference's holder (`obj`) to
- // decide whether to mark `ref` or not.
- //
- // if (mr) { // Thread::Current()->GetIsGcMarking()
- // // Slow path.
- // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
- // lfence; // Load fence or artificial data dependency to prevent load-load reordering
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // bool is_gray = (rb_state == ReadBarrier::GrayState());
- // if (is_gray) {
- // entrypoint = Thread::Current()->pReadBarrierMarkReg ## root.reg()
- // ref = entrypoint(ref); // ref = ReadBarrier::Mark(ref); // Runtime entry point call.
- // }
- // } else {
- // HeapReference<mirror::Object> ref = *src; // Original reference load.
- // }
-
- vixl32::Register temp_reg = RegisterFrom(temp);
-
- // Slow path marking the object `ref` when the GC is marking. The
- // entrypoint will be loaded by the slow path code.
- SlowPathCodeARMVIXL* slow_path =
- new (GetScopedAllocator()) LoadReferenceWithBakerReadBarrierSlowPathARMVIXL(
- instruction, ref, obj, offset, index, scale_factor, needs_null_check, temp_reg);
- AddSlowPath(slow_path);
-
- __ CompareAndBranchIfNonZero(mr, slow_path->GetEntryLabel());
- // Fast path: the GC is not marking: just load the reference.
- GenerateRawReferenceLoad(instruction, ref, obj, offset, index, scale_factor, needs_null_check);
- __ Bind(slow_path->GetExitLabel());
- MaybeGenerateMarkingRegisterCheck(/* code */ 22);
-}
-
void CodeGeneratorARMVIXL::UpdateReferenceFieldWithBakerReadBarrier(HInstruction* instruction,
Location ref,
vixl32::Register obj,
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 0106236b17..2fd18cab47 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -624,6 +624,14 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
ReadBarrierOption read_barrier_option);
// Fast path implementation of ReadBarrier::Barrier for a heap
// reference field load when Baker's read barriers are used.
+ // Overload suitable for Unsafe.getObject/-Volatile() intrinsic.
+ void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
+ Location ref,
+ vixl::aarch32::Register obj,
+ const vixl::aarch32::MemOperand& src,
+ bool needs_null_check);
+ // Fast path implementation of ReadBarrier::Barrier for a heap
+ // reference field load when Baker's read barriers are used.
void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
vixl::aarch32::Register obj,
@@ -638,20 +646,6 @@ class CodeGeneratorARMVIXL : public CodeGenerator {
Location index,
Location temp,
bool needs_null_check);
- // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
- // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
- //
- // Load the object reference located at the address
- // `obj + offset + (index << scale_factor)`, held by object `obj`, into
- // `ref`, and mark it if needed.
- void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
- Location ref,
- vixl::aarch32::Register obj,
- uint32_t offset,
- Location index,
- ScaleFactor scale_factor,
- Location temp,
- bool needs_null_check);
// Generate code checking whether the reference field at the
// address `obj + field_offset`, held by object `obj`, needs to be
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 0ed5756b53..476e8ab944 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1868,12 +1868,27 @@ void CodeGeneratorMIPS::MarkGCCard(Register object,
if (value_can_be_null) {
__ Beqz(value, &done);
}
+ // Load the address of the card table into `card`.
__ LoadFromOffset(kLoadWord,
card,
TR,
Thread::CardTableOffset<kMipsPointerSize>().Int32Value());
+ // Calculate the address of the card corresponding to `object`.
__ Srl(temp, object, gc::accounting::CardTable::kCardShift);
__ Addu(temp, card, temp);
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the SB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + (object >> kCardShift)`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ Sb(card, temp, 0);
if (value_can_be_null) {
__ Bind(&done);
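The biased card table described in the new comments (the same explanation is added to the mips64, x86 and x86_64 code generators below) can be summarized in a few lines of C++. This is a minimal sketch of the trick, with an illustrative shift value and a hypothetical function name, not the actual art::gc::accounting::CardTable implementation:

    #include <cstdint>

    // One card byte covers 2^kCardShift heap bytes (value illustrative).
    constexpr uintptr_t kCardShift = 10;

    // `biased_base` is allocated so that its least-significant byte already
    // equals kCardDirty. The same pointer thus serves twice: it locates the
    // card, and its low byte supplies the dirty value, saving an immediate load.
    void MarkCard(uint8_t* biased_base, uintptr_t object) {
      uint8_t* card = biased_base + (object >> kCardShift);
      *card = static_cast<uint8_t>(reinterpret_cast<uintptr_t>(biased_base));
    }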
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 2b6928eee2..c05f62722c 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1490,12 +1490,27 @@ void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object,
if (value_can_be_null) {
__ Beqzc(value, &done);
}
+ // Load the address of the card table into `card`.
__ LoadFromOffset(kLoadDoubleword,
card,
TR,
Thread::CardTableOffset<kMips64PointerSize>().Int32Value());
+ // Calculate the address of the card corresponding to `object`.
__ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
__ Daddu(temp, card, temp);
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the SB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + (object >> kCardShift)`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ Sb(card, temp, 0);
if (value_can_be_null) {
__ Bind(&done);
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a835aed6b0..63bd8413eb 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -5104,9 +5104,25 @@ void CodeGeneratorX86::MarkGCCard(Register temp,
__ testl(value, value);
__ j(kEqual, &is_null);
}
+ // Load the address of the card table into `card`.
__ fs()->movl(card, Address::Absolute(Thread::CardTableOffset<kX86PointerSize>().Int32Value()));
+ // Calculate the offset (in the card table) of the card corresponding to
+ // `object`.
__ movl(temp, object);
__ shrl(temp, Immediate(gc::accounting::CardTable::kCardShift));
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the MOVB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + (object >> kCardShift)`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ movb(Address(temp, card, TIMES_1, 0),
X86ManagedRegister::FromCpuRegister(card).AsByteRegister());
if (value_can_be_null) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index dee891b8de..0bd7319677 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -5436,10 +5436,26 @@ void CodeGeneratorX86_64::MarkGCCard(CpuRegister temp,
__ testl(value, value);
__ j(kEqual, &is_null);
}
+ // Load the address of the card table into `card`.
__ gs()->movq(card, Address::Absolute(Thread::CardTableOffset<kX86_64PointerSize>().Int32Value(),
/* no_rip */ true));
+ // Calculate the offset (in the card table) of the card corresponding to
+ // `object`.
__ movq(temp, object);
__ shrq(temp, Immediate(gc::accounting::CardTable::kCardShift));
+ // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
+ // `object`'s card.
+ //
+ // Register `card` contains the address of the card table. Note that the card
+ // table's base is biased during its creation so that it always starts at an
+ // address whose least-significant byte is equal to `kCardDirty` (see
+ // art::gc::accounting::CardTable::Create). Therefore the MOVB instruction
+ // below writes the `kCardDirty` (byte) value into the `object`'s card
+ // (located at `card + (object >> kCardShift)`).
+ //
+ // This dual use of the value in register `card` (1. to calculate the location
+ // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
+ // (no need to explicitly load `kCardDirty` as an immediate value).
__ movb(Address(temp, card, TIMES_1, 0), card);
if (value_can_be_null) {
__ Bind(&is_null);
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 4b2bcc8ca8..74d4a8f63b 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -745,15 +745,15 @@ static void GenUnsafeGet(HInvoke* invoke,
if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
// UnsafeGetObject/UnsafeGetObjectVolatile with Baker's read barrier case.
Register temp = WRegisterFrom(locations->GetTemp(0));
- codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
- trg_loc,
- base,
- /* offset */ 0u,
- /* index */ offset_loc,
- /* scale_factor */ 0u,
- temp,
- /* needs_null_check */ false,
- is_volatile);
+ MacroAssembler* masm = codegen->GetVIXLAssembler();
+ // Piggy-back on the field load path using introspection for the Baker read barrier.
+ __ Add(temp, base, offset.W()); // Offset should not exceed 32 bits.
+ codegen->GenerateFieldLoadWithBakerReadBarrier(invoke,
+ trg_loc,
+ base,
+ MemOperand(temp.X()),
+ /* needs_null_check */ false,
+ is_volatile);
} else {
// Other cases.
MemOperand mem_op(base.X(), offset);
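The `Add` above is the whole piggy-back: the introspection-based field-load path only understands a plain `[register]` address, so the 64-bit register offset is folded into the fixed temp first and the load is then issued at offset zero. A hedged C++ sketch of that shape (both function names are hypothetical):

    #include <cstdint>
    #include <cstring>

    // Stand-in for the field-load path: it can only load at offset 0.
    uint32_t LoadFieldAtZeroOffset(const uint8_t* address) {
      uint32_t ref;  // HeapReference<mirror::Object> is 32 bits.
      std::memcpy(&ref, address, sizeof(ref));
      return ref;
    }

    uint32_t UnsafeGetReference(const uint8_t* base, uint64_t offset) {
      const uint8_t* temp = base + offset;  // Mirrors `Add(temp, base, offset.W())`.
      return LoadFieldAtZeroOffset(temp);   // Mirrors `MemOperand(temp.X())`.
    }

The arm_vixl hunk further below applies the same pattern with `Add(RegisterFrom(temp), base, Operand(offset))`.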
@@ -782,9 +782,9 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* in
kIntrinsified);
if (can_call && kUseBakerReadBarrier) {
locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
- // We need a temporary register for the read barrier marking slow
- // path in CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier.
- locations->AddTemp(Location::RequiresRegister());
+ // We need a temporary register for the read barrier load in order to use
+ // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier().
+ locations->AddTemp(FixedTempLocation());
}
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
@@ -984,106 +984,155 @@ static void CreateIntIntIntIntIntToInt(ArenaAllocator* allocator,
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall,
kIntrinsified);
+ if (can_call) {
+ locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ }
locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
locations->SetInAt(1, Location::RequiresRegister());
locations->SetInAt(2, Location::RequiresRegister());
locations->SetInAt(3, Location::RequiresRegister());
locations->SetInAt(4, Location::RequiresRegister());
- // If heap poisoning is enabled, we don't want the unpoisoning
- // operations to potentially clobber the output. Likewise when
- // emitting a (Baker) read barrier, which may call.
- Location::OutputOverlap overlaps =
- ((kPoisonHeapReferences && type == DataType::Type::kReference) || can_call)
- ? Location::kOutputOverlap
- : Location::kNoOutputOverlap;
- locations->SetOut(Location::RequiresRegister(), overlaps);
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- // Temporary register for (Baker) read barrier.
+ // We need two non-scratch temporary registers for (Baker) read barrier.
+ locations->AddTemp(Location::RequiresRegister());
locations->AddTemp(Location::RequiresRegister());
}
}
+
+class BakerReadBarrierCasSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+ explicit BakerReadBarrierCasSlowPathARM64(HInvoke* invoke)
+ : SlowPathCodeARM64(invoke) {}
+
+ const char* GetDescription() const OVERRIDE { return "BakerReadBarrierCasSlowPathARM64"; }
+
+ void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+ Arm64Assembler* assembler = arm64_codegen->GetAssembler();
+ MacroAssembler* masm = assembler->GetVIXLAssembler();
+ __ Bind(GetEntryLabel());
+
+ // Get the locations.
+ LocationSummary* locations = instruction_->GetLocations();
+ Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
+ Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
+ Register expected = WRegisterFrom(locations->InAt(3)); // Expected.
+ Register value = WRegisterFrom(locations->InAt(4)); // Value.
+
+ Register old_value = WRegisterFrom(locations->GetTemp(0)); // The old value from main path.
+ Register marked = WRegisterFrom(locations->GetTemp(1)); // The marked old value.
+
+ // Mark the `old_value` from the main path and compare with `expected`. This clobbers the
+ // `tmp_ptr` scratch register but we do not want to allocate another non-scratch temporary.
+ arm64_codegen->GenerateUnsafeCasOldValueMovWithBakerReadBarrier(marked, old_value);
+ __ Cmp(marked, expected);
+ __ B(GetExitLabel(), ne); // If taken, Z=false indicates failure.
+
+ // The `old_value` we have read did not match `expected` (which is always a to-space reference)
+ // but after the read barrier in GenerateUnsafeCasOldValueMovWithBakerReadBarrier() the marked
+ // to-space value matched, so the `old_value` must be a from-space reference to the same
+ // object. Do the same CAS loop as the main path but check for both `expected` and the unmarked
+ // old value representing the to-space and from-space references for the same object.
+
+ UseScratchRegisterScope temps(masm);
+ Register tmp_ptr = temps.AcquireX();
+ Register tmp = temps.AcquireSameSizeAs(value);
+
+ // Recalculate the `tmp_ptr` clobbered above.
+ __ Add(tmp_ptr, base.X(), Operand(offset));
+
+ // do {
+ // tmp_value = [tmp_ptr];
+ // } while ((tmp_value == expected || tmp_value == old_value) && failure([tmp_ptr] <- r_new_value));
+ // result = (tmp_value == expected || tmp_value == old_value);
+
+ vixl::aarch64::Label loop_head;
+ __ Bind(&loop_head);
+ __ Ldaxr(tmp, MemOperand(tmp_ptr));
+ assembler->MaybeUnpoisonHeapReference(tmp);
+ __ Cmp(tmp, expected);
+ __ Ccmp(tmp, old_value, ZFlag, ne);
+ __ B(GetExitLabel(), ne); // If taken, Z=false indicates failure.
+ assembler->MaybePoisonHeapReference(value);
+ __ Stlxr(tmp.W(), value, MemOperand(tmp_ptr));
+ assembler->MaybeUnpoisonHeapReference(value);
+ __ Cbnz(tmp.W(), &loop_head);
+
+ // Z=true from the above CMP+CCMP indicates success.
+ __ B(GetExitLabel());
+ }
+};
+
static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorARM64* codegen) {
- MacroAssembler* masm = codegen->GetVIXLAssembler();
+ Arm64Assembler* assembler = codegen->GetAssembler();
+ MacroAssembler* masm = assembler->GetVIXLAssembler();
LocationSummary* locations = invoke->GetLocations();
- Location out_loc = locations->Out();
- Register out = WRegisterFrom(out_loc); // Boolean result.
-
- Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
- Location offset_loc = locations->InAt(2);
- Register offset = XRegisterFrom(offset_loc); // Long offset.
- Register expected = RegisterFrom(locations->InAt(3), type); // Expected.
- Register value = RegisterFrom(locations->InAt(4), type); // Value.
+ Register out = WRegisterFrom(locations->Out()); // Boolean result.
+ Register base = WRegisterFrom(locations->InAt(1)); // Object pointer.
+ Register offset = XRegisterFrom(locations->InAt(2)); // Long offset.
+ Register expected = RegisterFrom(locations->InAt(3), type); // Expected.
+ Register value = RegisterFrom(locations->InAt(4), type); // Value.
// This needs to be before the temp registers, as MarkGCCard also uses VIXL temps.
if (type == DataType::Type::kReference) {
// Mark card for object assuming new value is stored.
bool value_can_be_null = true; // TODO: Worth finding out this information?
codegen->MarkGCCard(base, value, value_can_be_null);
-
- // The only read barrier implementation supporting the
- // UnsafeCASObject intrinsic is the Baker-style read barriers.
- DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
-
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
- Register temp = WRegisterFrom(locations->GetTemp(0));
- // Need to make sure the reference stored in the field is a to-space
- // one before attempting the CAS or the CAS could fail incorrectly.
- codegen->UpdateReferenceFieldWithBakerReadBarrier(
- invoke,
- out_loc, // Unused, used only as a "temporary" within the read barrier.
- base,
- /* field_offset */ offset_loc,
- temp,
- /* needs_null_check */ false,
- /* use_load_acquire */ false);
- }
}
UseScratchRegisterScope temps(masm);
Register tmp_ptr = temps.AcquireX(); // Pointer to actual memory.
- Register tmp_value = temps.AcquireSameSizeAs(value);  // Value in memory.
- Register tmp_32 = tmp_value.W();
- __ Add(tmp_ptr, base.X(), Operand(offset));
- if (kPoisonHeapReferences && type == DataType::Type::kReference) {
-   codegen->GetAssembler()->PoisonHeapReference(expected);
-   if (value.Is(expected)) {
-     // Do not poison `value`, as it is the same register as
-     // `expected`, which has just been poisoned.
-   } else {
-     codegen->GetAssembler()->PoisonHeapReference(value);
-   }
+ Register old_value;  // Value in memory.
+ vixl::aarch64::Label exit_loop_label;
+ vixl::aarch64::Label* exit_loop = &exit_loop_label;
+ vixl::aarch64::Label* failure = &exit_loop_label;
+ if (kEmitCompilerReadBarrier && type == DataType::Type::kReference) {
+   // The only read barrier implementation supporting the
+   // UnsafeCASObject intrinsic is the Baker-style read barriers.
+   DCHECK(kUseBakerReadBarrier);
+   BakerReadBarrierCasSlowPathARM64* slow_path =
+       new (codegen->GetScopedAllocator()) BakerReadBarrierCasSlowPathARM64(invoke);
+   codegen->AddSlowPath(slow_path);
+   exit_loop = slow_path->GetExitLabel();
+   failure = slow_path->GetEntryLabel();
+   // We need to store the `old_value` in a non-scratch register to make sure
+   // the Baker read barrier in the slow path does not clobber it.
+   old_value = WRegisterFrom(locations->GetTemp(0));
+ } else {
+   old_value = temps.AcquireSameSizeAs(value);
  }
+ __ Add(tmp_ptr, base.X(), Operand(offset));
+
  // do {
- //   tmp_value = [tmp_ptr] - expected;
- // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
- // result = tmp_value != 0;
- vixl::aarch64::Label loop_head, exit_loop;
+ //   tmp_value = [tmp_ptr];
+ // } while (tmp_value == expected && failure([tmp_ptr] <- r_new_value));
+ // result = tmp_value == expected;
+ vixl::aarch64::Label loop_head;
__ Bind(&loop_head);
- __ Ldaxr(tmp_value, MemOperand(tmp_ptr));
- __ Cmp(tmp_value, expected);
- __ B(&exit_loop, ne);
- __ Stlxr(tmp_32, value, MemOperand(tmp_ptr));
- __ Cbnz(tmp_32, &loop_head);
- __ Bind(&exit_loop);
- __ Cset(out, eq);
-
- if (kPoisonHeapReferences && type == DataType::Type::kReference) {
- codegen->GetAssembler()->UnpoisonHeapReference(expected);
- if (value.Is(expected)) {
- // Do not unpoison `value`, as it is the same register as
- // `expected`, which has just been unpoisoned.
- } else {
- codegen->GetAssembler()->UnpoisonHeapReference(value);
- }
+ __ Ldaxr(old_value, MemOperand(tmp_ptr));
+ if (type == DataType::Type::kReference) {
+ assembler->MaybeUnpoisonHeapReference(old_value);
}
+ __ Cmp(old_value, expected);
+ __ B(failure, ne);
+ if (type == DataType::Type::kReference) {
+ assembler->MaybePoisonHeapReference(value);
+ }
+ __ Stlxr(old_value.W(), value, MemOperand(tmp_ptr)); // Reuse `old_value` for STLXR result.
+ if (type == DataType::Type::kReference) {
+ assembler->MaybeUnpoisonHeapReference(value);
+ }
+ __ Cbnz(old_value.W(), &loop_head);
+ __ Bind(exit_loop);
+ __ Cset(out, eq);
}
void IntrinsicLocationsBuilderARM64::VisitUnsafeCASInt(HInvoke* invoke) {
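Taken together, the main-path loop and BakerReadBarrierCasSlowPathARM64 implement a CAS that also succeeds when the field still holds the from-space alias of `expected`. A portable C++ sketch of that contract (not the generated code; the function name and memory orders are ours):

    #include <atomic>
    #include <cstdint>

    bool CasAcceptingBothSpaces(std::atomic<uint32_t>* field,
                                uint32_t expected,    // To-space reference.
                                uint32_t old_value,   // From-space alias of `expected`.
                                uint32_t new_value) {
      uint32_t seen = field->load(std::memory_order_acquire);
      while (seen == expected || seen == old_value) {
        // On failure compare_exchange_weak refreshes `seen`, mirroring the
        // LDAXR/STLXR retry loop (which may also fail spuriously).
        if (field->compare_exchange_weak(seen, new_value,
                                         std::memory_order_seq_cst,
                                         std::memory_order_acquire)) {
          return true;  // Z=1 at exit_loop: success.
        }
      }
      return false;  // Neither reference matched: the CAS fails.
    }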
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 2963308da8..b92075053e 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -638,8 +638,11 @@ static void GenUnsafeGet(HInvoke* invoke,
if (kEmitCompilerReadBarrier) {
if (kUseBakerReadBarrier) {
Location temp = locations->GetTemp(0);
- codegen->GenerateReferenceLoadWithBakerReadBarrier(
- invoke, trg_loc, base, 0U, offset_loc, TIMES_1, temp, /* needs_null_check */ false);
+ // Piggy-back on the field load path using introspection for the Baker read barrier.
+ __ Add(RegisterFrom(temp), base, Operand(offset));
+ MemOperand src(RegisterFrom(temp), 0);
+ codegen->GenerateFieldLoadWithBakerReadBarrier(
+ invoke, trg_loc, base, src, /* needs_null_check */ false);
if (is_volatile) {
__ Dmb(vixl32::ISH);
}
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 3918b65a62..60ca61c133 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -296,10 +296,10 @@ ScopedArenaVector<uint8_t> StackMapStream::Encode() {
ScopedArenaVector<uint8_t> buffer(allocator_->Adapter(kArenaAllocStackMapStream));
BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&buffer);
- EncodeVarintBits(out, packed_frame_size_);
- EncodeVarintBits(out, core_spill_mask_);
- EncodeVarintBits(out, fp_spill_mask_);
- EncodeVarintBits(out, num_dex_registers_);
+ out.WriteVarint(packed_frame_size_);
+ out.WriteVarint(core_spill_mask_);
+ out.WriteVarint(fp_spill_mask_);
+ out.WriteVarint(num_dex_registers_);
EncodeTable(out, stack_maps_);
EncodeTable(out, register_masks_);
EncodeTable(out, stack_masks_);
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index df11709f03..01c6bf9e0e 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -109,7 +109,7 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
BitTableBuilder<RegisterMask> register_masks_;
BitmapTableBuilder stack_masks_;
BitmapTableBuilder dex_register_masks_;
- BitTableBuilder<MaskInfo> dex_register_maps_;
+ BitTableBuilder<DexRegisterMapInfo> dex_register_maps_;
BitTableBuilder<DexRegisterInfo> dex_register_catalog_;
ScopedArenaVector<BitVector*> lazy_stack_masks_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index a281bb30f4..d28f09fbba 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -750,9 +750,9 @@ TEST(StackMapTest, TestDedupeBitTables) {
ScopedArenaVector<uint8_t> memory = stream.Encode();
std::vector<uint8_t> out;
- CodeInfo::DedupeMap dedupe_map;
- size_t deduped1 = CodeInfo::Dedupe(&out, memory.data(), &dedupe_map);
- size_t deduped2 = CodeInfo::Dedupe(&out, memory.data(), &dedupe_map);
+ CodeInfo::Deduper deduper(&out);
+ size_t deduped1 = deduper.Dedupe(memory.data());
+ size_t deduped2 = deduper.Dedupe(memory.data());
for (size_t deduped : { deduped1, deduped2 }) {
CodeInfo code_info(out.data() + deduped);