Diffstat (limited to 'compiler/utils/arm64')
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc                7
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h                 3
-rw-r--r--  compiler/utils/arm64/jni_macro_assembler_arm64.cc    238
-rw-r--r--  compiler/utils/arm64/jni_macro_assembler_arm64.h      91
-rw-r--r--  compiler/utils/arm64/managed_register_arm64.cc         2
-rw-r--r--  compiler/utils/arm64/managed_register_arm64.h          3
-rw-r--r--  compiler/utils/arm64/managed_register_arm64_test.cc    3
7 files changed, 70 insertions(+), 277 deletions(-)
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 6100ed9855..26dce7c502 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -16,7 +16,6 @@
#include "arch/arm64/instruction_set_features_arm64.h"
#include "assembler_arm64.h"
-#include "base/bit_utils_iterator.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
#include "offsets.h"
@@ -24,7 +23,7 @@
using namespace vixl::aarch64; // NOLINT(build/namespaces)
-namespace art {
+namespace art HIDDEN {
namespace arm64 {
#ifdef ___
@@ -187,9 +186,7 @@ void Arm64Assembler::MaybeUnpoisonHeapReference(Register reg) {
}
void Arm64Assembler::GenerateMarkingRegisterCheck(Register temp, int code) {
- // The Marking Register is only used in the Baker read barrier configuration.
- DCHECK(kEmitCompilerReadBarrier);
- DCHECK(kUseBakerReadBarrier);
+ DCHECK(kReserveMarkingRegister);
vixl::aarch64::Register mr = reg_x(MR); // Marking Register.
vixl::aarch64::Register tr = reg_x(TR); // Thread Register.
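The marking-register check above is now gated on kReserveMarkingRegister alone: per the RemoveFrame change further down, the register stays reserved with all GCs because nterp still uses it, so the check no longer implies a particular read-barrier configuration. Roughly, GenerateMarkingRegisterCheck() verifies that MR agrees with the GC-marking flag reachable through the thread register; a minimal C++ model of that invariant, using a hypothetical ThreadModel in place of art::Thread:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for the per-thread state the emitted code loads via TR.
    struct ThreadModel {
      uint32_t is_gc_marking;  // kept in sync with MR by the runtime
    };

    // The generated sequence is roughly: load the flag via TR, compare it with MR,
    // and trap (BRK #code) on mismatch. Modeled here as an assertion.
    inline void CheckMarkingRegister(uint64_t mr_value, const ThreadModel& self) {
      assert(mr_value == self.is_gc_marking);
    }
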
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b49a13a067..f8168903bd 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -23,7 +23,6 @@
#include <android-base/logging.h>
-#include "base/arena_containers.h"
#include "base/bit_utils_iterator.h"
#include "base/macros.h"
#include "dwarf/register.h"
@@ -38,7 +37,7 @@
#include "aarch64/macro-assembler-aarch64.h"
#pragma GCC diagnostic pop
-namespace art {
+namespace art HIDDEN {
class Arm64InstructionSetFeatures;
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index 50ca468499..9e9f122cf6 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -17,6 +17,7 @@
#include "jni_macro_assembler_arm64.h"
#include "entrypoints/quick/quick_entrypoints.h"
+#include "indirect_reference_table.h"
#include "lock_word.h"
#include "managed_register_arm64.h"
#include "offsets.h"
@@ -24,7 +25,7 @@
using namespace vixl::aarch64; // NOLINT(build/namespaces)
-namespace art {
+namespace art HIDDEN {
namespace arm64 {
#ifdef ___
@@ -191,46 +192,22 @@ void Arm64JNIMacroAssembler::Store(ManagedRegister m_base,
}
}
-void Arm64JNIMacroAssembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
- Arm64ManagedRegister src = m_src.AsArm64();
- CHECK(src.IsXRegister()) << src;
- StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
- offs.Int32Value());
-}
-
void Arm64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
Arm64ManagedRegister src = m_src.AsArm64();
CHECK(src.IsXRegister()) << src;
StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
}
-void Arm64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm) {
- UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- Register scratch = temps.AcquireW();
- ___ Mov(scratch, imm);
- ___ Str(scratch, MEM_OP(reg_x(SP), offs.Int32Value()));
-}
-
-void Arm64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs, FrameOffset fr_offs) {
- UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- Register scratch = temps.AcquireX();
- ___ Add(scratch, reg_x(SP), fr_offs.Int32Value());
- ___ Str(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
-}
-
-void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
+void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs, bool tag_sp) {
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
Register scratch = temps.AcquireX();
___ Mov(scratch, reg_x(SP));
+ if (tag_sp) {
+ ___ Orr(scratch, scratch, 0x2);
+ }
___ Str(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
}
-void Arm64JNIMacroAssembler::StoreSpanning(FrameOffset dest_off ATTRIBUTE_UNUSED,
- ManagedRegister m_source ATTRIBUTE_UNUSED,
- FrameOffset in_off ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL); // This case is not applicable to ARM64.
-}
-
// Load routines.
void Arm64JNIMacroAssembler::LoadImmediate(XRegister dest, int32_t value, Condition cond) {
if ((cond == al) || (cond == nv)) {
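
The new tag_sp parameter of StoreStackPointerToThread() in the hunk above ORs bit 1 into the copy of SP before publishing it to the thread; the low bits are free because SP is 16-byte aligned on AArch64. A minimal sketch of the equivalent tagging, with the 0x2 constant taken directly from the diff (what the tag signals is defined by the callers, not here):

    #include <cstdint>

    // Mirrors "mov scratch, sp; orr scratch, scratch, #0x2; str scratch, [tr, #offset]".
    inline uintptr_t TagStackPointer(uintptr_t sp, bool tag_sp) {
      return tag_sp ? (sp | uintptr_t{0x2}) : sp;  // bit 1 is free: SP is 16-byte aligned
    }
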
@@ -329,45 +306,6 @@ void Arm64JNIMacroAssembler::Load(ManagedRegister m_dst,
return Load(m_dst.AsArm64(), m_base.AsArm64().AsXRegister(), offs.Int32Value(), size);
}
-void Arm64JNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
- ThreadOffset64 src,
- size_t size) {
- return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
-}
-
-void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- CHECK(dst.IsXRegister()) << dst;
- LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
-}
-
-void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst,
- ManagedRegister m_base,
- MemberOffset offs,
- bool unpoison_reference) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- Arm64ManagedRegister base = m_base.AsArm64();
- CHECK(dst.IsXRegister() && base.IsXRegister());
- LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
- offs.Int32Value());
- if (unpoison_reference) {
- WRegister ref_reg = dst.AsOverlappingWRegister();
- asm_.MaybeUnpoisonHeapReference(reg_w(ref_reg));
- }
-}
-
-void Arm64JNIMacroAssembler::LoadRawPtr(ManagedRegister m_dst,
- ManagedRegister m_base,
- Offset offs) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- Arm64ManagedRegister base = m_base.AsArm64();
- CHECK(dst.IsXRegister() && base.IsXRegister());
- // Remove dst and base form the temp list - higher level API uses IP1, IP0.
- UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
- ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
-}
-
void Arm64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
CHECK(dst.IsXRegister()) << dst;
@@ -640,40 +578,10 @@ void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src,
}
}
-void Arm64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset64 tr_offs) {
- UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- Register scratch = temps.AcquireX();
- ___ Ldr(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
- ___ Str(scratch, MEM_OP(sp, fr_offs.Int32Value()));
-}
-
-void Arm64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-}
-
-void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src) {
- UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- Register scratch = temps.AcquireW();
- ___ Ldr(scratch, MEM_OP(reg_x(SP), src.Int32Value()));
- ___ Str(scratch, MEM_OP(reg_x(SP), dest.Int32Value()));
-}
-
-void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest,
- ManagedRegister base,
- MemberOffset offs,
- bool unpoison_reference) {
- UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
- Register scratch = temps.AcquireW();
- ___ Ldr(scratch, MEM_OP(reg_x(base.AsArm64().AsXRegister()), offs.Int32Value()));
- if (unpoison_reference) {
- asm_.MaybeUnpoisonHeapReference(scratch);
- }
- ___ Str(scratch, MEM_OP(reg_x(SP), dest.Int32Value()));
+void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, size_t value) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ DCHECK(dst.IsXRegister());
+ ___ Mov(reg_x(dst.AsXRegister()), value);
}
void Arm64JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size) {
@@ -684,105 +592,6 @@ void Arm64JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, size_t size
___ Str(scratch, MEM_OP(reg_x(SP), dest.Int32Value()));
}
-void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
- ManagedRegister src_base,
- Offset src_offset,
- ManagedRegister m_scratch,
- size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister base = src_base.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest_base,
- Offset dest_offs,
- FrameOffset src,
- ManagedRegister m_scratch,
- size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister base = m_dest_base.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
- dest_offs.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
- StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
- FrameOffset /*src_base*/,
- Offset /*src_offset*/,
- ManagedRegister /*mscratch*/,
- size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest,
- Offset dest_offset,
- ManagedRegister m_src,
- Offset src_offset,
- ManagedRegister m_scratch,
- size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister src = m_src.AsArm64();
- Arm64ManagedRegister dest = m_dest.AsArm64();
- CHECK(dest.IsXRegister()) << dest;
- CHECK(src.IsXRegister()) << src;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- if (scratch.IsWRegister()) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
- dest_offset.Int32Value());
- } else {
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
- dest_offset.Int32Value());
- }
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
- StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
- Offset /*dest_offset*/,
- FrameOffset /*src*/,
- Offset /*src_offset*/,
- ManagedRegister /*scratch*/,
- size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64JNIMacroAssembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
- // TODO: Should we check that m_scratch is IP? - see arm.
- ___ Dmb(InnerShareable, BarrierAll);
-}
-
void Arm64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
Arm64ManagedRegister reg = mreg.AsArm64();
CHECK(size == 1 || size == 2) << size;
@@ -882,6 +691,19 @@ void Arm64JNIMacroAssembler::CreateJObject(FrameOffset out_off,
___ Str(scratch, MEM_OP(reg_x(SP), out_off.Int32Value()));
}
+void Arm64JNIMacroAssembler::DecodeJNITransitionOrLocalJObject(ManagedRegister m_reg,
+ JNIMacroLabel* slow_path,
+ JNIMacroLabel* resume) {
+ constexpr uint64_t kGlobalOrWeakGlobalMask = IndirectReferenceTable::GetGlobalOrWeakGlobalMask();
+ constexpr uint64_t kIndirectRefKindMask = IndirectReferenceTable::GetIndirectRefKindMask();
+ constexpr size_t kGlobalOrWeakGlobalBit = WhichPowerOf2(kGlobalOrWeakGlobalMask);
+ Register reg = reg_w(m_reg.AsArm64().AsWRegister());
+ ___ Tbnz(reg.X(), kGlobalOrWeakGlobalBit, Arm64JNIMacroLabel::Cast(slow_path)->AsArm64());
+ ___ And(reg.X(), reg.X(), ~kIndirectRefKindMask);
+ ___ Cbz(reg.X(), Arm64JNIMacroLabel::Cast(resume)->AsArm64()); // Skip load for null.
+ ___ Ldr(reg, MEM_OP(reg.X()));
+}
+
void Arm64JNIMacroAssembler::TryToTransitionFromRunnableToNative(
JNIMacroLabel* label, ArrayRef<const ManagedRegister> scratch_regs ATTRIBUTE_UNUSED) {
constexpr uint32_t kNativeStateValue = Thread::StoredThreadStateValue(ThreadState::kNative);
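
The DecodeJNITransitionOrLocalJObject() sequence added in the hunk above tests the (weak) global bit with TBNZ and defers those references to the slow path, strips the indirect-reference kind bits, skips the load entirely for null, and otherwise loads the 32-bit StackReference the decoded pointer refers to. For reference, a C++ sketch of the same decision tree; the mask values here are placeholders, the real ones come from IndirectReferenceTable:

    #include <cstdint>

    constexpr uint64_t kGlobalOrWeakGlobalMask = 0x2;  // placeholder value
    constexpr uint64_t kIndirectRefKindMask = 0x3;     // placeholder value

    enum class DecodeResult { kSlowPath, kNull, kLoaded };

    // Mirrors the emitted code: TBNZ on the global bit, AND to clear the kind bits,
    // CBZ for null, then a 32-bit load of the spilled reference.
    inline DecodeResult DecodeJObject(uint64_t ref, uint32_t* out) {
      if (ref & kGlobalOrWeakGlobalMask) {
        return DecodeResult::kSlowPath;                // (weak) global: handled elsewhere
      }
      ref &= ~kIndirectRefKindMask;                    // strip the kind bits
      if (ref == 0) {
        return DecodeResult::kNull;                    // null jobject: nothing to load
      }
      *out = *reinterpret_cast<const uint32_t*>(ref);  // load StackReference<Object>
      return DecodeResult::kLoaded;
    }
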
@@ -989,7 +811,7 @@ void Arm64JNIMacroAssembler::TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCo
UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
Register test_reg;
DCHECK_EQ(Thread::IsGcMarkingSize(), 4u);
- DCHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
if (kUseBakerReadBarrier) {
// TestGcMarking() is used in the JNI stub entry when the marking register is up to date.
if (kIsDebugBuild && emit_run_time_checks_in_debug_mode_) {
@@ -1037,6 +859,14 @@ void Arm64JNIMacroAssembler::TestMarkBit(ManagedRegister m_ref,
}
}
+void Arm64JNIMacroAssembler::TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ Register scratch = temps.AcquireX();
+ ___ Mov(scratch, address);
+ ___ Ldrb(scratch.W(), MEM_OP(scratch, 0));
+ ___ Cbnz(scratch.W(), Arm64JNIMacroLabel::Cast(label)->AsArm64());
+}
+
void Arm64JNIMacroAssembler::Bind(JNIMacroLabel* label) {
CHECK(label != nullptr);
___ Bind(Arm64JNIMacroLabel::Cast(label)->AsArm64());
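
TestByteAndJumpIfNotZero() in the hunk above materializes an absolute address, loads one byte from it, and branches when that byte is non-zero; the whole test is three instructions (MOV, LDRB, CBNZ). A sketch of the condition it evaluates, assuming the address points at a valid one-byte flag:

    #include <cstdint>

    // Equivalent of "mov scratch, #address; ldrb wN, [scratch]; cbnz wN, label":
    // returns true exactly when the branch to the label would be taken.
    inline bool ByteIsNotZero(uintptr_t address) {
      return *reinterpret_cast<const volatile uint8_t*>(address) != 0;
    }
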
@@ -1107,7 +937,9 @@ void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size);
asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
- if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
+ // Emit marking register refresh even with all GCs, as we are still using the
+ // register due to nterp's dependency.
+ if (kReserveMarkingRegister) {
vixl::aarch64::Register mr = reg_x(MR); // Marking Register.
vixl::aarch64::Register tr = reg_x(TR); // Thread Register.
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index 2c04184848..2836e0947d 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -37,7 +37,7 @@
#include "aarch64/macro-assembler-aarch64.h"
#pragma GCC diagnostic pop
-namespace art {
+namespace art HIDDEN {
namespace arm64 {
class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
@@ -68,23 +68,12 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
// Store routines.
void Store(FrameOffset offs, ManagedRegister src, size_t size) override;
void Store(ManagedRegister base, MemberOffset offs, ManagedRegister src, size_t size) override;
- void StoreRef(FrameOffset dest, ManagedRegister src) override;
void StoreRawPtr(FrameOffset dest, ManagedRegister src) override;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm) override;
- void StoreStackOffsetToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs) override;
- void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off) override;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs, bool tag_sp) override;
// Load routines.
void Load(ManagedRegister dest, FrameOffset src, size_t size) override;
void Load(ManagedRegister dest, ManagedRegister base, MemberOffset offs, size_t size) override;
- void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) override;
- void LoadRef(ManagedRegister dest, FrameOffset src) override;
- void LoadRef(ManagedRegister dest,
- ManagedRegister base,
- MemberOffset offs,
- bool unpoison_reference) override;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) override;
void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) override;
// Copying routines.
@@ -92,43 +81,7 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
ArrayRef<ArgumentLocation> srcs,
ArrayRef<FrameOffset> refs) override;
void Move(ManagedRegister dest, ManagedRegister src, size_t size) override;
- void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset64 thr_offs) override;
- void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- override;
- void CopyRef(FrameOffset dest, FrameOffset src) override;
- void CopyRef(FrameOffset dest,
- ManagedRegister base,
- MemberOffset offs,
- bool unpoison_reference) override;
- void Copy(FrameOffset dest, FrameOffset src, size_t size) override;
- void Copy(FrameOffset dest,
- ManagedRegister src_base,
- Offset src_offset,
- ManagedRegister scratch,
- size_t size) override;
- void Copy(ManagedRegister dest_base,
- Offset dest_offset,
- FrameOffset src,
- ManagedRegister scratch,
- size_t size) override;
- void Copy(FrameOffset dest,
- FrameOffset src_base,
- Offset src_offset,
- ManagedRegister scratch,
- size_t size) override;
- void Copy(ManagedRegister dest,
- Offset dest_offset,
- ManagedRegister src,
- Offset src_offset,
- ManagedRegister scratch,
- size_t size) override;
- void Copy(FrameOffset dest,
- Offset dest_offset,
- FrameOffset src,
- Offset src_offset,
- ManagedRegister scratch,
- size_t size) override;
- void MemoryBarrier(ManagedRegister scratch) override;
+ void Move(ManagedRegister dest, size_t value) override;
// Sign extension.
void SignExtend(ManagedRegister mreg, size_t size) override;
@@ -140,20 +93,10 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
void GetCurrentThread(ManagedRegister dest) override;
void GetCurrentThread(FrameOffset dest_offset) override;
- // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
- // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
- // stale reference that can be used to avoid loading the spilled value to
- // see if the value is null.
- void CreateJObject(ManagedRegister out_reg,
- FrameOffset spilled_reference_offset,
- ManagedRegister in_reg,
- bool null_allowed) override;
-
- // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
- // or to be null if the value is null and `null_allowed`.
- void CreateJObject(FrameOffset out_off,
- FrameOffset spilled_reference_offset,
- bool null_allowed) override;
+ // Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path.
+ void DecodeJNITransitionOrLocalJObject(ManagedRegister reg,
+ JNIMacroLabel* slow_path,
+ JNIMacroLabel* resume) override;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
@@ -197,6 +140,8 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
void TestGcMarking(JNIMacroLabel* label, JNIMacroUnaryCondition cond) override;
// Emit a conditional jump to the label by applying a unary condition test to object's mark bit.
void TestMarkBit(ManagedRegister ref, JNIMacroLabel* label, JNIMacroUnaryCondition cond) override;
+ // Emit a conditional jump to the label if the byte loaded from the specified address is not zero.
+ void TestByteAndJumpIfNotZero(uintptr_t address, JNIMacroLabel* label) override;
// Code at this offset will serve as the target for the Jump call.
void Bind(JNIMacroLabel* label) override;
@@ -220,6 +165,24 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
+
+ void Copy(FrameOffset dest, FrameOffset src, size_t size);
+
+ // Set up `out_reg` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`. `in_reg` holds a possibly
+ // stale reference that can be used to avoid loading the spilled value to
+ // see if the value is null.
+ void CreateJObject(ManagedRegister out_reg,
+ FrameOffset spilled_reference_offset,
+ ManagedRegister in_reg,
+ bool null_allowed);
+
+ // Set up `out_off` to hold a `jobject` (`StackReference<Object>*` to a spilled value),
+ // or to be null if the value is null and `null_allowed`.
+ void CreateJObject(FrameOffset out_off,
+ FrameOffset spilled_reference_offset,
+ bool null_allowed);
+
void AddConstant(XRegister rd,
int32_t value,
vixl::aarch64::Condition cond = vixl::aarch64::al);
diff --git a/compiler/utils/arm64/managed_register_arm64.cc b/compiler/utils/arm64/managed_register_arm64.cc
index 5632265646..74a35452db 100644
--- a/compiler/utils/arm64/managed_register_arm64.cc
+++ b/compiler/utils/arm64/managed_register_arm64.cc
@@ -17,7 +17,7 @@
#include "managed_register_arm64.h"
#include "base/globals.h"
-namespace art {
+namespace art HIDDEN {
namespace arm64 {
// TODO: Define convention
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index 8a06f631a1..7e8c976b23 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -20,9 +20,10 @@
#include <android-base/logging.h>
#include "arch/arm64/registers_arm64.h"
+#include "base/macros.h"
#include "utils/managed_register.h"
-namespace art {
+namespace art HIDDEN {
namespace arm64 {
const int kNumberOfXRegIds = kNumberOfXRegisters;
diff --git a/compiler/utils/arm64/managed_register_arm64_test.cc b/compiler/utils/arm64/managed_register_arm64_test.cc
index d151ac99e7..f250360639 100644
--- a/compiler/utils/arm64/managed_register_arm64_test.cc
+++ b/compiler/utils/arm64/managed_register_arm64_test.cc
@@ -18,9 +18,10 @@
#include "assembler_arm64.h"
#include "base/globals.h"
+#include "base/macros.h"
#include "gtest/gtest.h"
-namespace art {
+namespace art HIDDEN {
namespace arm64 {
TEST(Arm64ManagedRegister, NoRegister) {