Diffstat (limited to 'compiler/utils')
 compiler/utils/arm/assembler_arm.cc                  | 502
 compiler/utils/arm/assembler_arm.h                   | 130
 compiler/utils/arm/assembler_arm32.cc                |   6
 compiler/utils/arm/assembler_arm32.h                 |   2
 compiler/utils/arm/assembler_thumb2.cc               |  37
 compiler/utils/arm/assembler_thumb2.h                |   2
 compiler/utils/arm/assembler_thumb2_test.cc          | 142
 compiler/utils/arm/jni_macro_assembler_arm.cc        | 612
 compiler/utils/arm/jni_macro_assembler_arm.h         | 169
 compiler/utils/arm64/assembler_arm64.cc              | 686
 compiler/utils/arm64/assembler_arm64.h               | 172
 compiler/utils/arm64/constants_arm64.h               |  37
 compiler/utils/arm64/jni_macro_assembler_arm64.cc    | 754
 compiler/utils/arm64/jni_macro_assembler_arm64.h     | 230
 compiler/utils/arm64/managed_register_arm64.h        |   2
 compiler/utils/assembler.cc                          | 133
 compiler/utils/assembler.h                           | 153
 compiler/utils/jni_macro_assembler.cc                | 107
 compiler/utils/jni_macro_assembler.h                 | 235
 compiler/utils/jni_macro_assembler_test.h            | 151
 compiler/utils/mips/assembler_mips.cc                |  36
 compiler/utils/mips/assembler_mips.h                 |  35
 compiler/utils/mips64/assembler_mips64.cc            |  28
 compiler/utils/mips64/assembler_mips64.h             |  33
 compiler/utils/x86/assembler_x86.cc                  | 483
 compiler/utils/x86/assembler_x86.h                   | 128
 compiler/utils/x86/jni_macro_assembler_x86.cc        | 541
 compiler/utils/x86/jni_macro_assembler_x86.h         | 162
 compiler/utils/x86_64/assembler_x86_64.cc            | 541
 compiler/utils/x86_64/assembler_x86_64.h             | 120
 compiler/utils/x86_64/assembler_x86_64_test.cc       | 123
 compiler/utils/x86_64/jni_macro_assembler_x86_64.cc  | 603
 compiler/utils/x86_64/jni_macro_assembler_x86_64.h   | 190
 33 files changed, 4078 insertions(+), 3207 deletions(-)
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 1796b3940c..d5cd59d481 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -376,508 +376,6 @@ void ArmAssembler::Pad(uint32_t bytes) {
}
}
-static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::ArmCore(static_cast<int>(reg));
-}
-
-static dwarf::Reg DWARFReg(SRegister reg) {
- return dwarf::Reg::ArmFp(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
-
-void ArmAssembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- CHECK_EQ(buffer_.Size(), 0U); // Nothing emitted yet
- CHECK_ALIGNED(frame_size, kStackAlignment);
- CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
-
- // Push callee saves and link register.
- RegList core_spill_mask = 1 << LR;
- uint32_t fp_spill_mask = 0;
- for (const ManagedRegister& reg : callee_save_regs) {
- if (reg.AsArm().IsCoreRegister()) {
- core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
- } else {
- fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
- }
- }
- PushList(core_spill_mask);
- cfi_.AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
- cfi_.RelOffsetForMany(DWARFReg(Register(0)), 0, core_spill_mask, kFramePointerSize);
- if (fp_spill_mask != 0) {
- vpushs(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
- cfi_.AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
- cfi_.RelOffsetForMany(DWARFReg(SRegister(0)), 0, fp_spill_mask, kFramePointerSize);
- }
-
- // Increase frame to required size.
- int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
- CHECK_GT(frame_size, pushed_values * kFramePointerSize); // Must at least have space for Method*.
- IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize); // handles CFI as well.
-
- // Write out Method*.
- StoreToOffset(kStoreWord, R0, SP, 0);
-
- // Write out entry spills.
- int32_t offset = frame_size + kFramePointerSize;
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ArmManagedRegister reg = entry_spills.at(i).AsArm();
- if (reg.IsNoRegister()) {
- // only increment stack offset.
- ManagedRegisterSpill spill = entry_spills.at(i);
- offset += spill.getSize();
- } else if (reg.IsCoreRegister()) {
- StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
- offset += 4;
- } else if (reg.IsSRegister()) {
- StoreSToOffset(reg.AsSRegister(), SP, offset);
- offset += 4;
- } else if (reg.IsDRegister()) {
- StoreDToOffset(reg.AsDRegister(), SP, offset);
- offset += 8;
- }
- }
-}
-
-void ArmAssembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- cfi_.RememberState();
-
- // Compute callee saves to pop and PC.
- RegList core_spill_mask = 1 << PC;
- uint32_t fp_spill_mask = 0;
- for (const ManagedRegister& reg : callee_save_regs) {
- if (reg.AsArm().IsCoreRegister()) {
- core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
- } else {
- fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
- }
- }
-
- // Decrease frame to start of callee saves.
- int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
- CHECK_GT(frame_size, pop_values * kFramePointerSize);
- DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize)); // handles CFI as well.
-
- if (fp_spill_mask != 0) {
- vpops(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
- cfi_.AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
- cfi_.RestoreMany(DWARFReg(SRegister(0)), fp_spill_mask);
- }
-
- // Pop callee saves and PC.
- PopList(core_spill_mask);
-
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
-void ArmAssembler::IncreaseFrameSize(size_t adjust) {
- AddConstant(SP, -adjust);
- cfi_.AdjustCFAOffset(adjust);
-}
-
-void ArmAssembler::DecreaseFrameSize(size_t adjust) {
- AddConstant(SP, adjust);
- cfi_.AdjustCFAOffset(-adjust);
-}
-
-void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
- ArmManagedRegister src = msrc.AsArm();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsCoreRegister()) {
- CHECK_EQ(4u, size);
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
- } else if (src.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
- StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
- SP, dest.Int32Value() + 4);
- } else if (src.IsSRegister()) {
- StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
- } else {
- CHECK(src.IsDRegister()) << src;
- StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
- }
-}
-
-void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
- ArmManagedRegister src = msrc.AsArm();
- CHECK(src.IsCoreRegister()) << src;
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
- ArmManagedRegister src = msrc.AsArm();
- CHECK(src.IsCoreRegister()) << src;
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
- FrameOffset in_off, ManagedRegister mscratch) {
- ArmManagedRegister src = msrc.AsArm();
- ArmManagedRegister scratch = mscratch.AsArm();
- StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
-}
-
-void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) {
- ArmManagedRegister dst = mdest.AsArm();
- CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
- base.AsArm().AsCoreRegister(), offs.Int32Value());
- if (unpoison_reference) {
- MaybeUnpoisonHeapReference(dst.AsCoreRegister());
- }
-}
-
-void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
- ArmManagedRegister dst = mdest.AsArm();
- CHECK(dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
-}
-
-void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
- ArmManagedRegister dst = mdest.AsArm();
- CHECK(dst.IsCoreRegister() && dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
- base.AsArm().AsCoreRegister(), offs.Int32Value());
-}
-
-void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadImmediate(scratch.AsCoreRegister(), imm);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void ArmAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
- uint32_t imm,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadImmediate(scratch.AsCoreRegister(), imm);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
-}
-
-static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
- Register src_register, int32_t src_offset, size_t size) {
- ArmManagedRegister dst = m_dst.AsArm();
- if (dst.IsNoRegister()) {
- CHECK_EQ(0u, size) << dst;
- } else if (dst.IsCoreRegister()) {
- CHECK_EQ(4u, size) << dst;
- assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
- } else if (dst.IsRegisterPair()) {
- CHECK_EQ(8u, size) << dst;
- assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
- assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
- } else if (dst.IsSRegister()) {
- assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
- } else {
- CHECK(dst.IsDRegister()) << dst;
- assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
- }
-}
-
-void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
- return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
-}
-
-void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset32 src, size_t size) {
- return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
-}
-
-void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset32 offs) {
- ArmManagedRegister dst = m_dst.AsArm();
- CHECK(dst.IsCoreRegister()) << dst;
- LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
-}
-
-void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- TR, thr_offs.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
- SP, fr_offs.Int32Value());
-}
-
-void ArmAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- SP, fr_offs.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
- TR, thr_offs.Int32Value());
-}
-
-void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
- TR, thr_offs.Int32Value());
-}
-
-void ArmAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
- StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
-}
-
-void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
-}
-
-void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
-}
-
-void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
- ArmManagedRegister dst = m_dst.AsArm();
- ArmManagedRegister src = m_src.AsArm();
- if (!dst.Equals(src)) {
- if (dst.IsCoreRegister()) {
- CHECK(src.IsCoreRegister()) << src;
- mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
- } else if (dst.IsDRegister()) {
- CHECK(src.IsDRegister()) << src;
- vmovd(dst.AsDRegister(), src.AsDRegister());
- } else if (dst.IsSRegister()) {
- CHECK(src.IsSRegister()) << src;
- vmovs(dst.AsSRegister(), src.AsSRegister());
- } else {
- CHECK(dst.IsRegisterPair()) << dst;
- CHECK(src.IsRegisterPair()) << src;
- // Ensure that the first move doesn't clobber the input of the second.
- if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
- mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
- mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
- } else {
- mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
- mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
- }
- }
- }
-}
-
-void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
- }
-}
-
-void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsArm().AsCoreRegister();
- CHECK_EQ(size, 4u);
- LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
- StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
-}
-
-void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsArm().AsCoreRegister();
- CHECK_EQ(size, 4u);
- LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
- StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
-}
-
-void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- CHECK_EQ(size, 4u);
- Register scratch = mscratch.AsArm().AsCoreRegister();
- LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
- StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
-}
-
-void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
- ArmManagedRegister out_reg = mout_reg.AsArm();
- ArmManagedRegister in_reg = min_reg.AsArm();
- CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
- CHECK(out_reg.IsCoreRegister()) << out_reg;
- if (null_allowed) {
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
- if (in_reg.IsNoRegister()) {
- LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- SP, handle_scope_offset.Int32Value());
- in_reg = out_reg;
- }
- cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
- if (!out_reg.Equals(in_reg)) {
- it(EQ, kItElse);
- LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
- } else {
- it(NE);
- }
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
- } else {
- AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
- }
-}
-
-void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- if (null_allowed) {
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
- handle_scope_offset.Int32Value());
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
- cmp(scratch.AsCoreRegister(), ShifterOperand(0));
- it(NE);
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
- } else {
- AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
- }
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
-}
-
-void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- ArmManagedRegister out_reg = mout_reg.AsArm();
- ArmManagedRegister in_reg = min_reg.AsArm();
- CHECK(out_reg.IsCoreRegister()) << out_reg;
- CHECK(in_reg.IsCoreRegister()) << in_reg;
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsCoreRegister(), 0, EQ); // TODO: why EQ?
- }
- cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
- it(NE);
- LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
- in_reg.AsCoreRegister(), 0, NE);
-}
-
-void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
- ManagedRegister mscratch) {
- ArmManagedRegister base = mbase.AsArm();
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(base.IsCoreRegister()) << base;
- CHECK(scratch.IsCoreRegister()) << scratch;
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- base.AsCoreRegister(), offset.Int32Value());
- blx(scratch.AsCoreRegister());
- // TODO: place reference map on call.
-}
-
-void ArmAssembler::Call(FrameOffset base, Offset offset,
- ManagedRegister mscratch) {
- ArmManagedRegister scratch = mscratch.AsArm();
- CHECK(scratch.IsCoreRegister()) << scratch;
- // Call *(*(SP + base) + offset)
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- SP, base.Int32Value());
- LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
- scratch.AsCoreRegister(), offset.Int32Value());
- blx(scratch.AsCoreRegister());
- // TODO: place reference map on call
-}
-
-void ArmAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
- mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
-}
-
-void ArmAssembler::GetCurrentThread(FrameOffset offset,
- ManagedRegister /*scratch*/) {
- StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
-}
-
-void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
- ArmManagedRegister scratch = mscratch.AsArm();
- ArmExceptionSlowPath* slow = new (GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
- buffer_.EnqueueSlowPath(slow);
- LoadFromOffset(kLoadWord,
- scratch.AsCoreRegister(),
- TR,
- Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
- cmp(scratch.AsCoreRegister(), ShifterOperand(0));
- b(slow->Entry(), NE);
-}
-
-void ArmExceptionSlowPath::Emit(Assembler* sasm) {
- ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_);
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception object as argument.
- // Don't care about preserving R0 as this call won't return.
- __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
- // Set up call to Thread::Current()->pDeliverException.
- __ LoadFromOffset(kLoadWord,
- R12,
- TR,
- QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value());
- __ blx(R12);
-#undef __
-}
-
-
static int LeadingZeros(uint32_t val) {
uint32_t alt;
int32_t n;
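
The frame code removed above reappears, modulo the asm_-> indirection, in the new compiler/utils/arm/jni_macro_assembler_arm.cc later in this diff. Its accounting rests on two bit tricks: POPCOUNT of a spill mask counts the pushed 4-byte slots, and CTZ of the FP mask picks the first SRegister handed to vpushs. A standalone sketch of that arithmetic, using compiler builtins in place of ART's POPCOUNT/CTZ helpers; the masks and frame size here are made-up values:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // LR is always in the core spill mask; R5/R6 and S16-S23 are invented extras.
      uint32_t core_spill_mask = (1u << 14) | (1u << 5) | (1u << 6);
      uint32_t fp_spill_mask = 0xffu << 16;
      int pushed = __builtin_popcount(core_spill_mask) +
                   __builtin_popcount(fp_spill_mask);
      int first_s = __builtin_ctz(fp_spill_mask);  // vpushs starts at this SRegister
      size_t frame_size = 96;  // must be stack-aligned and exceed the pushed slots
      // BuildFrame extends the frame only by what the pushes did not cover,
      // leaving slot 0 free for the Method* stored at [SP, #0].
      size_t adjust = frame_size - pushed * 4;  // kFramePointerSize == 4 on ARM
      printf("push %d slots, vpush from s%d, then sub sp, #%zu\n",
             pushed, first_s, adjust);
      return 0;
    }
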
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 2b7414d892..ff0bbafb9a 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -23,12 +23,14 @@
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/bit_utils.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/value_object.h"
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
#include "offsets.h"
namespace art {
@@ -880,122 +882,6 @@ class ArmAssembler : public Assembler {
virtual void CompareAndBranchIfZero(Register r, Label* label) = 0;
virtual void CompareAndBranchIfNonZero(Register r, Label* label) = 0;
- //
- // Overridden common assembler high-level functionality
- //
-
- // Emit code that will create an activation on the stack
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-
- void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
- OVERRIDE;
-
- void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
-
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-
- void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
-
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
-
- // Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
-
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
-
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- // Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
static uint32_t ModifiedImmediate(uint32_t value);
static bool IsLowRegister(Register r) {
@@ -1073,18 +959,6 @@ class ArmAssembler : public Assembler {
ArenaVector<Label*> tracked_labels_;
};
-// Slowpath entered when Thread::Current()->_exception is non-null
-class ArmExceptionSlowPath FINAL : public SlowPath {
- public:
- ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust)
- : scratch_(scratch), stack_adjust_(stack_adjust) {
- }
- void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const ArmManagedRegister scratch_;
- const size_t stack_adjust_;
-};
-
} // namespace arm
} // namespace art
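
The interface block deleted from this header is what the commit hoists into utils/jni_macro_assembler.h as an architecture-neutral base class, parameterized on pointer size so the Thread32/Thread64 method suffixes can go away (see the renamed overrides such as LoadFromThread in the new jni_macro_assembler_arm.cc below). A heavily abridged sketch of that base class, reconstructed from the overrides visible in this diff rather than from the real header:

    // Sketch only; the real declaration lives in utils/jni_macro_assembler.h.
    template <PointerSize kPointerSize>
    class JNIMacroAssembler {
     public:
      virtual ~JNIMacroAssembler() {}

      virtual void BuildFrame(size_t frame_size,
                              ManagedRegister method_reg,
                              ArrayRef<const ManagedRegister> callee_save_regs,
                              const ManagedRegisterEntrySpills& entry_spills) = 0;
      virtual void RemoveFrame(size_t frame_size,
                               ArrayRef<const ManagedRegister> callee_save_regs) = 0;
      // The old "32" suffix is now carried by the template argument.
      virtual void LoadFromThread(ManagedRegister dst,
                                  ThreadOffset<kPointerSize> src,
                                  size_t size) = 0;
      // ... stores, copies, handle-scope helpers, Call, ExceptionPoll, etc.
    };
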
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index c95dfa8066..6f9d5f32af 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1664,12 +1664,6 @@ void Arm32Assembler::StoreDToOffset(DRegister reg,
}
-void Arm32Assembler::MemoryBarrier(ManagedRegister mscratch) {
- CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
- dmb(SY);
-}
-
-
void Arm32Assembler::dmb(DmbOptions flavor) {
int32_t encoding = 0xf57ff05f; // dmb
Emit(encoding | flavor);
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 554dd2350b..044eaa1edf 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -316,8 +316,6 @@ class Arm32Assembler FINAL : public ArmAssembler {
void Emit(int32_t value);
void Bind(Label* label) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) OVERRIDE;
void EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) OVERRIDE;
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 353c729249..ee69698ce8 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -2325,7 +2325,7 @@ void Thumb2Assembler::EmitLoadStore(Condition cond,
}
Register rn = ad.GetRegister();
- if (IsHighRegister(rn) && rn != SP && rn != PC) {
+ if (IsHighRegister(rn) && (byte || half || (rn != SP && rn != PC))) {
must_be_32bit = true;
}
@@ -2337,24 +2337,24 @@ void Thumb2Assembler::EmitLoadStore(Condition cond,
// Immediate offset
int32_t offset = ad.GetOffset();
- // The 16 bit SP relative instruction can only have a 10 bit offset.
- if (rn == SP && offset >= (1 << 10)) {
- must_be_32bit = true;
- }
-
if (byte) {
// 5 bit offset, no shift.
- if (offset >= (1 << 5)) {
+ if ((offset & ~0x1f) != 0) {
must_be_32bit = true;
}
} else if (half) {
- // 6 bit offset, shifted by 1.
- if (offset >= (1 << 6)) {
+ // 5 bit offset, shifted by 1.
+ if ((offset & ~(0x1f << 1)) != 0) {
+ must_be_32bit = true;
+ }
+ } else if (rn == SP || rn == PC) {
+ // The 16 bit SP/PC relative instruction can only have an (imm8 << 2) offset.
+ if ((offset & ~(0xff << 2)) != 0) {
must_be_32bit = true;
}
} else {
- // 7 bit offset, shifted by 2.
- if (offset >= (1 << 7)) {
+ // 5 bit offset, shifted by 2.
+ if ((offset & ~(0x1f << 2)) != 0) {
must_be_32bit = true;
}
}
@@ -2370,7 +2370,7 @@ void Thumb2Assembler::EmitLoadStore(Condition cond,
} else {
// 16 bit thumb1.
uint8_t opA = 0;
- bool sp_relative = false;
+ bool sp_or_pc_relative = false;
if (byte) {
opA = 7U /* 0b0111 */;
@@ -2379,7 +2379,10 @@ void Thumb2Assembler::EmitLoadStore(Condition cond,
} else {
if (rn == SP) {
opA = 9U /* 0b1001 */;
- sp_relative = true;
+ sp_or_pc_relative = true;
+ } else if (rn == PC) {
+ opA = 4U;
+ sp_or_pc_relative = true;
} else {
opA = 6U /* 0b0110 */;
}
@@ -2388,7 +2391,7 @@ void Thumb2Assembler::EmitLoadStore(Condition cond,
(load ? B11 : 0);
CHECK_GE(offset, 0);
- if (sp_relative) {
+ if (sp_or_pc_relative) {
// SP relative, 10 bit offset.
CHECK_LT(offset, (1 << 10));
CHECK_ALIGNED(offset, 4);
@@ -3860,12 +3863,6 @@ void Thumb2Assembler::StoreDToOffset(DRegister reg,
}
-void Thumb2Assembler::MemoryBarrier(ManagedRegister mscratch) {
- CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
- dmb(SY);
-}
-
-
void Thumb2Assembler::dmb(DmbOptions flavor) {
int32_t encoding = 0xf3bf8f50; // dmb in T1 encoding.
Emit32(encoding | flavor);
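
The offset checks rewritten in EmitLoadStore above are stricter than the old `offset >= (1 << N)` comparisons: a mask test also rejects negative and misaligned immediates, which must fall back to the 32-bit `.w` encoding. A minimal standalone illustration for the register-base word case (imm5 << 2, so 0..124 word-aligned); the helper name is invented for illustration:

    #include <cassert>
    #include <cstdint>

    // True when a 16-bit LDR/STR with a low register base can encode the offset.
    bool Fits16BitWordOffset(int32_t offset) {
      return (offset & ~(0x1f << 2)) == 0;
    }

    int main() {
      assert(Fits16BitWordOffset(0));
      assert(Fits16BitWordOffset(124));   // imm5 == 31
      assert(!Fits16BitWordOffset(128));  // out of range -> must_be_32bit
      assert(!Fits16BitWordOffset(2));    // misaligned  -> must_be_32bit
      assert(!Fits16BitWordOffset(-4));   // negative    -> must_be_32bit
      return 0;
    }
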
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index 4ee23c0e27..1c1c98b52b 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -368,8 +368,6 @@ class Thumb2Assembler FINAL : public ArmAssembler {
void Emit16(int16_t value); // Emit a 16 bit instruction in little endian format.
void Bind(Label* label) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
// Force the assembler to generate 32 bit instructions.
void Force32Bit() {
force_32bit_ = true;
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index abb09f726f..3ca37145d5 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -279,6 +279,148 @@ TEST_F(AssemblerThumb2Test, smull) {
DriverStr(expected, "smull");
}
+TEST_F(AssemblerThumb2Test, LoadByteFromThumbOffset) {
+ arm::LoadOperandType type = arm::kLoadUnsignedByte;
+
+ __ LoadFromOffset(type, arm::R0, arm::R7, 0);
+ __ LoadFromOffset(type, arm::R1, arm::R7, 31);
+ __ LoadFromOffset(type, arm::R2, arm::R7, 32);
+ __ LoadFromOffset(type, arm::R3, arm::R7, 4095);
+ __ LoadFromOffset(type, arm::R4, arm::SP, 0);
+
+ const char* expected =
+ "ldrb r0, [r7, #0]\n"
+ "ldrb r1, [r7, #31]\n"
+ "ldrb.w r2, [r7, #32]\n"
+ "ldrb.w r3, [r7, #4095]\n"
+ "ldrb.w r4, [sp, #0]\n";
+ DriverStr(expected, "LoadByteFromThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, StoreByteToThumbOffset) {
+ arm::StoreOperandType type = arm::kStoreByte;
+
+ __ StoreToOffset(type, arm::R0, arm::R7, 0);
+ __ StoreToOffset(type, arm::R1, arm::R7, 31);
+ __ StoreToOffset(type, arm::R2, arm::R7, 32);
+ __ StoreToOffset(type, arm::R3, arm::R7, 4095);
+ __ StoreToOffset(type, arm::R4, arm::SP, 0);
+
+ const char* expected =
+ "strb r0, [r7, #0]\n"
+ "strb r1, [r7, #31]\n"
+ "strb.w r2, [r7, #32]\n"
+ "strb.w r3, [r7, #4095]\n"
+ "strb.w r4, [sp, #0]\n";
+ DriverStr(expected, "StoreByteToThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, LoadHalfFromThumbOffset) {
+ arm::LoadOperandType type = arm::kLoadUnsignedHalfword;
+
+ __ LoadFromOffset(type, arm::R0, arm::R7, 0);
+ __ LoadFromOffset(type, arm::R1, arm::R7, 62);
+ __ LoadFromOffset(type, arm::R2, arm::R7, 64);
+ __ LoadFromOffset(type, arm::R3, arm::R7, 4094);
+ __ LoadFromOffset(type, arm::R4, arm::SP, 0);
+ __ LoadFromOffset(type, arm::R5, arm::R7, 1); // Unaligned
+
+ const char* expected =
+ "ldrh r0, [r7, #0]\n"
+ "ldrh r1, [r7, #62]\n"
+ "ldrh.w r2, [r7, #64]\n"
+ "ldrh.w r3, [r7, #4094]\n"
+ "ldrh.w r4, [sp, #0]\n"
+ "ldrh.w r5, [r7, #1]\n";
+ DriverStr(expected, "LoadHalfFromThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, StoreHalfToThumbOffset) {
+ arm::StoreOperandType type = arm::kStoreHalfword;
+
+ __ StoreToOffset(type, arm::R0, arm::R7, 0);
+ __ StoreToOffset(type, arm::R1, arm::R7, 62);
+ __ StoreToOffset(type, arm::R2, arm::R7, 64);
+ __ StoreToOffset(type, arm::R3, arm::R7, 4094);
+ __ StoreToOffset(type, arm::R4, arm::SP, 0);
+ __ StoreToOffset(type, arm::R5, arm::R7, 1); // Unaligned
+
+ const char* expected =
+ "strh r0, [r7, #0]\n"
+ "strh r1, [r7, #62]\n"
+ "strh.w r2, [r7, #64]\n"
+ "strh.w r3, [r7, #4094]\n"
+ "strh.w r4, [sp, #0]\n"
+ "strh.w r5, [r7, #1]\n";
+ DriverStr(expected, "StoreHalfToThumbOffset");
+}
+
+TEST_F(AssemblerThumb2Test, LoadWordFromSpPlusOffset) {
+ arm::LoadOperandType type = arm::kLoadWord;
+
+ __ LoadFromOffset(type, arm::R0, arm::SP, 0);
+ __ LoadFromOffset(type, arm::R1, arm::SP, 124);
+ __ LoadFromOffset(type, arm::R2, arm::SP, 128);
+ __ LoadFromOffset(type, arm::R3, arm::SP, 1020);
+ __ LoadFromOffset(type, arm::R4, arm::SP, 1024);
+ __ LoadFromOffset(type, arm::R5, arm::SP, 4092);
+ __ LoadFromOffset(type, arm::R6, arm::SP, 1); // Unaligned
+
+ const char* expected =
+ "ldr r0, [sp, #0]\n"
+ "ldr r1, [sp, #124]\n"
+ "ldr r2, [sp, #128]\n"
+ "ldr r3, [sp, #1020]\n"
+ "ldr.w r4, [sp, #1024]\n"
+ "ldr.w r5, [sp, #4092]\n"
+ "ldr.w r6, [sp, #1]\n";
+ DriverStr(expected, "LoadWordFromSpPlusOffset");
+}
+
+TEST_F(AssemblerThumb2Test, StoreWordToSpPlusOffset) {
+ arm::StoreOperandType type = arm::kStoreWord;
+
+ __ StoreToOffset(type, arm::R0, arm::SP, 0);
+ __ StoreToOffset(type, arm::R1, arm::SP, 124);
+ __ StoreToOffset(type, arm::R2, arm::SP, 128);
+ __ StoreToOffset(type, arm::R3, arm::SP, 1020);
+ __ StoreToOffset(type, arm::R4, arm::SP, 1024);
+ __ StoreToOffset(type, arm::R5, arm::SP, 4092);
+ __ StoreToOffset(type, arm::R6, arm::SP, 1); // Unaligned
+
+ const char* expected =
+ "str r0, [sp, #0]\n"
+ "str r1, [sp, #124]\n"
+ "str r2, [sp, #128]\n"
+ "str r3, [sp, #1020]\n"
+ "str.w r4, [sp, #1024]\n"
+ "str.w r5, [sp, #4092]\n"
+ "str.w r6, [sp, #1]\n";
+ DriverStr(expected, "StoreWordToSpPlusOffset");
+}
+
+TEST_F(AssemblerThumb2Test, LoadWordFromPcPlusOffset) {
+ arm::LoadOperandType type = arm::kLoadWord;
+
+ __ LoadFromOffset(type, arm::R0, arm::PC, 0);
+ __ LoadFromOffset(type, arm::R1, arm::PC, 124);
+ __ LoadFromOffset(type, arm::R2, arm::PC, 128);
+ __ LoadFromOffset(type, arm::R3, arm::PC, 1020);
+ __ LoadFromOffset(type, arm::R4, arm::PC, 1024);
+ __ LoadFromOffset(type, arm::R5, arm::PC, 4092);
+ __ LoadFromOffset(type, arm::R6, arm::PC, 1); // Unaligned
+
+ const char* expected =
+ "ldr r0, [pc, #0]\n"
+ "ldr r1, [pc, #124]\n"
+ "ldr r2, [pc, #128]\n"
+ "ldr r3, [pc, #1020]\n"
+ "ldr.w r4, [pc, #1024]\n"
+ "ldr.w r5, [pc, #4092]\n"
+ "ldr.w r6, [pc, #1]\n";
+ DriverStr(expected, "LoadWordFromPcPlusOffset");
+}
+
TEST_F(AssemblerThumb2Test, StoreWordToThumbOffset) {
arm::StoreOperandType type = arm::kStoreWord;
int32_t offset = 4092;
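
The LoadWordFromPcPlusOffset test above exercises the new 16-bit PC-relative path in EmitLoadStore (opA == 4, i.e. LDR literal), which shares the imm8 << 2 offset range with the SP form. A hedged sketch of that T1 encoding, with the helper name invented for illustration:

    #include <cassert>
    #include <cstdint>

    // T1 LDR (literal): 01001 | Rt(3 bits) | imm8, loading from PC + imm8 * 4.
    // Caller guarantees 0 <= offset <= 1020 and offset % 4 == 0, matching the
    // CHECK_LT/CHECK_ALIGNED in the assembler.
    uint16_t EncodeLdrLiteralT1(int rt, int32_t offset) {
      return static_cast<uint16_t>((4 << 12) | (1 << 11) | (rt << 8) | (offset >> 2));
    }

    int main() {
      assert(EncodeLdrLiteralT1(0, 0) == 0x4800);     // ldr r0, [pc, #0]
      assert(EncodeLdrLiteralT1(3, 1020) == 0x4bff);  // ldr r3, [pc, #1020]
      return 0;
    }
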
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.cc b/compiler/utils/arm/jni_macro_assembler_arm.cc
new file mode 100644
index 0000000000..c03981653e
--- /dev/null
+++ b/compiler/utils/arm/jni_macro_assembler_arm.cc
@@ -0,0 +1,612 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_arm.h"
+
+#include <algorithm>
+
+#include "assembler_arm32.h"
+#include "assembler_thumb2.h"
+#include "base/arena_allocator.h"
+#include "base/bit_utils.h"
+#include "base/logging.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "offsets.h"
+#include "thread.h"
+
+namespace art {
+namespace arm {
+
+constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
+
+// Slowpath entered when Thread::Current()->_exception is non-null
+class ArmExceptionSlowPath FINAL : public SlowPath {
+ public:
+ ArmExceptionSlowPath(ArmManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {
+ }
+ void Emit(Assembler *sp_asm) OVERRIDE;
+ private:
+ const ArmManagedRegister scratch_;
+ const size_t stack_adjust_;
+};
+
+ArmJNIMacroAssembler::ArmJNIMacroAssembler(ArenaAllocator* arena, InstructionSet isa) {
+ switch (isa) {
+ case kArm:
+ asm_.reset(new (arena) Arm32Assembler(arena));
+ break;
+
+ case kThumb2:
+ asm_.reset(new (arena) Thumb2Assembler(arena));
+ break;
+
+ default:
+ LOG(FATAL) << isa;
+ UNREACHABLE();
+ }
+}
+
+ArmJNIMacroAssembler::~ArmJNIMacroAssembler() {
+}
+
+size_t ArmJNIMacroAssembler::CodeSize() const {
+ return asm_->CodeSize();
+}
+
+DebugFrameOpCodeWriterForAssembler& ArmJNIMacroAssembler::cfi() {
+ return asm_->cfi();
+}
+
+void ArmJNIMacroAssembler::FinalizeCode() {
+ asm_->FinalizeCode();
+}
+
+void ArmJNIMacroAssembler::FinalizeInstructions(const MemoryRegion& region) {
+ asm_->FinalizeInstructions(region);
+}
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::ArmCore(static_cast<int>(reg));
+}
+
+static dwarf::Reg DWARFReg(SRegister reg) {
+ return dwarf::Reg::ArmFp(static_cast<int>(reg));
+}
+
+#define __ asm_->
+
+void ArmJNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ CHECK_EQ(CodeSize(), 0U); // Nothing emitted yet
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
+
+ // Push callee saves and link register.
+ RegList core_spill_mask = 1 << LR;
+ uint32_t fp_spill_mask = 0;
+ for (const ManagedRegister& reg : callee_save_regs) {
+ if (reg.AsArm().IsCoreRegister()) {
+ core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+ } else {
+ fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+ }
+ }
+ __ PushList(core_spill_mask);
+ cfi().AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
+ cfi().RelOffsetForMany(DWARFReg(Register(0)), 0, core_spill_mask, kFramePointerSize);
+ if (fp_spill_mask != 0) {
+ __ vpushs(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
+ cfi().AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
+ cfi().RelOffsetForMany(DWARFReg(SRegister(0)), 0, fp_spill_mask, kFramePointerSize);
+ }
+
+ // Increase frame to required size.
+ int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+ CHECK_GT(frame_size, pushed_values * kFramePointerSize); // Must at least have space for Method*.
+ IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize); // handles CFI as well.
+
+ // Write out Method*.
+ __ StoreToOffset(kStoreWord, R0, SP, 0);
+
+ // Write out entry spills.
+ int32_t offset = frame_size + kFramePointerSize;
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ArmManagedRegister reg = entry_spills.at(i).AsArm();
+ if (reg.IsNoRegister()) {
+ // only increment stack offset.
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ offset += spill.getSize();
+ } else if (reg.IsCoreRegister()) {
+ __ StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsSRegister()) {
+ __ StoreSToOffset(reg.AsSRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsDRegister()) {
+ __ StoreDToOffset(reg.AsDRegister(), SP, offset);
+ offset += 8;
+ }
+ }
+}
+
+void ArmJNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+
+ // Compute callee saves to pop and PC.
+ RegList core_spill_mask = 1 << PC;
+ uint32_t fp_spill_mask = 0;
+ for (const ManagedRegister& reg : callee_save_regs) {
+ if (reg.AsArm().IsCoreRegister()) {
+ core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+ } else {
+ fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+ }
+ }
+
+ // Decrease frame to start of callee saves.
+ int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+ CHECK_GT(frame_size, pop_values * kFramePointerSize);
+ DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize)); // handles CFI as well.
+
+ if (fp_spill_mask != 0) {
+ __ vpops(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
+ cfi().AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
+ cfi().RestoreMany(DWARFReg(SRegister(0)), fp_spill_mask);
+ }
+
+ // Pop callee saves and PC.
+ __ PopList(core_spill_mask);
+
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+void ArmJNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ __ AddConstant(SP, -adjust);
+ cfi().AdjustCFAOffset(adjust);
+}
+
+static void DecreaseFrameSizeImpl(ArmAssembler* assembler, size_t adjust) {
+ assembler->AddConstant(SP, adjust);
+ assembler->cfi().AdjustCFAOffset(-adjust);
+}
+
+void ArmJNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ DecreaseFrameSizeImpl(asm_.get(), adjust);
+}
+
+void ArmJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
+ ArmManagedRegister src = msrc.AsArm();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCoreRegister()) {
+ CHECK_EQ(4u, size);
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
+ __ StoreToOffset(kStoreWord, src.AsRegisterPairHigh(), SP, dest.Int32Value() + 4);
+ } else if (src.IsSRegister()) {
+ __ StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
+ } else {
+ CHECK(src.IsDRegister()) << src;
+ __ StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+ }
+}
+
+void ArmJNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ ArmManagedRegister src = msrc.AsArm();
+ CHECK(src.IsCoreRegister()) << src;
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreSpanning(FrameOffset dest,
+ ManagedRegister msrc,
+ FrameOffset in_off,
+ ManagedRegister mscratch) {
+ ArmManagedRegister src = msrc.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ __ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + sizeof(uint32_t));
+}
+
+void ArmJNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::LoadRef(ManagedRegister mdest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord,
+ dst.AsCoreRegister(),
+ base.AsArm().AsCoreRegister(),
+ offs.Int32Value());
+ if (unpoison_reference) {
+ __ MaybeUnpoisonHeapReference(dst.AsCoreRegister());
+ }
+}
+
+void ArmJNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
+}
+
+void ArmJNIMacroAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+ Offset offs) {
+ ArmManagedRegister dst = mdest.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord,
+ dst.AsCoreRegister(),
+ base.AsArm().AsCoreRegister(),
+ offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
+ uint32_t imm,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadImmediate(scratch.AsCoreRegister(), imm);
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+static void EmitLoad(ArmAssembler* assembler,
+ ManagedRegister m_dst,
+ Register src_register,
+ int32_t src_offset,
+ size_t size) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ if (dst.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dst;
+ } else if (dst.IsCoreRegister()) {
+ CHECK_EQ(4u, size) << dst;
+ assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
+ } else if (dst.IsRegisterPair()) {
+ CHECK_EQ(8u, size) << dst;
+ assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
+ assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
+ } else if (dst.IsSRegister()) {
+ assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
+ } else {
+ CHECK(dst.IsDRegister()) << dst;
+ assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
+ }
+}
+
+void ArmJNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+ EmitLoad(asm_.get(), m_dst, SP, src.Int32Value(), size);
+}
+
+void ArmJNIMacroAssembler::LoadFromThread(ManagedRegister m_dst, ThreadOffset32 src, size_t size) {
+ EmitLoad(asm_.get(), m_dst, TR, src.Int32Value(), size);
+}
+
+void ArmJNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset32 offs) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ CHECK(dst.IsCoreRegister()) << dst;
+ __ LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), TR, thr_offs.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, thr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, thr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
+ __ StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
+}
+
+void ArmJNIMacroAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
+}
+
+void ArmJNIMacroAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
+}
+
+void ArmJNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
+ ArmManagedRegister dst = m_dst.AsArm();
+ ArmManagedRegister src = m_src.AsArm();
+ if (!dst.Equals(src)) {
+ if (dst.IsCoreRegister()) {
+ CHECK(src.IsCoreRegister()) << src;
+ __ mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
+ } else if (dst.IsDRegister()) {
+ CHECK(src.IsDRegister()) << src;
+ __ vmovd(dst.AsDRegister(), src.AsDRegister());
+ } else if (dst.IsSRegister()) {
+ CHECK(src.IsSRegister()) << src;
+ __ vmovs(dst.AsSRegister(), src.AsSRegister());
+ } else {
+ CHECK(dst.IsRegisterPair()) << dst;
+ CHECK(src.IsRegisterPair()) << src;
+ // Ensure that the first move doesn't clobber the input of the second.
+ if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
+ __ mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+ __ mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+ } else {
+ __ mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+ __ mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+ }
+ }
+ }
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+ }
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ __ LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
+}
+
+void ArmJNIMacroAssembler::Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ CHECK_EQ(size, 4u);
+ __ LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
+ __ StoreToOffset(kStoreWord,
+ scratch,
+ dest_base.AsArm().AsCoreRegister(),
+ dest_offset.Int32Value());
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ FrameOffset /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmJNIMacroAssembler::Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ CHECK_EQ(size, 4u);
+ Register scratch = mscratch.AsArm().AsCoreRegister();
+ __ LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
+ __ StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void ArmJNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ Offset /*dest_offset*/,
+ FrameOffset /*src*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ if (null_allowed) {
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ __ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ in_reg = out_reg;
+ }
+ __ cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+ if (!out_reg.Equals(in_reg)) {
+ __ it(EQ, kItElse);
+ __ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+ } else {
+ __ it(NE);
+ }
+ __ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
+ } else {
+ __ AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
+ }
+}
+
+void ArmJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ if (null_allowed) {
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ __ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+ __ it(NE);
+ __ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
+ } else {
+ __ AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
+ }
+ __ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
+}
+
+void ArmJNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ ArmManagedRegister out_reg = mout_reg.AsArm();
+ ArmManagedRegister in_reg = min_reg.AsArm();
+ CHECK(out_reg.IsCoreRegister()) << out_reg;
+ CHECK(in_reg.IsCoreRegister()) << in_reg;
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ); // TODO: why EQ?
+ }
+ __ cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+ __ it(NE);
+ __ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0, NE);
+}
+
+void ArmJNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void ArmJNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void ArmJNIMacroAssembler::Call(ManagedRegister mbase, Offset offset,
+ ManagedRegister mscratch) {
+ ArmManagedRegister base = mbase.AsArm();
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(base.IsCoreRegister()) << base;
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ __ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ base.AsCoreRegister(),
+ offset.Int32Value());
+ __ blx(scratch.AsCoreRegister());
+ // TODO: place reference map on call.
+}
+
+void ArmJNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ __ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, base.Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ scratch.AsCoreRegister(),
+ offset.Int32Value());
+ __ blx(scratch.AsCoreRegister());
+  // TODO: place reference map on call.
+}
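+
+// Illustrative sketch, not part of the original change: the two loads above
+// implement the double indirection from the comment:
+//
+//   ldr scratch, [sp, #base]       ; scratch = *(SP + base)
+//   ldr scratch, [scratch, #off]   ; scratch = *(scratch + offset)
+//   blx scratch                    ; call the loaded code pointer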
+
+void ArmJNIMacroAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void ArmJNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ __ mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
+}
+
+void ArmJNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister /*scratch*/) {
+ __ StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
+}
+
+void ArmJNIMacroAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
+ ArmManagedRegister scratch = mscratch.AsArm();
+ ArmExceptionSlowPath* slow = new (__ GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
+ __ GetBuffer()->EnqueueSlowPath(slow);
+ __ LoadFromOffset(kLoadWord,
+ scratch.AsCoreRegister(),
+ TR,
+ Thread::ExceptionOffset<kArmPointerSize>().Int32Value());
+ __ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+ __ b(slow->Entry(), NE);
+}
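+
+// Illustrative sketch, not part of the original change: ExceptionPoll emits
+// only the cheap inline check; the delivery code is appended with the other
+// enqueued slow paths when the buffer is finalized. The fast path is roughly:
+//
+//   ldr scratch, [tr, #exception_offset]   ; Thread::Current()->exception_
+//   cmp scratch, #0
+//   bne <ArmExceptionSlowPath entry>       ; taken only if one is pending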
+
+#undef __
+
+void ArmExceptionSlowPath::Emit(Assembler* sasm) {
+ ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSizeImpl(sp_asm, stack_adjust_);
+ }
+ // Pass exception object as argument.
+ // Don't care about preserving R0 as this call won't return.
+ __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
+ // Set up call to Thread::Current()->pDeliverException.
+ __ LoadFromOffset(kLoadWord,
+ R12,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kArmPointerSize, pDeliverException).Int32Value());
+ __ blx(R12);
+#undef __
+}
+
+void ArmJNIMacroAssembler::MemoryBarrier(ManagedRegister mscratch) {
+ CHECK_EQ(mscratch.AsArm().AsCoreRegister(), R12);
+ asm_->dmb(SY);
+}
+
+} // namespace arm
+} // namespace art
diff --git a/compiler/utils/arm/jni_macro_assembler_arm.h b/compiler/utils/arm/jni_macro_assembler_arm.h
new file mode 100644
index 0000000000..4471906c27
--- /dev/null
+++ b/compiler/utils/arm/jni_macro_assembler_arm.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_H_
+#define ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_H_
+
+#include <memory>
+#include <type_traits>
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "utils/jni_macro_assembler.h"
+#include "offsets.h"
+
+namespace art {
+namespace arm {
+
+class ArmAssembler;
+
+class ArmJNIMacroAssembler : public JNIMacroAssembler<PointerSize::k32> {
+ public:
+ ArmJNIMacroAssembler(ArenaAllocator* arena, InstructionSet isa);
+ virtual ~ArmJNIMacroAssembler();
+
+ size_t CodeSize() const OVERRIDE;
+ DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE;
+ void FinalizeCode() OVERRIDE;
+ void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+
+ // Copying routines
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ // Sign extension
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current()
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load this into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset]
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+
+ private:
+ std::unique_ptr<ArmAssembler> asm_;
+};
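+
+// Illustrative usage sketch, not part of the original change (the allocator
+// name and the kThumb2 choice are assumptions):
+//
+//   ArmJNIMacroAssembler jni_asm(&arena, kThumb2);
+//   jni_asm.BuildFrame(frame_size, method_reg, callee_saves, entry_spills);
+//   ...                                    // stores, loads, Call(), etc.
+//   jni_asm.RemoveFrame(frame_size, callee_saves);
+//   jni_asm.FinalizeCode();
+//   jni_asm.FinalizeInstructions(region);  // copy out the generated code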
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM_JNI_MACRO_ASSEMBLER_ARM_H_
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index d82caf57e3..22221e752a 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -28,620 +28,49 @@ namespace arm64 {
#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
-#define ___ vixl_masm_->
+#define ___ vixl_masm_.
#endif
void Arm64Assembler::FinalizeCode() {
- for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
- EmitExceptionPoll(exception.get());
- }
___ FinalizeCode();
}
size_t Arm64Assembler::CodeSize() const {
- return vixl_masm_->GetBufferCapacity() - vixl_masm_->GetRemainingBufferSpace();
+ return vixl_masm_.GetBufferCapacity() - vixl_masm_.GetRemainingBufferSpace();
}
const uint8_t* Arm64Assembler::CodeBufferBaseAddress() const {
- return vixl_masm_->GetStartAddress<uint8_t*>();
+ return vixl_masm_.GetStartAddress<uint8_t*>();
}
void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
// Copy the instructions from the buffer.
- MemoryRegion from(vixl_masm_->GetStartAddress<void*>(), CodeSize());
+ MemoryRegion from(vixl_masm_.GetStartAddress<void*>(), CodeSize());
region.CopyFrom(0, from);
}
-void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
- ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
-}
-
-void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
- StoreToOffset(TR, SP, offset.Int32Value());
-}
-
-// See Arm64 PCS Section 5.2.2.1.
-void Arm64Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant(SP, -adjust);
- cfi().AdjustCFAOffset(adjust);
-}
-
-// See Arm64 PCS Section 5.2.2.1.
-void Arm64Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- AddConstant(SP, adjust);
- cfi().AdjustCFAOffset(-adjust);
-}
-
-void Arm64Assembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
- AddConstant(rd, rd, value, cond);
-}
-
-void Arm64Assembler::AddConstant(XRegister rd, XRegister rn, int32_t value,
- Condition cond) {
- if ((cond == al) || (cond == nv)) {
- // VIXL macro-assembler handles all variants.
- ___ Add(reg_x(rd), reg_x(rn), value);
- } else {
- // temp = rd + value
- // rd = cond ? temp : rn
- UseScratchRegisterScope temps(vixl_masm_);
- temps.Exclude(reg_x(rd), reg_x(rn));
- Register temp = temps.AcquireX();
- ___ Add(temp, reg_x(rn), value);
- ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
- }
-}
-
-void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source,
- XRegister base, int32_t offset) {
- switch (type) {
- case kStoreByte:
- ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
- break;
- case kStoreHalfword:
- ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
- break;
- case kStoreWord:
- ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
-}
-
-void Arm64Assembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
- CHECK_NE(source, SP);
- ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
- ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
- ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
- Arm64ManagedRegister src = m_src.AsArm64();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsWRegister()) {
- CHECK_EQ(4u, size);
- StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
- } else if (src.IsXRegister()) {
- CHECK_EQ(8u, size);
- StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
- } else if (src.IsSRegister()) {
- StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
- } else {
- CHECK(src.IsDRegister()) << src;
- StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
- }
-}
-
-void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
- Arm64ManagedRegister src = m_src.AsArm64();
- CHECK(src.IsXRegister()) << src;
- StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
- offs.Int32Value());
-}
-
-void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
- Arm64ManagedRegister src = m_src.AsArm64();
- CHECK(src.IsXRegister()) << src;
- StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
-}
-
-void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadImmediate(scratch.AsXRegister(), imm);
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
- offs.Int32Value());
-}
-
-void Arm64Assembler::StoreImmediateToThread64(ThreadOffset64 offs,
- uint32_t imm,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadImmediate(scratch.AsXRegister(), imm);
- StoreToOffset(scratch.AsXRegister(), TR, offs.Int32Value());
-}
-
-void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset64 tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-}
-
-void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset64 tr_offs) {
- UseScratchRegisterScope temps(vixl_masm_);
- Register temp = temps.AcquireX();
- ___ Mov(temp, reg_x(SP));
- ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
-}
-
-void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
- FrameOffset in_off, ManagedRegister m_scratch) {
- Arm64ManagedRegister source = m_source.AsArm64();
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
- LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
-}
-
-// Load routines.
-void Arm64Assembler::LoadImmediate(XRegister dest, int32_t value,
- Condition cond) {
- if ((cond == al) || (cond == nv)) {
- ___ Mov(reg_x(dest), value);
- } else {
- // temp = value
- // rd = cond ? temp : rd
- if (value != 0) {
- UseScratchRegisterScope temps(vixl_masm_);
- temps.Exclude(reg_x(dest));
- Register temp = temps.AcquireX();
- ___ Mov(temp, value);
- ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
- } else {
- ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
- }
- }
-}
-
-void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest,
- XRegister base, int32_t offset) {
- switch (type) {
- case kLoadSignedByte:
- ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadSignedHalfword:
- ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadUnsignedByte:
- ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadUnsignedHalfword:
- ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- case kLoadWord:
- ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
-}
-
-// Note: We can extend this member by adding load type info - see
-// sign extended A64 load variants.
-void Arm64Assembler::LoadFromOffset(XRegister dest, XRegister base,
- int32_t offset) {
- CHECK_NE(dest, SP);
- ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::LoadSFromOffset(SRegister dest, XRegister base,
- int32_t offset) {
- ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::LoadDFromOffset(DRegister dest, XRegister base,
- int32_t offset) {
- ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
-}
-
-void Arm64Assembler::Load(Arm64ManagedRegister dest, XRegister base,
- int32_t offset, size_t size) {
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size) << dest;
- } else if (dest.IsWRegister()) {
- CHECK_EQ(4u, size) << dest;
- ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
- } else if (dest.IsXRegister()) {
- CHECK_NE(dest.AsXRegister(), SP) << dest;
- if (size == 4u) {
- ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
- } else {
- CHECK_EQ(8u, size) << dest;
- ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
- }
- } else if (dest.IsSRegister()) {
- ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
- } else {
- CHECK(dest.IsDRegister()) << dest;
- ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
- }
-}
-
-void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
- return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
-}
-
-void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset64 src, size_t size) {
- return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
-}
-
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- CHECK(dst.IsXRegister()) << dst;
- LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
-}
-
-void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs,
- bool unpoison_reference) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- Arm64ManagedRegister base = m_base.AsArm64();
- CHECK(dst.IsXRegister() && base.IsXRegister());
- LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
- offs.Int32Value());
- if (unpoison_reference) {
- WRegister ref_reg = dst.AsOverlappingWRegister();
- MaybeUnpoisonHeapReference(reg_w(ref_reg));
- }
-}
-
void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
Arm64ManagedRegister base = m_base.AsArm64();
CHECK(dst.IsXRegister() && base.IsXRegister());
  // Remove dst and base from the temp list - the higher-level API uses IP1, IP0.
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}
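
// Illustrative note, not part of the original change: callers may pass IP0 or
// IP1 - VIXL's default scratch registers - as dst or base, so excluding them
// stops the macro assembler from clobbering a live value if it needs a temp,
// e.g. to materialize an offset that does not fit the ldr encoding:
//
//   mov ip0, #large_offset
//   ldr dst, [base, ip0]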
-void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset64 offs) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- CHECK(dst.IsXRegister()) << dst;
- LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
-}
-
-// Copying routines.
-void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
- Arm64ManagedRegister dst = m_dst.AsArm64();
- Arm64ManagedRegister src = m_src.AsArm64();
- if (!dst.Equals(src)) {
- if (dst.IsXRegister()) {
- if (size == 4) {
- CHECK(src.IsWRegister());
- ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
- } else {
- if (src.IsXRegister()) {
- ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
- } else {
- ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
- }
- }
- } else if (dst.IsWRegister()) {
- CHECK(src.IsWRegister()) << src;
- ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
- } else if (dst.IsSRegister()) {
- CHECK(src.IsSRegister()) << src;
- ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
- } else {
- CHECK(dst.IsDRegister()) << dst;
- CHECK(src.IsDRegister()) << src;
- ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
- }
- }
-}
-
-void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 tr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
-}
-
-void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset64 tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
-}
-
-void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
- SP, src.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
- SP, dest.Int32Value());
-}
-
-void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister base = src_base.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
- StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister base = m_dest_base.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
- dest_offs.Int32Value());
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
- StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*mscratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset,
- ManagedRegister m_src, Offset src_offset,
- ManagedRegister m_scratch, size_t size) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- Arm64ManagedRegister src = m_src.AsArm64();
- Arm64ManagedRegister dest = m_dest.AsArm64();
- CHECK(dest.IsXRegister()) << dest;
- CHECK(src.IsXRegister()) << src;
- CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
- CHECK(size == 4 || size == 8) << size;
- if (size == 4) {
- if (scratch.IsWRegister()) {
- LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
- dest_offset.Int32Value());
- } else {
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
- src_offset.Int32Value());
- StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
- dest_offset.Int32Value());
- }
- } else if (size == 8) {
- LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
- StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
- } else {
- UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
- }
-}
-
-void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
- FrameOffset /*src*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
-}
-
-void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
- // TODO: Should we check that m_scratch is IP? - see arm.
- ___ Dmb(InnerShareable, BarrierAll);
-}
-
-void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
- Arm64ManagedRegister reg = mreg.AsArm64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsWRegister()) << reg;
- if (size == 1) {
- ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- } else {
- ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- }
-}
-
-void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
- Arm64ManagedRegister reg = mreg.AsArm64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsWRegister()) << reg;
- if (size == 1) {
- ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- } else {
- ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
- }
-}
-
-void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references.
-}
-
-void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
- Arm64ManagedRegister base = m_base.AsArm64();
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(base.IsXRegister()) << base;
- CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
- ___ Blr(reg_x(scratch.AsXRegister()));
-}
-
void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
Arm64ManagedRegister base = m_base.AsArm64();
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(base.IsXRegister()) << base;
CHECK(scratch.IsXRegister()) << scratch;
  // Remove base and scratch from the temp list - the higher-level API uses IP1, IP0.
- UseScratchRegisterScope temps(vixl_masm_);
+ UseScratchRegisterScope temps(&vixl_masm_);
temps.Exclude(reg_x(base.AsXRegister()), reg_x(scratch.AsXRegister()));
___ Ldr(reg_x(scratch.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
___ Br(reg_x(scratch.AsXRegister()));
}
-void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- // Call *(*(SP + base) + offset)
- LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
- LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
- ___ Blr(reg_x(scratch.AsXRegister()));
-}
-
-void Arm64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
-}
-
-void Arm64Assembler::CreateHandleScopeEntry(
- ManagedRegister m_out_reg, FrameOffset handle_scope_offs, ManagedRegister m_in_reg,
- bool null_allowed) {
- Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
- Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- // For now we only hold stale handle scope entries in x registers.
- CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
- CHECK(out_reg.IsXRegister()) << out_reg;
- if (null_allowed) {
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
- if (in_reg.IsNoRegister()) {
- LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
- handle_scope_offs.Int32Value());
- in_reg = out_reg;
- }
- ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
- if (!out_reg.Equals(in_reg)) {
- LoadImmediate(out_reg.AsXRegister(), 0, eq);
- }
- AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
- } else {
- AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
- }
-}
-
-void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
- ManagedRegister m_scratch, bool null_allowed) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
- if (null_allowed) {
- LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
- handle_scope_offset.Int32Value());
- // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
- // the address in the handle scope holding the reference.
- // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
- ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
- // Move this logic in add constants with flags.
- AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
- } else {
- AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
- }
- StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
-}
-
-void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
- ManagedRegister m_in_reg) {
- Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
- Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
- CHECK(out_reg.IsXRegister()) << out_reg;
- CHECK(in_reg.IsXRegister()) << in_reg;
- vixl::aarch64::Label exit;
- if (!out_reg.Equals(in_reg)) {
- // FIXME: Who sets the flags here?
- LoadImmediate(out_reg.AsXRegister(), 0, eq);
- }
- ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
- LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
- ___ Bind(&exit);
-}
-
-void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
- CHECK_ALIGNED(stack_adjust, kStackAlignment);
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
- LoadFromOffset(scratch.AsXRegister(),
- TR,
- Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
- ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
-}
-
-void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
- UseScratchRegisterScope temps(vixl_masm_);
- temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
- Register temp = temps.AcquireX();
-
- // Bind exception poll entry.
- ___ Bind(exception->Entry());
- if (exception->stack_adjust_ != 0) { // Fix up the frame.
- DecreaseFrameSize(exception->stack_adjust_);
- }
- // Pass exception object as argument.
- // Don't care about preserving X0 as this won't return.
- ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
- ___ Ldr(temp,
- MEM_OP(reg_x(TR),
- QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
-
- ___ Blr(temp);
- // Call should never return.
- ___ Brk();
-}
-
static inline dwarf::Reg DWARFReg(CPURegister reg) {
if (reg.IsFPRegister()) {
return dwarf::Reg::Arm64Fp(reg.GetCode());
@@ -653,7 +82,7 @@ static inline dwarf::Reg DWARFReg(CPURegister reg) {
void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
int size = registers.GetRegisterSizeInBytes();
- const Register sp = vixl_masm_->StackPointer();
+ const Register sp = vixl_masm_.StackPointer();
// Since we are operating on register pairs, we would like to align on
// double the standard size; on the other hand, we don't want to insert
// an extra store, which will happen if the number of registers is even.
@@ -681,7 +110,7 @@ void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
int size = registers.GetRegisterSizeInBytes();
- const Register sp = vixl_masm_->StackPointer();
+ const Register sp = vixl_masm_.StackPointer();
// Be consistent with the logic for spilling registers.
if (!IsAlignedParam(offset, 2 * size) && registers.GetCount() % 2 != 0) {
const CPURegister& dst0 = registers.PopLowestIndex();
@@ -705,105 +134,6 @@ void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) {
DCHECK(registers.IsEmpty());
}
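
// Illustrative note, not part of the original change: the odd-count special
// case keeps the remaining accesses pair-aligned. E.g. restoring five X
// registers from offset 8 handles the lowest one singly, so the other four
// can be loaded with ldp at 16-byte-aligned offsets.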
-void Arm64Assembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- // Setup VIXL CPURegList for callee-saves.
- CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
- CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
- for (auto r : callee_save_regs) {
- Arm64ManagedRegister reg = r.AsArm64();
- if (reg.IsXRegister()) {
- core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
- } else {
- DCHECK(reg.IsDRegister());
- fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
- }
- }
- size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
- size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
-
- // Increase frame to required size.
- DCHECK_ALIGNED(frame_size, kStackAlignment);
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
- IncreaseFrameSize(frame_size);
-
- // Save callee-saves.
- SpillRegisters(core_reg_list, frame_size - core_reg_size);
- SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
-
- DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
-
- // Write ArtMethod*
- DCHECK(X0 == method_reg.AsArm64().AsXRegister());
- StoreToOffset(X0, SP, 0);
-
- // Write out entry spills
- int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
- if (reg.IsNoRegister()) {
- // only increment stack offset.
- ManagedRegisterSpill spill = entry_spills.at(i);
- offset += spill.getSize();
- } else if (reg.IsXRegister()) {
- StoreToOffset(reg.AsXRegister(), SP, offset);
- offset += 8;
- } else if (reg.IsWRegister()) {
- StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
- offset += 4;
- } else if (reg.IsDRegister()) {
- StoreDToOffset(reg.AsDRegister(), SP, offset);
- offset += 8;
- } else if (reg.IsSRegister()) {
- StoreSToOffset(reg.AsSRegister(), SP, offset);
- offset += 4;
- }
- }
-}
-
-void Arm64Assembler::RemoveFrame(size_t frame_size,
- ArrayRef<const ManagedRegister> callee_save_regs) {
- // Setup VIXL CPURegList for callee-saves.
- CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
- CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
- for (auto r : callee_save_regs) {
- Arm64ManagedRegister reg = r.AsArm64();
- if (reg.IsXRegister()) {
- core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
- } else {
- DCHECK(reg.IsDRegister());
- fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
- }
- }
- size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
- size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
-
- // For now we only check that the size of the frame is large enough to hold spills and method
- // reference.
- DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
- DCHECK_ALIGNED(frame_size, kStackAlignment);
-
- DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
-
- cfi_.RememberState();
-
- // Restore callee-saves.
- UnspillRegisters(core_reg_list, frame_size - core_reg_size);
- UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
-
- // Decrease frame size to start of callee saved regs.
- DecreaseFrameSize(frame_size);
-
- // Pop callee saved and return to LR.
- ___ Ret();
-
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
void Arm64Assembler::PoisonHeapReference(Register reg) {
DCHECK(reg.IsW());
// reg = -reg.
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 24b798201a..4e88e640e5 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -23,7 +23,6 @@
#include "base/arena_containers.h"
#include "base/logging.h"
-#include "constants_arm64.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
#include "offsets.h"
@@ -62,38 +61,13 @@ enum StoreOperandType {
kStoreDWord
};
-class Arm64Exception {
- private:
- Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
- : scratch_(scratch), stack_adjust_(stack_adjust) {
- }
-
- vixl::aarch64::Label* Entry() { return &exception_entry_; }
-
- // Register used for passing Thread::Current()->exception_ .
- const Arm64ManagedRegister scratch_;
-
- // Stack adjust for ExceptionPool.
- const size_t stack_adjust_;
-
- vixl::aarch64::Label exception_entry_;
-
- friend class Arm64Assembler;
- DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
-};
-
class Arm64Assembler FINAL : public Assembler {
public:
- // We indicate the size of the initial code generation buffer to the VIXL
- // assembler. From there we it will automatically manage the buffer.
- explicit Arm64Assembler(ArenaAllocator* arena)
- : Assembler(arena),
- exception_blocks_(arena->Adapter(kArenaAllocAssembler)),
- vixl_masm_(new vixl::aarch64::MacroAssembler(kArm64BaseBufferSize)) {}
+ explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena) {}
- virtual ~Arm64Assembler() {
- delete vixl_masm_;
- }
+ virtual ~Arm64Assembler() {}
+
+ vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
// Finalize the code.
void FinalizeCode() OVERRIDE;
@@ -105,110 +79,14 @@ class Arm64Assembler FINAL : public Assembler {
// Copy instructions out of assembly buffer into the given region of memory.
void FinalizeInstructions(const MemoryRegion& region);
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs);
+
void SpillRegisters(vixl::aarch64::CPURegList registers, int offset);
void UnspillRegisters(vixl::aarch64::CPURegList registers, int offset);
- // Emit code that will create an activation on the stack.
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack.
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines.
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
- OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines.
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
-
- // Copying routines.
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
- void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
-
- // Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg,
- FrameOffset handlescope_offset,
- ManagedRegister in_reg,
- bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handlescope_offset,
- ManagedRegister scratch,
- bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst.
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
-
// Jump to address (not setting link register)
void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
//
// Heap poisoning.
//
@@ -227,7 +105,6 @@ class Arm64Assembler FINAL : public Assembler {
UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64";
}
- private:
static vixl::aarch64::Register reg_x(int code) {
CHECK(code < kNumberOfXRegisters) << code;
if (code == SP) {
@@ -256,40 +133,9 @@ class Arm64Assembler FINAL : public Assembler {
return vixl::aarch64::FPRegister::GetSRegFromCode(code);
}
- // Emits Exception block.
- void EmitExceptionPoll(Arm64Exception *exception);
-
- void StoreWToOffset(StoreOperandType type, WRegister source,
- XRegister base, int32_t offset);
- void StoreToOffset(XRegister source, XRegister base, int32_t offset);
- void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
- void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
-
- void LoadImmediate(XRegister dest,
- int32_t value,
- vixl::aarch64::Condition cond = vixl::aarch64::al);
- void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
- void LoadWFromOffset(LoadOperandType type,
- WRegister dest,
- XRegister base,
- int32_t offset);
- void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
- void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
- void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
- void AddConstant(XRegister rd,
- int32_t value,
- vixl::aarch64::Condition cond = vixl::aarch64::al);
- void AddConstant(XRegister rd,
- XRegister rn,
- int32_t value,
- vixl::aarch64::Condition cond = vixl::aarch64::al);
-
- // List of exception blocks to generate at the end of the code cache.
- ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
-
- public:
- // Vixl assembler.
- vixl::aarch64::MacroAssembler* const vixl_masm_;
+ private:
+ // VIXL assembler.
+ vixl::aarch64::MacroAssembler vixl_masm_;
// Used for testing.
friend class Arm64ManagedRegister_VixlRegisters_Test;
diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h
deleted file mode 100644
index 01e8be9de6..0000000000
--- a/compiler/utils/arm64/constants_arm64.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
-#define ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
-
-#include <stdint.h>
-#include <iosfwd>
-#include "arch/arm64/registers_arm64.h"
-#include "base/casts.h"
-#include "base/logging.h"
-#include "globals.h"
-
-// TODO: Extend this file by adding missing functionality.
-
-namespace art {
-namespace arm64 {
-
-constexpr size_t kArm64BaseBufferSize = 4096;
-
-} // namespace arm64
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_ARM64_CONSTANTS_ARM64_H_
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
new file mode 100644
index 0000000000..dfdcd11893
--- /dev/null
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -0,0 +1,754 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_arm64.h"
+
+#include "base/logging.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "managed_register_arm64.h"
+#include "offsets.h"
+#include "thread.h"
+
+using namespace vixl::aarch64; // NOLINT(build/namespaces)
+
+namespace art {
+namespace arm64 {
+
+#ifdef ___
+#error "ARM64 Assembler macro already defined."
+#else
+#define ___ asm_.GetVIXLAssembler()->
+#endif
+
+#define reg_x(X) Arm64Assembler::reg_x(X)
+#define reg_w(W) Arm64Assembler::reg_w(W)
+#define reg_d(D) Arm64Assembler::reg_d(D)
+#define reg_s(S) Arm64Assembler::reg_s(S)
+
+Arm64JNIMacroAssembler::~Arm64JNIMacroAssembler() {
+}
+
+void Arm64JNIMacroAssembler::FinalizeCode() {
+ for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
+ EmitExceptionPoll(exception.get());
+ }
+ ___ FinalizeCode();
+}
+
+void Arm64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
+}
+
+void Arm64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
+ StoreToOffset(TR, SP, offset.Int32Value());
+}
+
+// See Arm64 PCS Section 5.2.2.1.
+void Arm64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ AddConstant(SP, -adjust);
+ cfi().AdjustCFAOffset(adjust);
+}
+
+// See Arm64 PCS Section 5.2.2.1.
+void Arm64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ AddConstant(SP, adjust);
+ cfi().AdjustCFAOffset(-adjust);
+}
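+
+// Illustrative sketch, not part of the original change: the CFA offset moves
+// opposite to SP, keeping the two helpers symmetric:
+//
+//   IncreaseFrameSize(16);   // sub sp, sp, #16;  CFA offset += 16
+//   DecreaseFrameSize(16);   // add sp, sp, #16;  CFA offset -= 16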
+
+void Arm64JNIMacroAssembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
+ AddConstant(rd, rd, value, cond);
+}
+
+void Arm64JNIMacroAssembler::AddConstant(XRegister rd,
+ XRegister rn,
+ int32_t value,
+ Condition cond) {
+ if ((cond == al) || (cond == nv)) {
+ // VIXL macro-assembler handles all variants.
+ ___ Add(reg_x(rd), reg_x(rn), value);
+ } else {
+    // temp = rn + value
+    // rd = cond ? temp : rd (rd keeps its old value otherwise)
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(rd), reg_x(rn));
+ Register temp = temps.AcquireX();
+ ___ Add(temp, reg_x(rn), value);
+ ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
+ }
+}
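+
+// Illustrative sketch, not part of the original change (ip0 stands in for
+// whichever temp the scope hands out): AddConstant(x0, sp, off, ne) expands
+// to a compute-then-select pair:
+//
+//   add  ip0, sp, #off     ; temp = rn + value
+//   csel x0, ip0, x0, ne   ; rd = temp if NE, else rd keeps its old value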
+
+void Arm64JNIMacroAssembler::StoreWToOffset(StoreOperandType type,
+ WRegister source,
+ XRegister base,
+ int32_t offset) {
+ switch (type) {
+ case kStoreByte:
+ ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
+ break;
+ case kStoreHalfword:
+ ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
+ break;
+ case kStoreWord:
+ ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+void Arm64JNIMacroAssembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) {
+ CHECK_NE(source, SP);
+ ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) {
+ ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) {
+ ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
+ Arm64ManagedRegister src = m_src.AsArm64();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsWRegister()) {
+ CHECK_EQ(4u, size);
+ StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
+ } else if (src.IsXRegister()) {
+ CHECK_EQ(8u, size);
+ StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
+ } else if (src.IsSRegister()) {
+ StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
+ } else {
+ CHECK(src.IsDRegister()) << src;
+ StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
+ }
+}
+
+void Arm64JNIMacroAssembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
+ Arm64ManagedRegister src = m_src.AsArm64();
+ CHECK(src.IsXRegister()) << src;
+ StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP,
+ offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
+ Arm64ManagedRegister src = m_src.AsArm64();
+ CHECK(src.IsXRegister()) << src;
+ StoreToOffset(src.AsXRegister(), SP, offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset offs,
+ uint32_t imm,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadImmediate(scratch.AsXRegister(), imm);
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP,
+ offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ Register temp = temps.AcquireX();
+ ___ Mov(temp, reg_x(SP));
+ ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
+}
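+
+// Illustrative sketch, not part of the original change: SP is copied through
+// a temp because register 31 in a str source operand encodes XZR, not SP, so
+// the stack pointer cannot be stored directly:
+//
+//   mov ip0, sp
+//   str ip0, [tr, #tr_offs]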
+
+void Arm64JNIMacroAssembler::StoreSpanning(FrameOffset dest_off,
+ ManagedRegister m_source,
+ FrameOffset in_off,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister source = m_source.AsArm64();
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value());
+ LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8);
+}
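+
+// Illustrative sketch, not part of the original change: StoreSpanning fills
+// two adjacent 8-byte slots, a register then a copied stack word:
+//
+//   str xsrc,     [sp, #dest_off]       ; first slot: the register
+//   ldr xscratch, [sp, #in_off]
+//   str xscratch, [sp, #dest_off + 8]   ; second slot: the stack word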
+
+// Load routines.
+void Arm64JNIMacroAssembler::LoadImmediate(XRegister dest, int32_t value, Condition cond) {
+ if ((cond == al) || (cond == nv)) {
+ ___ Mov(reg_x(dest), value);
+ } else {
+ // temp = value
+ // rd = cond ? temp : rd
+ if (value != 0) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(dest));
+ Register temp = temps.AcquireX();
+ ___ Mov(temp, value);
+ ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
+ } else {
+ ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
+ }
+ }
+}
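+
+// Illustrative sketch, not part of the original change: the value == 0 case
+// above needs no temp at all, selecting against the zero register instead:
+//
+//   csel rd, xzr, rd, cond   ; rd = cond ? 0 : rd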
+
+void Arm64JNIMacroAssembler::LoadWFromOffset(LoadOperandType type,
+ WRegister dest,
+ XRegister base,
+ int32_t offset) {
+ switch (type) {
+ case kLoadSignedByte:
+ ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadSignedHalfword:
+ ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadUnsignedByte:
+ ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadUnsignedHalfword:
+ ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ case kLoadWord:
+ ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+// Note: We can extend this method by adding load type info - see the
+// sign-extended A64 load variants.
+void Arm64JNIMacroAssembler::LoadFromOffset(XRegister dest, XRegister base, int32_t offset) {
+ CHECK_NE(dest, SP);
+ ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::LoadSFromOffset(SRegister dest, XRegister base, int32_t offset) {
+ ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::LoadDFromOffset(DRegister dest, XRegister base, int32_t offset) {
+ ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
+}
+
+void Arm64JNIMacroAssembler::Load(Arm64ManagedRegister dest,
+ XRegister base,
+ int32_t offset,
+ size_t size) {
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size) << dest;
+ } else if (dest.IsWRegister()) {
+ CHECK_EQ(4u, size) << dest;
+ ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
+ } else if (dest.IsXRegister()) {
+ CHECK_NE(dest.AsXRegister(), SP) << dest;
+ if (size == 4u) {
+ ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset));
+ } else {
+ CHECK_EQ(8u, size) << dest;
+ ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset));
+ }
+ } else if (dest.IsSRegister()) {
+ ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
+ } else {
+ CHECK(dest.IsDRegister()) << dest;
+ ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
+ }
+}
+
+void Arm64JNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
+ return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
+}
+
+void Arm64JNIMacroAssembler::LoadFromThread(ManagedRegister m_dst,
+ ThreadOffset64 src,
+ size_t size) {
+ return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
+}
+
+void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ CHECK(dst.IsXRegister()) << dst;
+ LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst,
+ ManagedRegister m_base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ Arm64ManagedRegister base = m_base.AsArm64();
+ CHECK(dst.IsXRegister() && base.IsXRegister());
+ LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(),
+ offs.Int32Value());
+ if (unpoison_reference) {
+ WRegister ref_reg = dst.AsOverlappingWRegister();
+ asm_.MaybeUnpoisonHeapReference(reg_w(ref_reg));
+ }
+}
+
+void Arm64JNIMacroAssembler::LoadRawPtr(ManagedRegister m_dst,
+ ManagedRegister m_base,
+ Offset offs) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ Arm64ManagedRegister base = m_base.AsArm64();
+ CHECK(dst.IsXRegister() && base.IsXRegister());
+  // Remove dst and base from the temp list - the higher-level API uses IP1, IP0.
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister()));
+ ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
+}
+
+void Arm64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ CHECK(dst.IsXRegister()) << dst;
+ LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
+}
+
+// Copying routines.
+void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
+ Arm64ManagedRegister dst = m_dst.AsArm64();
+ Arm64ManagedRegister src = m_src.AsArm64();
+ if (!dst.Equals(src)) {
+ if (dst.IsXRegister()) {
+ if (size == 4) {
+ CHECK(src.IsWRegister());
+ ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister()));
+ } else {
+ if (src.IsXRegister()) {
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister()));
+ } else {
+ ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister()));
+ }
+ }
+ } else if (dst.IsWRegister()) {
+ CHECK(src.IsWRegister()) << src;
+ ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
+ } else if (dst.IsSRegister()) {
+ CHECK(src.IsSRegister()) << src;
+ ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
+ } else {
+ CHECK(dst.IsDRegister()) << dst;
+ CHECK(src.IsDRegister()) << src;
+ ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
+ }
+ }
+}
+
+void Arm64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 tr_offs,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(),
+ SP, src.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(),
+ SP, dest.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ Arm64ManagedRegister base = src_base.AsArm64();
+ CHECK(base.IsXRegister()) << base;
+ CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(),
+ src_offset.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest_base,
+ Offset dest_offs,
+ FrameOffset src,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ Arm64ManagedRegister base = m_dest_base.AsArm64();
+ CHECK(base.IsXRegister()) << base;
+ CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(),
+ dest_offs.Int32Value());
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ FrameOffset /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*mscratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
+}
+
+void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest,
+ Offset dest_offset,
+ ManagedRegister m_src,
+ Offset src_offset,
+ ManagedRegister m_scratch,
+ size_t size) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ Arm64ManagedRegister src = m_src.AsArm64();
+ Arm64ManagedRegister dest = m_dest.AsArm64();
+ CHECK(dest.IsXRegister()) << dest;
+ CHECK(src.IsXRegister()) << src;
+ CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch;
+ CHECK(size == 4 || size == 8) << size;
+ if (size == 4) {
+ if (scratch.IsWRegister()) {
+ LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(),
+ src_offset.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(),
+ dest_offset.Int32Value());
+ } else {
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(),
+ src_offset.Int32Value());
+ StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(),
+ dest_offset.Int32Value());
+ }
+ } else if (size == 8) {
+ LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value());
+ } else {
+ UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
+ }
+}
+
+void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ Offset /*dest_offset*/,
+ FrameOffset /*src*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
+}
+
+void Arm64JNIMacroAssembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
+ // TODO: Should we check that m_scratch is IP? - see arm.
+ ___ Dmb(InnerShareable, BarrierAll);
+}
+
+void Arm64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+ Arm64ManagedRegister reg = mreg.AsArm64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsWRegister()) << reg;
+ if (size == 1) {
+ ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ } else {
+ ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ }
+}
+
+void Arm64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ Arm64ManagedRegister reg = mreg.AsArm64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsWRegister()) << reg;
+ if (size == 1) {
+ ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ } else {
+ ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
+ }
+}
+
+void Arm64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void Arm64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references.
+}
+
+void Arm64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
+ Arm64ManagedRegister base = m_base.AsArm64();
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(base.IsXRegister()) << base;
+ CHECK(scratch.IsXRegister()) << scratch;
+ LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
+ ___ Blr(reg_x(scratch.AsXRegister()));
+}
+
+void Arm64JNIMacroAssembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ // Call *(*(SP + base) + offset)
+ LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value());
+ LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value());
+ ___ Blr(reg_x(scratch.AsXRegister()));
+}
+
+void Arm64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
+ UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
+}
+
+void Arm64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister m_out_reg,
+ FrameOffset handle_scope_offs,
+ ManagedRegister m_in_reg,
+ bool null_allowed) {
+ Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
+ Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
+ // For now we only hold stale handle scope entries in x registers.
+ CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg;
+ CHECK(out_reg.IsXRegister()) << out_reg;
+ if (null_allowed) {
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ if (in_reg.IsNoRegister()) {
+ LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP,
+ handle_scope_offs.Int32Value());
+ in_reg = out_reg;
+ }
+ ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0);
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate(out_reg.AsXRegister(), 0, eq);
+ }
+ AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne);
+ } else {
+ AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al);
+ }
+}
+
+void Arm64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister m_scratch,
+ bool null_allowed) {
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(scratch.IsXRegister()) << scratch;
+ if (null_allowed) {
+ LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP,
+ handle_scope_offset.Int32Value());
+ // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
+ // the address in the handle scope holding the reference.
+ // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
+ ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0);
+    // TODO: Move this logic into AddConstant with flags.
+ AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne);
+ } else {
+ AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al);
+ }
+ StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value());
+}
+
+void Arm64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
+ ManagedRegister m_in_reg) {
+ Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
+ Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
+ CHECK(out_reg.IsXRegister()) << out_reg;
+ CHECK(in_reg.IsXRegister()) << in_reg;
+ vixl::aarch64::Label exit;
+ if (!out_reg.Equals(in_reg)) {
+ // FIXME: Who sets the flags here?
+ LoadImmediate(out_reg.AsXRegister(), 0, eq);
+ }
+ ___ Cbz(reg_x(in_reg.AsXRegister()), &exit);
+ LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0);
+ ___ Bind(&exit);
+}
+
+void Arm64JNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
+ CHECK_ALIGNED(stack_adjust, kStackAlignment);
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
+ LoadFromOffset(scratch.AsXRegister(),
+ TR,
+ Thread::ExceptionOffset<kArm64PointerSize>().Int32Value());
+ ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
+}
+
+void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception* exception) {
+ UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+ temps.Exclude(reg_x(exception->scratch_.AsXRegister()));
+ Register temp = temps.AcquireX();
+
+ // Bind exception poll entry.
+ ___ Bind(exception->Entry());
+ if (exception->stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSize(exception->stack_adjust_);
+ }
+ // Pass exception object as argument.
+ // Don't care about preserving X0 as this won't return.
+ ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
+ ___ Ldr(temp,
+ MEM_OP(reg_x(TR),
+ QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value()));
+
+ ___ Blr(temp);
+ // Call should never return.
+ ___ Brk();
+}
+
+void Arm64JNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ // Setup VIXL CPURegList for callee-saves.
+ CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
+ CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+ for (auto r : callee_save_regs) {
+ Arm64ManagedRegister reg = r.AsArm64();
+ if (reg.IsXRegister()) {
+ core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
+ } else {
+ DCHECK(reg.IsDRegister());
+ fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
+ }
+ }
+ size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+ size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
+
+ // Increase frame to required size.
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
+ IncreaseFrameSize(frame_size);
+
+ // Save callee-saves.
+ asm_.SpillRegisters(core_reg_list, frame_size - core_reg_size);
+ asm_.SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
+
+ DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
+
+ // Write ArtMethod*
+ DCHECK(X0 == method_reg.AsArm64().AsXRegister());
+ StoreToOffset(X0, SP, 0);
+
+ // Write out entry spills
+ int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
+ if (reg.IsNoRegister()) {
+      // Only increment the stack offset.
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ offset += spill.getSize();
+ } else if (reg.IsXRegister()) {
+ StoreToOffset(reg.AsXRegister(), SP, offset);
+ offset += 8;
+ } else if (reg.IsWRegister()) {
+ StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
+ offset += 4;
+ } else if (reg.IsDRegister()) {
+ StoreDToOffset(reg.AsDRegister(), SP, offset);
+ offset += 8;
+ } else if (reg.IsSRegister()) {
+ StoreSToOffset(reg.AsSRegister(), SP, offset);
+ offset += 4;
+ }
+ }
+}
+
+void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> callee_save_regs) {
+ // Setup VIXL CPURegList for callee-saves.
+ CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
+ CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+ for (auto r : callee_save_regs) {
+ Arm64ManagedRegister reg = r.AsArm64();
+ if (reg.IsXRegister()) {
+ core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode());
+ } else {
+ DCHECK(reg.IsDRegister());
+ fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode());
+ }
+ }
+ size_t core_reg_size = core_reg_list.GetTotalSizeInBytes();
+ size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes();
+
+ // For now we only check that the size of the frame is large enough to hold spills and method
+ // reference.
+ DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
+ DCHECK_ALIGNED(frame_size, kStackAlignment);
+
+ DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
+
+ cfi().RememberState();
+
+ // Restore callee-saves.
+ asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size);
+ asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
+
+ // Decrease frame size to start of callee saved regs.
+ DecreaseFrameSize(frame_size);
+
+  // Pop callee saves and return to LR.
+ ___ Ret();
+
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+#undef ___
+
+} // namespace arm64
+} // namespace art
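The null_allowed paths in CreateHandleScopeEntry above compute a conditional address without branching (a Cmp followed by conditional select/add). A minimal sketch of the value those instructions materialize, in plain C++ with illustrative names (not ART API):

    #include <cstdint>

    // out = (stored_ref == 0) ? 0 : (SP + handle_offset), matching the
    // Cmp/LoadImmediate/AddConstant sequence emitted above.
    uintptr_t HandleScopeEntryValue(uintptr_t sp, int32_t handle_offset,
                                    uint32_t stored_ref) {
      return (stored_ref == 0) ? 0u : (sp + handle_offset);
    }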
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
new file mode 100644
index 0000000000..79ee441144
--- /dev/null
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
+#define ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
+
+#include <stdint.h>
+#include <memory>
+#include <vector>
+
+#include "assembler_arm64.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/logging.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+#include "offsets.h"
+
+// TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunknown-pragmas"
+#pragma GCC diagnostic ignored "-Wshadow"
+#pragma GCC diagnostic ignored "-Wmissing-noreturn"
+#include "a64/macro-assembler-a64.h"
+#pragma GCC diagnostic pop
+
+namespace art {
+namespace arm64 {
+
+class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> {
+ public:
+ explicit Arm64JNIMacroAssembler(ArenaAllocator* arena)
+ : JNIMacroAssemblerFwd(arena),
+ exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {}
+
+ ~Arm64JNIMacroAssembler();
+
+ // Finalize the code.
+ void FinalizeCode() OVERRIDE;
+
+ // Emit code that will create an activation on the stack.
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack.
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines.
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+ void StoreSpanning(FrameOffset dest,
+ ManagedRegister src,
+ FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines.
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+
+ // Copying routines.
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+ void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
+
+ // Sign extension.
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension.
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current().
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load it into dst.
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset].
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+ class Arm64Exception {
+ public:
+ Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {}
+
+ vixl::aarch64::Label* Entry() { return &exception_entry_; }
+
+  // Register used for passing Thread::Current()->exception_.
+ const Arm64ManagedRegister scratch_;
+
+  // Stack adjustment for ExceptionPoll.
+ const size_t stack_adjust_;
+
+ vixl::aarch64::Label exception_entry_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
+ };
+
+  // Emits an exception block.
+  void EmitExceptionPoll(Arm64Exception* exception);
+
+ void StoreWToOffset(StoreOperandType type,
+ WRegister source,
+ XRegister base,
+ int32_t offset);
+ void StoreToOffset(XRegister source, XRegister base, int32_t offset);
+ void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
+ void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
+
+ void LoadImmediate(XRegister dest,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+ void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
+ void LoadWFromOffset(LoadOperandType type,
+ WRegister dest,
+ XRegister base,
+ int32_t offset);
+ void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
+ void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
+ void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
+ void AddConstant(XRegister rd,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+ void AddConstant(XRegister rd,
+ XRegister rn,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+
+ // List of exception blocks to generate at the end of the code cache.
+ ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
+};
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
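ExceptionPoll above emits only a load and a Cbnz inline, queuing an Arm64Exception whose body EmitExceptionPoll generates during finalization. A self-contained sketch of that deferred slow-path pattern, using stand-in types rather than the VIXL/ART classes:

    #include <cstddef>
    #include <memory>
    #include <vector>

    struct Label {};  // stand-in for vixl::aarch64::Label

    struct SlowPath {
      Label entry;
      std::size_t stack_adjust;
    };

    class Emitter {
     public:
      // Inline part: queue the slow path and emit only the cheap check.
      void ExceptionPoll(std::size_t stack_adjust) {
        slow_paths_.push_back(
            std::unique_ptr<SlowPath>(new SlowPath{Label(), stack_adjust}));
        // ... emit: load Thread::exception_, Cbnz to slow_paths_.back()->entry ...
      }

      // Finalization: emit every queued slow path after the main code.
      void FinalizeCode() {
        for (const std::unique_ptr<SlowPath>& sp : slow_paths_) {
          (void)sp;  // ... Bind(&sp->entry), undo stack_adjust, deliver exception ...
        }
      }

     private:
      std::vector<std::unique_ptr<SlowPath>> slow_paths_;
    };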
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index f7d74d2af4..7378a0a081 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -17,8 +17,8 @@
#ifndef ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_
#define ART_COMPILER_UTILS_ARM64_MANAGED_REGISTER_ARM64_H_
+#include "arch/arm64/registers_arm64.h"
#include "base/logging.h"
-#include "constants_arm64.h"
#include "debug/dwarf/register.h"
#include "utils/managed_register.h"
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 0a1b7334b8..81159e69a0 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -121,137 +121,4 @@ void DebugFrameOpCodeWriterForAssembler::ImplicitlyAdvancePC() {
}
}
-std::unique_ptr<Assembler> Assembler::Create(
- ArenaAllocator* arena,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features) {
- switch (instruction_set) {
-#ifdef ART_ENABLE_CODEGEN_arm
- case kArm:
- return std::unique_ptr<Assembler>(new (arena) arm::Arm32Assembler(arena));
- case kThumb2:
- return std::unique_ptr<Assembler>(new (arena) arm::Thumb2Assembler(arena));
-#endif
-#ifdef ART_ENABLE_CODEGEN_arm64
- case kArm64:
- return std::unique_ptr<Assembler>(new (arena) arm64::Arm64Assembler(arena));
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips
- case kMips:
- return std::unique_ptr<Assembler>(new (arena) mips::MipsAssembler(
- arena,
- instruction_set_features != nullptr
- ? instruction_set_features->AsMipsInstructionSetFeatures()
- : nullptr));
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
- case kMips64:
- return std::unique_ptr<Assembler>(new (arena) mips64::Mips64Assembler(arena));
-#endif
-#ifdef ART_ENABLE_CODEGEN_x86
- case kX86:
- return std::unique_ptr<Assembler>(new (arena) x86::X86Assembler(arena));
-#endif
-#ifdef ART_ENABLE_CODEGEN_x86_64
- case kX86_64:
- return std::unique_ptr<Assembler>(new (arena) x86_64::X86_64Assembler(arena));
-#endif
- default:
- LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
- return nullptr;
- }
-}
-
-void Assembler::StoreImmediateToThread32(ThreadOffset32 dest ATTRIBUTE_UNUSED,
- uint32_t imm ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::StoreImmediateToThread64(ThreadOffset64 dest ATTRIBUTE_UNUSED,
- uint32_t imm ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::StoreStackOffsetToThread32(
- ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::StoreStackOffsetToThread64(
- ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::StoreStackPointerToThread32(
- ThreadOffset32 thr_offs ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::StoreStackPointerToThread64(
- ThreadOffset64 thr_offs ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset32 src ATTRIBUTE_UNUSED,
- size_t size ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset64 src ATTRIBUTE_UNUSED,
- size_t size ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset32 offs ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
- ThreadOffset64 offs ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs ATTRIBUTE_UNUSED,
- FrameOffset fr_offs ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
-void Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
- UNIMPLEMENTED(FATAL);
-}
-
} // namespace art
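The LoadFromThread32/LoadFromThread64-style stub pairs deleted here are superseded by single methods on the pointer-size-templated JNIMacroAssembler introduced below. A simplified sketch of how the template parameter collapses each pair into one signature (stand-in types, not the ART definitions):

    #include <cstddef>
    #include <cstdint>

    enum class PointerSize : std::size_t { k32 = 4, k64 = 8 };

    template <PointerSize kPointerSize>
    struct ThreadOffset {
      int32_t value;
    };

    template <PointerSize kPointerSize>
    struct JniAsmSketch {
      // One LoadFromThread replaces the LoadFromThread32/64 stub pair.
      void LoadFromThread(ThreadOffset<kPointerSize> src) { (void)src.value; }
    };

    int main() {
      JniAsmSketch<PointerSize::k64> a64;
      a64.LoadFromThread(ThreadOffset<PointerSize::k64>{0});
      return 0;
    }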
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 89f7947cd5..8981776314 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -356,11 +356,6 @@ class DebugFrameOpCodeWriterForAssembler FINAL
class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
public:
- static std::unique_ptr<Assembler> Create(
- ArenaAllocator* arena,
- InstructionSet instruction_set,
- const InstructionSetFeatures* instruction_set_features = nullptr);
-
// Finalize the code; emit slow paths, fixup branches, add literal pool, etc.
virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); }
@@ -376,144 +371,6 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
// TODO: Implement with disassembler.
virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {}
- // Emit code that will create an activation on the stack
- virtual void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) = 0;
-
- // Emit code that will remove an activation from the stack
- virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;
-
- virtual void IncreaseFrameSize(size_t adjust) = 0;
- virtual void DecreaseFrameSize(size_t adjust) = 0;
-
- // Store routines
- virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
- virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
- virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
-
- virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;
-
- virtual void StoreImmediateToThread32(ThreadOffset32 dest,
- uint32_t imm,
- ManagedRegister scratch);
- virtual void StoreImmediateToThread64(ThreadOffset64 dest,
- uint32_t imm,
- ManagedRegister scratch);
-
- virtual void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
- virtual void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
-
- virtual void StoreStackPointerToThread32(ThreadOffset32 thr_offs);
- virtual void StoreStackPointerToThread64(ThreadOffset64 thr_offs);
-
- virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
- FrameOffset in_off, ManagedRegister scratch) = 0;
-
- // Load routines
- virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
-
- virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size);
- virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size);
-
- virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
- // If unpoison_reference is true and kPoisonReference is true, then we negate the read reference.
- virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) = 0;
-
- virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;
-
- virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs);
- virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs);
-
- // Copying routines
- virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
-
- virtual void CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister scratch);
- virtual void CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister scratch);
-
- virtual void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
- virtual void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
-
- virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0;
-
- virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) = 0;
-
- virtual void MemoryBarrier(ManagedRegister scratch) = 0;
-
- // Sign extension
- virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;
-
- // Zero extension
- virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;
-
- // Exploit fast access in managed code to Thread::Current()
- virtual void GetCurrentThread(ManagedRegister tr) = 0;
- virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
- FrameOffset handlescope_offset,
- ManagedRegister in_reg,
- bool null_allowed) = 0;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- virtual void CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handlescope_offset,
- ManagedRegister scratch,
- bool null_allowed) = 0;
-
- // src holds a handle scope entry (Object**) load this into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
- virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
-
- // Call to address held at [base+offset]
- virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
- virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0;
- virtual void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch);
- virtual void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch);
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
-
virtual void Bind(Label* label) = 0;
virtual void Jump(Label* label) = 0;
@@ -525,13 +382,17 @@ class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
*/
DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }
- protected:
- explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {}
-
ArenaAllocator* GetArena() {
return buffer_.GetArena();
}
+ AssemblerBuffer* GetBuffer() {
+ return &buffer_;
+ }
+
+ protected:
+ explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {}
+
AssemblerBuffer buffer_;
DebugFrameOpCodeWriterForAssembler cfi_;
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
new file mode 100644
index 0000000000..797a98cfd5
--- /dev/null
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler.h"
+
+#include <algorithm>
+#include <vector>
+
+#ifdef ART_ENABLE_CODEGEN_arm
+#include "arm/jni_macro_assembler_arm.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "arm64/jni_macro_assembler_arm64.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+#include "mips/assembler_mips.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+#include "mips64/assembler_mips64.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+#include "x86/jni_macro_assembler_x86.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+#include "x86_64/jni_macro_assembler_x86_64.h"
+#endif
+#include "base/casts.h"
+#include "globals.h"
+#include "memory_region.h"
+
+namespace art {
+
+using MacroAsm32UniquePtr = std::unique_ptr<JNIMacroAssembler<PointerSize::k32>>;
+
+template <>
+MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create(
+ ArenaAllocator* arena,
+ InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features) {
+#ifndef ART_ENABLE_CODEGEN_mips
+ UNUSED(instruction_set_features);
+#endif
+
+ switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm
+ case kArm:
+ case kThumb2:
+ return MacroAsm32UniquePtr(new (arena) arm::ArmJNIMacroAssembler(arena, instruction_set));
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+ case kMips:
+ return MacroAsm32UniquePtr(new (arena) mips::MipsAssembler(
+ arena,
+ instruction_set_features != nullptr
+ ? instruction_set_features->AsMipsInstructionSetFeatures()
+ : nullptr));
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ case kX86:
+ return MacroAsm32UniquePtr(new (arena) x86::X86JNIMacroAssembler(arena));
+#endif
+ default:
+ LOG(FATAL) << "Unknown/unsupported 4B InstructionSet: " << instruction_set;
+ UNREACHABLE();
+ }
+}
+
+using MacroAsm64UniquePtr = std::unique_ptr<JNIMacroAssembler<PointerSize::k64>>;
+
+template <>
+MacroAsm64UniquePtr JNIMacroAssembler<PointerSize::k64>::Create(
+ ArenaAllocator* arena,
+ InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features ATTRIBUTE_UNUSED) {
+ switch (instruction_set) {
+#ifdef ART_ENABLE_CODEGEN_arm64
+ case kArm64:
+ return MacroAsm64UniquePtr(new (arena) arm64::Arm64JNIMacroAssembler(arena));
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips64
+ case kMips64:
+ return MacroAsm64UniquePtr(new (arena) mips64::Mips64Assembler(arena));
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86_64
+ case kX86_64:
+ return MacroAsm64UniquePtr(new (arena) x86_64::X86_64JNIMacroAssembler(arena));
+#endif
+ default:
+ LOG(FATAL) << "Unknown/unsupported 8B InstructionSet: " << instruction_set;
+ UNREACHABLE();
+ }
+}
+
+} // namespace art
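A hedged usage sketch of the new factory (assumes the headers in this change; kArm64 comes from arch/instruction_set.h, and error handling is omitted):

    ArenaPool pool;
    ArenaAllocator arena(&pool);

    // The pointer size is part of the type, so 32- and 64-bit JNI
    // assemblers cannot be confused at the call site.
    std::unique_ptr<JNIMacroAssembler<PointerSize::k64>> masm =
        JNIMacroAssembler<PointerSize::k64>::Create(&arena, kArm64);

    masm->IncreaseFrameSize(32);
    masm->DecreaseFrameSize(32);
    masm->FinalizeCode();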
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
new file mode 100644
index 0000000000..6f45bd62db
--- /dev/null
+++ b/compiler/utils/jni_macro_assembler.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_
+#define ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_
+
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "base/arena_allocator.h"
+#include "base/arena_object.h"
+#include "base/enums.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "managed_register.h"
+#include "offsets.h"
+#include "utils/array_ref.h"
+
+namespace art {
+
+class ArenaAllocator;
+class DebugFrameOpCodeWriterForAssembler;
+class InstructionSetFeatures;
+class MemoryRegion;
+
+template <PointerSize kPointerSize>
+class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
+ public:
+ static std::unique_ptr<JNIMacroAssembler<kPointerSize>> Create(
+ ArenaAllocator* arena,
+ InstructionSet instruction_set,
+ const InstructionSetFeatures* instruction_set_features = nullptr);
+
+ // Finalize the code; emit slow paths, fixup branches, add literal pool, etc.
+ virtual void FinalizeCode() = 0;
+
+ // Size of generated code
+ virtual size_t CodeSize() const = 0;
+
+ // Copy instructions out of assembly buffer into the given region of memory
+ virtual void FinalizeInstructions(const MemoryRegion& region) = 0;
+
+ // Emit code that will create an activation on the stack
+ virtual void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) = 0;
+
+ // Emit code that will remove an activation from the stack
+ virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;
+
+ virtual void IncreaseFrameSize(size_t adjust) = 0;
+ virtual void DecreaseFrameSize(size_t adjust) = 0;
+
+ // Store routines
+ virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
+ virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
+ virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
+
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;
+
+ virtual void StoreStackOffsetToThread(ThreadOffset<kPointerSize> thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) = 0;
+
+ virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs) = 0;
+
+ virtual void StoreSpanning(FrameOffset dest,
+ ManagedRegister src,
+ FrameOffset in_off,
+ ManagedRegister scratch) = 0;
+
+ // Load routines
+ virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
+
+ virtual void LoadFromThread(ManagedRegister dest,
+ ThreadOffset<kPointerSize> src,
+ size_t size) = 0;
+
+ virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
+ // If unpoison_reference is true and kPoisonReference is true, then we negate the read reference.
+ virtual void LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) = 0;
+
+ virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;
+
+ virtual void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset<kPointerSize> offs) = 0;
+
+ // Copying routines
+ virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
+
+ virtual void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset<kPointerSize> thr_offs,
+ ManagedRegister scratch) = 0;
+
+ virtual void CopyRawPtrToThread(ThreadOffset<kPointerSize> thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) = 0;
+
+ virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0;
+
+ virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
+
+ virtual void Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) = 0;
+
+ virtual void Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) = 0;
+
+ virtual void Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) = 0;
+
+ virtual void Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) = 0;
+
+ virtual void Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) = 0;
+
+ virtual void MemoryBarrier(ManagedRegister scratch) = 0;
+
+ // Sign extension
+ virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;
+
+ // Zero extension
+ virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;
+
+ // Exploit fast access in managed code to Thread::Current()
+ virtual void GetCurrentThread(ManagedRegister tr) = 0;
+ virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) = 0;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ virtual void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) = 0;
+
+  // src holds a handle scope entry (Object**); load it into dst.
+ virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
+ virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
+
+ // Call to address held at [base+offset]
+ virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
+ virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0;
+ virtual void CallFromThread(ThreadOffset<kPointerSize> offset, ManagedRegister scratch) = 0;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
+
+ virtual ~JNIMacroAssembler() {}
+
+ /**
+ * @brief Buffer of DWARF's Call Frame Information opcodes.
+ * @details It is used by debuggers and other tools to unwind the call stack.
+ */
+ virtual DebugFrameOpCodeWriterForAssembler& cfi() = 0;
+
+ protected:
+ explicit JNIMacroAssembler() {}
+};
+
+template <typename T, PointerSize kPointerSize>
+class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
+ public:
+ void FinalizeCode() OVERRIDE {
+ asm_.FinalizeCode();
+ }
+
+ size_t CodeSize() const OVERRIDE {
+ return asm_.CodeSize();
+ }
+
+ void FinalizeInstructions(const MemoryRegion& region) OVERRIDE {
+ asm_.FinalizeInstructions(region);
+ }
+
+ DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE {
+ return asm_.cfi();
+ }
+
+ protected:
+ explicit JNIMacroAssemblerFwd(ArenaAllocator* arena) : asm_(arena) {}
+
+ T asm_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_H_
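Note that a backend deriving from both Assembler and JNIMacroAssembler (as MipsAssembler does later in this change) must re-declare members such as CodeSize() and cfi(). A self-contained sketch of why the lookup is otherwise ambiguous:

    #include <cstddef>

    struct ConcreteBase {                 // plays the role of Assembler
      std::size_t CodeSize() const { return 0; }
    };

    struct AbstractBase {                 // plays the role of JNIMacroAssembler
      virtual std::size_t CodeSize() const = 0;
      virtual ~AbstractBase() {}
    };

    struct Dual : ConcreteBase, AbstractBase {
      // Without this forwarder, Dual().CodeSize() is ambiguous and the pure
      // virtual stays unimplemented - the MipsAssembler one-liners below
      // resolve exactly this.
      std::size_t CodeSize() const override { return ConcreteBase::CodeSize(); }
    };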
diff --git a/compiler/utils/jni_macro_assembler_test.h b/compiler/utils/jni_macro_assembler_test.h
new file mode 100644
index 0000000000..829f34b4b7
--- /dev/null
+++ b/compiler/utils/jni_macro_assembler_test.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_TEST_H_
+#define ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_TEST_H_
+
+#include "jni_macro_assembler.h"
+
+#include "assembler_test_base.h"
+#include "common_runtime_test.h" // For ScratchFile
+
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iterator>
+#include <sys/stat.h>
+
+namespace art {
+
+template<typename Ass>
+class JNIMacroAssemblerTest : public testing::Test {
+ public:
+ Ass* GetAssembler() {
+ return assembler_.get();
+ }
+
+ typedef std::string (*TestFn)(JNIMacroAssemblerTest* assembler_test, Ass* assembler);
+
+ void DriverFn(TestFn f, std::string test_name) {
+ DriverWrapper(f(this, assembler_.get()), test_name);
+ }
+
+ // This driver assumes the assembler has already been called.
+ void DriverStr(std::string assembly_string, std::string test_name) {
+ DriverWrapper(assembly_string, test_name);
+ }
+
+ // This is intended to be run as a test.
+ bool CheckTools() {
+ return test_helper_->CheckTools();
+ }
+
+ protected:
+ explicit JNIMacroAssemblerTest() {}
+
+ void SetUp() OVERRIDE {
+ arena_.reset(new ArenaAllocator(&pool_));
+ assembler_.reset(CreateAssembler(arena_.get()));
+ test_helper_.reset(
+ new AssemblerTestInfrastructure(GetArchitectureString(),
+ GetAssemblerCmdName(),
+ GetAssemblerParameters(),
+ GetObjdumpCmdName(),
+ GetObjdumpParameters(),
+ GetDisassembleCmdName(),
+ GetDisassembleParameters(),
+ GetAssemblyHeader()));
+
+ SetUpHelpers();
+ }
+
+ void TearDown() OVERRIDE {
+ test_helper_.reset(); // Clean up the helper.
+ assembler_.reset();
+ arena_.reset();
+ }
+
+ // Override this to set up any architecture-specific things, e.g., CPU revision.
+ virtual Ass* CreateAssembler(ArenaAllocator* arena) {
+ return new (arena) Ass(arena);
+ }
+
+ // Override this to set up any architecture-specific things, e.g., register vectors.
+ virtual void SetUpHelpers() {}
+
+ // Get the typically used name for this architecture, e.g., aarch64, x86_64, ...
+ virtual std::string GetArchitectureString() = 0;
+
+ // Get the name of the assembler, e.g., "as" by default.
+ virtual std::string GetAssemblerCmdName() {
+ return "as";
+ }
+
+  // Switches passed to the assembler command. Default: none.
+ virtual std::string GetAssemblerParameters() {
+ return "";
+ }
+
+ // Get the name of the objdump, e.g., "objdump" by default.
+ virtual std::string GetObjdumpCmdName() {
+ return "objdump";
+ }
+
+  // Switches passed to the objdump command. Default is " -h".
+ virtual std::string GetObjdumpParameters() {
+ return " -h";
+ }
+
+  // Get the name of the disassembly command, e.g., "objdump" by default.
+ virtual std::string GetDisassembleCmdName() {
+ return "objdump";
+ }
+
+  // Switches passed to the disassembly command. Since the input is a raw binary, the
+  // architecture must be passed to objdump, so this is architecture-specific and has no default.
+ virtual std::string GetDisassembleParameters() = 0;
+
+ // If the assembly file needs a header, return it in a sub-class.
+ virtual const char* GetAssemblyHeader() {
+ return nullptr;
+ }
+
+ private:
+ // Override this to pad the code with NOPs to a certain size if needed.
+ virtual void Pad(std::vector<uint8_t>& data ATTRIBUTE_UNUSED) {
+ }
+
+ void DriverWrapper(std::string assembly_text, std::string test_name) {
+ assembler_->FinalizeCode();
+ size_t cs = assembler_->CodeSize();
+ std::unique_ptr<std::vector<uint8_t>> data(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*data)[0], data->size());
+ assembler_->FinalizeInstructions(code);
+ Pad(*data);
+ test_helper_->Driver(*data, assembly_text, test_name);
+ }
+
+ ArenaPool pool_;
+ std::unique_ptr<ArenaAllocator> arena_;
+ std::unique_ptr<Ass> assembler_;
+ std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
+
+ DISALLOW_COPY_AND_ASSIGN(JNIMacroAssemblerTest);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_JNI_MACRO_ASSEMBLER_TEST_H_
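A hedged sketch of a concrete fixture built on this template (the x86 backend is defined elsewhere in this change; the objdump switches shown are illustrative):

    class X86JNIMacroAssemblerTest
        : public JNIMacroAssemblerTest<x86::X86JNIMacroAssembler> {
     protected:
      std::string GetArchitectureString() OVERRIDE { return "x86"; }
      std::string GetDisassembleParameters() OVERRIDE {
        // Raw binary input, so the architecture must be spelled out.
        return " -D -bbinary -mi386 --no-show-raw-insn";
      }
    };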
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index e6b32def55..8b7da3fa77 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -2799,27 +2799,17 @@ void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void MipsAssembler::StoreImmediateToThread32(ThreadOffset32 dest,
- uint32_t imm,
+void MipsAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
- // Is this function even referenced anywhere else in the code?
- LoadConst32(scratch.AsCoreRegister(), imm);
- StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value());
-}
-
-void MipsAssembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- MipsManagedRegister scratch = mscratch.AsMips();
- CHECK(scratch.IsCoreRegister()) << scratch;
Addiu32(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
S1, thr_offs.Int32Value());
}
-void MipsAssembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
+void MipsAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
}
@@ -2836,7 +2826,7 @@ void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void MipsAssembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
+void MipsAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -2864,7 +2854,7 @@ void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offs
base.AsMips().AsCoreRegister(), offs.Int32Value());
}
-void MipsAssembler::LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) {
+void MipsAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
MipsManagedRegister dest = mdest.AsMips();
CHECK(dest.IsCoreRegister());
LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
@@ -2918,9 +2908,9 @@ void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m
StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}
-void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister mscratch) {
+void MipsAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -2929,9 +2919,9 @@ void MipsAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
SP, fr_offs.Int32Value());
}
-void MipsAssembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void MipsAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
MipsManagedRegister scratch = mscratch.AsMips();
CHECK(scratch.IsCoreRegister()) << scratch;
LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
@@ -3103,8 +3093,8 @@ void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscrat
// TODO: place reference map on call.
}
-void MipsAssembler::CallFromThread32(ThreadOffset32 offset ATTRIBUTE_UNUSED,
- ManagedRegister mscratch ATTRIBUTE_UNUSED) {
+void MipsAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "no mips implementation";
}
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 852ced6e25..41b6c6bd32 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -23,12 +23,14 @@
#include "arch/mips/instruction_set_features_mips.h"
#include "base/arena_containers.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "constants_mips.h"
#include "globals.h"
#include "managed_register_mips.h"
#include "offsets.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
#include "utils/label.h"
namespace art {
@@ -145,7 +147,7 @@ class MipsExceptionSlowPath {
DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath);
};
-class MipsAssembler FINAL : public Assembler {
+class MipsAssembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
public:
explicit MipsAssembler(ArenaAllocator* arena,
const MipsInstructionSetFeatures* instruction_set_features = nullptr)
@@ -160,6 +162,9 @@ class MipsAssembler FINAL : public Assembler {
cfi().DelayEmittingAdvancePCs();
}
+ size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
+ DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
+
virtual ~MipsAssembler() {
for (auto& branch : branches_) {
CHECK(branch.IsResolved());
@@ -500,15 +505,11 @@ class MipsAssembler FINAL : public Assembler {
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
- void StoreImmediateToThread32(ThreadOffset32 dest,
- uint32_t imm,
+ void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
ManagedRegister mscratch) OVERRIDE;
- void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
-
- void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest,
ManagedRegister msrc,
@@ -518,7 +519,7 @@ class MipsAssembler FINAL : public Assembler {
// Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -529,19 +530,19 @@ class MipsAssembler FINAL : public Assembler {
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread32(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
- void CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister mscratch) OVERRIDE;
-
- void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
ManagedRegister mscratch) OVERRIDE;
+ void CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) OVERRIDE;
+
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
@@ -617,7 +618,7 @@ class MipsAssembler FINAL : public Assembler {
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread32(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
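
The renames in this hunk drop the 32/64 suffixes because the pointer width is now carried
by the JNIMacroAssembler<PointerSize> interface that MipsAssembler implements: each
instantiation takes the matching ThreadOffset type, so one unsuffixed name per operation
suffices. A rough sketch of that interface's shape, assuming ThreadOffset32/ThreadOffset64
are the k32/k64 instantiations of a ThreadOffset template:

  template <PointerSize kPointerSize>
  class JNIMacroAssembler {
   public:
    virtual ~JNIMacroAssembler() {}
    // Resolves to ThreadOffset32 on k32 assemblers and ThreadOffset64 on k64 ones,
    // so overriders such as MipsAssembler spell out the concrete alias.
    virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs) = 0;
    virtual void LoadFromThread(ManagedRegister dest,
                                ThreadOffset<kPointerSize> src,
                                size_t size) = 0;
    // ...
  };
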
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 3fd77a06b1..a2621cbb30 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -2115,16 +2115,16 @@ void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}
-void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void Mips64Assembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
Daddiu64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}
-void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
+void Mips64Assembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}
@@ -2141,7 +2141,7 @@ void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size)
return EmitLoad(mdest, SP, src.Int32Value(), size);
}
-void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
+void Mips64Assembler::LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
return EmitLoad(mdest, S1, src.Int32Value(), size);
}
@@ -2174,7 +2174,7 @@ void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
base.AsMips64().AsGpuRegister(), offs.Int32Value());
}
-void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
+void Mips64Assembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
Mips64ManagedRegister dest = mdest.AsMips64();
CHECK(dest.IsGpuRegister());
LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
@@ -2218,18 +2218,18 @@ void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}
-void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister mscratch) {
+void Mips64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}
-void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
+void Mips64Assembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
Mips64ManagedRegister scratch = mscratch.AsMips64();
CHECK(scratch.IsGpuRegister()) << scratch;
LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
@@ -2431,8 +2431,8 @@ void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscr
// TODO: place reference map on call
}
-void Mips64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
- ManagedRegister mscratch ATTRIBUTE_UNUSED) {
+void Mips64Assembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+ ManagedRegister mscratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
}
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 1ad05b038b..a7d350c010 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -20,12 +20,14 @@
#include <utility>
#include <vector>
+#include "base/enums.h"
#include "base/macros.h"
#include "constants_mips64.h"
#include "globals.h"
#include "managed_register_mips64.h"
#include "offsets.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
#include "utils/label.h"
namespace art {
@@ -100,7 +102,7 @@ class Mips64ExceptionSlowPath {
DISALLOW_COPY_AND_ASSIGN(Mips64ExceptionSlowPath);
};
-class Mips64Assembler FINAL : public Assembler {
+class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
public:
explicit Mips64Assembler(ArenaAllocator* arena)
: Assembler(arena),
@@ -118,6 +120,9 @@ class Mips64Assembler FINAL : public Assembler {
}
}
+ size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); }
+ DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
+
// Emit Machine Instructions.
void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
@@ -383,11 +388,11 @@ class Mips64Assembler FINAL : public Assembler {
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) OVERRIDE;
+ void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
ManagedRegister mscratch) OVERRIDE;
@@ -395,7 +400,7 @@ class Mips64Assembler FINAL : public Assembler {
// Load routines.
void Load(ManagedRegister mdest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
@@ -404,19 +409,19 @@ class Mips64Assembler FINAL : public Assembler {
void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister mscratch) OVERRIDE;
-
- void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
ManagedRegister mscratch) OVERRIDE;
+ void CopyRawPtrToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) OVERRIDE;
+
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) OVERRIDE;
void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) OVERRIDE;
@@ -471,7 +476,7 @@ class Mips64Assembler FINAL : public Assembler {
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) OVERRIDE;
- void CallFromThread64(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) OVERRIDE;
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
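
The two one-line forwarders added above exist because Mips64Assembler now derives from both
Assembler and JNIMacroAssembler<PointerSize::k64>, and both bases declare CodeSize() and
cfi(); unqualified uses are ambiguous until the derived class picks one. A minimal,
self-contained illustration of the C++ rule at play:

  #include <cstddef>

  struct ConcreteBase { std::size_t CodeSize() const { return 1u; } };
  struct InterfaceBase { virtual std::size_t CodeSize() const = 0; };

  struct Derived : ConcreteBase, InterfaceBase {
    // Without this forwarder, d.CodeSize() is ambiguous and InterfaceBase's
    // pure virtual stays unimplemented; forwarding resolves both at once.
    std::size_t CodeSize() const override { return ConcreteBase::CodeSize(); }
  };
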
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index bd5fc4031a..f1a991574b 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1943,489 +1943,6 @@ void X86Assembler::EmitGenericShift(int reg_or_opcode,
EmitOperand(reg_or_opcode, operand);
}
-static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::X86Core(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = 4;
-
-void X86Assembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> spill_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- DCHECK_EQ(buffer_.Size(), 0U); // Nothing emitted yet.
- cfi_.SetCurrentCFAOffset(4); // Return address on stack.
- CHECK_ALIGNED(frame_size, kStackAlignment);
- int gpr_count = 0;
- for (int i = spill_regs.size() - 1; i >= 0; --i) {
- Register spill = spill_regs[i].AsX86().AsCpuRegister();
- pushl(spill);
- gpr_count++;
- cfi_.AdjustCFAOffset(kFramePointerSize);
- cfi_.RelOffset(DWARFReg(spill), 0);
- }
-
- // return address then method on stack.
- int32_t adjust = frame_size - gpr_count * kFramePointerSize -
- kFramePointerSize /*method*/ -
- kFramePointerSize /*return address*/;
- addl(ESP, Immediate(-adjust));
- cfi_.AdjustCFAOffset(adjust);
- pushl(method_reg.AsX86().AsCpuRegister());
- cfi_.AdjustCFAOffset(kFramePointerSize);
- DCHECK_EQ(static_cast<size_t>(cfi_.GetCurrentCFAOffset()), frame_size);
-
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ManagedRegisterSpill spill = entry_spills.at(i);
- if (spill.AsX86().IsCpuRegister()) {
- int offset = frame_size + spill.getSpillOffset();
- movl(Address(ESP, offset), spill.AsX86().AsCpuRegister());
- } else {
- DCHECK(spill.AsX86().IsXmmRegister());
- if (spill.getSize() == 8) {
- movsd(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
- } else {
- CHECK_EQ(spill.getSize(), 4);
- movss(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
- }
- }
- }
-}
-
-void X86Assembler::RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> spill_regs) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- cfi_.RememberState();
- // -kFramePointerSize for ArtMethod*.
- int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
- addl(ESP, Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
- for (size_t i = 0; i < spill_regs.size(); ++i) {
- Register spill = spill_regs[i].AsX86().AsCpuRegister();
- popl(spill);
- cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
- cfi_.Restore(DWARFReg(spill));
- }
- ret();
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
-void X86Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addl(ESP, Immediate(-adjust));
- cfi_.AdjustCFAOffset(adjust);
-}
-
-void X86Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addl(ESP, Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
-}
-
-void X86Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
- X86ManagedRegister src = msrc.AsX86();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- movl(Address(ESP, offs), src.AsCpuRegister());
- } else if (src.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- movl(Address(ESP, offs), src.AsRegisterPairLow());
- movl(Address(ESP, FrameOffset(offs.Int32Value()+4)),
- src.AsRegisterPairHigh());
- } else if (src.IsX87Register()) {
- if (size == 4) {
- fstps(Address(ESP, offs));
- } else {
- fstpl(Address(ESP, offs));
- }
- } else {
- CHECK(src.IsXmmRegister());
- if (size == 4) {
- movss(Address(ESP, offs), src.AsXmmRegister());
- } else {
- movsd(Address(ESP, offs), src.AsXmmRegister());
- }
- }
-}
-
-void X86Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
- X86ManagedRegister src = msrc.AsX86();
- CHECK(src.IsCpuRegister());
- movl(Address(ESP, dest), src.AsCpuRegister());
-}
-
-void X86Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
- X86ManagedRegister src = msrc.AsX86();
- CHECK(src.IsCpuRegister());
- movl(Address(ESP, dest), src.AsCpuRegister());
-}
-
-void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister) {
- movl(Address(ESP, dest), Immediate(imm));
-}
-
-void X86Assembler::StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister) {
- fs()->movl(Address::Absolute(dest), Immediate(imm));
-}
-
-void X86Assembler::StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
- fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
-}
-
-void X86Assembler::StoreStackPointerToThread32(ThreadOffset32 thr_offs) {
- fs()->movl(Address::Absolute(thr_offs), ESP);
-}
-
-void X86Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
- FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
- UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
-}
-
-void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
- X86ManagedRegister dest = mdest.AsX86();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- movl(dest.AsCpuRegister(), Address(ESP, src));
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- movl(dest.AsRegisterPairLow(), Address(ESP, src));
- movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- flds(Address(ESP, src));
- } else {
- fldl(Address(ESP, src));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- movss(dest.AsXmmRegister(), Address(ESP, src));
- } else {
- movsd(dest.AsXmmRegister(), Address(ESP, src));
- }
- }
-}
-
-void X86Assembler::LoadFromThread32(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
- X86ManagedRegister dest = mdest.AsX86();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
- fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4)));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- fs()->flds(Address::Absolute(src));
- } else {
- fs()->fldl(Address::Absolute(src));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
- } else {
- fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
- }
- }
-}
-
-void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(ESP, src));
-}
-
-void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
- if (unpoison_reference) {
- MaybeUnpoisonHeapReference(dest.AsCpuRegister());
- }
-}
-
-void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
-}
-
-void X86Assembler::LoadRawPtrFromThread32(ManagedRegister mdest,
- ThreadOffset32 offs) {
- X86ManagedRegister dest = mdest.AsX86();
- CHECK(dest.IsCpuRegister());
- fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
-}
-
-void X86Assembler::SignExtend(ManagedRegister mreg, size_t size) {
- X86ManagedRegister reg = mreg.AsX86();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
- } else {
- movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
- X86ManagedRegister reg = mreg.AsX86();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
- } else {
- movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
- X86ManagedRegister dest = mdest.AsX86();
- X86ManagedRegister src = msrc.AsX86();
- if (!dest.Equals(src)) {
- if (dest.IsCpuRegister() && src.IsCpuRegister()) {
- movl(dest.AsCpuRegister(), src.AsCpuRegister());
- } else if (src.IsX87Register() && dest.IsXmmRegister()) {
- // Pass via stack and pop X87 register
- subl(ESP, Immediate(16));
- if (size == 4) {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstps(Address(ESP, 0));
- movss(dest.AsXmmRegister(), Address(ESP, 0));
- } else {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstpl(Address(ESP, 0));
- movsd(dest.AsXmmRegister(), Address(ESP, 0));
- }
- addl(ESP, Immediate(16));
- } else {
- // TODO: x87, SSE
- UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
- }
- }
-}
-
-void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- movl(scratch.AsCpuRegister(), Address(ESP, src));
- movl(Address(ESP, dest), scratch.AsCpuRegister());
-}
-
-void X86Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
- ThreadOffset32 thr_offs,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
- Store(fr_offs, scratch, 4);
-}
-
-void X86Assembler::CopyRawPtrToThread32(ThreadOffset32 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- Load(scratch, fr_offs, 4);
- fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
-}
-
-void X86Assembler::Copy(FrameOffset dest, FrameOffset src,
- ManagedRegister mscratch,
- size_t size) {
- X86ManagedRegister scratch = mscratch.AsX86();
- if (scratch.IsCpuRegister() && size == 8) {
- Load(scratch, src, 4);
- Store(dest, scratch, 4);
- Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
- Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
- } else {
- Load(scratch, src, size);
- Store(dest, scratch, size);
- }
-}
-
-void X86Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void X86Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister scratch, size_t size) {
- CHECK(scratch.IsNoRegister());
- CHECK_EQ(size, 4u);
- pushl(Address(ESP, src));
- popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
-}
-
-void X86Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsX86().AsCpuRegister();
- CHECK_EQ(size, 4u);
- movl(scratch, Address(ESP, src_base));
- movl(scratch, Address(scratch, src_offset));
- movl(Address(ESP, dest), scratch);
-}
-
-void X86Assembler::Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) {
- CHECK_EQ(size, 4u);
- CHECK(scratch.IsNoRegister());
- pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
- popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
-}
-
-void X86Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- Register scratch = mscratch.AsX86().AsCpuRegister();
- CHECK_EQ(size, 4u);
- CHECK_EQ(dest.Int32Value(), src.Int32Value());
- movl(scratch, Address(ESP, src));
- pushl(Address(scratch, src_offset));
- popl(Address(scratch, dest_offset));
-}
-
-void X86Assembler::MemoryBarrier(ManagedRegister) {
- mfence();
-}
-
-void X86Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
- X86ManagedRegister out_reg = mout_reg.AsX86();
- X86ManagedRegister in_reg = min_reg.AsX86();
- CHECK(in_reg.IsCpuRegister());
- CHECK(out_reg.IsCpuRegister());
- VerifyObject(in_reg, null_allowed);
- if (null_allowed) {
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
- Bind(&null_arg);
- } else {
- leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
- }
-}
-
-void X86Assembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
- X86ManagedRegister scratch = mscratch.AsX86();
- CHECK(scratch.IsCpuRegister());
- if (null_allowed) {
- Label null_arg;
- movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
- testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
- j(kZero, &null_arg);
- leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
- Bind(&null_arg);
- } else {
- leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
- }
- Store(out_off, scratch, 4);
-}
-
-// Given a handle scope entry, load the associated reference.
-void X86Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- X86ManagedRegister out_reg = mout_reg.AsX86();
- X86ManagedRegister in_reg = min_reg.AsX86();
- CHECK(out_reg.IsCpuRegister());
- CHECK(in_reg.IsCpuRegister());
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
- Bind(&null_arg);
-}
-
-void X86Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
- X86ManagedRegister base = mbase.AsX86();
- CHECK(base.IsCpuRegister());
- call(Address(base.AsCpuRegister(), offset.Int32Value()));
- // TODO: place reference map on call
-}
-
-void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
- Register scratch = mscratch.AsX86().AsCpuRegister();
- movl(scratch, Address(ESP, base));
- call(Address(scratch, offset));
-}
-
-void X86Assembler::CallFromThread32(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
- fs()->call(Address::Absolute(offset));
-}
-
-void X86Assembler::GetCurrentThread(ManagedRegister tr) {
- fs()->movl(tr.AsX86().AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
-}
-
-void X86Assembler::GetCurrentThread(FrameOffset offset,
- ManagedRegister mscratch) {
- X86ManagedRegister scratch = mscratch.AsX86();
- fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
- movl(Address(ESP, offset), scratch.AsCpuRegister());
-}
-
-void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86ExceptionSlowPath* slow = new (GetArena()) X86ExceptionSlowPath(stack_adjust);
- buffer_.EnqueueSlowPath(slow);
- fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
- j(kNotEqual, slow->Entry());
-}
-
-void X86ExceptionSlowPath::Emit(Assembler *sasm) {
- X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_);
- // Note: the return value is dead
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception as argument in EAX
- __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
- // this call should never return
- __ int3();
-#undef __
-}
-
void X86Assembler::AddConstantArea() {
ArrayRef<const int32_t> area = constant_area_.GetBuffer();
// Generate the data for the literal area.
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 6d519e425f..92a92a58b9 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -21,6 +21,7 @@
#include "base/arena_containers.h"
#include "base/bit_utils.h"
+#include "base/enums.h"
#include "base/macros.h"
#include "constants_x86.h"
#include "globals.h"
@@ -631,124 +632,6 @@ class X86Assembler FINAL : public Assembler {
void Bind(NearLabel* label);
//
- // Overridden common assembler high-level functionality
- //
-
- // Emit code that will create an activation on the stack
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-
- void StoreImmediateToThread32(ThreadOffset32 dest, uint32_t imm, ManagedRegister scratch)
- OVERRIDE;
-
- void StoreStackOffsetToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void StoreStackPointerToThread32(ThreadOffset32 thr_offs) OVERRIDE;
-
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-
- void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
-
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-
- void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
-
- // Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
-
- void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset32 thr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void CopyRawPtrToThread32(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
-
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void MemoryBarrier(ManagedRegister) OVERRIDE;
-
- // Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
- //
// Heap poisoning.
//
@@ -845,15 +728,6 @@ inline void X86Assembler::EmitOperandSizeOverride() {
EmitUint8(0x66);
}
-// Slowpath entered when Thread::Current()->_exception is non-null
-class X86ExceptionSlowPath FINAL : public SlowPath {
- public:
- explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const size_t stack_adjust_;
-};
-
} // namespace x86
} // namespace art
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
new file mode 100644
index 0000000000..77af885646
--- /dev/null
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -0,0 +1,541 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_x86.h"
+
+#include "utils/assembler.h"
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "thread.h"
+
+namespace art {
+namespace x86 {
+
+// Slowpath entered when Thread::Current()->_exception is non-null
+class X86ExceptionSlowPath FINAL : public SlowPath {
+ public:
+ explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
+ virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ private:
+ const size_t stack_adjust_;
+};
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::X86Core(static_cast<int>(reg));
+}
+
+constexpr size_t kFramePointerSize = 4;
+
+#define __ asm_.
+
+void X86JNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> spill_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ DCHECK_EQ(CodeSize(), 0U); // Nothing emitted yet.
+ cfi().SetCurrentCFAOffset(4); // Return address on stack.
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ int gpr_count = 0;
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ Register spill = spill_regs[i].AsX86().AsCpuRegister();
+ __ pushl(spill);
+ gpr_count++;
+ cfi().AdjustCFAOffset(kFramePointerSize);
+ cfi().RelOffset(DWARFReg(spill), 0);
+ }
+
+ // return address then method on stack.
+ int32_t adjust = frame_size - gpr_count * kFramePointerSize -
+ kFramePointerSize /*method*/ -
+ kFramePointerSize /*return address*/;
+ __ addl(ESP, Immediate(-adjust));
+ cfi().AdjustCFAOffset(adjust);
+ __ pushl(method_reg.AsX86().AsCpuRegister());
+ cfi().AdjustCFAOffset(kFramePointerSize);
+ DCHECK_EQ(static_cast<size_t>(cfi().GetCurrentCFAOffset()), frame_size);
+
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ if (spill.AsX86().IsCpuRegister()) {
+ int offset = frame_size + spill.getSpillOffset();
+ __ movl(Address(ESP, offset), spill.AsX86().AsCpuRegister());
+ } else {
+ DCHECK(spill.AsX86().IsXmmRegister());
+ if (spill.getSize() == 8) {
+ __ movsd(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ __ movss(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister());
+ }
+ }
+ }
+}
+
+void X86JNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> spill_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+ // -kFramePointerSize for ArtMethod*.
+ int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
+ __ addl(ESP, Immediate(adjust));
+ cfi().AdjustCFAOffset(-adjust);
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ Register spill = spill_regs[i].AsX86().AsCpuRegister();
+ __ popl(spill);
+ cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
+ cfi().Restore(DWARFReg(spill));
+ }
+ __ ret();
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+void X86JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ __ addl(ESP, Immediate(-adjust));
+ cfi().AdjustCFAOffset(adjust);
+}
+
+static void DecreaseFrameSizeImpl(X86Assembler* assembler, size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ assembler->addl(ESP, Immediate(adjust));
+ assembler->cfi().AdjustCFAOffset(-adjust);
+}
+
+void X86JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ DecreaseFrameSizeImpl(&asm_, adjust);
+}
+
+void X86JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+ X86ManagedRegister src = msrc.AsX86();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ movl(Address(ESP, offs), src.AsCpuRegister());
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ movl(Address(ESP, offs), src.AsRegisterPairLow());
+ __ movl(Address(ESP, FrameOffset(offs.Int32Value()+4)), src.AsRegisterPairHigh());
+ } else if (src.IsX87Register()) {
+ if (size == 4) {
+ __ fstps(Address(ESP, offs));
+ } else {
+ __ fstpl(Address(ESP, offs));
+ }
+ } else {
+ CHECK(src.IsXmmRegister());
+ if (size == 4) {
+ __ movss(Address(ESP, offs), src.AsXmmRegister());
+ } else {
+ __ movsd(Address(ESP, offs), src.AsXmmRegister());
+ }
+ }
+}
+
+void X86JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ X86ManagedRegister src = msrc.AsX86();
+ CHECK(src.IsCpuRegister());
+ __ movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ X86ManagedRegister src = msrc.AsX86();
+ CHECK(src.IsCpuRegister());
+ __ movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister) {
+ __ movl(Address(ESP, dest), Immediate(imm));
+}
+
+void X86JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ __ leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
+ __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
+ __ fs()->movl(Address::Absolute(thr_offs), ESP);
+}
+
+void X86JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
+ ManagedRegister /*src*/,
+ FrameOffset /*in_off*/,
+ ManagedRegister /*scratch*/) {
+ UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
+}
+
+void X86JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ movl(dest.AsCpuRegister(), Address(ESP, src));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ movl(dest.AsRegisterPairLow(), Address(ESP, src));
+ __ movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ flds(Address(ESP, src));
+ } else {
+ __ fldl(Address(ESP, src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ movss(dest.AsXmmRegister(), Address(ESP, src));
+ } else {
+ __ movsd(dest.AsXmmRegister(), Address(ESP, src));
+ }
+ }
+}
+
+void X86JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
+ __ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ fs()->flds(Address::Absolute(src));
+ } else {
+ __ fs()->fldl(Address::Absolute(src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
+ } else {
+ __ fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
+ }
+ }
+}
+
+void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(ESP, src));
+}
+
+void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
+ bool unpoison_reference) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+ if (unpoison_reference) {
+ __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
+ }
+}
+
+void X86JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest,
+ ManagedRegister base,
+ Offset offs) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+}
+
+void X86JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
+ X86ManagedRegister dest = mdest.AsX86();
+ CHECK(dest.IsCpuRegister());
+ __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
+}
+
+void X86JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+ X86ManagedRegister reg = mreg.AsX86();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
+ } else {
+ __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ X86ManagedRegister reg = mreg.AsX86();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
+ } else {
+ __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+ X86ManagedRegister dest = mdest.AsX86();
+ X86ManagedRegister src = msrc.AsX86();
+ if (!dest.Equals(src)) {
+ if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+ __ movl(dest.AsCpuRegister(), src.AsCpuRegister());
+ } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+ // Pass via stack and pop X87 register
+ __ subl(ESP, Immediate(16));
+ if (size == 4) {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstps(Address(ESP, 0));
+ __ movss(dest.AsXmmRegister(), Address(ESP, 0));
+ } else {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstpl(Address(ESP, 0));
+ __ movsd(dest.AsXmmRegister(), Address(ESP, 0));
+ }
+ __ addl(ESP, Immediate(16));
+ } else {
+ // TODO: x87, SSE
+ UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+ }
+ }
+}
+
+void X86JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ __ movl(scratch.AsCpuRegister(), Address(ESP, src));
+ __ movl(Address(ESP, dest), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
+ Store(fr_offs, scratch, 4);
+}
+
+void X86JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ Load(scratch, fr_offs, 4);
+ __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ if (scratch.IsCpuRegister() && size == 8) {
+ Load(scratch, src, 4);
+ Store(dest, scratch, 4);
+ Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+ Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+ } else {
+ Load(scratch, src, size);
+ Store(dest, scratch, size);
+ }
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ ManagedRegister /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void X86JNIMacroAssembler::Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK(scratch.IsNoRegister());
+ CHECK_EQ(size, 4u);
+ __ pushl(Address(ESP, src));
+ __ popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ __ movl(scratch, Address(ESP, src_base));
+ __ movl(scratch, Address(scratch, src_offset));
+ __ movl(Address(ESP, dest), scratch);
+}
+
+void X86JNIMacroAssembler::Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK_EQ(size, 4u);
+ CHECK(scratch.IsNoRegister());
+ __ pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
+ __ popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86JNIMacroAssembler::Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ CHECK_EQ(dest.Int32Value(), src.Int32Value());
+ __ movl(scratch, Address(ESP, src));
+ __ pushl(Address(scratch, src_offset));
+ __ popl(Address(scratch, dest_offset));
+}
+
+void X86JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
+ __ mfence();
+}
+
+void X86JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ X86ManagedRegister out_reg = mout_reg.AsX86();
+ X86ManagedRegister in_reg = min_reg.AsX86();
+ CHECK(in_reg.IsCpuRegister());
+ CHECK(out_reg.IsCpuRegister());
+ VerifyObject(in_reg, null_allowed);
+ if (null_allowed) {
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ }
+}
+
+void X86JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ CHECK(scratch.IsCpuRegister());
+ if (null_allowed) {
+ Label null_arg;
+ __ movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset));
+ }
+ Store(out_off, scratch, 4);
+}
+
+// Given a handle scope entry, load the associated reference.
+void X86JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ X86ManagedRegister out_reg = mout_reg.AsX86();
+ X86ManagedRegister in_reg = min_reg.AsX86();
+ CHECK(out_reg.IsCpuRegister());
+ CHECK(in_reg.IsCpuRegister());
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+ __ Bind(&null_arg);
+}
+
+void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+ X86ManagedRegister base = mbase.AsX86();
+ CHECK(base.IsCpuRegister());
+ __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
+ // TODO: place reference map on call
+}
+
+void X86JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ Register scratch = mscratch.AsX86().AsCpuRegister();
+ __ movl(scratch, Address(ESP, base));
+ __ call(Address(scratch, offset));
+}
+
+void X86JNIMacroAssembler::CallFromThread(ThreadOffset32 offset, ManagedRegister /*mscratch*/) {
+ __ fs()->call(Address::Absolute(offset));
+}
+
+void X86JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ __ fs()->movl(tr.AsX86().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
+}
+
+void X86JNIMacroAssembler::GetCurrentThread(FrameOffset offset,
+ ManagedRegister mscratch) {
+ X86ManagedRegister scratch = mscratch.AsX86();
+ __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>()));
+ __ movl(Address(ESP, offset), scratch.AsCpuRegister());
+}
+
+void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
+ X86ExceptionSlowPath* slow = new (__ GetArena()) X86ExceptionSlowPath(stack_adjust);
+ __ GetBuffer()->EnqueueSlowPath(slow);
+ __ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0));
+ __ j(kNotEqual, slow->Entry());
+}
+
+#undef __
+
+void X86ExceptionSlowPath::Emit(Assembler *sasm) {
+ X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ // Note: the return value is dead
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSizeImpl(sp_asm, stack_adjust_);
+ }
+ // Pass exception as argument in EAX
+ __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException)));
+ // this call should never return
+ __ int3();
+#undef __
+}
+
+} // namespace x86
+} // namespace art
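
The JNI-level operations above moved out of X86Assembler essentially verbatim; the new
class reaches the wrapped X86Assembler through the "#define __ asm_." shorthand, so a body
line such as "__ movl(...)" preprocesses to "asm_.movl(...)". DecreaseFrameSizeImpl() is a
free function because the exception slow path holds only a raw X86Assembler* yet must share
the frame-adjustment logic. A brief usage sketch, assuming the arena setup used by the test
fixture earlier in this patch and that JNIMacroAssemblerFwd forwards FinalizeCode():

  ArenaPool pool;
  ArenaAllocator arena(&pool);
  x86::X86JNIMacroAssembler jni_asm(&arena);
  jni_asm.IncreaseFrameSize(32);  // emits addl ESP, -32 and advances the CFA offset
  jni_asm.DecreaseFrameSize(32);  // emits addl ESP, 32 and restores the CFA offset
  jni_asm.FinalizeCode();
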
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
new file mode 100644
index 0000000000..3f07ede865
--- /dev/null
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_
+#define ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_
+
+#include <vector>
+
+#include "assembler_x86.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "offsets.h"
+#include "utils/array_ref.h"
+#include "utils/jni_macro_assembler.h"
+
+namespace art {
+namespace x86 {
+
+class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> {
+ public:
+ explicit X86JNIMacroAssembler(ArenaAllocator* arena) : JNIMacroAssemblerFwd(arena) {}
+ virtual ~X86JNIMacroAssembler() {}
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE;
+
+ // Copying routines
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset32 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void MemoryBarrier(ManagedRegister) OVERRIDE;
+
+ // Sign extension
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current()
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the handle scope entry to see if the value is
+  // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+  // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load it into dst
+ void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset]
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to a ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler);
+};
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_
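
The header above derives from JNIMacroAssemblerFwd, which owns the platform assembler as asm_; the .cc file then defines __ as asm_. so every high-level JNI operation forwards to plain instruction emitters. A self-contained sketch of that delegation pattern under toy names (ToyAssembler and ToyJNIMacroAssembler are illustrative, not ART types):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class ToyAssembler {
     public:
      void movl() { bytes_.push_back(0x89); }  // toy opcode, not a real encoding
      size_t CodeSize() const { return bytes_.size(); }
     private:
      std::vector<uint8_t> bytes_;
    };

    class ToyJNIMacroAssembler {
     public:
      // High-level JNI operation written in terms of the wrapped assembler.
      void StoreRef() {
    #define __ asm_.
        __ movl();  // expands to asm_.movl()
    #undef __
      }
      size_t CodeSize() const { return asm_.CodeSize(); }
     private:
      ToyAssembler asm_;
    };
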
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 977ce9dc0b..ddc824425e 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -2639,547 +2639,6 @@ void X86_64Assembler::EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const
}
}
-static dwarf::Reg DWARFReg(Register reg) {
- return dwarf::Reg::X86_64Core(static_cast<int>(reg));
-}
-static dwarf::Reg DWARFReg(FloatRegister reg) {
- return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = 8;
-
-void X86_64Assembler::BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> spill_regs,
- const ManagedRegisterEntrySpills& entry_spills) {
- DCHECK_EQ(buffer_.Size(), 0U); // Nothing emitted yet.
- cfi_.SetCurrentCFAOffset(8); // Return address on stack.
- CHECK_ALIGNED(frame_size, kStackAlignment);
- int gpr_count = 0;
- for (int i = spill_regs.size() - 1; i >= 0; --i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsCpuRegister()) {
- pushq(spill.AsCpuRegister());
- gpr_count++;
- cfi_.AdjustCFAOffset(kFramePointerSize);
- cfi_.RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
- }
- }
- // return address then method on stack.
- int64_t rest_of_frame = static_cast<int64_t>(frame_size)
- - (gpr_count * kFramePointerSize)
- - kFramePointerSize /*return address*/;
- subq(CpuRegister(RSP), Immediate(rest_of_frame));
- cfi_.AdjustCFAOffset(rest_of_frame);
-
- // spill xmms
- int64_t offset = rest_of_frame;
- for (int i = spill_regs.size() - 1; i >= 0; --i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsXmmRegister()) {
- offset -= sizeof(double);
- movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
- cfi_.RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
- }
- }
-
- static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
- "Unexpected frame pointer size.");
-
- movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
-
- for (size_t i = 0; i < entry_spills.size(); ++i) {
- ManagedRegisterSpill spill = entry_spills.at(i);
- if (spill.AsX86_64().IsCpuRegister()) {
- if (spill.getSize() == 8) {
- movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
- spill.AsX86_64().AsCpuRegister());
- } else {
- CHECK_EQ(spill.getSize(), 4);
- movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsCpuRegister());
- }
- } else {
- if (spill.getSize() == 8) {
- movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsXmmRegister());
- } else {
- CHECK_EQ(spill.getSize(), 4);
- movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()), spill.AsX86_64().AsXmmRegister());
- }
- }
- }
-}
-
-void X86_64Assembler::RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> spill_regs) {
- CHECK_ALIGNED(frame_size, kStackAlignment);
- cfi_.RememberState();
- int gpr_count = 0;
- // unspill xmms
- int64_t offset = static_cast<int64_t>(frame_size) - (spill_regs.size() * kFramePointerSize) - 2 * kFramePointerSize;
- for (size_t i = 0; i < spill_regs.size(); ++i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsXmmRegister()) {
- offset += sizeof(double);
- movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
- cfi_.Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
- } else {
- gpr_count++;
- }
- }
- int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
- addq(CpuRegister(RSP), Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
- for (size_t i = 0; i < spill_regs.size(); ++i) {
- x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
- if (spill.IsCpuRegister()) {
- popq(spill.AsCpuRegister());
- cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
- cfi_.Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
- }
- }
- ret();
- // The CFI should be restored for any code that follows the exit block.
- cfi_.RestoreState();
- cfi_.DefCFAOffset(frame_size);
-}
-
-void X86_64Assembler::IncreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
- cfi_.AdjustCFAOffset(adjust);
-}
-
-void X86_64Assembler::DecreaseFrameSize(size_t adjust) {
- CHECK_ALIGNED(adjust, kStackAlignment);
- addq(CpuRegister(RSP), Immediate(adjust));
- cfi_.AdjustCFAOffset(-adjust);
-}
-
-void X86_64Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
- X86_64ManagedRegister src = msrc.AsX86_64();
- if (src.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (src.IsCpuRegister()) {
- if (size == 4) {
- CHECK_EQ(4u, size);
- movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
- } else {
- CHECK_EQ(8u, size);
- movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
- }
- } else if (src.IsRegisterPair()) {
- CHECK_EQ(0u, size);
- movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
- movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value()+4)),
- src.AsRegisterPairHigh());
- } else if (src.IsX87Register()) {
- if (size == 4) {
- fstps(Address(CpuRegister(RSP), offs));
- } else {
- fstpl(Address(CpuRegister(RSP), offs));
- }
- } else {
- CHECK(src.IsXmmRegister());
- if (size == 4) {
- movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
- } else {
- movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
- }
- }
-}
-
-void X86_64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
- X86_64ManagedRegister src = msrc.AsX86_64();
- CHECK(src.IsCpuRegister());
- movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
- X86_64ManagedRegister src = msrc.AsX86_64();
- CHECK(src.IsCpuRegister());
- movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister) {
- movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq?
-}
-
-void X86_64Assembler::StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister) {
- gs()->movl(Address::Absolute(dest, true), Immediate(imm)); // TODO(64) movq?
-}
-
-void X86_64Assembler::StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
- gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::StoreStackPointerToThread64(ThreadOffset64 thr_offs) {
- gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
-}
-
-void X86_64Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
- FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
- UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
-}
-
-void X86_64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- if (size == 4) {
- CHECK_EQ(4u, size);
- movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
- } else {
- CHECK_EQ(8u, size);
- movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
- }
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(0u, size);
- movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
- movq(dest.AsRegisterPairHigh(), Address(CpuRegister(RSP), FrameOffset(src.Int32Value()+4)));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- flds(Address(CpuRegister(RSP), src));
- } else {
- fldl(Address(CpuRegister(RSP), src));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
- } else {
- movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
- }
- }
-}
-
-void X86_64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- if (dest.IsNoRegister()) {
- CHECK_EQ(0u, size);
- } else if (dest.IsCpuRegister()) {
- CHECK_EQ(4u, size);
- gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
- } else if (dest.IsRegisterPair()) {
- CHECK_EQ(8u, size);
- gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
- } else if (dest.IsX87Register()) {
- if (size == 4) {
- gs()->flds(Address::Absolute(src, true));
- } else {
- gs()->fldl(Address::Absolute(src, true));
- }
- } else {
- CHECK(dest.IsXmmRegister());
- if (size == 4) {
- gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
- } else {
- gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
- }
- }
-}
-
-void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister());
- movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
-}
-
-void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
- if (unpoison_reference) {
- MaybeUnpoisonHeapReference(dest.AsCpuRegister());
- }
-}
-
-void X86_64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
- Offset offs) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister() && dest.IsCpuRegister());
- movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
-}
-
-void X86_64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest, ThreadOffset64 offs) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- CHECK(dest.IsCpuRegister());
- gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
-}
-
-void X86_64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
- X86_64ManagedRegister reg = mreg.AsX86_64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
- } else {
- movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86_64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
- X86_64ManagedRegister reg = mreg.AsX86_64();
- CHECK(size == 1 || size == 2) << size;
- CHECK(reg.IsCpuRegister()) << reg;
- if (size == 1) {
- movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
- } else {
- movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
- }
-}
-
-void X86_64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
- X86_64ManagedRegister dest = mdest.AsX86_64();
- X86_64ManagedRegister src = msrc.AsX86_64();
- if (!dest.Equals(src)) {
- if (dest.IsCpuRegister() && src.IsCpuRegister()) {
- movq(dest.AsCpuRegister(), src.AsCpuRegister());
- } else if (src.IsX87Register() && dest.IsXmmRegister()) {
- // Pass via stack and pop X87 register
- subl(CpuRegister(RSP), Immediate(16));
- if (size == 4) {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstps(Address(CpuRegister(RSP), 0));
- movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
- } else {
- CHECK_EQ(src.AsX87Register(), ST0);
- fstpl(Address(CpuRegister(RSP), 0));
- movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
- }
- addq(CpuRegister(RSP), Immediate(16));
- } else {
- // TODO: x87, SSE
- UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
- }
- }
-}
-
-void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
- movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 thr_offs,
- ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
- Store(fr_offs, scratch, 8);
-}
-
-void X86_64Assembler::CopyRawPtrToThread64(ThreadOffset64 thr_offs,
- FrameOffset fr_offs,
- ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- Load(scratch, fr_offs, 8);
- gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch,
- size_t size) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- if (scratch.IsCpuRegister() && size == 8) {
- Load(scratch, src, 4);
- Store(dest, scratch, 4);
- Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
- Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
- } else {
- Load(scratch, src, size);
- Store(dest, scratch, size);
- }
-}
-
-void X86_64Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
- ManagedRegister /*scratch*/, size_t /*size*/) {
- UNIMPLEMENTED(FATAL);
-}
-
-void X86_64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister scratch, size_t size) {
- CHECK(scratch.IsNoRegister());
- CHECK_EQ(size, 4u);
- pushq(Address(CpuRegister(RSP), src));
- popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- CHECK_EQ(size, 4u);
- movq(scratch, Address(CpuRegister(RSP), src_base));
- movq(scratch, Address(scratch, src_offset));
- movq(Address(CpuRegister(RSP), dest), scratch);
-}
-
-void X86_64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) {
- CHECK_EQ(size, 4u);
- CHECK(scratch.IsNoRegister());
- pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
- popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
-}
-
-void X86_64Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister mscratch, size_t size) {
- CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- CHECK_EQ(size, 4u);
- CHECK_EQ(dest.Int32Value(), src.Int32Value());
- movq(scratch, Address(CpuRegister(RSP), src));
- pushq(Address(scratch, src_offset));
- popq(Address(scratch, dest_offset));
-}
-
-void X86_64Assembler::MemoryBarrier(ManagedRegister) {
- mfence();
-}
-
-void X86_64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
- FrameOffset handle_scope_offset,
- ManagedRegister min_reg, bool null_allowed) {
- X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
- X86_64ManagedRegister in_reg = min_reg.AsX86_64();
- if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
- // Use out_reg as indicator of null.
- in_reg = out_reg;
- // TODO: movzwl
- movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- }
- CHECK(in_reg.IsCpuRegister());
- CHECK(out_reg.IsCpuRegister());
- VerifyObject(in_reg, null_allowed);
- if (null_allowed) {
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- Bind(&null_arg);
- } else {
- leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- }
-}
-
-void X86_64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
- FrameOffset handle_scope_offset,
- ManagedRegister mscratch,
- bool null_allowed) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- CHECK(scratch.IsCpuRegister());
- if (null_allowed) {
- Label null_arg;
- movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
- j(kZero, &null_arg);
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- Bind(&null_arg);
- } else {
- leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
- }
- Store(out_off, scratch, 8);
-}
-
-// Given a handle scope entry, load the associated reference.
-void X86_64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
- ManagedRegister min_reg) {
- X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
- X86_64ManagedRegister in_reg = min_reg.AsX86_64();
- CHECK(out_reg.IsCpuRegister());
- CHECK(in_reg.IsCpuRegister());
- Label null_arg;
- if (!out_reg.Equals(in_reg)) {
- xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
- }
- testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
- j(kZero, &null_arg);
- movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
- Bind(&null_arg);
-}
-
-void X86_64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86_64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
- // TODO: not validating references
-}
-
-void X86_64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
- X86_64ManagedRegister base = mbase.AsX86_64();
- CHECK(base.IsCpuRegister());
- call(Address(base.AsCpuRegister(), offset.Int32Value()));
- // TODO: place reference map on call
-}
-
-void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
- CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
- movq(scratch, Address(CpuRegister(RSP), base));
- call(Address(scratch, offset));
-}
-
-void X86_64Assembler::CallFromThread64(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
- gs()->call(Address::Absolute(offset, true));
-}
-
-void X86_64Assembler::GetCurrentThread(ManagedRegister tr) {
- gs()->movq(tr.AsX86_64().AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
-}
-
-void X86_64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
- X86_64ManagedRegister scratch = mscratch.AsX86_64();
- gs()->movq(scratch.AsCpuRegister(),
- Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
- movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
-}
-
-// Slowpath entered when Thread::Current()->_exception is non-null
-class X86_64ExceptionSlowPath FINAL : public SlowPath {
- public:
- explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
- virtual void Emit(Assembler *sp_asm) OVERRIDE;
- private:
- const size_t stack_adjust_;
-};
-
-void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
- X86_64ExceptionSlowPath* slow = new (GetArena()) X86_64ExceptionSlowPath(stack_adjust);
- buffer_.EnqueueSlowPath(slow);
- gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true), Immediate(0));
- j(kNotEqual, slow->Entry());
-}
-
-void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
- X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
-#define __ sp_asm->
- __ Bind(&entry_);
- // Note: the return value is dead
- if (stack_adjust_ != 0) { // Fix up the frame.
- __ DecreaseFrameSize(stack_adjust_);
- }
- // Pass exception as argument in RDI
- __ gs()->movq(CpuRegister(RDI),
- Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
- __ gs()->call(
- Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
- // this call should never return
- __ int3();
-#undef __
-}
-
void X86_64Assembler::AddConstantArea() {
ArrayRef<const int32_t> area = constant_area_.GetBuffer();
for (size_t i = 0, e = area.size(); i < e; i++) {
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index 52e39cf7e6..370f49cb05 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -28,6 +28,7 @@
#include "offsets.h"
#include "utils/array_ref.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
namespace art {
namespace x86_64 {
@@ -699,125 +700,6 @@ class X86_64Assembler FINAL : public Assembler {
}
void Bind(NearLabel* label);
- //
- // Overridden common assembler high-level functionality
- //
-
- // Emit code that will create an activation on the stack
- void BuildFrame(size_t frame_size,
- ManagedRegister method_reg,
- ArrayRef<const ManagedRegister> callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
-
- // Emit code that will remove an activation from the stack
- void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
- OVERRIDE;
-
- void IncreaseFrameSize(size_t adjust) OVERRIDE;
- void DecreaseFrameSize(size_t adjust) OVERRIDE;
-
- // Store routines
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
- void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
- void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
-
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-
- void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
- OVERRIDE;
-
- void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
-
- void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
- ManagedRegister scratch) OVERRIDE;
-
- // Load routines
- void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-
- void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
-
- void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
- bool unpoison_reference) OVERRIDE;
-
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
-
- // Copying routines
- void Move(ManagedRegister dest, ManagedRegister src, size_t size);
-
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
-
- void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
- OVERRIDE;
-
- void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
- size_t size) OVERRIDE;
-
- void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size) OVERRIDE;
-
- void MemoryBarrier(ManagedRegister) OVERRIDE;
-
- // Sign extension
- void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Zero extension
- void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
-
- // Exploit fast access in managed code to Thread::Current()
- void GetCurrentThread(ManagedRegister tr) OVERRIDE;
- void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
-
- // Set up out_reg to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed. in_reg holds a possibly stale reference
- // that can be used to avoid loading the handle scope entry to see if the value is
- // null.
- void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
- ManagedRegister in_reg, bool null_allowed) OVERRIDE;
-
- // Set up out_off to hold a Object** into the handle scope, or to be null if the
- // value is null and null_allowed.
- void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset,
- ManagedRegister scratch, bool null_allowed) OVERRIDE;
-
- // src holds a handle scope entry (Object**) load this into dst
- virtual void LoadReferenceFromHandleScope(ManagedRegister dst,
- ManagedRegister src);
-
- // Heap::VerifyObject on src. In some cases (such as a reference to this) we
- // know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
- void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
-
- // Call to address held at [base+offset]
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
-
- // Generate code to check if Thread::Current()->exception_ is non-null
- // and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
-
// Add a double to the constant area, returning the offset into
// the constant area where the literal resides.
size_t AddDouble(double v) { return constant_area_.AddDouble(v); }
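
AddDouble hands back the byte offset at which the literal will live once AddConstantArea() appends the pooled constants to the method body; generated code then addresses the literal RIP-relatively. A toy model of the offset bookkeeping (little-endian, no deduplication; whether the real ConstantArea deduplicates is not visible in this diff):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    class ToyConstantArea {
     public:
      // Returns the byte offset of the stored literal within the area.
      size_t AddDouble(double v) {
        int64_t bits;
        std::memcpy(&bits, &v, sizeof(bits));  // bit-preserving conversion
        size_t offset = buffer_.size() * sizeof(int32_t);
        buffer_.push_back(static_cast<int32_t>(bits));        // low word first
        buffer_.push_back(static_cast<int32_t>(bits >> 32));  // then high word
        return offset;
      }
     private:
      std::vector<int32_t> buffer_;
    };
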
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 788c7253cf..36c966b3cf 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -22,7 +22,9 @@
#include "base/bit_utils.h"
#include "base/stl_util.h"
+#include "jni_macro_assembler_x86_64.h"
#include "utils/assembler_test.h"
+#include "utils/jni_macro_assembler_test.h"
namespace art {
@@ -1485,6 +1487,62 @@ TEST_F(AssemblerX86_64Test, SetCC) {
DriverFn(&setcc_test_fn, "setcc");
}
+TEST_F(AssemblerX86_64Test, MovzxbRegs) {
+ DriverStr(Repeatrb(&x86_64::X86_64Assembler::movzxb, "movzbl %{reg2}, %{reg1}"), "movzxb");
+}
+
+TEST_F(AssemblerX86_64Test, MovsxbRegs) {
+ DriverStr(Repeatrb(&x86_64::X86_64Assembler::movsxb, "movsbl %{reg2}, %{reg1}"), "movsxb");
+}
+
+TEST_F(AssemblerX86_64Test, Repnescasw) {
+ GetAssembler()->repne_scasw();
+ const char* expected = "repne scasw\n";
+ DriverStr(expected, "Repnescasw");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsw) {
+ GetAssembler()->repe_cmpsw();
+ const char* expected = "repe cmpsw\n";
+ DriverStr(expected, "Repecmpsw");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsl) {
+ GetAssembler()->repe_cmpsl();
+ const char* expected = "repe cmpsl\n";
+ DriverStr(expected, "Repecmpsl");
+}
+
+TEST_F(AssemblerX86_64Test, Repecmpsq) {
+ GetAssembler()->repe_cmpsq();
+ const char* expected = "repe cmpsq\n";
+ DriverStr(expected, "Repecmpsq");
+}
+
+TEST_F(AssemblerX86_64Test, Cmpb) {
+ GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128),
+ x86_64::Immediate(0));
+ const char* expected = "cmpb $0, 128(%RDI)\n";
+ DriverStr(expected, "cmpb");
+}
+
+class JNIMacroAssemblerX86_64Test : public JNIMacroAssemblerTest<x86_64::X86_64JNIMacroAssembler> {
+ public:
+ using Base = JNIMacroAssemblerTest<x86_64::X86_64JNIMacroAssembler>;
+
+ protected:
+ // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
+ std::string GetArchitectureString() OVERRIDE {
+ return "x86_64";
+ }
+
+ std::string GetDisassembleParameters() OVERRIDE {
+ return " -D -bbinary -mi386:x86-64 -Mx86-64,addr64,data32 --no-show-raw-insn";
+ }
+
+};
+
static x86_64::X86_64ManagedRegister ManagedFromCpu(x86_64::Register r) {
return x86_64::X86_64ManagedRegister::FromCpuRegister(r);
}
@@ -1493,8 +1551,8 @@ static x86_64::X86_64ManagedRegister ManagedFromFpu(x86_64::FloatRegister r) {
return x86_64::X86_64ManagedRegister::FromXmmRegister(r);
}
-std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string buildframe_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
// TODO: more interesting spill registers / entry spills.
// Two random spill regs.
@@ -1536,12 +1594,12 @@ std::string buildframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBU
return str.str();
}
-TEST_F(AssemblerX86_64Test, BuildFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, BuildFrame) {
DriverFn(&buildframe_test_fn, "BuildFrame");
}
-std::string removeframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string removeframe_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
// TODO: more interesting spill registers / entry spills.
// Two random spill regs.
@@ -1567,12 +1625,13 @@ std::string removeframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIB
return str.str();
}
-TEST_F(AssemblerX86_64Test, RemoveFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, RemoveFrame) {
DriverFn(&removeframe_test_fn, "RemoveFrame");
}
-std::string increaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string increaseframe_test_fn(
+ JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
assembler->IncreaseFrameSize(0U);
assembler->IncreaseFrameSize(kStackAlignment);
assembler->IncreaseFrameSize(10 * kStackAlignment);
@@ -1586,12 +1645,13 @@ std::string increaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTR
return str.str();
}
-TEST_F(AssemblerX86_64Test, IncreaseFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, IncreaseFrame) {
DriverFn(&increaseframe_test_fn, "IncreaseFrame");
}
-std::string decreaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
- x86_64::X86_64Assembler* assembler) {
+std::string decreaseframe_test_fn(
+ JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
+ x86_64::X86_64JNIMacroAssembler* assembler) {
assembler->DecreaseFrameSize(0U);
assembler->DecreaseFrameSize(kStackAlignment);
assembler->DecreaseFrameSize(10 * kStackAlignment);
@@ -1605,47 +1665,8 @@ std::string decreaseframe_test_fn(AssemblerX86_64Test::Base* assembler_test ATTR
return str.str();
}
-TEST_F(AssemblerX86_64Test, DecreaseFrame) {
+TEST_F(JNIMacroAssemblerX86_64Test, DecreaseFrame) {
DriverFn(&decreaseframe_test_fn, "DecreaseFrame");
}
-TEST_F(AssemblerX86_64Test, MovzxbRegs) {
- DriverStr(Repeatrb(&x86_64::X86_64Assembler::movzxb, "movzbl %{reg2}, %{reg1}"), "movzxb");
-}
-
-TEST_F(AssemblerX86_64Test, MovsxbRegs) {
- DriverStr(Repeatrb(&x86_64::X86_64Assembler::movsxb, "movsbl %{reg2}, %{reg1}"), "movsxb");
-}
-
-TEST_F(AssemblerX86_64Test, Repnescasw) {
- GetAssembler()->repne_scasw();
- const char* expected = "repne scasw\n";
- DriverStr(expected, "Repnescasw");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsw) {
- GetAssembler()->repe_cmpsw();
- const char* expected = "repe cmpsw\n";
- DriverStr(expected, "Repecmpsw");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsl) {
- GetAssembler()->repe_cmpsl();
- const char* expected = "repe cmpsl\n";
- DriverStr(expected, "Repecmpsl");
-}
-
-TEST_F(AssemblerX86_64Test, Repecmpsq) {
- GetAssembler()->repe_cmpsq();
- const char* expected = "repe cmpsq\n";
- DriverStr(expected, "Repecmpsq");
-}
-
-TEST_F(AssemblerX86_64Test, Cmpb) {
- GetAssembler()->cmpb(x86_64::Address(x86_64::CpuRegister(x86_64::RDI), 128),
- x86_64::Immediate(0));
- const char* expected = "cmpb $0, 128(%RDI)\n";
- DriverStr(expected, "cmpb");
-}
-
} // namespace art
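
For reference, one more frame-oriented test in the DriverFn shape used above: the function drives the macro assembler and returns the text the harness expects after disassembling the buffer. This is a hedged sketch; the expected string is illustrative, since the exact spelling depends on the objdump flags from GetDisassembleParameters().

    std::string storeref_test_fn(JNIMacroAssemblerX86_64Test::Base* assembler_test ATTRIBUTE_UNUSED,
                                 x86_64::X86_64JNIMacroAssembler* assembler) {
      // Store the reference held in RAX into the frame slot at [RSP + 4].
      assembler->StoreRef(FrameOffset(4), ManagedFromCpu(x86_64::RAX));
      std::ostringstream str;
      str << "movl %eax, 4(%rsp)\n";  // illustrative disassembly text
      return str.str();
    }

    TEST_F(JNIMacroAssemblerX86_64Test, StoreRef) {
      DriverFn(&storeref_test_fn, "StoreRef");
    }
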
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
new file mode 100644
index 0000000000..47fb59b1d8
--- /dev/null
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -0,0 +1,603 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_macro_assembler_x86_64.h"
+
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "memory_region.h"
+#include "thread.h"
+
+namespace art {
+namespace x86_64 {
+
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::X86_64Core(static_cast<int>(reg));
+}
+static dwarf::Reg DWARFReg(FloatRegister reg) {
+ return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
+}
+
+constexpr size_t kFramePointerSize = 8;
+
+#define __ asm_.
+
+void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> spill_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ DCHECK_EQ(CodeSize(), 0U); // Nothing emitted yet.
+ cfi().SetCurrentCFAOffset(8); // Return address on stack.
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ int gpr_count = 0;
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsCpuRegister()) {
+ __ pushq(spill.AsCpuRegister());
+ gpr_count++;
+ cfi().AdjustCFAOffset(kFramePointerSize);
+ cfi().RelOffset(DWARFReg(spill.AsCpuRegister().AsRegister()), 0);
+ }
+ }
+ // return address then method on stack.
+ int64_t rest_of_frame = static_cast<int64_t>(frame_size)
+ - (gpr_count * kFramePointerSize)
+ - kFramePointerSize /*return address*/;
+ __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
+ cfi().AdjustCFAOffset(rest_of_frame);
+
+ // spill xmms
+ int64_t offset = rest_of_frame;
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsXmmRegister()) {
+ offset -= sizeof(double);
+ __ movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
+ cfi().RelOffset(DWARFReg(spill.AsXmmRegister().AsFloatRegister()), offset);
+ }
+ }
+
+ static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
+ "Unexpected frame pointer size.");
+
+ __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
+
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ if (spill.AsX86_64().IsCpuRegister()) {
+ if (spill.getSize() == 8) {
+ __ movq(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsCpuRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ __ movl(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsCpuRegister());
+ }
+ } else {
+ if (spill.getSize() == 8) {
+ __ movsd(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsXmmRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ __ movss(Address(CpuRegister(RSP), frame_size + spill.getSpillOffset()),
+ spill.AsX86_64().AsXmmRegister());
+ }
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
+ ArrayRef<const ManagedRegister> spill_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ cfi().RememberState();
+ int gpr_count = 0;
+ // unspill xmms
+ int64_t offset = static_cast<int64_t>(frame_size)
+ - (spill_regs.size() * kFramePointerSize)
+ - 2 * kFramePointerSize;
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsXmmRegister()) {
+ offset += sizeof(double);
+ __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
+ cfi().Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
+ } else {
+ gpr_count++;
+ }
+ }
+ int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
+ __ addq(CpuRegister(RSP), Immediate(adjust));
+ cfi().AdjustCFAOffset(-adjust);
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
+ if (spill.IsCpuRegister()) {
+ __ popq(spill.AsCpuRegister());
+ cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize));
+ cfi().Restore(DWARFReg(spill.AsCpuRegister().AsRegister()));
+ }
+ }
+ __ ret();
+ // The CFI should be restored for any code that follows the exit block.
+ cfi().RestoreState();
+ cfi().DefCFAOffset(frame_size);
+}
+
+void X86_64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
+ cfi().AdjustCFAOffset(adjust);
+}
+
+static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ assembler->addq(CpuRegister(RSP), Immediate(adjust));
+ assembler->cfi().AdjustCFAOffset(-adjust);
+}
+
+void X86_64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
+ DecreaseFrameSizeImpl(adjust, &asm_);
+}
+
+void X86_64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCpuRegister()) {
+ if (size == 4) {
+ CHECK_EQ(4u, size);
+ __ movl(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
+ } else {
+ CHECK_EQ(8u, size);
+ __ movq(Address(CpuRegister(RSP), offs), src.AsCpuRegister());
+ }
+ } else if (src.IsRegisterPair()) {
+ CHECK_EQ(0u, size);
+ __ movq(Address(CpuRegister(RSP), offs), src.AsRegisterPairLow());
+    __ movq(Address(CpuRegister(RSP), FrameOffset(offs.Int32Value() + 4)),
+ src.AsRegisterPairHigh());
+ } else if (src.IsX87Register()) {
+ if (size == 4) {
+ __ fstps(Address(CpuRegister(RSP), offs));
+ } else {
+ __ fstpl(Address(CpuRegister(RSP), offs));
+ }
+ } else {
+ CHECK(src.IsXmmRegister());
+ if (size == 4) {
+ __ movss(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
+ } else {
+ __ movsd(Address(CpuRegister(RSP), offs), src.AsXmmRegister());
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ CHECK(src.IsCpuRegister());
+ __ movl(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ CHECK(src.IsCpuRegister());
+ __ movq(Address(CpuRegister(RSP), dest), src.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest,
+ uint32_t imm,
+ ManagedRegister) {
+  __ movl(Address(CpuRegister(RSP), dest), Immediate(imm));  // TODO(64): movq?
+}
+
+void X86_64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), fr_offs));
+ __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
+ __ gs()->movq(Address::Absolute(thr_offs, true), CpuRegister(RSP));
+}
+
+void X86_64JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/,
+ ManagedRegister /*src*/,
+ FrameOffset /*in_off*/,
+ ManagedRegister /*scratch*/) {
+ UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
+}
+
+void X86_64JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ if (size == 4) {
+ CHECK_EQ(4u, size);
+ __ movl(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+ } else {
+ CHECK_EQ(8u, size);
+ __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+ }
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(0u, size);
+ __ movq(dest.AsRegisterPairLow(), Address(CpuRegister(RSP), src));
+    __ movq(dest.AsRegisterPairHigh(),
+            Address(CpuRegister(RSP), FrameOffset(src.Int32Value() + 4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ flds(Address(CpuRegister(RSP), src));
+ } else {
+ __ fldl(Address(CpuRegister(RSP), src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
+ } else {
+ __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), src));
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::LoadFromThread(ManagedRegister mdest,
+                                             ThreadOffset64 src,
+                                             size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ __ gs()->movl(dest.AsCpuRegister(), Address::Absolute(src, true));
+ } else if (dest.IsRegisterPair()) {
+ CHECK_EQ(8u, size);
+ __ gs()->movq(dest.AsRegisterPairLow(), Address::Absolute(src, true));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ __ gs()->flds(Address::Absolute(src, true));
+ } else {
+ __ gs()->fldl(Address::Absolute(src, true));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ __ gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
+ } else {
+ __ gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ CHECK(dest.IsCpuRegister());
+ __ movq(dest.AsCpuRegister(), Address(CpuRegister(RSP), src));
+}
+
+void X86_64JNIMacroAssembler::LoadRef(ManagedRegister mdest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister() && base.AsX86_64().IsCpuRegister());
+ __ movl(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+ if (unpoison_reference) {
+ __ MaybeUnpoisonHeapReference(dest.AsCpuRegister());
+ }
+}
+
+void X86_64JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister() && base.AsX86_64().IsCpuRegister());
+ __ movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+}
+
+void X86_64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ CHECK(dest.IsCpuRegister());
+ __ gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
+}
+
+void X86_64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) {
+ X86_64ManagedRegister reg = mreg.AsX86_64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movsxb(reg.AsCpuRegister(), reg.AsCpuRegister());
+ } else {
+ __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86_64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ X86_64ManagedRegister reg = mreg.AsX86_64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ __ movzxb(reg.AsCpuRegister(), reg.AsCpuRegister());
+ } else {
+ __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86_64JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ if (!dest.Equals(src)) {
+ if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+ __ movq(dest.AsCpuRegister(), src.AsCpuRegister());
+ } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+ // Pass via stack and pop X87 register
+      __ subq(CpuRegister(RSP), Immediate(16));  // 64-bit adjust, matching the addq below.
+ if (size == 4) {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstps(Address(CpuRegister(RSP), 0));
+ __ movss(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
+ } else {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ __ fstpl(Address(CpuRegister(RSP), 0));
+ __ movsd(dest.AsXmmRegister(), Address(CpuRegister(RSP), 0));
+ }
+ __ addq(CpuRegister(RSP), Immediate(16));
+ } else {
+ // TODO: x87, SSE
+ UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+ }
+ }
+}
+
+void X86_64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), src));
+ __ movl(Address(CpuRegister(RSP), dest), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ __ gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
+ Store(fr_offs, scratch, 8);
+}
+
+void X86_64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ Load(scratch, fr_offs, 8);
+ __ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ if (scratch.IsCpuRegister() && size == 8) {
+ Load(scratch, src, 4);
+ Store(dest, scratch, 4);
+ Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+ Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+ } else {
+ Load(scratch, src, size);
+ Store(dest, scratch, size);
+ }
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset /*dst*/,
+ ManagedRegister /*src_base*/,
+ Offset /*src_offset*/,
+ ManagedRegister /*scratch*/,
+ size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void X86_64JNIMacroAssembler::Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK(scratch.IsNoRegister());
+ CHECK_EQ(size, 4u);
+ __ pushq(Address(CpuRegister(RSP), src));
+ __ popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ __ movq(scratch, Address(CpuRegister(RSP), src_base));
+ __ movq(scratch, Address(scratch, src_offset));
+ __ movq(Address(CpuRegister(RSP), dest), scratch);
+}
+
+void X86_64JNIMacroAssembler::Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) {
+ CHECK_EQ(size, 4u);
+ CHECK(scratch.IsNoRegister());
+ __ pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
+ __ popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
+}
+
+void X86_64JNIMacroAssembler::Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister mscratch,
+ size_t size) {
+ CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ CHECK_EQ(dest.Int32Value(), src.Int32Value());
+ __ movq(scratch, Address(CpuRegister(RSP), src));
+ __ pushq(Address(scratch, src_offset));
+ __ popq(Address(scratch, dest_offset));
+}
+
+void X86_64JNIMacroAssembler::MemoryBarrier(ManagedRegister) {
+ __ mfence();
+}
+
+void X86_64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
+ FrameOffset handle_scope_offset,
+ ManagedRegister min_reg,
+ bool null_allowed) {
+ X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
+ X86_64ManagedRegister in_reg = min_reg.AsX86_64();
+ if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
+ // Use out_reg as indicator of null.
+ in_reg = out_reg;
+ // TODO: movzwl
+ __ movl(in_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ }
+ CHECK(in_reg.IsCpuRegister());
+ CHECK(out_reg.IsCpuRegister());
+ VerifyObject(in_reg, null_allowed);
+ if (null_allowed) {
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leaq(out_reg.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ }
+}
+
+void X86_64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handle_scope_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ if (null_allowed) {
+ Label null_arg;
+ __ movl(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ __ Bind(&null_arg);
+ } else {
+ __ leaq(scratch.AsCpuRegister(), Address(CpuRegister(RSP), handle_scope_offset));
+ }
+ Store(out_off, scratch, 8);
+}
+
+// Given a handle scope entry, load the associated reference.
+void X86_64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
+ X86_64ManagedRegister in_reg = min_reg.AsX86_64();
+ CHECK(out_reg.IsCpuRegister());
+ CHECK(in_reg.IsCpuRegister());
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ __ j(kZero, &null_arg);
+ __ movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+ __ Bind(&null_arg);
+}
+
+void X86_64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86_64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86_64JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+ X86_64ManagedRegister base = mbase.AsX86_64();
+ CHECK(base.IsCpuRegister());
+ __ call(Address(base.AsCpuRegister(), offset.Int32Value()));
+ // TODO: place reference map on call
+}
+
+void X86_64JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ CpuRegister scratch = mscratch.AsX86_64().AsCpuRegister();
+ __ movq(scratch, Address(CpuRegister(RSP), base));
+ __ call(Address(scratch, offset));
+}
+
+void X86_64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset, ManagedRegister /*mscratch*/) {
+ __ gs()->call(Address::Absolute(offset, true));
+}
+
+void X86_64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) {
+ __ gs()->movq(tr.AsX86_64().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
+}
+
+void X86_64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ __ gs()->movq(scratch.AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset<kX86_64PointerSize>(), true));
+ __ movq(Address(CpuRegister(RSP), offset), scratch.AsCpuRegister());
+}
+
+// Slowpath entered when Thread::Current()->exception_ is non-null.
+class X86_64ExceptionSlowPath FINAL : public SlowPath {
+ public:
+ explicit X86_64ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
+ virtual void Emit(Assembler *sp_asm) OVERRIDE;
+ private:
+ const size_t stack_adjust_;
+};
+
+void X86_64JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
+ X86_64ExceptionSlowPath* slow = new (__ GetArena()) X86_64ExceptionSlowPath(stack_adjust);
+ __ GetBuffer()->EnqueueSlowPath(slow);
+  __ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true),
+                Immediate(0));
+ __ j(kNotEqual, slow->Entry());
+}
+
+#undef __
+
+void X86_64ExceptionSlowPath::Emit(Assembler *sasm) {
+ X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ // Note: the return value is dead
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ DecreaseFrameSizeImpl(stack_adjust_, sp_asm);
+ }
+ // Pass exception as argument in RDI
+ __ gs()->movq(CpuRegister(RDI),
+ Address::Absolute(Thread::ExceptionOffset<kX86_64PointerSize>(), true));
+ __ gs()->call(
+ Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64PointerSize, pDeliverException), true));
+  // This call should never return.
+ __ int3();
+#undef __
+}
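
Taken together, the slow path emitted above unwinds any extra stack adjustment, loads the pending exception into RDI (the first argument register in the x86-64 calling convention), and calls the pDeliverException entrypoint, which never returns; the int3 both documents and traps that assumption. As a listing (stack_adjust shown as a symbolic immediate):

  slow_path_entry:
    addq  rsp, stack_adjust                    ; only when stack_adjust_ != 0
    movq  rdi, gs:[Thread::ExceptionOffset]
    call  gs:[pDeliverException entrypoint]
    int3                                       ; unreachable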
+
+} // namespace x86_64
+} // namespace art
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
new file mode 100644
index 0000000000..cc4e57c999
--- /dev/null
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_
+#define ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_
+
+#include <vector>
+
+#include "assembler_x86_64.h"
+#include "base/arena_containers.h"
+#include "base/enums.h"
+#include "base/macros.h"
+#include "offsets.h"
+#include "utils/array_ref.h"
+#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
+
+namespace art {
+namespace x86_64 {
+
+class X86_64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86_64Assembler,
+ PointerSize::k64> {
+ public:
+ explicit X86_64JNIMacroAssembler(ArenaAllocator* arena)
+ : JNIMacroAssemblerFwd<X86_64Assembler, PointerSize::k64>(arena) {}
+ virtual ~X86_64JNIMacroAssembler() {}
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size,
+ ManagedRegister method_reg,
+ ArrayRef<const ManagedRegister> callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs)
+ OVERRIDE;
+
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
+
+ // Store routines
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
+
+ void StoreSpanning(FrameOffset dest,
+ ManagedRegister src,
+ FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
+
+ // Load routines
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+
+ void LoadRef(ManagedRegister dest,
+ ManagedRegister base,
+ MemberOffset offs,
+ bool unpoison_reference) OVERRIDE;
+
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+
+ // Copying routines
+  void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+
+ void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ ManagedRegister src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest_base,
+ Offset dest_offset,
+ FrameOffset src,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ FrameOffset src_base,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(ManagedRegister dest,
+ Offset dest_offset,
+ ManagedRegister src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void Copy(FrameOffset dest,
+ Offset dest_offset,
+ FrameOffset src,
+ Offset src_offset,
+ ManagedRegister scratch,
+ size_t size) OVERRIDE;
+
+ void MemoryBarrier(ManagedRegister) OVERRIDE;
+
+ // Sign extension
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Zero extension
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
+
+ // Exploit fast access in managed code to Thread::Current()
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
+
+  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the handle scope entry to see if the value is
+ // null.
+ void CreateHandleScopeEntry(ManagedRegister out_reg,
+ FrameOffset handlescope_offset,
+ ManagedRegister in_reg,
+ bool null_allowed) OVERRIDE;
+
+  // Set up out_off to hold an Object** into the handle scope, or to be null if the
+ // value is null and null_allowed.
+ void CreateHandleScopeEntry(FrameOffset out_off,
+ FrameOffset handlescope_offset,
+ ManagedRegister scratch,
+ bool null_allowed) OVERRIDE;
+
+  // src holds a handle scope entry (Object**); load this into dst.
+  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE;
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
+
+ // Call to address held at [base+offset]
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(X86_64JNIMacroAssembler);
+};
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_X86_64_JNI_MACRO_ASSEMBLER_X86_64_H_
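
For orientation, a minimal sketch of how a JNI stub generator might drive this interface. This is hypothetical glue, not ART's actual JNI compiler: the function name, parameters, and call sequence are placeholders chosen only to exercise the methods declared above.

    // Hypothetical skeleton; the real driver is ART's JNI stub compiler.
    void EmitJniStubSkeleton(ArenaAllocator* arena,
                             size_t frame_size,
                             ManagedRegister method_reg,
                             ArrayRef<const ManagedRegister> callee_saves,
                             const ManagedRegisterEntrySpills& entry_spills,
                             ManagedRegister scratch,
                             Offset native_code_offset) {
      x86_64::X86_64JNIMacroAssembler jasm(arena);
      jasm.BuildFrame(frame_size, method_reg, callee_saves, entry_spills);  // prologue
      jasm.Call(method_reg, native_code_offset, scratch);  // into native code
      jasm.ExceptionPoll(scratch, /*stack_adjust=*/0);     // pending exception check
      jasm.RemoveFrame(frame_size, callee_saves);          // epilogue and return
    }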