Diffstat (limited to 'compiler/utils')
-rw-r--r-- | compiler/utils/jni_macro_assembler.cc | 4
-rw-r--r-- | compiler/utils/x86/assembler_x86.cc | 478
-rw-r--r-- | compiler/utils/x86/assembler_x86.h | 136
-rw-r--r-- | compiler/utils/x86/jni_macro_assembler_x86.cc | 541
-rw-r--r-- | compiler/utils/x86/jni_macro_assembler_x86.h | 162
5 files changed, 706 insertions, 615 deletions
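The first hunk below repoints the 32-bit factory in jni_macro_assembler.cc from the monolithic x86::X86Assembler to the new x86::X86JNIMacroAssembler. A minimal caller-side sketch of that factory path follows; the arena setup is assumed boilerplate and the exact Create() parameter list is inferred from the hunk, so treat it as illustrative rather than as the patch's own code:

    ArenaPool pool;
    ArenaAllocator arena(&pool);
    // After this patch, the kX86 case news an X86JNIMacroAssembler in the arena.
    std::unique_ptr<JNIMacroAssembler<PointerSize::k32>> jni_asm =
        JNIMacroAssembler<PointerSize::k32>::Create(&arena, kX86);
    jni_asm->IncreaseFrameSize(32);  // emits addl ESP, -32 and bumps the CFA offset
    jni_asm->DecreaseFrameSize(32);  // emits addl ESP, 32 and restores the CFA offset
    jni_asm->FinalizeCode();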
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc index 0b1aa08a17..1acc90ca6f 100644 --- a/compiler/utils/jni_macro_assembler.cc +++ b/compiler/utils/jni_macro_assembler.cc @@ -33,7 +33,7 @@ #include "mips64/assembler_mips64.h" #endif #ifdef ART_ENABLE_CODEGEN_x86 -#include "x86/assembler_x86.h" +#include "x86/jni_macro_assembler_x86.h" #endif #ifdef ART_ENABLE_CODEGEN_x86_64 #include "x86_64/jni_macro_assembler_x86_64.h" @@ -72,7 +72,7 @@ MacroAsm32UniquePtr JNIMacroAssembler<PointerSize::k32>::Create( #endif #ifdef ART_ENABLE_CODEGEN_x86 case kX86: - return MacroAsm32UniquePtr(new (arena) x86::X86Assembler(arena)); + return MacroAsm32UniquePtr(new (arena) x86::X86JNIMacroAssembler(arena)); #endif default: LOG(FATAL) << "Unknown/unsupported 4B InstructionSet: " << instruction_set; diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index 89b3c3f4f1..f1a991574b 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -1943,484 +1943,6 @@ void X86Assembler::EmitGenericShift(int reg_or_opcode, EmitOperand(reg_or_opcode, operand); } -static dwarf::Reg DWARFReg(Register reg) { - return dwarf::Reg::X86Core(static_cast<int>(reg)); -} - -constexpr size_t kFramePointerSize = 4; - -void X86Assembler::BuildFrame(size_t frame_size, - ManagedRegister method_reg, - ArrayRef<const ManagedRegister> spill_regs, - const ManagedRegisterEntrySpills& entry_spills) { - DCHECK_EQ(buffer_.Size(), 0U); // Nothing emitted yet. - cfi_.SetCurrentCFAOffset(4); // Return address on stack. - CHECK_ALIGNED(frame_size, kStackAlignment); - int gpr_count = 0; - for (int i = spill_regs.size() - 1; i >= 0; --i) { - Register spill = spill_regs[i].AsX86().AsCpuRegister(); - pushl(spill); - gpr_count++; - cfi_.AdjustCFAOffset(kFramePointerSize); - cfi_.RelOffset(DWARFReg(spill), 0); - } - - // return address then method on stack. - int32_t adjust = frame_size - gpr_count * kFramePointerSize - - kFramePointerSize /*method*/ - - kFramePointerSize /*return address*/; - addl(ESP, Immediate(-adjust)); - cfi_.AdjustCFAOffset(adjust); - pushl(method_reg.AsX86().AsCpuRegister()); - cfi_.AdjustCFAOffset(kFramePointerSize); - DCHECK_EQ(static_cast<size_t>(cfi_.GetCurrentCFAOffset()), frame_size); - - for (size_t i = 0; i < entry_spills.size(); ++i) { - ManagedRegisterSpill spill = entry_spills.at(i); - if (spill.AsX86().IsCpuRegister()) { - int offset = frame_size + spill.getSpillOffset(); - movl(Address(ESP, offset), spill.AsX86().AsCpuRegister()); - } else { - DCHECK(spill.AsX86().IsXmmRegister()); - if (spill.getSize() == 8) { - movsd(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister()); - } else { - CHECK_EQ(spill.getSize(), 4); - movss(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister()); - } - } - } -} - -void X86Assembler::RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> spill_regs) { - CHECK_ALIGNED(frame_size, kStackAlignment); - cfi_.RememberState(); - // -kFramePointerSize for ArtMethod*. - int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize; - addl(ESP, Immediate(adjust)); - cfi_.AdjustCFAOffset(-adjust); - for (size_t i = 0; i < spill_regs.size(); ++i) { - Register spill = spill_regs[i].AsX86().AsCpuRegister(); - popl(spill); - cfi_.AdjustCFAOffset(-static_cast<int>(kFramePointerSize)); - cfi_.Restore(DWARFReg(spill)); - } - ret(); - // The CFI should be restored for any code that follows the exit block. 
- cfi_.RestoreState(); - cfi_.DefCFAOffset(frame_size); -} - -void X86Assembler::IncreaseFrameSize(size_t adjust) { - CHECK_ALIGNED(adjust, kStackAlignment); - addl(ESP, Immediate(-adjust)); - cfi_.AdjustCFAOffset(adjust); -} - -void X86Assembler::DecreaseFrameSize(size_t adjust) { - CHECK_ALIGNED(adjust, kStackAlignment); - addl(ESP, Immediate(adjust)); - cfi_.AdjustCFAOffset(-adjust); -} - -void X86Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) { - X86ManagedRegister src = msrc.AsX86(); - if (src.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (src.IsCpuRegister()) { - CHECK_EQ(4u, size); - movl(Address(ESP, offs), src.AsCpuRegister()); - } else if (src.IsRegisterPair()) { - CHECK_EQ(8u, size); - movl(Address(ESP, offs), src.AsRegisterPairLow()); - movl(Address(ESP, FrameOffset(offs.Int32Value()+4)), - src.AsRegisterPairHigh()); - } else if (src.IsX87Register()) { - if (size == 4) { - fstps(Address(ESP, offs)); - } else { - fstpl(Address(ESP, offs)); - } - } else { - CHECK(src.IsXmmRegister()); - if (size == 4) { - movss(Address(ESP, offs), src.AsXmmRegister()); - } else { - movsd(Address(ESP, offs), src.AsXmmRegister()); - } - } -} - -void X86Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { - X86ManagedRegister src = msrc.AsX86(); - CHECK(src.IsCpuRegister()); - movl(Address(ESP, dest), src.AsCpuRegister()); -} - -void X86Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { - X86ManagedRegister src = msrc.AsX86(); - CHECK(src.IsCpuRegister()); - movl(Address(ESP, dest), src.AsCpuRegister()); -} - -void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, - ManagedRegister) { - movl(Address(ESP, dest), Immediate(imm)); -} - -void X86Assembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - CHECK(scratch.IsCpuRegister()); - leal(scratch.AsCpuRegister(), Address(ESP, fr_offs)); - fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister()); -} - -void X86Assembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) { - fs()->movl(Address::Absolute(thr_offs), ESP); -} - -void X86Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/, - FrameOffset /*in_off*/, ManagedRegister /*scratch*/) { - UNIMPLEMENTED(FATAL); // this case only currently exists for ARM -} - -void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { - X86ManagedRegister dest = mdest.AsX86(); - if (dest.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (dest.IsCpuRegister()) { - CHECK_EQ(4u, size); - movl(dest.AsCpuRegister(), Address(ESP, src)); - } else if (dest.IsRegisterPair()) { - CHECK_EQ(8u, size); - movl(dest.AsRegisterPairLow(), Address(ESP, src)); - movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4))); - } else if (dest.IsX87Register()) { - if (size == 4) { - flds(Address(ESP, src)); - } else { - fldl(Address(ESP, src)); - } - } else { - CHECK(dest.IsXmmRegister()); - if (size == 4) { - movss(dest.AsXmmRegister(), Address(ESP, src)); - } else { - movsd(dest.AsXmmRegister(), Address(ESP, src)); - } - } -} - -void X86Assembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) { - X86ManagedRegister dest = mdest.AsX86(); - if (dest.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (dest.IsCpuRegister()) { - CHECK_EQ(4u, size); - fs()->movl(dest.AsCpuRegister(), Address::Absolute(src)); - } else if (dest.IsRegisterPair()) { - CHECK_EQ(8u, size); - 
fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src)); - fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4))); - } else if (dest.IsX87Register()) { - if (size == 4) { - fs()->flds(Address::Absolute(src)); - } else { - fs()->fldl(Address::Absolute(src)); - } - } else { - CHECK(dest.IsXmmRegister()); - if (size == 4) { - fs()->movss(dest.AsXmmRegister(), Address::Absolute(src)); - } else { - fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src)); - } - } -} - -void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister()); - movl(dest.AsCpuRegister(), Address(ESP, src)); -} - -void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, - bool unpoison_reference) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister() && dest.IsCpuRegister()); - movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs)); - if (unpoison_reference) { - MaybeUnpoisonHeapReference(dest.AsCpuRegister()); - } -} - -void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, - Offset offs) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister() && dest.IsCpuRegister()); - movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs)); -} - -void X86Assembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) { - X86ManagedRegister dest = mdest.AsX86(); - CHECK(dest.IsCpuRegister()); - fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs)); -} - -void X86Assembler::SignExtend(ManagedRegister mreg, size_t size) { - X86ManagedRegister reg = mreg.AsX86(); - CHECK(size == 1 || size == 2) << size; - CHECK(reg.IsCpuRegister()) << reg; - if (size == 1) { - movsxb(reg.AsCpuRegister(), reg.AsByteRegister()); - } else { - movsxw(reg.AsCpuRegister(), reg.AsCpuRegister()); - } -} - -void X86Assembler::ZeroExtend(ManagedRegister mreg, size_t size) { - X86ManagedRegister reg = mreg.AsX86(); - CHECK(size == 1 || size == 2) << size; - CHECK(reg.IsCpuRegister()) << reg; - if (size == 1) { - movzxb(reg.AsCpuRegister(), reg.AsByteRegister()); - } else { - movzxw(reg.AsCpuRegister(), reg.AsCpuRegister()); - } -} - -void X86Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) { - X86ManagedRegister dest = mdest.AsX86(); - X86ManagedRegister src = msrc.AsX86(); - if (!dest.Equals(src)) { - if (dest.IsCpuRegister() && src.IsCpuRegister()) { - movl(dest.AsCpuRegister(), src.AsCpuRegister()); - } else if (src.IsX87Register() && dest.IsXmmRegister()) { - // Pass via stack and pop X87 register - subl(ESP, Immediate(16)); - if (size == 4) { - CHECK_EQ(src.AsX87Register(), ST0); - fstps(Address(ESP, 0)); - movss(dest.AsXmmRegister(), Address(ESP, 0)); - } else { - CHECK_EQ(src.AsX87Register(), ST0); - fstpl(Address(ESP, 0)); - movsd(dest.AsXmmRegister(), Address(ESP, 0)); - } - addl(ESP, Immediate(16)); - } else { - // TODO: x87, SSE - UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src; - } - } -} - -void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - CHECK(scratch.IsCpuRegister()); - movl(scratch.AsCpuRegister(), Address(ESP, src)); - movl(Address(ESP, dest), scratch.AsCpuRegister()); -} - -void X86Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, - ThreadOffset32 thr_offs, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - 
CHECK(scratch.IsCpuRegister()); - fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs)); - Store(fr_offs, scratch, 4); -} - -void X86Assembler::CopyRawPtrToThread(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - CHECK(scratch.IsCpuRegister()); - Load(scratch, fr_offs, 4); - fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister()); -} - -void X86Assembler::Copy(FrameOffset dest, FrameOffset src, - ManagedRegister mscratch, - size_t size) { - X86ManagedRegister scratch = mscratch.AsX86(); - if (scratch.IsCpuRegister() && size == 8) { - Load(scratch, src, 4); - Store(dest, scratch, 4); - Load(scratch, FrameOffset(src.Int32Value() + 4), 4); - Store(FrameOffset(dest.Int32Value() + 4), scratch, 4); - } else { - Load(scratch, src, size); - Store(dest, scratch, size); - } -} - -void X86Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/, - ManagedRegister /*scratch*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL); -} - -void X86Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, - ManagedRegister scratch, size_t size) { - CHECK(scratch.IsNoRegister()); - CHECK_EQ(size, 4u); - pushl(Address(ESP, src)); - popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset)); -} - -void X86Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, - ManagedRegister mscratch, size_t size) { - Register scratch = mscratch.AsX86().AsCpuRegister(); - CHECK_EQ(size, 4u); - movl(scratch, Address(ESP, src_base)); - movl(scratch, Address(scratch, src_offset)); - movl(Address(ESP, dest), scratch); -} - -void X86Assembler::Copy(ManagedRegister dest, Offset dest_offset, - ManagedRegister src, Offset src_offset, - ManagedRegister scratch, size_t size) { - CHECK_EQ(size, 4u); - CHECK(scratch.IsNoRegister()); - pushl(Address(src.AsX86().AsCpuRegister(), src_offset)); - popl(Address(dest.AsX86().AsCpuRegister(), dest_offset)); -} - -void X86Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, - ManagedRegister mscratch, size_t size) { - Register scratch = mscratch.AsX86().AsCpuRegister(); - CHECK_EQ(size, 4u); - CHECK_EQ(dest.Int32Value(), src.Int32Value()); - movl(scratch, Address(ESP, src)); - pushl(Address(scratch, src_offset)); - popl(Address(scratch, dest_offset)); -} - -void X86Assembler::MemoryBarrier(ManagedRegister) { - mfence(); -} - -void X86Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg, - FrameOffset handle_scope_offset, - ManagedRegister min_reg, bool null_allowed) { - X86ManagedRegister out_reg = mout_reg.AsX86(); - X86ManagedRegister in_reg = min_reg.AsX86(); - CHECK(in_reg.IsCpuRegister()); - CHECK(out_reg.IsCpuRegister()); - VerifyObject(in_reg, null_allowed); - if (null_allowed) { - Label null_arg; - if (!out_reg.Equals(in_reg)) { - xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister()); - } - testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister()); - j(kZero, &null_arg); - leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset)); - Bind(&null_arg); - } else { - leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset)); - } -} - -void X86Assembler::CreateHandleScopeEntry(FrameOffset out_off, - FrameOffset handle_scope_offset, - ManagedRegister mscratch, - bool null_allowed) { - X86ManagedRegister scratch = mscratch.AsX86(); - CHECK(scratch.IsCpuRegister()); - if (null_allowed) { - Label null_arg; - movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset)); - 
testl(scratch.AsCpuRegister(), scratch.AsCpuRegister()); - j(kZero, &null_arg); - leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset)); - Bind(&null_arg); - } else { - leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset)); - } - Store(out_off, scratch, 4); -} - -// Given a handle scope entry, load the associated reference. -void X86Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg, - ManagedRegister min_reg) { - X86ManagedRegister out_reg = mout_reg.AsX86(); - X86ManagedRegister in_reg = min_reg.AsX86(); - CHECK(out_reg.IsCpuRegister()); - CHECK(in_reg.IsCpuRegister()); - Label null_arg; - if (!out_reg.Equals(in_reg)) { - xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister()); - } - testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister()); - j(kZero, &null_arg); - movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0)); - Bind(&null_arg); -} - -void X86Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { - // TODO: not validating references -} - -void X86Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { - // TODO: not validating references -} - -void X86Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) { - X86ManagedRegister base = mbase.AsX86(); - CHECK(base.IsCpuRegister()); - call(Address(base.AsCpuRegister(), offset.Int32Value())); - // TODO: place reference map on call -} - -void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { - Register scratch = mscratch.AsX86().AsCpuRegister(); - movl(scratch, Address(ESP, base)); - call(Address(scratch, offset)); -} - -void X86Assembler::CallFromThread(ThreadOffset32 offset, ManagedRegister /*mscratch*/) { - fs()->call(Address::Absolute(offset)); -} - -void X86Assembler::GetCurrentThread(ManagedRegister tr) { - fs()->movl(tr.AsX86().AsCpuRegister(), - Address::Absolute(Thread::SelfOffset<kX86PointerSize>())); -} - -void X86Assembler::GetCurrentThread(FrameOffset offset, - ManagedRegister mscratch) { - X86ManagedRegister scratch = mscratch.AsX86(); - fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>())); - movl(Address(ESP, offset), scratch.AsCpuRegister()); -} - -void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) { - X86ExceptionSlowPath* slow = new (GetArena()) X86ExceptionSlowPath(stack_adjust); - buffer_.EnqueueSlowPath(slow); - fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0)); - j(kNotEqual, slow->Entry()); -} - -void X86ExceptionSlowPath::Emit(Assembler *sasm) { - X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm); -#define __ sp_asm-> - __ Bind(&entry_); - // Note: the return value is dead - if (stack_adjust_ != 0) { // Fix up the frame. - __ DecreaseFrameSize(stack_adjust_); - } - // Pass exception as argument in EAX - __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>())); - __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException))); - // this call should never return - __ int3(); -#undef __ -} - void X86Assembler::AddConstantArea() { ArrayRef<const int32_t> area = constant_area_.GetBuffer(); // Generate the data for the literal area. 
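Everything deleted from assembler_x86.cc above is the JNI calling-convention layer: frame build/teardown, spill stores and loads, thread-offset accesses, handle scope plumbing, and the exception-poll slow path. It reappears essentially unchanged in the new jni_macro_assembler_x86.cc below, with each low-level emit routed through the wrapped assembler via the "#define __ asm_." macro. For concreteness, the CFA bookkeeping in the moved BuildFrame works out as follows (a worked example with hypothetical values, not part of the patch):

    // frame_size = 64 (kStackAlignment-aligned), two callee-save GPRs spilled:
    //   pushl spill (x2)  -> CFA offset: 4 (return address) + 2*4      = 12
    //   adjust = 64 - 2*4 /*gprs*/ - 4 /*method*/ - 4 /*return addr*/  = 48
    //   addl ESP, -48     -> CFA offset: 12 + 48                       = 60
    //   pushl method_reg  -> CFA offset: 60 + 4                        = 64 == frame_size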
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index b6442feb69..92a92a58b9 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -29,7 +29,6 @@ #include "offsets.h" #include "utils/array_ref.h" #include "utils/assembler.h" -#include "utils/jni_macro_assembler.h" namespace art { namespace x86 { @@ -304,18 +303,11 @@ class ConstantArea { ArenaVector<int32_t> buffer_; }; -class X86Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k32> { +class X86Assembler FINAL : public Assembler { public: explicit X86Assembler(ArenaAllocator* arena) : Assembler(arena), constant_area_(arena) {} virtual ~X86Assembler() {} - size_t CodeSize() const OVERRIDE { return Assembler::CodeSize(); } - DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); } - void FinalizeCode() { Assembler::FinalizeCode(); } - void FinalizeInstructions(const MemoryRegion& region) { - Assembler::FinalizeInstructions(region); - } - /* * Emit Machine Instructions. */ @@ -640,123 +632,6 @@ class X86Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSiz void Bind(NearLabel* label); // - // Overridden common assembler high-level functionality - // - - // Emit code that will create an activation on the stack - void BuildFrame(size_t frame_size, - ManagedRegister method_reg, - ArrayRef<const ManagedRegister> callee_save_regs, - const ManagedRegisterEntrySpills& entry_spills) OVERRIDE; - - // Emit code that will remove an activation from the stack - void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) - OVERRIDE; - - void IncreaseFrameSize(size_t adjust) OVERRIDE; - void DecreaseFrameSize(size_t adjust) OVERRIDE; - - // Store routines - void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE; - void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE; - void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE; - - void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE; - - void StoreStackOffsetToThread(ThreadOffset32 thr_offs, - FrameOffset fr_offs, - ManagedRegister scratch) OVERRIDE; - - void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE; - - void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off, - ManagedRegister scratch) OVERRIDE; - - // Load routines - void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE; - - void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE; - - void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; - - void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, - bool unpoison_reference) OVERRIDE; - - void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; - - void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE; - - // Copying routines - void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE; - - void CopyRawPtrFromThread(FrameOffset fr_offs, - ThreadOffset32 thr_offs, - ManagedRegister scratch) OVERRIDE; - - void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) - OVERRIDE; - - void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE; - - void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE; - - void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch, - size_t size) 
OVERRIDE; - - void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch, - size_t size) OVERRIDE; - - void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch, - size_t size) OVERRIDE; - - void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset, - ManagedRegister scratch, size_t size) OVERRIDE; - - void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, - ManagedRegister scratch, size_t size) OVERRIDE; - - void MemoryBarrier(ManagedRegister) OVERRIDE; - - // Sign extension - void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE; - - // Zero extension - void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE; - - // Exploit fast access in managed code to Thread::Current() - void GetCurrentThread(ManagedRegister tr) OVERRIDE; - void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE; - - // Set up out_reg to hold a Object** into the handle scope, or to be null if the - // value is null and null_allowed. in_reg holds a possibly stale reference - // that can be used to avoid loading the handle scope entry to see if the value is - // null. - void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, - ManagedRegister in_reg, bool null_allowed) OVERRIDE; - - // Set up out_off to hold a Object** into the handle scope, or to be null if the - // value is null and null_allowed. - void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, - ManagedRegister scratch, bool null_allowed) OVERRIDE; - - // src holds a handle scope entry (Object**) load this into dst - void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE; - - // Heap::VerifyObject on src. In some cases (such as a reference to this) we - // know that src may not be null. - void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE; - void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE; - - // Call to address held at [base+offset] - void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE; - void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE; - void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE; - - // Generate code to check if Thread::Current()->exception_ is non-null - // and branch to a ExceptionSlowPath if it is. - void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE; - - // // Heap poisoning. // @@ -853,15 +728,6 @@ inline void X86Assembler::EmitOperandSizeOverride() { EmitUint8(0x66); } -// Slowpath entered when Thread::Current()->_exception is non-null -class X86ExceptionSlowPath FINAL : public SlowPath { - public: - explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {} - virtual void Emit(Assembler *sp_asm) OVERRIDE; - private: - const size_t stack_adjust_; -}; - } // namespace x86 } // namespace art diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc new file mode 100644 index 0000000000..77af885646 --- /dev/null +++ b/compiler/utils/x86/jni_macro_assembler_x86.cc @@ -0,0 +1,541 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "jni_macro_assembler_x86.h" + +#include "utils/assembler.h" +#include "base/casts.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "thread.h" + +namespace art { +namespace x86 { + +// Slowpath entered when Thread::Current()->_exception is non-null +class X86ExceptionSlowPath FINAL : public SlowPath { + public: + explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {} + virtual void Emit(Assembler *sp_asm) OVERRIDE; + private: + const size_t stack_adjust_; +}; + +static dwarf::Reg DWARFReg(Register reg) { + return dwarf::Reg::X86Core(static_cast<int>(reg)); +} + +constexpr size_t kFramePointerSize = 4; + +#define __ asm_. + +void X86JNIMacroAssembler::BuildFrame(size_t frame_size, + ManagedRegister method_reg, + ArrayRef<const ManagedRegister> spill_regs, + const ManagedRegisterEntrySpills& entry_spills) { + DCHECK_EQ(CodeSize(), 0U); // Nothing emitted yet. + cfi().SetCurrentCFAOffset(4); // Return address on stack. + CHECK_ALIGNED(frame_size, kStackAlignment); + int gpr_count = 0; + for (int i = spill_regs.size() - 1; i >= 0; --i) { + Register spill = spill_regs[i].AsX86().AsCpuRegister(); + __ pushl(spill); + gpr_count++; + cfi().AdjustCFAOffset(kFramePointerSize); + cfi().RelOffset(DWARFReg(spill), 0); + } + + // return address then method on stack. + int32_t adjust = frame_size - gpr_count * kFramePointerSize - + kFramePointerSize /*method*/ - + kFramePointerSize /*return address*/; + __ addl(ESP, Immediate(-adjust)); + cfi().AdjustCFAOffset(adjust); + __ pushl(method_reg.AsX86().AsCpuRegister()); + cfi().AdjustCFAOffset(kFramePointerSize); + DCHECK_EQ(static_cast<size_t>(cfi().GetCurrentCFAOffset()), frame_size); + + for (size_t i = 0; i < entry_spills.size(); ++i) { + ManagedRegisterSpill spill = entry_spills.at(i); + if (spill.AsX86().IsCpuRegister()) { + int offset = frame_size + spill.getSpillOffset(); + __ movl(Address(ESP, offset), spill.AsX86().AsCpuRegister()); + } else { + DCHECK(spill.AsX86().IsXmmRegister()); + if (spill.getSize() == 8) { + __ movsd(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister()); + } else { + CHECK_EQ(spill.getSize(), 4); + __ movss(Address(ESP, frame_size + spill.getSpillOffset()), spill.AsX86().AsXmmRegister()); + } + } + } +} + +void X86JNIMacroAssembler::RemoveFrame(size_t frame_size, + ArrayRef<const ManagedRegister> spill_regs) { + CHECK_ALIGNED(frame_size, kStackAlignment); + cfi().RememberState(); + // -kFramePointerSize for ArtMethod*. + int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize; + __ addl(ESP, Immediate(adjust)); + cfi().AdjustCFAOffset(-adjust); + for (size_t i = 0; i < spill_regs.size(); ++i) { + Register spill = spill_regs[i].AsX86().AsCpuRegister(); + __ popl(spill); + cfi().AdjustCFAOffset(-static_cast<int>(kFramePointerSize)); + cfi().Restore(DWARFReg(spill)); + } + __ ret(); + // The CFI should be restored for any code that follows the exit block. 
+ cfi().RestoreState(); + cfi().DefCFAOffset(frame_size); +} + +void X86JNIMacroAssembler::IncreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + __ addl(ESP, Immediate(-adjust)); + cfi().AdjustCFAOffset(adjust); +} + +static void DecreaseFrameSizeImpl(X86Assembler* assembler, size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + assembler->addl(ESP, Immediate(adjust)); + assembler->cfi().AdjustCFAOffset(-adjust); +} + +void X86JNIMacroAssembler::DecreaseFrameSize(size_t adjust) { + DecreaseFrameSizeImpl(&asm_, adjust); +} + +void X86JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) { + X86ManagedRegister src = msrc.AsX86(); + if (src.IsNoRegister()) { + CHECK_EQ(0u, size); + } else if (src.IsCpuRegister()) { + CHECK_EQ(4u, size); + __ movl(Address(ESP, offs), src.AsCpuRegister()); + } else if (src.IsRegisterPair()) { + CHECK_EQ(8u, size); + __ movl(Address(ESP, offs), src.AsRegisterPairLow()); + __ movl(Address(ESP, FrameOffset(offs.Int32Value()+4)), src.AsRegisterPairHigh()); + } else if (src.IsX87Register()) { + if (size == 4) { + __ fstps(Address(ESP, offs)); + } else { + __ fstpl(Address(ESP, offs)); + } + } else { + CHECK(src.IsXmmRegister()); + if (size == 4) { + __ movss(Address(ESP, offs), src.AsXmmRegister()); + } else { + __ movsd(Address(ESP, offs), src.AsXmmRegister()); + } + } +} + +void X86JNIMacroAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { + X86ManagedRegister src = msrc.AsX86(); + CHECK(src.IsCpuRegister()); + __ movl(Address(ESP, dest), src.AsCpuRegister()); +} + +void X86JNIMacroAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { + X86ManagedRegister src = msrc.AsX86(); + CHECK(src.IsCpuRegister()); + __ movl(Address(ESP, dest), src.AsCpuRegister()); +} + +void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister) { + __ movl(Address(ESP, dest), Immediate(imm)); +} + +void X86JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { + X86ManagedRegister scratch = mscratch.AsX86(); + CHECK(scratch.IsCpuRegister()); + __ leal(scratch.AsCpuRegister(), Address(ESP, fr_offs)); + __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister()); +} + +void X86JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) { + __ fs()->movl(Address::Absolute(thr_offs), ESP); +} + +void X86JNIMacroAssembler::StoreSpanning(FrameOffset /*dst*/, + ManagedRegister /*src*/, + FrameOffset /*in_off*/, + ManagedRegister /*scratch*/) { + UNIMPLEMENTED(FATAL); // this case only currently exists for ARM +} + +void X86JNIMacroAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { + X86ManagedRegister dest = mdest.AsX86(); + if (dest.IsNoRegister()) { + CHECK_EQ(0u, size); + } else if (dest.IsCpuRegister()) { + CHECK_EQ(4u, size); + __ movl(dest.AsCpuRegister(), Address(ESP, src)); + } else if (dest.IsRegisterPair()) { + CHECK_EQ(8u, size); + __ movl(dest.AsRegisterPairLow(), Address(ESP, src)); + __ movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4))); + } else if (dest.IsX87Register()) { + if (size == 4) { + __ flds(Address(ESP, src)); + } else { + __ fldl(Address(ESP, src)); + } + } else { + CHECK(dest.IsXmmRegister()); + if (size == 4) { + __ movss(dest.AsXmmRegister(), Address(ESP, src)); + } else { + __ movsd(dest.AsXmmRegister(), Address(ESP, src)); + } + } +} + +void X86JNIMacroAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, 
size_t size) { + X86ManagedRegister dest = mdest.AsX86(); + if (dest.IsNoRegister()) { + CHECK_EQ(0u, size); + } else if (dest.IsCpuRegister()) { + CHECK_EQ(4u, size); + __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(src)); + } else if (dest.IsRegisterPair()) { + CHECK_EQ(8u, size); + __ fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src)); + __ fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset32(src.Int32Value()+4))); + } else if (dest.IsX87Register()) { + if (size == 4) { + __ fs()->flds(Address::Absolute(src)); + } else { + __ fs()->fldl(Address::Absolute(src)); + } + } else { + CHECK(dest.IsXmmRegister()); + if (size == 4) { + __ fs()->movss(dest.AsXmmRegister(), Address::Absolute(src)); + } else { + __ fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src)); + } + } +} + +void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) { + X86ManagedRegister dest = mdest.AsX86(); + CHECK(dest.IsCpuRegister()); + __ movl(dest.AsCpuRegister(), Address(ESP, src)); +} + +void X86JNIMacroAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs, + bool unpoison_reference) { + X86ManagedRegister dest = mdest.AsX86(); + CHECK(dest.IsCpuRegister() && dest.IsCpuRegister()); + __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs)); + if (unpoison_reference) { + __ MaybeUnpoisonHeapReference(dest.AsCpuRegister()); + } +} + +void X86JNIMacroAssembler::LoadRawPtr(ManagedRegister mdest, + ManagedRegister base, + Offset offs) { + X86ManagedRegister dest = mdest.AsX86(); + CHECK(dest.IsCpuRegister() && dest.IsCpuRegister()); + __ movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs)); +} + +void X86JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) { + X86ManagedRegister dest = mdest.AsX86(); + CHECK(dest.IsCpuRegister()); + __ fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs)); +} + +void X86JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) { + X86ManagedRegister reg = mreg.AsX86(); + CHECK(size == 1 || size == 2) << size; + CHECK(reg.IsCpuRegister()) << reg; + if (size == 1) { + __ movsxb(reg.AsCpuRegister(), reg.AsByteRegister()); + } else { + __ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister()); + } +} + +void X86JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) { + X86ManagedRegister reg = mreg.AsX86(); + CHECK(size == 1 || size == 2) << size; + CHECK(reg.IsCpuRegister()) << reg; + if (size == 1) { + __ movzxb(reg.AsCpuRegister(), reg.AsByteRegister()); + } else { + __ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister()); + } +} + +void X86JNIMacroAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) { + X86ManagedRegister dest = mdest.AsX86(); + X86ManagedRegister src = msrc.AsX86(); + if (!dest.Equals(src)) { + if (dest.IsCpuRegister() && src.IsCpuRegister()) { + __ movl(dest.AsCpuRegister(), src.AsCpuRegister()); + } else if (src.IsX87Register() && dest.IsXmmRegister()) { + // Pass via stack and pop X87 register + __ subl(ESP, Immediate(16)); + if (size == 4) { + CHECK_EQ(src.AsX87Register(), ST0); + __ fstps(Address(ESP, 0)); + __ movss(dest.AsXmmRegister(), Address(ESP, 0)); + } else { + CHECK_EQ(src.AsX87Register(), ST0); + __ fstpl(Address(ESP, 0)); + __ movsd(dest.AsXmmRegister(), Address(ESP, 0)); + } + __ addl(ESP, Immediate(16)); + } else { + // TODO: x87, SSE + UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src; + } + } +} + +void X86JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset 
src, ManagedRegister mscratch) { + X86ManagedRegister scratch = mscratch.AsX86(); + CHECK(scratch.IsCpuRegister()); + __ movl(scratch.AsCpuRegister(), Address(ESP, src)); + __ movl(Address(ESP, dest), scratch.AsCpuRegister()); +} + +void X86JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset32 thr_offs, + ManagedRegister mscratch) { + X86ManagedRegister scratch = mscratch.AsX86(); + CHECK(scratch.IsCpuRegister()); + __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs)); + Store(fr_offs, scratch, 4); +} + +void X86JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { + X86ManagedRegister scratch = mscratch.AsX86(); + CHECK(scratch.IsCpuRegister()); + Load(scratch, fr_offs, 4); + __ fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister()); +} + +void X86JNIMacroAssembler::Copy(FrameOffset dest, FrameOffset src, + ManagedRegister mscratch, + size_t size) { + X86ManagedRegister scratch = mscratch.AsX86(); + if (scratch.IsCpuRegister() && size == 8) { + Load(scratch, src, 4); + Store(dest, scratch, 4); + Load(scratch, FrameOffset(src.Int32Value() + 4), 4); + Store(FrameOffset(dest.Int32Value() + 4), scratch, 4); + } else { + Load(scratch, src, size); + Store(dest, scratch, size); + } +} + +void X86JNIMacroAssembler::Copy(FrameOffset /*dst*/, + ManagedRegister /*src_base*/, + Offset /*src_offset*/, + ManagedRegister /*scratch*/, + size_t /*size*/) { + UNIMPLEMENTED(FATAL); +} + +void X86JNIMacroAssembler::Copy(ManagedRegister dest_base, + Offset dest_offset, + FrameOffset src, + ManagedRegister scratch, + size_t size) { + CHECK(scratch.IsNoRegister()); + CHECK_EQ(size, 4u); + __ pushl(Address(ESP, src)); + __ popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset)); +} + +void X86JNIMacroAssembler::Copy(FrameOffset dest, + FrameOffset src_base, + Offset src_offset, + ManagedRegister mscratch, + size_t size) { + Register scratch = mscratch.AsX86().AsCpuRegister(); + CHECK_EQ(size, 4u); + __ movl(scratch, Address(ESP, src_base)); + __ movl(scratch, Address(scratch, src_offset)); + __ movl(Address(ESP, dest), scratch); +} + +void X86JNIMacroAssembler::Copy(ManagedRegister dest, + Offset dest_offset, + ManagedRegister src, + Offset src_offset, + ManagedRegister scratch, + size_t size) { + CHECK_EQ(size, 4u); + CHECK(scratch.IsNoRegister()); + __ pushl(Address(src.AsX86().AsCpuRegister(), src_offset)); + __ popl(Address(dest.AsX86().AsCpuRegister(), dest_offset)); +} + +void X86JNIMacroAssembler::Copy(FrameOffset dest, + Offset dest_offset, + FrameOffset src, + Offset src_offset, + ManagedRegister mscratch, + size_t size) { + Register scratch = mscratch.AsX86().AsCpuRegister(); + CHECK_EQ(size, 4u); + CHECK_EQ(dest.Int32Value(), src.Int32Value()); + __ movl(scratch, Address(ESP, src)); + __ pushl(Address(scratch, src_offset)); + __ popl(Address(scratch, dest_offset)); +} + +void X86JNIMacroAssembler::MemoryBarrier(ManagedRegister) { + __ mfence(); +} + +void X86JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg, + FrameOffset handle_scope_offset, + ManagedRegister min_reg, + bool null_allowed) { + X86ManagedRegister out_reg = mout_reg.AsX86(); + X86ManagedRegister in_reg = min_reg.AsX86(); + CHECK(in_reg.IsCpuRegister()); + CHECK(out_reg.IsCpuRegister()); + VerifyObject(in_reg, null_allowed); + if (null_allowed) { + Label null_arg; + if (!out_reg.Equals(in_reg)) { + __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister()); + } + __ testl(in_reg.AsCpuRegister(), 
in_reg.AsCpuRegister()); + __ j(kZero, &null_arg); + __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset)); + __ Bind(&null_arg); + } else { + __ leal(out_reg.AsCpuRegister(), Address(ESP, handle_scope_offset)); + } +} + +void X86JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off, + FrameOffset handle_scope_offset, + ManagedRegister mscratch, + bool null_allowed) { + X86ManagedRegister scratch = mscratch.AsX86(); + CHECK(scratch.IsCpuRegister()); + if (null_allowed) { + Label null_arg; + __ movl(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset)); + __ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister()); + __ j(kZero, &null_arg); + __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset)); + __ Bind(&null_arg); + } else { + __ leal(scratch.AsCpuRegister(), Address(ESP, handle_scope_offset)); + } + Store(out_off, scratch, 4); +} + +// Given a handle scope entry, load the associated reference. +void X86JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg, + ManagedRegister min_reg) { + X86ManagedRegister out_reg = mout_reg.AsX86(); + X86ManagedRegister in_reg = min_reg.AsX86(); + CHECK(out_reg.IsCpuRegister()); + CHECK(in_reg.IsCpuRegister()); + Label null_arg; + if (!out_reg.Equals(in_reg)) { + __ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister()); + } + __ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister()); + __ j(kZero, &null_arg); + __ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0)); + __ Bind(&null_arg); +} + +void X86JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { + // TODO: not validating references +} + +void X86JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { + // TODO: not validating references +} + +void X86JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) { + X86ManagedRegister base = mbase.AsX86(); + CHECK(base.IsCpuRegister()); + __ call(Address(base.AsCpuRegister(), offset.Int32Value())); + // TODO: place reference map on call +} + +void X86JNIMacroAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { + Register scratch = mscratch.AsX86().AsCpuRegister(); + __ movl(scratch, Address(ESP, base)); + __ call(Address(scratch, offset)); +} + +void X86JNIMacroAssembler::CallFromThread(ThreadOffset32 offset, ManagedRegister /*mscratch*/) { + __ fs()->call(Address::Absolute(offset)); +} + +void X86JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) { + __ fs()->movl(tr.AsX86().AsCpuRegister(), + Address::Absolute(Thread::SelfOffset<kX86PointerSize>())); +} + +void X86JNIMacroAssembler::GetCurrentThread(FrameOffset offset, + ManagedRegister mscratch) { + X86ManagedRegister scratch = mscratch.AsX86(); + __ fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset<kX86PointerSize>())); + __ movl(Address(ESP, offset), scratch.AsCpuRegister()); +} + +void X86JNIMacroAssembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) { + X86ExceptionSlowPath* slow = new (__ GetArena()) X86ExceptionSlowPath(stack_adjust); + __ GetBuffer()->EnqueueSlowPath(slow); + __ fs()->cmpl(Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>()), Immediate(0)); + __ j(kNotEqual, slow->Entry()); +} + +#undef __ + +void X86ExceptionSlowPath::Emit(Assembler *sasm) { + X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm); +#define __ sp_asm-> + __ Bind(&entry_); + // Note: the return value is dead + if (stack_adjust_ != 0) { // Fix up the frame. 
+ DecreaseFrameSizeImpl(sp_asm, stack_adjust_); + } + // Pass exception as argument in EAX + __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset<kX86PointerSize>())); + __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86PointerSize, pDeliverException))); + // this call should never return + __ int3(); +#undef __ +} + +} // namespace x86 +} // namespace art diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h new file mode 100644 index 0000000000..3f07ede865 --- /dev/null +++ b/compiler/utils/x86/jni_macro_assembler_x86.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_ +#define ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_ + +#include <vector> + +#include "assembler_x86.h" +#include "base/arena_containers.h" +#include "base/enums.h" +#include "base/macros.h" +#include "offsets.h" +#include "utils/array_ref.h" +#include "utils/jni_macro_assembler.h" + +namespace art { +namespace x86 { + +class X86JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32> { + public: + explicit X86JNIMacroAssembler(ArenaAllocator* arena) : JNIMacroAssemblerFwd(arena) {} + virtual ~X86JNIMacroAssembler() {} + + // + // Overridden common assembler high-level functionality + // + + // Emit code that will create an activation on the stack + void BuildFrame(size_t frame_size, + ManagedRegister method_reg, + ArrayRef<const ManagedRegister> callee_save_regs, + const ManagedRegisterEntrySpills& entry_spills) OVERRIDE; + + // Emit code that will remove an activation from the stack + void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) + OVERRIDE; + + void IncreaseFrameSize(size_t adjust) OVERRIDE; + void DecreaseFrameSize(size_t adjust) OVERRIDE; + + // Store routines + void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE; + void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE; + void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE; + + void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE; + + void StoreStackOffsetToThread(ThreadOffset32 thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch) OVERRIDE; + + void StoreStackPointerToThread(ThreadOffset32 thr_offs) OVERRIDE; + + void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off, + ManagedRegister scratch) OVERRIDE; + + // Load routines + void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE; + + void LoadFromThread(ManagedRegister dest, ThreadOffset32 src, size_t size) OVERRIDE; + + void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; + + void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, + bool unpoison_reference) OVERRIDE; + + void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; + + void 
LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset32 offs) OVERRIDE; + + // Copying routines + void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE; + + void CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset32 thr_offs, + ManagedRegister scratch) OVERRIDE; + + void CopyRawPtrToThread(ThreadOffset32 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) + OVERRIDE; + + void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE; + + void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE; + + void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch, + size_t size) OVERRIDE; + + void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch, + size_t size) OVERRIDE; + + void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch, + size_t size) OVERRIDE; + + void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset, + ManagedRegister scratch, size_t size) OVERRIDE; + + void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, + ManagedRegister scratch, size_t size) OVERRIDE; + + void MemoryBarrier(ManagedRegister) OVERRIDE; + + // Sign extension + void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE; + + // Zero extension + void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE; + + // Exploit fast access in managed code to Thread::Current() + void GetCurrentThread(ManagedRegister tr) OVERRIDE; + void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE; + + // Set up out_reg to hold a Object** into the handle scope, or to be null if the + // value is null and null_allowed. in_reg holds a possibly stale reference + // that can be used to avoid loading the handle scope entry to see if the value is + // null. + void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset, + ManagedRegister in_reg, bool null_allowed) OVERRIDE; + + // Set up out_off to hold a Object** into the handle scope, or to be null if the + // value is null and null_allowed. + void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, + ManagedRegister scratch, bool null_allowed) OVERRIDE; + + // src holds a handle scope entry (Object**) load this into dst + void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE; + + // Heap::VerifyObject on src. In some cases (such as a reference to this) we + // know that src may not be null. + void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE; + void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE; + + // Call to address held at [base+offset] + void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE; + void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE; + void CallFromThread(ThreadOffset32 offset, ManagedRegister scratch) OVERRIDE; + + // Generate code to check if Thread::Current()->exception_ is non-null + // and branch to a ExceptionSlowPath if it is. + void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE; + + private: + DISALLOW_COPY_AND_ASSIGN(X86JNIMacroAssembler); +}; + +} // namespace x86 +} // namespace art + +#endif // ART_COMPILER_UTILS_X86_JNI_MACRO_ASSEMBLER_X86_H_ |
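Note that X86JNIMacroAssembler derives from JNIMacroAssemblerFwd<X86Assembler, PointerSize::k32>, which supplies the generic plumbing (CodeSize, cfi, FinalizeCode, FinalizeInstructions) that the shims deleted from assembler_x86.h used to forward. A rough sketch of that template's shape, inferred from its usage here as an assumption; the real definition lives in utils/jni_macro_assembler.h:

    template <typename T, PointerSize kPointerSize>
    class JNIMacroAssemblerFwd : public JNIMacroAssembler<kPointerSize> {
     public:
      void FinalizeCode() OVERRIDE { asm_.FinalizeCode(); }
      size_t CodeSize() const OVERRIDE { return asm_.CodeSize(); }
      void FinalizeInstructions(const MemoryRegion& region) OVERRIDE {
        asm_.FinalizeInstructions(region);
      }
      DebugFrameOpCodeWriterForAssembler& cfi() OVERRIDE { return asm_.cfi(); }

     protected:
      explicit JNIMacroAssemblerFwd(ArenaAllocator* arena) : asm_(arena) {}
      T asm_;  // the wrapped assembler, driven through the "#define __ asm_." macro
    };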