Diffstat (limited to 'compiler/utils/arm64')
| -rw-r--r-- | compiler/utils/arm64/assembler_arm64.cc           | 661 |
| -rw-r--r-- | compiler/utils/arm64/assembler_arm64.h            | 163 |
| -rw-r--r-- | compiler/utils/arm64/jni_macro_assembler_arm64.cc | 754 |
| -rw-r--r-- | compiler/utils/arm64/jni_macro_assembler_arm64.h  | 230 |
4 files changed, 989 insertions, 819 deletions
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc index 53685bfa53..22221e752a 100644 --- a/compiler/utils/arm64/assembler_arm64.cc +++ b/compiler/utils/arm64/assembler_arm64.cc @@ -32,9 +32,6 @@ namespace arm64 { #endif void Arm64Assembler::FinalizeCode() { - for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) { - EmitExceptionPoll(exception.get()); - } ___ FinalizeCode(); } @@ -52,254 +49,6 @@ void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) { region.CopyFrom(0, from); } -void Arm64Assembler::GetCurrentThread(ManagedRegister tr) { - ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR)); -} - -void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) { - StoreToOffset(TR, SP, offset.Int32Value()); -} - -// See Arm64 PCS Section 5.2.2.1. -void Arm64Assembler::IncreaseFrameSize(size_t adjust) { - CHECK_ALIGNED(adjust, kStackAlignment); - AddConstant(SP, -adjust); - cfi().AdjustCFAOffset(adjust); -} - -// See Arm64 PCS Section 5.2.2.1. -void Arm64Assembler::DecreaseFrameSize(size_t adjust) { - CHECK_ALIGNED(adjust, kStackAlignment); - AddConstant(SP, adjust); - cfi().AdjustCFAOffset(-adjust); -} - -void Arm64Assembler::AddConstant(XRegister rd, int32_t value, Condition cond) { - AddConstant(rd, rd, value, cond); -} - -void Arm64Assembler::AddConstant(XRegister rd, XRegister rn, int32_t value, - Condition cond) { - if ((cond == al) || (cond == nv)) { - // VIXL macro-assembler handles all variants. - ___ Add(reg_x(rd), reg_x(rn), value); - } else { - // temp = rd + value - // rd = cond ? temp : rn - UseScratchRegisterScope temps(&vixl_masm_); - temps.Exclude(reg_x(rd), reg_x(rn)); - Register temp = temps.AcquireX(); - ___ Add(temp, reg_x(rn), value); - ___ Csel(reg_x(rd), temp, reg_x(rd), cond); - } -} - -void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source, - XRegister base, int32_t offset) { - switch (type) { - case kStoreByte: - ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset)); - break; - case kStoreHalfword: - ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset)); - break; - case kStoreWord: - ___ Str(reg_w(source), MEM_OP(reg_x(base), offset)); - break; - default: - LOG(FATAL) << "UNREACHABLE"; - } -} - -void Arm64Assembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) { - CHECK_NE(source, SP); - ___ Str(reg_x(source), MEM_OP(reg_x(base), offset)); -} - -void Arm64Assembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) { - ___ Str(reg_s(source), MEM_OP(reg_x(base), offset)); -} - -void Arm64Assembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) { - ___ Str(reg_d(source), MEM_OP(reg_x(base), offset)); -} - -void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) { - Arm64ManagedRegister src = m_src.AsArm64(); - if (src.IsNoRegister()) { - CHECK_EQ(0u, size); - } else if (src.IsWRegister()) { - CHECK_EQ(4u, size); - StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value()); - } else if (src.IsXRegister()) { - CHECK_EQ(8u, size); - StoreToOffset(src.AsXRegister(), SP, offs.Int32Value()); - } else if (src.IsSRegister()) { - StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value()); - } else { - CHECK(src.IsDRegister()) << src; - StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value()); - } -} - -void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) { - Arm64ManagedRegister src = m_src.AsArm64(); - CHECK(src.IsXRegister()) << src; - 
StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP, - offs.Int32Value()); -} - -void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) { - Arm64ManagedRegister src = m_src.AsArm64(); - CHECK(src.IsXRegister()) << src; - StoreToOffset(src.AsXRegister(), SP, offs.Int32Value()); -} - -void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm, - ManagedRegister m_scratch) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - CHECK(scratch.IsXRegister()) << scratch; - LoadImmediate(scratch.AsXRegister(), imm); - StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, - offs.Int32Value()); -} - -void Arm64Assembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs, - FrameOffset fr_offs, - ManagedRegister m_scratch) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - CHECK(scratch.IsXRegister()) << scratch; - AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value()); - StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value()); -} - -void Arm64Assembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) { - UseScratchRegisterScope temps(&vixl_masm_); - Register temp = temps.AcquireX(); - ___ Mov(temp, reg_x(SP)); - ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value())); -} - -void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source, - FrameOffset in_off, ManagedRegister m_scratch) { - Arm64ManagedRegister source = m_source.AsArm64(); - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value()); - LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value()); - StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8); -} - -// Load routines. -void Arm64Assembler::LoadImmediate(XRegister dest, int32_t value, - Condition cond) { - if ((cond == al) || (cond == nv)) { - ___ Mov(reg_x(dest), value); - } else { - // temp = value - // rd = cond ? temp : rd - if (value != 0) { - UseScratchRegisterScope temps(&vixl_masm_); - temps.Exclude(reg_x(dest)); - Register temp = temps.AcquireX(); - ___ Mov(temp, value); - ___ Csel(reg_x(dest), temp, reg_x(dest), cond); - } else { - ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond); - } - } -} - -void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest, - XRegister base, int32_t offset) { - switch (type) { - case kLoadSignedByte: - ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset)); - break; - case kLoadSignedHalfword: - ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset)); - break; - case kLoadUnsignedByte: - ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset)); - break; - case kLoadUnsignedHalfword: - ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset)); - break; - case kLoadWord: - ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset)); - break; - default: - LOG(FATAL) << "UNREACHABLE"; - } -} - -// Note: We can extend this member by adding load type info - see -// sign extended A64 load variants. 
-void Arm64Assembler::LoadFromOffset(XRegister dest, XRegister base, - int32_t offset) { - CHECK_NE(dest, SP); - ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset)); -} - -void Arm64Assembler::LoadSFromOffset(SRegister dest, XRegister base, - int32_t offset) { - ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset)); -} - -void Arm64Assembler::LoadDFromOffset(DRegister dest, XRegister base, - int32_t offset) { - ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset)); -} - -void Arm64Assembler::Load(Arm64ManagedRegister dest, XRegister base, - int32_t offset, size_t size) { - if (dest.IsNoRegister()) { - CHECK_EQ(0u, size) << dest; - } else if (dest.IsWRegister()) { - CHECK_EQ(4u, size) << dest; - ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset)); - } else if (dest.IsXRegister()) { - CHECK_NE(dest.AsXRegister(), SP) << dest; - if (size == 4u) { - ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset)); - } else { - CHECK_EQ(8u, size) << dest; - ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset)); - } - } else if (dest.IsSRegister()) { - ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset)); - } else { - CHECK(dest.IsDRegister()) << dest; - ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset)); - } -} - -void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) { - return Load(m_dst.AsArm64(), SP, src.Int32Value(), size); -} - -void Arm64Assembler::LoadFromThread(ManagedRegister m_dst, ThreadOffset64 src, size_t size) { - return Load(m_dst.AsArm64(), TR, src.Int32Value(), size); -} - -void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) { - Arm64ManagedRegister dst = m_dst.AsArm64(); - CHECK(dst.IsXRegister()) << dst; - LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value()); -} - -void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base, MemberOffset offs, - bool unpoison_reference) { - Arm64ManagedRegister dst = m_dst.AsArm64(); - Arm64ManagedRegister base = m_base.AsArm64(); - CHECK(dst.IsXRegister() && base.IsXRegister()); - LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(), - offs.Int32Value()); - if (unpoison_reference) { - WRegister ref_reg = dst.AsOverlappingWRegister(); - MaybeUnpoisonHeapReference(reg_w(ref_reg)); - } -} - void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) { Arm64ManagedRegister dst = m_dst.AsArm64(); Arm64ManagedRegister base = m_base.AsArm64(); @@ -310,209 +59,6 @@ void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, O ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value())); } -void Arm64Assembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) { - Arm64ManagedRegister dst = m_dst.AsArm64(); - CHECK(dst.IsXRegister()) << dst; - LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value()); -} - -// Copying routines. 
-void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) { - Arm64ManagedRegister dst = m_dst.AsArm64(); - Arm64ManagedRegister src = m_src.AsArm64(); - if (!dst.Equals(src)) { - if (dst.IsXRegister()) { - if (size == 4) { - CHECK(src.IsWRegister()); - ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister())); - } else { - if (src.IsXRegister()) { - ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister())); - } else { - ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister())); - } - } - } else if (dst.IsWRegister()) { - CHECK(src.IsWRegister()) << src; - ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister())); - } else if (dst.IsSRegister()) { - CHECK(src.IsSRegister()) << src; - ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister())); - } else { - CHECK(dst.IsDRegister()) << dst; - CHECK(src.IsDRegister()) << src; - ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister())); - } - } -} - -void Arm64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, - ThreadOffset64 tr_offs, - ManagedRegister m_scratch) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - CHECK(scratch.IsXRegister()) << scratch; - LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value()); - StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value()); -} - -void Arm64Assembler::CopyRawPtrToThread(ThreadOffset64 tr_offs, - FrameOffset fr_offs, - ManagedRegister m_scratch) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - CHECK(scratch.IsXRegister()) << scratch; - LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value()); - StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value()); -} - -void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src, - ManagedRegister m_scratch) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - CHECK(scratch.IsXRegister()) << scratch; - LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), - SP, src.Int32Value()); - StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), - SP, dest.Int32Value()); -} - -void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src, - ManagedRegister m_scratch, size_t size) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - CHECK(scratch.IsXRegister()) << scratch; - CHECK(size == 4 || size == 8) << size; - if (size == 4) { - LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, src.Int32Value()); - StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value()); - } else if (size == 8) { - LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value()); - StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value()); - } else { - UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8"; - } -} - -void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, - ManagedRegister m_scratch, size_t size) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - Arm64ManagedRegister base = src_base.AsArm64(); - CHECK(base.IsXRegister()) << base; - CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch; - CHECK(size == 4 || size == 8) << size; - if (size == 4) { - LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(), - src_offset.Int32Value()); - StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value()); - } else if (size == 8) { - LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value()); - StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value()); - } else { - UNIMPLEMENTED(FATAL) << "We only support Copy() of size 
4 and 8"; - } -} - -void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src, - ManagedRegister m_scratch, size_t size) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - Arm64ManagedRegister base = m_dest_base.AsArm64(); - CHECK(base.IsXRegister()) << base; - CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch; - CHECK(size == 4 || size == 8) << size; - if (size == 4) { - LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value()); - StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(), - dest_offs.Int32Value()); - } else if (size == 8) { - LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value()); - StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value()); - } else { - UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8"; - } -} - -void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/, - ManagedRegister /*mscratch*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant"; -} - -void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset, - ManagedRegister m_src, Offset src_offset, - ManagedRegister m_scratch, size_t size) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - Arm64ManagedRegister src = m_src.AsArm64(); - Arm64ManagedRegister dest = m_dest.AsArm64(); - CHECK(dest.IsXRegister()) << dest; - CHECK(src.IsXRegister()) << src; - CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch; - CHECK(size == 4 || size == 8) << size; - if (size == 4) { - if (scratch.IsWRegister()) { - LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(), - src_offset.Int32Value()); - StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(), - dest_offset.Int32Value()); - } else { - LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(), - src_offset.Int32Value()); - StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(), - dest_offset.Int32Value()); - } - } else if (size == 8) { - LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value()); - StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value()); - } else { - UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8"; - } -} - -void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, - FrameOffset /*src*/, Offset /*src_offset*/, - ManagedRegister /*scratch*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant"; -} - -void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) { - // TODO: Should we check that m_scratch is IP? - see arm. 
- ___ Dmb(InnerShareable, BarrierAll); -} - -void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) { - Arm64ManagedRegister reg = mreg.AsArm64(); - CHECK(size == 1 || size == 2) << size; - CHECK(reg.IsWRegister()) << reg; - if (size == 1) { - ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister())); - } else { - ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister())); - } -} - -void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) { - Arm64ManagedRegister reg = mreg.AsArm64(); - CHECK(size == 1 || size == 2) << size; - CHECK(reg.IsWRegister()) << reg; - if (size == 1) { - ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister())); - } else { - ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister())); - } -} - -void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { - // TODO: not validating references. -} - -void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { - // TODO: not validating references. -} - -void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) { - Arm64ManagedRegister base = m_base.AsArm64(); - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - CHECK(base.IsXRegister()) << base; - CHECK(scratch.IsXRegister()) << scratch; - LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value()); - ___ Blr(reg_x(scratch.AsXRegister())); -} - void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) { Arm64ManagedRegister base = m_base.AsArm64(); Arm64ManagedRegister scratch = m_scratch.AsArm64(); @@ -525,114 +71,6 @@ void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister ___ Br(reg_x(scratch.AsXRegister())); } -void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - CHECK(scratch.IsXRegister()) << scratch; - // Call *(*(SP + base) + offset) - LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value()); - LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value()); - ___ Blr(reg_x(scratch.AsXRegister())); -} - -void Arm64Assembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED, - ManagedRegister scratch ATTRIBUTE_UNUSED) { - UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant"; -} - -void Arm64Assembler::CreateHandleScopeEntry( - ManagedRegister m_out_reg, FrameOffset handle_scope_offs, ManagedRegister m_in_reg, - bool null_allowed) { - Arm64ManagedRegister out_reg = m_out_reg.AsArm64(); - Arm64ManagedRegister in_reg = m_in_reg.AsArm64(); - // For now we only hold stale handle scope entries in x registers. - CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg; - CHECK(out_reg.IsXRegister()) << out_reg; - if (null_allowed) { - // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is - // the address in the handle scope holding the reference. - // e.g. out_reg = (handle == 0) ? 
0 : (SP+handle_offset) - if (in_reg.IsNoRegister()) { - LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP, - handle_scope_offs.Int32Value()); - in_reg = out_reg; - } - ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0); - if (!out_reg.Equals(in_reg)) { - LoadImmediate(out_reg.AsXRegister(), 0, eq); - } - AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne); - } else { - AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al); - } -} - -void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset, - ManagedRegister m_scratch, bool null_allowed) { - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - CHECK(scratch.IsXRegister()) << scratch; - if (null_allowed) { - LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, - handle_scope_offset.Int32Value()); - // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is - // the address in the handle scope holding the reference. - // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset) - ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0); - // Move this logic in add constants with flags. - AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne); - } else { - AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al); - } - StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value()); -} - -void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg, - ManagedRegister m_in_reg) { - Arm64ManagedRegister out_reg = m_out_reg.AsArm64(); - Arm64ManagedRegister in_reg = m_in_reg.AsArm64(); - CHECK(out_reg.IsXRegister()) << out_reg; - CHECK(in_reg.IsXRegister()) << in_reg; - vixl::aarch64::Label exit; - if (!out_reg.Equals(in_reg)) { - // FIXME: Who sets the flags here? - LoadImmediate(out_reg.AsXRegister(), 0, eq); - } - ___ Cbz(reg_x(in_reg.AsXRegister()), &exit); - LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0); - ___ Bind(&exit); -} - -void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) { - CHECK_ALIGNED(stack_adjust, kStackAlignment); - Arm64ManagedRegister scratch = m_scratch.AsArm64(); - exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust)); - LoadFromOffset(scratch.AsXRegister(), - TR, - Thread::ExceptionOffset<kArm64PointerSize>().Int32Value()); - ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry()); -} - -void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) { - UseScratchRegisterScope temps(&vixl_masm_); - temps.Exclude(reg_x(exception->scratch_.AsXRegister())); - Register temp = temps.AcquireX(); - - // Bind exception poll entry. - ___ Bind(exception->Entry()); - if (exception->stack_adjust_ != 0) { // Fix up the frame. - DecreaseFrameSize(exception->stack_adjust_); - } - // Pass exception object as argument. - // Don't care about preserving X0 as this won't return. - ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister())); - ___ Ldr(temp, - MEM_OP(reg_x(TR), - QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value())); - - ___ Blr(temp); - // Call should never return. 
- ___ Brk(); -} - static inline dwarf::Reg DWARFReg(CPURegister reg) { if (reg.IsFPRegister()) { return dwarf::Reg::Arm64Fp(reg.GetCode()); @@ -696,105 +134,6 @@ void Arm64Assembler::UnspillRegisters(CPURegList registers, int offset) { DCHECK(registers.IsEmpty()); } -void Arm64Assembler::BuildFrame(size_t frame_size, - ManagedRegister method_reg, - ArrayRef<const ManagedRegister> callee_save_regs, - const ManagedRegisterEntrySpills& entry_spills) { - // Setup VIXL CPURegList for callee-saves. - CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0); - CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0); - for (auto r : callee_save_regs) { - Arm64ManagedRegister reg = r.AsArm64(); - if (reg.IsXRegister()) { - core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode()); - } else { - DCHECK(reg.IsDRegister()); - fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode()); - } - } - size_t core_reg_size = core_reg_list.GetTotalSizeInBytes(); - size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes(); - - // Increase frame to required size. - DCHECK_ALIGNED(frame_size, kStackAlignment); - DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize)); - IncreaseFrameSize(frame_size); - - // Save callee-saves. - SpillRegisters(core_reg_list, frame_size - core_reg_size); - SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size); - - DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR))); - - // Write ArtMethod* - DCHECK(X0 == method_reg.AsArm64().AsXRegister()); - StoreToOffset(X0, SP, 0); - - // Write out entry spills - int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize); - for (size_t i = 0; i < entry_spills.size(); ++i) { - Arm64ManagedRegister reg = entry_spills.at(i).AsArm64(); - if (reg.IsNoRegister()) { - // only increment stack offset. - ManagedRegisterSpill spill = entry_spills.at(i); - offset += spill.getSize(); - } else if (reg.IsXRegister()) { - StoreToOffset(reg.AsXRegister(), SP, offset); - offset += 8; - } else if (reg.IsWRegister()) { - StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset); - offset += 4; - } else if (reg.IsDRegister()) { - StoreDToOffset(reg.AsDRegister(), SP, offset); - offset += 8; - } else if (reg.IsSRegister()) { - StoreSToOffset(reg.AsSRegister(), SP, offset); - offset += 4; - } - } -} - -void Arm64Assembler::RemoveFrame(size_t frame_size, - ArrayRef<const ManagedRegister> callee_save_regs) { - // Setup VIXL CPURegList for callee-saves. - CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0); - CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0); - for (auto r : callee_save_regs) { - Arm64ManagedRegister reg = r.AsArm64(); - if (reg.IsXRegister()) { - core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode()); - } else { - DCHECK(reg.IsDRegister()); - fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode()); - } - } - size_t core_reg_size = core_reg_list.GetTotalSizeInBytes(); - size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes(); - - // For now we only check that the size of the frame is large enough to hold spills and method - // reference. - DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize)); - DCHECK_ALIGNED(frame_size, kStackAlignment); - - DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR))); - - cfi_.RememberState(); - - // Restore callee-saves. 
- UnspillRegisters(core_reg_list, frame_size - core_reg_size); - UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size); - - // Decrease frame size to start of callee saved regs. - DecreaseFrameSize(frame_size); - - // Pop callee saved and return to LR. - ___ Ret(); - - // The CFI should be restored for any code that follows the exit block. - cfi_.RestoreState(); - cfi_.DefCFAOffset(frame_size); -} - void Arm64Assembler::PoisonHeapReference(Register reg) { DCHECK(reg.IsW()); // reg = -reg. diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h index d7084dad1c..4e88e640e5 100644 --- a/compiler/utils/arm64/assembler_arm64.h +++ b/compiler/utils/arm64/assembler_arm64.h @@ -22,11 +22,9 @@ #include <vector> #include "base/arena_containers.h" -#include "base/enums.h" #include "base/logging.h" #include "utils/arm64/managed_register_arm64.h" #include "utils/assembler.h" -#include "utils/jni_macro_assembler.h" #include "offsets.h" // TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn @@ -63,38 +61,14 @@ enum StoreOperandType { kStoreDWord }; -class Arm64Exception { - private: - Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust) - : scratch_(scratch), stack_adjust_(stack_adjust) { - } - - vixl::aarch64::Label* Entry() { return &exception_entry_; } - - // Register used for passing Thread::Current()->exception_ . - const Arm64ManagedRegister scratch_; - - // Stack adjust for ExceptionPool. - const size_t stack_adjust_; - - vixl::aarch64::Label exception_entry_; - - friend class Arm64Assembler; - DISALLOW_COPY_AND_ASSIGN(Arm64Exception); -}; - -class Arm64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> { +class Arm64Assembler FINAL : public Assembler { public: - explicit Arm64Assembler(ArenaAllocator* arena) - : Assembler(arena), - exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {} + explicit Arm64Assembler(ArenaAllocator* arena) : Assembler(arena) {} virtual ~Arm64Assembler() {} vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; } - DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); } - // Finalize the code. void FinalizeCode() OVERRIDE; @@ -105,110 +79,14 @@ class Arm64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerS // Copy instructions out of assembly buffer into the given region of memory. void FinalizeInstructions(const MemoryRegion& region); + void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs); + void SpillRegisters(vixl::aarch64::CPURegList registers, int offset); void UnspillRegisters(vixl::aarch64::CPURegList registers, int offset); - // Emit code that will create an activation on the stack. - void BuildFrame(size_t frame_size, - ManagedRegister method_reg, - ArrayRef<const ManagedRegister> callee_save_regs, - const ManagedRegisterEntrySpills& entry_spills) OVERRIDE; - - // Emit code that will remove an activation from the stack. - void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) - OVERRIDE; - - void IncreaseFrameSize(size_t adjust) OVERRIDE; - void DecreaseFrameSize(size_t adjust) OVERRIDE; - - // Store routines. 
- void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE; - void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE; - void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE; - void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE; - void StoreStackOffsetToThread(ThreadOffset64 thr_offs, - FrameOffset fr_offs, - ManagedRegister scratch) OVERRIDE; - void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE; - void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off, - ManagedRegister scratch) OVERRIDE; - - // Load routines. - void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE; - void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE; - void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; - void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs, - bool unpoison_reference) OVERRIDE; - void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; - void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE; - - // Copying routines. - void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE; - void CopyRawPtrFromThread(FrameOffset fr_offs, - ThreadOffset64 thr_offs, - ManagedRegister scratch) OVERRIDE; - void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) - OVERRIDE; - void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE; - void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE; - void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch, - size_t size) OVERRIDE; - void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch, - size_t size) OVERRIDE; - void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch, - size_t size) OVERRIDE; - void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset, - ManagedRegister scratch, size_t size) OVERRIDE; - void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset, - ManagedRegister scratch, size_t size) OVERRIDE; - void MemoryBarrier(ManagedRegister scratch) OVERRIDE; - - // Sign extension. - void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE; - - // Zero extension. - void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE; - - // Exploit fast access in managed code to Thread::Current(). - void GetCurrentThread(ManagedRegister tr) OVERRIDE; - void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE; - - // Set up out_reg to hold a Object** into the handle scope, or to be null if the - // value is null and null_allowed. in_reg holds a possibly stale reference - // that can be used to avoid loading the handle scope entry to see if the value is - // null. - void CreateHandleScopeEntry(ManagedRegister out_reg, - FrameOffset handlescope_offset, - ManagedRegister in_reg, - bool null_allowed) OVERRIDE; - - // Set up out_off to hold a Object** into the handle scope, or to be null if the - // value is null and null_allowed. - void CreateHandleScopeEntry(FrameOffset out_off, - FrameOffset handlescope_offset, - ManagedRegister scratch, - bool null_allowed) OVERRIDE; - - // src holds a handle scope entry (Object**) load this into dst. 
- void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE; - - // Heap::VerifyObject on src. In some cases (such as a reference to this) we - // know that src may not be null. - void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE; - void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE; - - // Call to address held at [base+offset]. - void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE; - void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE; - void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE; - // Jump to address (not setting link register) void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch); - // Generate code to check if Thread::Current()->exception_ is non-null - // and branch to a ExceptionSlowPath if it is. - void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE; - // // Heap poisoning. // @@ -227,7 +105,6 @@ class Arm64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerS UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM64"; } - private: static vixl::aarch64::Register reg_x(int code) { CHECK(code < kNumberOfXRegisters) << code; if (code == SP) { @@ -256,37 +133,7 @@ class Arm64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerS return vixl::aarch64::FPRegister::GetSRegFromCode(code); } - // Emits Exception block. - void EmitExceptionPoll(Arm64Exception *exception); - - void StoreWToOffset(StoreOperandType type, WRegister source, - XRegister base, int32_t offset); - void StoreToOffset(XRegister source, XRegister base, int32_t offset); - void StoreSToOffset(SRegister source, XRegister base, int32_t offset); - void StoreDToOffset(DRegister source, XRegister base, int32_t offset); - - void LoadImmediate(XRegister dest, - int32_t value, - vixl::aarch64::Condition cond = vixl::aarch64::al); - void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size); - void LoadWFromOffset(LoadOperandType type, - WRegister dest, - XRegister base, - int32_t offset); - void LoadFromOffset(XRegister dest, XRegister base, int32_t offset); - void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset); - void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset); - void AddConstant(XRegister rd, - int32_t value, - vixl::aarch64::Condition cond = vixl::aarch64::al); - void AddConstant(XRegister rd, - XRegister rn, - int32_t value, - vixl::aarch64::Condition cond = vixl::aarch64::al); - - // List of exception blocks to generate at the end of the code cache. - ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_; - + private: // VIXL assembler. vixl::aarch64::MacroAssembler vixl_masm_; diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc new file mode 100644 index 0000000000..dfdcd11893 --- /dev/null +++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc @@ -0,0 +1,754 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "jni_macro_assembler_arm64.h" + +#include "base/logging.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "managed_register_arm64.h" +#include "offsets.h" +#include "thread.h" + +using namespace vixl::aarch64; // NOLINT(build/namespaces) + +namespace art { +namespace arm64 { + +#ifdef ___ +#error "ARM64 Assembler macro already defined." +#else +#define ___ asm_.GetVIXLAssembler()-> +#endif + +#define reg_x(X) Arm64Assembler::reg_x(X) +#define reg_w(W) Arm64Assembler::reg_w(W) +#define reg_d(D) Arm64Assembler::reg_d(D) +#define reg_s(S) Arm64Assembler::reg_s(S) + +Arm64JNIMacroAssembler::~Arm64JNIMacroAssembler() { +} + +void Arm64JNIMacroAssembler::FinalizeCode() { + for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) { + EmitExceptionPoll(exception.get()); + } + ___ FinalizeCode(); +} + +void Arm64JNIMacroAssembler::GetCurrentThread(ManagedRegister tr) { + ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR)); +} + +void Arm64JNIMacroAssembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) { + StoreToOffset(TR, SP, offset.Int32Value()); +} + +// See Arm64 PCS Section 5.2.2.1. +void Arm64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + AddConstant(SP, -adjust); + cfi().AdjustCFAOffset(adjust); +} + +// See Arm64 PCS Section 5.2.2.1. +void Arm64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + AddConstant(SP, adjust); + cfi().AdjustCFAOffset(-adjust); +} + +void Arm64JNIMacroAssembler::AddConstant(XRegister rd, int32_t value, Condition cond) { + AddConstant(rd, rd, value, cond); +} + +void Arm64JNIMacroAssembler::AddConstant(XRegister rd, + XRegister rn, + int32_t value, + Condition cond) { + if ((cond == al) || (cond == nv)) { + // VIXL macro-assembler handles all variants. + ___ Add(reg_x(rd), reg_x(rn), value); + } else { + // temp = rd + value + // rd = cond ? 
temp : rn + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(reg_x(rd), reg_x(rn)); + Register temp = temps.AcquireX(); + ___ Add(temp, reg_x(rn), value); + ___ Csel(reg_x(rd), temp, reg_x(rd), cond); + } +} + +void Arm64JNIMacroAssembler::StoreWToOffset(StoreOperandType type, + WRegister source, + XRegister base, + int32_t offset) { + switch (type) { + case kStoreByte: + ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset)); + break; + case kStoreHalfword: + ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset)); + break; + case kStoreWord: + ___ Str(reg_w(source), MEM_OP(reg_x(base), offset)); + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } +} + +void Arm64JNIMacroAssembler::StoreToOffset(XRegister source, XRegister base, int32_t offset) { + CHECK_NE(source, SP); + ___ Str(reg_x(source), MEM_OP(reg_x(base), offset)); +} + +void Arm64JNIMacroAssembler::StoreSToOffset(SRegister source, XRegister base, int32_t offset) { + ___ Str(reg_s(source), MEM_OP(reg_x(base), offset)); +} + +void Arm64JNIMacroAssembler::StoreDToOffset(DRegister source, XRegister base, int32_t offset) { + ___ Str(reg_d(source), MEM_OP(reg_x(base), offset)); +} + +void Arm64JNIMacroAssembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) { + Arm64ManagedRegister src = m_src.AsArm64(); + if (src.IsNoRegister()) { + CHECK_EQ(0u, size); + } else if (src.IsWRegister()) { + CHECK_EQ(4u, size); + StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value()); + } else if (src.IsXRegister()) { + CHECK_EQ(8u, size); + StoreToOffset(src.AsXRegister(), SP, offs.Int32Value()); + } else if (src.IsSRegister()) { + StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value()); + } else { + CHECK(src.IsDRegister()) << src; + StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value()); + } +} + +void Arm64JNIMacroAssembler::StoreRef(FrameOffset offs, ManagedRegister m_src) { + Arm64ManagedRegister src = m_src.AsArm64(); + CHECK(src.IsXRegister()) << src; + StoreWToOffset(kStoreWord, src.AsOverlappingWRegister(), SP, + offs.Int32Value()); +} + +void Arm64JNIMacroAssembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) { + Arm64ManagedRegister src = m_src.AsArm64(); + CHECK(src.IsXRegister()) << src; + StoreToOffset(src.AsXRegister(), SP, offs.Int32Value()); +} + +void Arm64JNIMacroAssembler::StoreImmediateToFrame(FrameOffset offs, + uint32_t imm, + ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsXRegister()) << scratch; + LoadImmediate(scratch.AsXRegister(), imm); + StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, + offs.Int32Value()); +} + +void Arm64JNIMacroAssembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs, + FrameOffset fr_offs, + ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsXRegister()) << scratch; + AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value()); + StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value()); +} + +void Arm64JNIMacroAssembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) { + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + Register temp = temps.AcquireX(); + ___ Mov(temp, reg_x(SP)); + ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value())); +} + +void Arm64JNIMacroAssembler::StoreSpanning(FrameOffset dest_off, + ManagedRegister m_source, + FrameOffset in_off, + ManagedRegister m_scratch) { + Arm64ManagedRegister source = m_source.AsArm64(); + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + 
StoreToOffset(source.AsXRegister(), SP, dest_off.Int32Value()); + LoadFromOffset(scratch.AsXRegister(), SP, in_off.Int32Value()); + StoreToOffset(scratch.AsXRegister(), SP, dest_off.Int32Value() + 8); +} + +// Load routines. +void Arm64JNIMacroAssembler::LoadImmediate(XRegister dest, int32_t value, Condition cond) { + if ((cond == al) || (cond == nv)) { + ___ Mov(reg_x(dest), value); + } else { + // temp = value + // rd = cond ? temp : rd + if (value != 0) { + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(reg_x(dest)); + Register temp = temps.AcquireX(); + ___ Mov(temp, value); + ___ Csel(reg_x(dest), temp, reg_x(dest), cond); + } else { + ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond); + } + } +} + +void Arm64JNIMacroAssembler::LoadWFromOffset(LoadOperandType type, + WRegister dest, + XRegister base, + int32_t offset) { + switch (type) { + case kLoadSignedByte: + ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset)); + break; + case kLoadSignedHalfword: + ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset)); + break; + case kLoadUnsignedByte: + ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset)); + break; + case kLoadUnsignedHalfword: + ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset)); + break; + case kLoadWord: + ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset)); + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } +} + +// Note: We can extend this member by adding load type info - see +// sign extended A64 load variants. +void Arm64JNIMacroAssembler::LoadFromOffset(XRegister dest, XRegister base, int32_t offset) { + CHECK_NE(dest, SP); + ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset)); +} + +void Arm64JNIMacroAssembler::LoadSFromOffset(SRegister dest, XRegister base, int32_t offset) { + ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset)); +} + +void Arm64JNIMacroAssembler::LoadDFromOffset(DRegister dest, XRegister base, int32_t offset) { + ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset)); +} + +void Arm64JNIMacroAssembler::Load(Arm64ManagedRegister dest, + XRegister base, + int32_t offset, + size_t size) { + if (dest.IsNoRegister()) { + CHECK_EQ(0u, size) << dest; + } else if (dest.IsWRegister()) { + CHECK_EQ(4u, size) << dest; + ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset)); + } else if (dest.IsXRegister()) { + CHECK_NE(dest.AsXRegister(), SP) << dest; + if (size == 4u) { + ___ Ldr(reg_w(dest.AsOverlappingWRegister()), MEM_OP(reg_x(base), offset)); + } else { + CHECK_EQ(8u, size) << dest; + ___ Ldr(reg_x(dest.AsXRegister()), MEM_OP(reg_x(base), offset)); + } + } else if (dest.IsSRegister()) { + ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset)); + } else { + CHECK(dest.IsDRegister()) << dest; + ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset)); + } +} + +void Arm64JNIMacroAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) { + return Load(m_dst.AsArm64(), SP, src.Int32Value(), size); +} + +void Arm64JNIMacroAssembler::LoadFromThread(ManagedRegister m_dst, + ThreadOffset64 src, + size_t size) { + return Load(m_dst.AsArm64(), TR, src.Int32Value(), size); +} + +void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) { + Arm64ManagedRegister dst = m_dst.AsArm64(); + CHECK(dst.IsXRegister()) << dst; + LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), SP, offs.Int32Value()); +} + +void Arm64JNIMacroAssembler::LoadRef(ManagedRegister m_dst, + ManagedRegister m_base, + MemberOffset offs, + bool unpoison_reference) { + Arm64ManagedRegister dst = m_dst.AsArm64(); + 
Arm64ManagedRegister base = m_base.AsArm64(); + CHECK(dst.IsXRegister() && base.IsXRegister()); + LoadWFromOffset(kLoadWord, dst.AsOverlappingWRegister(), base.AsXRegister(), + offs.Int32Value()); + if (unpoison_reference) { + WRegister ref_reg = dst.AsOverlappingWRegister(); + asm_.MaybeUnpoisonHeapReference(reg_w(ref_reg)); + } +} + +void Arm64JNIMacroAssembler::LoadRawPtr(ManagedRegister m_dst, + ManagedRegister m_base, + Offset offs) { + Arm64ManagedRegister dst = m_dst.AsArm64(); + Arm64ManagedRegister base = m_base.AsArm64(); + CHECK(dst.IsXRegister() && base.IsXRegister()); + // Remove dst and base form the temp list - higher level API uses IP1, IP0. + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(reg_x(dst.AsXRegister()), reg_x(base.AsXRegister())); + ___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value())); +} + +void Arm64JNIMacroAssembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) { + Arm64ManagedRegister dst = m_dst.AsArm64(); + CHECK(dst.IsXRegister()) << dst; + LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value()); +} + +// Copying routines. +void Arm64JNIMacroAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) { + Arm64ManagedRegister dst = m_dst.AsArm64(); + Arm64ManagedRegister src = m_src.AsArm64(); + if (!dst.Equals(src)) { + if (dst.IsXRegister()) { + if (size == 4) { + CHECK(src.IsWRegister()); + ___ Mov(reg_w(dst.AsOverlappingWRegister()), reg_w(src.AsWRegister())); + } else { + if (src.IsXRegister()) { + ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsXRegister())); + } else { + ___ Mov(reg_x(dst.AsXRegister()), reg_x(src.AsOverlappingXRegister())); + } + } + } else if (dst.IsWRegister()) { + CHECK(src.IsWRegister()) << src; + ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister())); + } else if (dst.IsSRegister()) { + CHECK(src.IsSRegister()) << src; + ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister())); + } else { + CHECK(dst.IsDRegister()) << dst; + CHECK(src.IsDRegister()) << src; + ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister())); + } + } +} + +void Arm64JNIMacroAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset64 tr_offs, + ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsXRegister()) << scratch; + LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value()); + StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value()); +} + +void Arm64JNIMacroAssembler::CopyRawPtrToThread(ThreadOffset64 tr_offs, + FrameOffset fr_offs, + ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsXRegister()) << scratch; + LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value()); + StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value()); +} + +void Arm64JNIMacroAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsXRegister()) << scratch; + LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), + SP, src.Int32Value()); + StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), + SP, dest.Int32Value()); +} + +void Arm64JNIMacroAssembler::Copy(FrameOffset dest, + FrameOffset src, + ManagedRegister m_scratch, + size_t size) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsXRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + LoadWFromOffset(kLoadWord, 
scratch.AsOverlappingWRegister(), SP, src.Int32Value()); + StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), SP, dest.Int32Value()); + } else if (size == 8) { + LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value()); + StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value()); + } else { + UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8"; + } +} + +void Arm64JNIMacroAssembler::Copy(FrameOffset dest, + ManagedRegister src_base, + Offset src_offset, + ManagedRegister m_scratch, + size_t size) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + Arm64ManagedRegister base = src_base.AsArm64(); + CHECK(base.IsXRegister()) << base; + CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsXRegister(), + src_offset.Int32Value()); + StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value()); + } else if (size == 8) { + LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), src_offset.Int32Value()); + StoreToOffset(scratch.AsXRegister(), SP, dest.Int32Value()); + } else { + UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8"; + } +} + +void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest_base, + Offset dest_offs, + FrameOffset src, + ManagedRegister m_scratch, + size_t size) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + Arm64ManagedRegister base = m_dest_base.AsArm64(); + CHECK(base.IsXRegister()) << base; + CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value()); + StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsXRegister(), + dest_offs.Int32Value()); + } else if (size == 8) { + LoadFromOffset(scratch.AsXRegister(), SP, src.Int32Value()); + StoreToOffset(scratch.AsXRegister(), base.AsXRegister(), dest_offs.Int32Value()); + } else { + UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8"; + } +} + +void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/, + FrameOffset /*src_base*/, + Offset /*src_offset*/, + ManagedRegister /*mscratch*/, + size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant"; +} + +void Arm64JNIMacroAssembler::Copy(ManagedRegister m_dest, + Offset dest_offset, + ManagedRegister m_src, + Offset src_offset, + ManagedRegister m_scratch, + size_t size) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + Arm64ManagedRegister src = m_src.AsArm64(); + Arm64ManagedRegister dest = m_dest.AsArm64(); + CHECK(dest.IsXRegister()) << dest; + CHECK(src.IsXRegister()) << src; + CHECK(scratch.IsXRegister() || scratch.IsWRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + if (scratch.IsWRegister()) { + LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsXRegister(), + src_offset.Int32Value()); + StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsXRegister(), + dest_offset.Int32Value()); + } else { + LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), src.AsXRegister(), + src_offset.Int32Value()); + StoreWToOffset(kStoreWord, scratch.AsOverlappingWRegister(), dest.AsXRegister(), + dest_offset.Int32Value()); + } + } else if (size == 8) { + LoadFromOffset(scratch.AsXRegister(), src.AsXRegister(), src_offset.Int32Value()); + StoreToOffset(scratch.AsXRegister(), dest.AsXRegister(), dest_offset.Int32Value()); + } else { + UNIMPLEMENTED(FATAL) << "We only 
support Copy() of size 4 and 8"; + } +} + +void Arm64JNIMacroAssembler::Copy(FrameOffset /*dst*/, + Offset /*dest_offset*/, + FrameOffset /*src*/, + Offset /*src_offset*/, + ManagedRegister /*scratch*/, + size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant"; +} + +void Arm64JNIMacroAssembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) { + // TODO: Should we check that m_scratch is IP? - see arm. + ___ Dmb(InnerShareable, BarrierAll); +} + +void Arm64JNIMacroAssembler::SignExtend(ManagedRegister mreg, size_t size) { + Arm64ManagedRegister reg = mreg.AsArm64(); + CHECK(size == 1 || size == 2) << size; + CHECK(reg.IsWRegister()) << reg; + if (size == 1) { + ___ Sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister())); + } else { + ___ Sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister())); + } +} + +void Arm64JNIMacroAssembler::ZeroExtend(ManagedRegister mreg, size_t size) { + Arm64ManagedRegister reg = mreg.AsArm64(); + CHECK(size == 1 || size == 2) << size; + CHECK(reg.IsWRegister()) << reg; + if (size == 1) { + ___ Uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister())); + } else { + ___ Uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister())); + } +} + +void Arm64JNIMacroAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { + // TODO: not validating references. +} + +void Arm64JNIMacroAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { + // TODO: not validating references. +} + +void Arm64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) { + Arm64ManagedRegister base = m_base.AsArm64(); + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(base.IsXRegister()) << base; + CHECK(scratch.IsXRegister()) << scratch; + LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value()); + ___ Blr(reg_x(scratch.AsXRegister())); +} + +void Arm64JNIMacroAssembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsXRegister()) << scratch; + // Call *(*(SP + base) + offset) + LoadFromOffset(scratch.AsXRegister(), SP, base.Int32Value()); + LoadFromOffset(scratch.AsXRegister(), scratch.AsXRegister(), offs.Int32Value()); + ___ Blr(reg_x(scratch.AsXRegister())); +} + +void Arm64JNIMacroAssembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED, + ManagedRegister scratch ATTRIBUTE_UNUSED) { + UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant"; +} + +void Arm64JNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, + FrameOffset handle_scope_offs, + ManagedRegister m_in_reg, + bool null_allowed) { + Arm64ManagedRegister out_reg = m_out_reg.AsArm64(); + Arm64ManagedRegister in_reg = m_in_reg.AsArm64(); + // For now we only hold stale handle scope entries in x registers. + CHECK(in_reg.IsNoRegister() || in_reg.IsXRegister()) << in_reg; + CHECK(out_reg.IsXRegister()) << out_reg; + if (null_allowed) { + // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is + // the address in the handle scope holding the reference. + // e.g. out_reg = (handle == 0) ? 
0 : (SP+handle_offset) + if (in_reg.IsNoRegister()) { + LoadWFromOffset(kLoadWord, out_reg.AsOverlappingWRegister(), SP, + handle_scope_offs.Int32Value()); + in_reg = out_reg; + } + ___ Cmp(reg_w(in_reg.AsOverlappingWRegister()), 0); + if (!out_reg.Equals(in_reg)) { + LoadImmediate(out_reg.AsXRegister(), 0, eq); + } + AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), ne); + } else { + AddConstant(out_reg.AsXRegister(), SP, handle_scope_offs.Int32Value(), al); + } +} + +void Arm64JNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off, + FrameOffset handle_scope_offset, + ManagedRegister m_scratch, + bool null_allowed) { + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + CHECK(scratch.IsXRegister()) << scratch; + if (null_allowed) { + LoadWFromOffset(kLoadWord, scratch.AsOverlappingWRegister(), SP, + handle_scope_offset.Int32Value()); + // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is + // the address in the handle scope holding the reference. + // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset) + ___ Cmp(reg_w(scratch.AsOverlappingWRegister()), 0); + // Move this logic in add constants with flags. + AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), ne); + } else { + AddConstant(scratch.AsXRegister(), SP, handle_scope_offset.Int32Value(), al); + } + StoreToOffset(scratch.AsXRegister(), SP, out_off.Int32Value()); +} + +void Arm64JNIMacroAssembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg, + ManagedRegister m_in_reg) { + Arm64ManagedRegister out_reg = m_out_reg.AsArm64(); + Arm64ManagedRegister in_reg = m_in_reg.AsArm64(); + CHECK(out_reg.IsXRegister()) << out_reg; + CHECK(in_reg.IsXRegister()) << in_reg; + vixl::aarch64::Label exit; + if (!out_reg.Equals(in_reg)) { + // FIXME: Who sets the flags here? + LoadImmediate(out_reg.AsXRegister(), 0, eq); + } + ___ Cbz(reg_x(in_reg.AsXRegister()), &exit); + LoadFromOffset(out_reg.AsXRegister(), in_reg.AsXRegister(), 0); + ___ Bind(&exit); +} + +void Arm64JNIMacroAssembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) { + CHECK_ALIGNED(stack_adjust, kStackAlignment); + Arm64ManagedRegister scratch = m_scratch.AsArm64(); + exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust)); + LoadFromOffset(scratch.AsXRegister(), + TR, + Thread::ExceptionOffset<kArm64PointerSize>().Int32Value()); + ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry()); +} + +void Arm64JNIMacroAssembler::EmitExceptionPoll(Arm64Exception *exception) { + UseScratchRegisterScope temps(asm_.GetVIXLAssembler()); + temps.Exclude(reg_x(exception->scratch_.AsXRegister())); + Register temp = temps.AcquireX(); + + // Bind exception poll entry. + ___ Bind(exception->Entry()); + if (exception->stack_adjust_ != 0) { // Fix up the frame. + DecreaseFrameSize(exception->stack_adjust_); + } + // Pass exception object as argument. + // Don't care about preserving X0 as this won't return. + ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister())); + ___ Ldr(temp, + MEM_OP(reg_x(TR), + QUICK_ENTRYPOINT_OFFSET(kArm64PointerSize, pDeliverException).Int32Value())); + + ___ Blr(temp); + // Call should never return. + ___ Brk(); +} + +void Arm64JNIMacroAssembler::BuildFrame(size_t frame_size, + ManagedRegister method_reg, + ArrayRef<const ManagedRegister> callee_save_regs, + const ManagedRegisterEntrySpills& entry_spills) { + // Setup VIXL CPURegList for callee-saves. 
+ CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0); + CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0); + for (auto r : callee_save_regs) { + Arm64ManagedRegister reg = r.AsArm64(); + if (reg.IsXRegister()) { + core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode()); + } else { + DCHECK(reg.IsDRegister()); + fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode()); + } + } + size_t core_reg_size = core_reg_list.GetTotalSizeInBytes(); + size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes(); + + // Increase frame to required size. + DCHECK_ALIGNED(frame_size, kStackAlignment); + DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize)); + IncreaseFrameSize(frame_size); + + // Save callee-saves. + asm_.SpillRegisters(core_reg_list, frame_size - core_reg_size); + asm_.SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size); + + DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR))); + + // Write ArtMethod* + DCHECK(X0 == method_reg.AsArm64().AsXRegister()); + StoreToOffset(X0, SP, 0); + + // Write out entry spills + int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize); + for (size_t i = 0; i < entry_spills.size(); ++i) { + Arm64ManagedRegister reg = entry_spills.at(i).AsArm64(); + if (reg.IsNoRegister()) { + // only increment stack offset. + ManagedRegisterSpill spill = entry_spills.at(i); + offset += spill.getSize(); + } else if (reg.IsXRegister()) { + StoreToOffset(reg.AsXRegister(), SP, offset); + offset += 8; + } else if (reg.IsWRegister()) { + StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset); + offset += 4; + } else if (reg.IsDRegister()) { + StoreDToOffset(reg.AsDRegister(), SP, offset); + offset += 8; + } else if (reg.IsSRegister()) { + StoreSToOffset(reg.AsSRegister(), SP, offset); + offset += 4; + } + } +} + +void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size, + ArrayRef<const ManagedRegister> callee_save_regs) { + // Setup VIXL CPURegList for callee-saves. + CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0); + CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0); + for (auto r : callee_save_regs) { + Arm64ManagedRegister reg = r.AsArm64(); + if (reg.IsXRegister()) { + core_reg_list.Combine(reg_x(reg.AsXRegister()).GetCode()); + } else { + DCHECK(reg.IsDRegister()); + fp_reg_list.Combine(reg_d(reg.AsDRegister()).GetCode()); + } + } + size_t core_reg_size = core_reg_list.GetTotalSizeInBytes(); + size_t fp_reg_size = fp_reg_list.GetTotalSizeInBytes(); + + // For now we only check that the size of the frame is large enough to hold spills and method + // reference. + DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize)); + DCHECK_ALIGNED(frame_size, kStackAlignment); + + DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR))); + + cfi().RememberState(); + + // Restore callee-saves. + asm_.UnspillRegisters(core_reg_list, frame_size - core_reg_size); + asm_.UnspillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size); + + // Decrease frame size to start of callee saved regs. + DecreaseFrameSize(frame_size); + + // Pop callee saved and return to LR. + ___ Ret(); + + // The CFI should be restored for any code that follows the exit block. 
+ cfi().RestoreState(); + cfi().DefCFAOffset(frame_size); +} + +#undef ___ + +} // namespace arm64 +} // namespace art diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h new file mode 100644 index 0000000000..79ee441144 --- /dev/null +++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h @@ -0,0 +1,230 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_ +#define ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_ + +#include <stdint.h> +#include <memory> +#include <vector> + +#include "assembler_arm64.h" +#include "base/arena_containers.h" +#include "base/enums.h" +#include "base/logging.h" +#include "utils/assembler.h" +#include "utils/jni_macro_assembler.h" +#include "offsets.h" + +// TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunknown-pragmas" +#pragma GCC diagnostic ignored "-Wshadow" +#pragma GCC diagnostic ignored "-Wmissing-noreturn" +#include "a64/macro-assembler-a64.h" +#pragma GCC diagnostic pop + +namespace art { +namespace arm64 { + +class Arm64JNIMacroAssembler FINAL : public JNIMacroAssemblerFwd<Arm64Assembler, PointerSize::k64> { + public: + explicit Arm64JNIMacroAssembler(ArenaAllocator* arena) + : JNIMacroAssemblerFwd(arena), + exception_blocks_(arena->Adapter(kArenaAllocAssembler)) {} + + ~Arm64JNIMacroAssembler(); + + // Finalize the code. + void FinalizeCode() OVERRIDE; + + // Emit code that will create an activation on the stack. + void BuildFrame(size_t frame_size, + ManagedRegister method_reg, + ArrayRef<const ManagedRegister> callee_save_regs, + const ManagedRegisterEntrySpills& entry_spills) OVERRIDE; + + // Emit code that will remove an activation from the stack. + void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) + OVERRIDE; + + void IncreaseFrameSize(size_t adjust) OVERRIDE; + void DecreaseFrameSize(size_t adjust) OVERRIDE; + + // Store routines. + void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE; + void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE; + void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE; + void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE; + void StoreStackOffsetToThread(ThreadOffset64 thr_offs, + FrameOffset fr_offs, + ManagedRegister scratch) OVERRIDE; + void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE; + void StoreSpanning(FrameOffset dest, + ManagedRegister src, + FrameOffset in_off, + ManagedRegister scratch) OVERRIDE; + + // Load routines. 
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE; + void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE; + void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE; + void LoadRef(ManagedRegister dest, + ManagedRegister base, + MemberOffset offs, + bool unpoison_reference) OVERRIDE; + void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE; + void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE; + + // Copying routines. + void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE; + void CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset64 thr_offs, + ManagedRegister scratch) OVERRIDE; + void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch) + OVERRIDE; + void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE; + void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE; + void Copy(FrameOffset dest, + ManagedRegister src_base, + Offset src_offset, + ManagedRegister scratch, + size_t size) OVERRIDE; + void Copy(ManagedRegister dest_base, + Offset dest_offset, + FrameOffset src, + ManagedRegister scratch, + size_t size) OVERRIDE; + void Copy(FrameOffset dest, + FrameOffset src_base, + Offset src_offset, + ManagedRegister scratch, + size_t size) OVERRIDE; + void Copy(ManagedRegister dest, + Offset dest_offset, + ManagedRegister src, + Offset src_offset, + ManagedRegister scratch, + size_t size) OVERRIDE; + void Copy(FrameOffset dest, + Offset dest_offset, + FrameOffset src, + Offset src_offset, + ManagedRegister scratch, + size_t size) OVERRIDE; + void MemoryBarrier(ManagedRegister scratch) OVERRIDE; + + // Sign extension. + void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE; + + // Zero extension. + void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE; + + // Exploit fast access in managed code to Thread::Current(). + void GetCurrentThread(ManagedRegister tr) OVERRIDE; + void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE; + + // Set up out_reg to hold a Object** into the handle scope, or to be null if the + // value is null and null_allowed. in_reg holds a possibly stale reference + // that can be used to avoid loading the handle scope entry to see if the value is + // null. + void CreateHandleScopeEntry(ManagedRegister out_reg, + FrameOffset handlescope_offset, + ManagedRegister in_reg, + bool null_allowed) OVERRIDE; + + // Set up out_off to hold a Object** into the handle scope, or to be null if the + // value is null and null_allowed. + void CreateHandleScopeEntry(FrameOffset out_off, + FrameOffset handlescope_offset, + ManagedRegister scratch, + bool null_allowed) OVERRIDE; + + // src holds a handle scope entry (Object**) load this into dst. + void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) OVERRIDE; + + // Heap::VerifyObject on src. In some cases (such as a reference to this) we + // know that src may not be null. + void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE; + void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE; + + // Call to address held at [base+offset]. 
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to an ExceptionSlowPath if it is.
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
+
+ private:
+ class Arm64Exception {
+ public:
+ Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
+ : scratch_(scratch), stack_adjust_(stack_adjust) {}
+
+ vixl::aarch64::Label* Entry() { return &exception_entry_; }
+
+ // Register used for passing Thread::Current()->exception_.
+ const Arm64ManagedRegister scratch_;
+
+ // Stack adjust for ExceptionPoll.
+ const size_t stack_adjust_;
+
+ vixl::aarch64::Label exception_entry_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
+ };
+
+ // Emits an exception slow-path block.
+ void EmitExceptionPoll(Arm64Exception *exception);
+
+ void StoreWToOffset(StoreOperandType type,
+ WRegister source,
+ XRegister base,
+ int32_t offset);
+ void StoreToOffset(XRegister source, XRegister base, int32_t offset);
+ void StoreSToOffset(SRegister source, XRegister base, int32_t offset);
+ void StoreDToOffset(DRegister source, XRegister base, int32_t offset);
+
+ void LoadImmediate(XRegister dest,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+ void Load(Arm64ManagedRegister dst, XRegister src, int32_t src_offset, size_t size);
+ void LoadWFromOffset(LoadOperandType type,
+ WRegister dest,
+ XRegister base,
+ int32_t offset);
+ void LoadFromOffset(XRegister dest, XRegister base, int32_t offset);
+ void LoadSFromOffset(SRegister dest, XRegister base, int32_t offset);
+ void LoadDFromOffset(DRegister dest, XRegister base, int32_t offset);
+ void AddConstant(XRegister rd,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+ void AddConstant(XRegister rd,
+ XRegister rn,
+ int32_t value,
+ vixl::aarch64::Condition cond = vixl::aarch64::al);
+
+ // List of exception blocks to generate at the end of the code cache.
+ ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
+};
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_ARM64_JNI_MACRO_ASSEMBLER_ARM64_H_
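The ExceptionPoll()/EmitExceptionPoll() pair in this change follows a deferred slow-path pattern: each poll site loads Thread::Current()->exception_ into a scratch register, branches to a per-site label when it is non-null, and records an Arm64Exception entry; FinalizeCode() later emits every recorded block after the main code, where the block fixes up the frame, moves the exception object into X0, and calls pDeliverException, which never returns. The standalone C++ sketch below only models that control flow outside of ART; it uses no ART or VIXL APIs, the class names, register choices (for example x16 as the call temporary), and printed pseudo-assembly are illustrative assumptions rather than the actual emitted code.

// Standalone sketch (not ART code): deferred exception-poll slow paths.
#include <cstddef>
#include <cstdio>
#include <memory>
#include <vector>

struct PendingExceptionBlock {
  int scratch_reg = 0;      // register that holds the loaded exception object
  size_t stack_adjust = 0;  // extra frame bytes to drop before delivering
};

class ToyAssembler {
 public:
  // Fast path: load Thread::exception_ and branch out of line if it is non-null.
  void ExceptionPoll(int scratch_reg, size_t stack_adjust) {
    auto block = std::make_unique<PendingExceptionBlock>();
    block->scratch_reg = scratch_reg;
    block->stack_adjust = stack_adjust;
    std::printf("  ldr  x%d, [tr, #exception_offset]\n", scratch_reg);
    std::printf("  cbnz x%d, slow_path_%zu\n", scratch_reg, pending_.size());
    pending_.push_back(std::move(block));
  }

  // Called once at the end: emit every recorded slow-path block.
  void FinalizeCode() {
    for (size_t i = 0; i < pending_.size(); ++i) {
      const PendingExceptionBlock& block = *pending_[i];
      std::printf("slow_path_%zu:\n", i);
      if (block.stack_adjust != 0) {
        std::printf("  add  sp, sp, #%zu\n", block.stack_adjust);  // undo the extra frame
      }
      std::printf("  mov  x0, x%d\n", block.scratch_reg);  // exception object as argument
      std::printf("  ldr  x16, [tr, #pDeliverException]\n");
      std::printf("  blr  x16\n");
      std::printf("  brk  #0\n");  // the delivery entrypoint never returns
    }
  }

 private:
  std::vector<std::unique_ptr<PendingExceptionBlock>> pending_;
};

int main() {
  ToyAssembler masm;
  std::printf("managed_to_jni_stub:\n");
  masm.ExceptionPoll(/*scratch_reg=*/15, /*stack_adjust=*/0);
  std::printf("  ret\n");
  masm.FinalizeCode();
  return 0;
}

Keeping the delivery sequence out of line this way leaves each poll site with only a load and a compare-and-branch, which is why ExceptionPoll() merely records an Arm64Exception and the full sequence is produced later by EmitExceptionPoll().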