Diffstat (limited to 'compiler/utils')
-rw-r--r--   compiler/utils/arm64/assembler_arm64.cc   138
-rw-r--r--   compiler/utils/arm64/assembler_arm64.h      23
-rw-r--r--   compiler/utils/arm64/constants_arm64.h       3
-rw-r--r--   compiler/utils/managed_register.h            8
4 files changed, 116 insertions, 56 deletions
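
Note: the diff below moves the arm64 JNI assembler from the 32-bit thread-offset interface (ThreadOffset<4>, the *Thread32 entry points) to the 64-bit one (ThreadOffset<8>, *Thread64), redirects thread-register accesses to TR1, and narrows reference loads/stores to 32-bit W-register accesses. As orientation only, here is a minimal sketch of a pointer-size-templated offset type; the name and layout are assumptions for illustration, not the actual ART declaration.

// Illustrative sketch only -- not the ART declaration. It shows the idea behind
// ThreadOffset<4> vs ThreadOffset<8>: thread-local offsets are templated on the
// target pointer size, so the 64-bit arm64 target uses the <8> variants.
#include <cstddef>
#include <cstdint>

template <size_t kPointerSize>
class ThreadOffsetSketch {
 public:
  explicit constexpr ThreadOffsetSketch(int32_t value) : value_(value) {}
  constexpr int32_t Int32Value() const { return value_; }
 private:
  int32_t value_;
};

using ThreadOffset32Sketch = ThreadOffsetSketch<4>;  // 32-bit targets (e.g. arm).
using ThreadOffset64Sketch = ThreadOffsetSketch<8>;  // 64-bit targets (e.g. arm64).
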
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index a11c2da19e..1d87eaaa60 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -50,11 +50,11 @@ void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
 }
 
 void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
-  ___ Mov(reg_x(tr.AsArm64().AsCoreRegister()), reg_x(TR));
+  ___ Mov(reg_x(tr.AsArm64().AsCoreRegister()), reg_x(TR1));
 }
 
 void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
-  StoreToOffset(TR, SP, offset.Int32Value());
+  StoreToOffset(TR1, SP, offset.Int32Value());
 }
 
 // See Arm64 PCS Section 5.2.2.1.
@@ -138,7 +138,8 @@ void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size)
 void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
   Arm64ManagedRegister src = m_src.AsArm64();
   CHECK(src.IsCoreRegister()) << src;
-  StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value());
+  StoreWToOffset(kStoreWord, src.AsOverlappingCoreRegisterLow(), SP,
+                 offs.Int32Value());
 }
 
 void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
@@ -152,30 +153,31 @@ void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsCoreRegister()) << scratch;
   LoadImmediate(scratch.AsCoreRegister(), imm);
-  StoreToOffset(scratch.AsCoreRegister(), SP, offs.Int32Value());
+  StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), SP,
+                 offs.Int32Value());
 }
 
-void Arm64Assembler::StoreImmediateToThread32(ThreadOffset<4> offs, uint32_t imm,
+void Arm64Assembler::StoreImmediateToThread64(ThreadOffset<8> offs, uint32_t imm,
                                               ManagedRegister m_scratch) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsCoreRegister()) << scratch;
   LoadImmediate(scratch.AsCoreRegister(), imm);
-  StoreToOffset(scratch.AsCoreRegister(), TR, offs.Int32Value());
+  StoreToOffset(scratch.AsCoreRegister(), TR1, offs.Int32Value());
 }
 
-void Arm64Assembler::StoreStackOffsetToThread32(ThreadOffset<4> tr_offs,
+void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
                                                 FrameOffset fr_offs,
                                                 ManagedRegister m_scratch) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsCoreRegister()) << scratch;
   AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
-  StoreToOffset(scratch.AsCoreRegister(), TR, tr_offs.Int32Value());
+  StoreToOffset(scratch.AsCoreRegister(), TR1, tr_offs.Int32Value());
 }
 
-void Arm64Assembler::StoreStackPointerToThread32(ThreadOffset<4> tr_offs) {
+void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
   // Arm64 does not support: "str sp, [dest]" therefore we use IP1 as a temp reg.
   ___ Mov(reg_x(IP1), reg_x(SP));
-  StoreToOffset(IP1, TR, tr_offs.Int32Value());
+  StoreToOffset(IP1, TR1, tr_offs.Int32Value());
 }
 
 void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
@@ -254,9 +256,13 @@ void Arm64Assembler::Load(Arm64ManagedRegister dest, Register base,
     CHECK_EQ(4u, size) << dest;
     ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
   } else if (dest.IsCoreRegister()) {
-    CHECK_EQ(8u, size) << dest;
     CHECK_NE(dest.AsCoreRegister(), SP) << dest;
-    ___ Ldr(reg_x(dest.AsCoreRegister()), MEM_OP(reg_x(base), offset));
+    if (size == 4u) {
+      ___ Ldr(reg_w(dest.AsOverlappingCoreRegisterLow()), MEM_OP(reg_x(base), offset));
+    } else {
+      CHECK_EQ(8u, size) << dest;
+      ___ Ldr(reg_x(dest.AsCoreRegister()), MEM_OP(reg_x(base), offset));
+    }
   } else if (dest.IsSRegister()) {
     ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
   } else {
@@ -269,14 +275,14 @@ void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
   return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
 }
 
-void Arm64Assembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
-  return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
+void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
+  return Load(m_dst.AsArm64(), TR1, src.Int32Value(), size);
 }
 
 void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
   Arm64ManagedRegister dst = m_dst.AsArm64();
   CHECK(dst.IsCoreRegister()) << dst;
-  LoadFromOffset(dst.AsCoreRegister(), SP, offs.Int32Value());
+  LoadWFromOffset(kLoadWord, dst.AsOverlappingCoreRegisterLow(), SP, offs.Int32Value());
 }
 
 void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base,
@@ -284,7 +290,8 @@ void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base,
   Arm64ManagedRegister dst = m_dst.AsArm64();
   Arm64ManagedRegister base = m_base.AsArm64();
   CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
-  LoadFromOffset(dst.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
+  LoadWFromOffset(kLoadWord, dst.AsOverlappingCoreRegisterLow(), base.AsCoreRegister(),
+                  offs.Int32Value());
 }
 
 void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
@@ -294,10 +301,10 @@ void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, O
   LoadFromOffset(dst.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
 }
 
-void Arm64Assembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
+void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
   Arm64ManagedRegister dst = m_dst.AsArm64();
   CHECK(dst.IsCoreRegister()) << dst;
-  LoadFromOffset(dst.AsCoreRegister(), TR, offs.Int32Value());
+  LoadFromOffset(dst.AsCoreRegister(), TR1, offs.Int32Value());
 }
 
 // Copying routines.
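
Note: the StoreRef/LoadRef/Load hunks above switch reference accesses from 64-bit X-register loads/stores to 32-bit W-register ones through AsOverlappingCoreRegisterLow(), reflecting that managed references are 32 bits wide. A hypothetical sketch of the register-overlap idea follows; the enum and helper names are illustrative stand-ins, not ART's.

// Hypothetical sketch of the "overlapping low" mapping: on AArch64, Wn is the
// low 32 bits of Xn, so an X-register enumerator maps one-to-one to a
// W-register enumerator. Enum names here are illustrative, not ART's.
#include <cassert>

enum XRegSketch { kX0, kX1, kX2, kX3, kNumXRegsSketch };
enum WRegSketch { kW0, kW1, kW2, kW3, kNumWRegsSketch };

inline WRegSketch OverlappingLowOf(XRegSketch xreg) {
  assert(xreg < kNumXRegsSketch);        // SP/XZR would need special handling.
  return static_cast<WRegSketch>(xreg);  // Same index: Wn overlaps Xn's low half.
}

A 32-bit load into Wn also zero-extends into the full Xn on AArch64, which is why later code can still form addresses through the X view (as in the SIRT handling below) after loading only the W view.
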
@@ -306,8 +313,16 @@ void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t s
   Arm64ManagedRegister src = m_src.AsArm64();
   if (!dst.Equals(src)) {
     if (dst.IsCoreRegister()) {
-      CHECK(src.IsCoreRegister()) << src;
-      ___ Mov(reg_x(dst.AsCoreRegister()), reg_x(src.AsCoreRegister()));
+      if (size == 4) {
+        CHECK(src.IsWRegister());
+        ___ Mov(reg_x(dst.AsCoreRegister()), reg_w(src.AsWRegister()));
+      } else {
+        if (src.IsCoreRegister()) {
+          ___ Mov(reg_x(dst.AsCoreRegister()), reg_x(src.AsCoreRegister()));
+        } else {
+          ___ Mov(reg_x(dst.AsCoreRegister()), reg_w(src.AsWRegister()));
+        }
+      }
     } else if (dst.IsWRegister()) {
       CHECK(src.IsWRegister()) << src;
       ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
@@ -322,40 +337,42 @@ void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t s
   }
 }
 
-void Arm64Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
-                                            ThreadOffset<4> tr_offs,
+void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
+                                            ThreadOffset<8> tr_offs,
                                             ManagedRegister m_scratch) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsCoreRegister()) << scratch;
-  LoadFromOffset(scratch.AsCoreRegister(), TR, tr_offs.Int32Value());
+  LoadFromOffset(scratch.AsCoreRegister(), TR1, tr_offs.Int32Value());
   StoreToOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
 }
 
-void Arm64Assembler::CopyRawPtrToThread32(ThreadOffset<4> tr_offs,
+void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset<8> tr_offs,
                                           FrameOffset fr_offs,
                                           ManagedRegister m_scratch) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsCoreRegister()) << scratch;
   LoadFromOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
-  StoreToOffset(scratch.AsCoreRegister(), TR, tr_offs.Int32Value());
+  StoreToOffset(scratch.AsCoreRegister(), TR1, tr_offs.Int32Value());
 }
 
 void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister m_scratch) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsCoreRegister()) << scratch;
-  LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
-  StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
+  LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(),
+                  SP, src.Int32Value());
+  StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(),
+                 SP, dest.Int32Value());
 }
 
 void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister m_scratch, size_t size) {
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
+  CHECK(scratch.IsCoreRegister()) << scratch;
   CHECK(size == 4 || size == 8) << size;
   if (size == 4) {
-    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
-    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
+    LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP, src.Int32Value());
+    StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), SP, dest.Int32Value());
   } else if (size == 8) {
     LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
     StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
@@ -418,10 +435,17 @@ void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset,
   CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
   CHECK(size == 4 || size == 8) << size;
   if (size == 4) {
-    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsCoreRegister(),
+    if (scratch.IsWRegister()) {
+      LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsCoreRegister(),
                      src_offset.Int32Value());
-    StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsCoreRegister(),
+      StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsCoreRegister(),
                     dest_offset.Int32Value());
+    } else {
+      LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), src.AsCoreRegister(),
+                      src_offset.Int32Value());
+      StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), dest.AsCoreRegister(),
+                     dest_offset.Int32Value());
+    }
   } else if (size == 8) {
     LoadFromOffset(scratch.AsCoreRegister(), src.AsCoreRegister(), src_offset.Int32Value());
     StoreToOffset(scratch.AsCoreRegister(), dest.AsCoreRegister(), dest_offset.Int32Value());
@@ -486,7 +510,7 @@ void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scrat
   ___ Blr(reg_x(scratch.AsCoreRegister()));
 }
 
-void Arm64Assembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
+void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*scratch*/) {
   UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
 }
 
@@ -502,10 +526,11 @@ void Arm64Assembler::CreateSirtEntry(ManagedRegister m_out_reg, FrameOffset sirt
   // the address in the SIRT holding the reference.
   // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
   if (in_reg.IsNoRegister()) {
-    LoadFromOffset(out_reg.AsCoreRegister(), SP, sirt_offs.Int32Value());
+    LoadWFromOffset(kLoadWord, out_reg.AsOverlappingCoreRegisterLow(), SP,
+                    sirt_offs.Int32Value());
     in_reg = out_reg;
   }
-  ___ Cmp(reg_x(in_reg.AsCoreRegister()), 0);
+  ___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
   if (!out_reg.Equals(in_reg)) {
     LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
   }
@@ -520,11 +545,12 @@ void Arm64Assembler::CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offse
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   CHECK(scratch.IsCoreRegister()) << scratch;
   if (null_allowed) {
-    LoadFromOffset(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value());
+    LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP,
+                    sirt_offset.Int32Value());
     // Null values get a SIRT entry value of 0.  Otherwise, the sirt entry is
     // the address in the SIRT holding the reference.
     // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
-    ___ Cmp(reg_x(scratch.AsCoreRegister()), 0);
+    ___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
     // Move this logic in add constants with flags.
     AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
   } else {
@@ -555,7 +581,7 @@ void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjus
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
   Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust);
   exception_blocks_.push_back(current_exception);
-  LoadFromOffset(scratch.AsCoreRegister(), TR, Thread::ExceptionOffset<4>().Int32Value());
+  LoadFromOffset(scratch.AsCoreRegister(), TR1, Thread::ExceptionOffset<8>().Int32Value());
   ___ Cmp(reg_x(scratch.AsCoreRegister()), 0);
   ___ B(current_exception->Entry(), COND_OP(NE));
 }
@@ -569,7 +595,11 @@ void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
   // Pass exception object as argument.
   // Don't care about preserving X0 as this won't return.
   ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsCoreRegister()));
-  LoadFromOffset(IP1, TR, QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
+  LoadFromOffset(IP1, TR1, QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
+
+  // FIXME: Temporary fix for TR (XSELF).
+  ___ Mov(reg_x(TR), reg_x(TR1));
+
   ___ Blr(reg_x(IP1));
   // Call should never return.
   ___ Brk();
@@ -590,6 +620,9 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
   CHECK_EQ(callee_save_regs.size(), kCalleeSavedRegsSize);
   ___ PushCalleeSavedRegisters();
 
+  // FIXME: Temporary fix for TR (XSELF).
+  ___ Mov(reg_x(TR1), reg_x(TR));
+
   // Increate frame to required size - must be at least space to push Method*.
   CHECK_GT(frame_size, kCalleeSavedRegsSize * kFramePointerSize);
   size_t adjust = frame_size - (kCalleeSavedRegsSize * kFramePointerSize);
@@ -598,11 +631,27 @@ void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
   // Write Method*.
   StoreToOffset(X0, SP, 0);
 
-  // Write out entry spills, treated as X regs.
-  // TODO: we can implement a %2 STRP variant of StoreToOffset.
+  // Write out entry spills
+  int32_t offset = frame_size + kFramePointerSize;
   for (size_t i = 0; i < entry_spills.size(); ++i) {
-    Register reg = entry_spills.at(i).AsArm64().AsCoreRegister();
-    StoreToOffset(reg, SP, frame_size + kFramePointerSize + (i * kFramePointerSize));
+    Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
+    if (reg.IsNoRegister()) {
+      // only increment stack offset.
+      ManagedRegisterSpill spill = entry_spills.at(i);
+      offset += spill.getSize();
+    } else if (reg.IsCoreRegister()) {
+      StoreToOffset(reg.AsCoreRegister(), SP, offset);
+      offset += 8;
+    } else if (reg.IsWRegister()) {
+      StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
+      offset += 4;
+    } else if (reg.IsDRegister()) {
+      StoreDToOffset(reg.AsDRegister(), SP, offset);
+      offset += 8;
+    } else if (reg.IsSRegister()) {
+      StoreSToOffset(reg.AsSRegister(), SP, offset);
+      offset += 4;
+    }
   }
 }
 
@@ -618,6 +667,9 @@ void Arm64Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedReg
   size_t adjust = frame_size - (kCalleeSavedRegsSize * kFramePointerSize);
   DecreaseFrameSize(adjust);
 
+  // FIXME: Temporary fix for TR (XSELF).
+  ___ Mov(reg_x(TR), reg_x(TR1));
+
   // Pop callee saved and return to LR.
   ___ PopCalleeSavedRegisters();
   ___ Ret();
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 8acd1f9950..97fb93af82 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -81,8 +81,8 @@ class Arm64Exception;
 
 class Arm64Assembler FINAL : public Assembler {
  public:
-  Arm64Assembler() : vixl_buf_(new byte[BUF_SIZE]),
-  vixl_masm_(new vixl::MacroAssembler(vixl_buf_, BUF_SIZE)) {}
+  Arm64Assembler() : vixl_buf_(new byte[kBufferSizeArm64]),
+  vixl_masm_(new vixl::MacroAssembler(vixl_buf_, kBufferSizeArm64)) {}
 
   virtual ~Arm64Assembler() {
     delete[] vixl_buf_;
@@ -114,27 +114,27 @@ class Arm64Assembler FINAL : public Assembler {
   void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
   void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
   void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
-  void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+  void StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm, ManagedRegister scratch)
       OVERRIDE;
-  void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+  void StoreStackOffsetToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
                                   ManagedRegister scratch) OVERRIDE;
-  void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
+  void StoreStackPointerToThread64(ThreadOffset<8> thr_offs) OVERRIDE;
   void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
                      ManagedRegister scratch) OVERRIDE;
 
   // Load routines.
   void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
-  void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
+  void LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) OVERRIDE;
   void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
   void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
   void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
-  void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
+  void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) OVERRIDE;
 
   // Copying routines.
   void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
-  void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+  void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
                               ManagedRegister scratch) OVERRIDE;
-  void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+  void CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
       OVERRIDE;
   void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
   void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
@@ -183,7 +183,7 @@ class Arm64Assembler FINAL : public Assembler {
   // Call to address held at [base+offset].
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
-  void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
+  void CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) OVERRIDE;
 
   // Jump to address (not setting link register)
   void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
@@ -234,9 +234,6 @@ class Arm64Assembler FINAL : public Assembler {
   void AddConstant(Register rd, int32_t value, Condition cond = AL);
   void AddConstant(Register rd, Register rn, int32_t value, Condition cond = AL);
 
-  // Vixl buffer size.
-  static constexpr size_t BUF_SIZE = 4096;
-
   // Vixl buffer.
   byte* vixl_buf_;
 
diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h
index ecf9fbe1d9..2a08c95654 100644
--- a/compiler/utils/arm64/constants_arm64.h
+++ b/compiler/utils/arm64/constants_arm64.h
@@ -31,6 +31,9 @@ namespace arm64 {
 
 constexpr unsigned int kCalleeSavedRegsSize = 20;
 
+// Vixl buffer size.
+constexpr size_t kBufferSizeArm64 = 4096*2;
+
 }  // arm64
 }  // art
 
diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h
index 0d31322db6..bfb2829a32 100644
--- a/compiler/utils/managed_register.h
+++ b/compiler/utils/managed_register.h
@@ -91,6 +91,9 @@ class ManagedRegisterSpill : public ManagedRegister {
   explicit ManagedRegisterSpill(const ManagedRegister& other)
       : ManagedRegister(other), size_(-1), spill_offset_(-1) { }
 
+  explicit ManagedRegisterSpill(const ManagedRegister& other, int32_t size)
+      : ManagedRegister(other), size_(size), spill_offset_(-1) { }
+
   int32_t getSpillOffset() {
     return spill_offset_;
   }
@@ -113,6 +116,11 @@ class ManagedRegisterEntrySpills : public std::vector<ManagedRegisterSpill> {
     std::vector<ManagedRegisterSpill>::push_back(spill);
   }
 
+  void push_back(ManagedRegister __x, int32_t __size) {
+    ManagedRegisterSpill spill(__x, __size);
+    std::vector<ManagedRegisterSpill>::push_back(spill);
+  }
+
   void push_back(ManagedRegisterSpill __x) {
     std::vector<ManagedRegisterSpill>::push_back(__x);
   }
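
Note: the managed_register.h hunk adds a size-aware ManagedRegisterSpill constructor and a matching push_back(reg, size) overload, which is what lets the BuildFrame loop above advance the spill offset by 8 bytes for X/D registers, 4 bytes for W/S registers, and by getSize() for an empty slot. A standalone sketch of that offset walk, using assumed example values rather than the ART types:

// Standalone sketch (not ART code) of the entry-spill offset walk in BuildFrame.
#include <cstdint>
#include <cstdio>
#include <vector>

enum class SpillKind { kNone, kCore, kW, kD, kS };

struct SpillSketch {
  SpillKind kind;
  int32_t size;  // Only consulted for kNone slots, mirroring getSize().
};

int main() {
  // Assumed example: 64-byte frame plus an 8-byte frame-pointer-sized Method* slot.
  const std::vector<SpillSketch> spills = {
      {SpillKind::kCore, 8}, {SpillKind::kW, 4}, {SpillKind::kD, 8}, {SpillKind::kNone, 4}};
  int32_t offset = 64 + 8;  // frame_size + kFramePointerSize.
  for (const SpillSketch& s : spills) {
    if (s.kind == SpillKind::kNone) {
      offset += s.size;  // Empty slot: only advance the offset.
      continue;
    }
    std::printf("store spill at [SP, #%d]\n", static_cast<int>(offset));
    offset += (s.kind == SpillKind::kCore || s.kind == SpillKind::kD) ? 8 : 4;
  }
  return 0;
}
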