Diffstat (limited to 'compiler/utils')
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc         |  6
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64.cc       | 44
-rw-r--r--  compiler/utils/x86_64/assembler_x86_64_test.cc  |  8
3 files changed, 42 insertions, 16 deletions
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 009b227209..5b97ba0a02 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -595,8 +595,7 @@ void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
     // FIXME: Who sets the flags here?
     LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
   }
-  ___ Cmp(reg_x(in_reg.AsCoreRegister()), 0);
-  ___ B(&exit, COND_OP(EQ));
+  ___ Cbz(reg_x(in_reg.AsCoreRegister()), &exit);
   LoadFromOffset(out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0);
   ___ Bind(&exit);
 }
@@ -607,8 +606,7 @@ void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjus
   Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust);
   exception_blocks_.push_back(current_exception);
   LoadFromOffset(scratch.AsCoreRegister(), ETR, Thread::ExceptionOffset<8>().Int32Value());
-  ___ Cmp(reg_x(scratch.AsCoreRegister()), 0);
-  ___ B(current_exception->Entry(), COND_OP(NE));
+  ___ Cbnz(reg_x(scratch.AsCoreRegister()), current_exception->Entry());
 }
 
 void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 4d5d613015..78738d8934 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1671,16 +1671,31 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                  const std::vector<ManagedRegister>& spill_regs,
                                  const ManagedRegisterEntrySpills& entry_spills) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
+  int gpr_count = 0;
   for (int i = spill_regs.size() - 1; i >= 0; --i) {
-    pushq(spill_regs.at(i).AsX86_64().AsCpuRegister());
+    x86_64::X86_64ManagedRegister spill = spill_regs.at(i).AsX86_64();
+    if (spill.IsCpuRegister()) {
+      pushq(spill.AsCpuRegister());
+      gpr_count++;
+    }
   }
   // return address then method on stack
-  addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(frame_size) + (spill_regs.size() * kFramePointerSize) +
-                                   sizeof(StackReference<mirror::ArtMethod>) /*method*/ +
-                                   kFramePointerSize /*return address*/));
+  int64_t rest_of_frame = static_cast<int64_t>(frame_size)
+                          - (gpr_count * kFramePointerSize)
+                          - kFramePointerSize /*return address*/;
+  subq(CpuRegister(RSP), Immediate(rest_of_frame));
+  // spill xmms
+  int64_t offset = rest_of_frame;
+  for (int i = spill_regs.size() - 1; i >= 0; --i) {
+    x86_64::X86_64ManagedRegister spill = spill_regs.at(i).AsX86_64();
+    if (spill.IsXmmRegister()) {
+      offset -= sizeof(double);
+      movsd(Address(CpuRegister(RSP), offset), spill.AsXmmRegister());
+    }
+  }
 
   DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
-  subq(CpuRegister(RSP), Immediate(4));
+
   movl(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
 
   for (size_t i = 0; i < entry_spills.size(); ++i) {
@@ -1707,9 +1722,24 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
 void X86_64Assembler::RemoveFrame(size_t frame_size,
                             const std::vector<ManagedRegister>& spill_regs) {
   CHECK_ALIGNED(frame_size, kStackAlignment);
-  addq(CpuRegister(RSP), Immediate(static_cast<int64_t>(frame_size) - (spill_regs.size() * kFramePointerSize) - kFramePointerSize));
+  int gpr_count = 0;
+  // unspill xmms
+  int64_t offset = static_cast<int64_t>(frame_size) - (spill_regs.size() * kFramePointerSize) - 2 * kFramePointerSize;
   for (size_t i = 0; i < spill_regs.size(); ++i) {
-    popq(spill_regs.at(i).AsX86_64().AsCpuRegister());
+    x86_64::X86_64ManagedRegister spill = spill_regs.at(i).AsX86_64();
+    if (spill.IsXmmRegister()) {
+      offset += sizeof(double);
+      movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
+    } else {
+      gpr_count++;
+    }
+  }
+  addq(CpuRegister(RSP), Immediate(static_cast<int64_t>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize));
+  for (size_t i = 0; i < spill_regs.size(); ++i) {
+    x86_64::X86_64ManagedRegister spill = spill_regs.at(i).AsX86_64();
+    if (spill.IsCpuRegister()) {
+      popq(spill.AsCpuRegister());
+    }
   }
   ret();
 }
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index f7bad8b057..dc1758ffdf 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -246,11 +246,9 @@ std::string buildframe_test_fn(x86_64::X86_64Assembler* assembler) {
   str << "pushq %rsi\n";
   str << "pushq %r10\n";
   // 2) Move down the stack pointer.
-  ssize_t displacement = -static_cast<ssize_t>(frame_size) + spill_regs.size() * 8 +
-      sizeof(StackReference<mirror::ArtMethod>) + 8;
-  str << "addq $" << displacement << ", %rsp\n";
-  // 3) Make space for method reference, and store it.
-  str << "subq $4, %rsp\n";
+  ssize_t displacement = static_cast<ssize_t>(frame_size) - (spill_regs.size() * 8 + 8);
+  str << "subq $" << displacement << ", %rsp\n";
+  // 3) Store method reference.
   str << "movl %edi, (%rsp)\n";
   // 4) Entry spills.
   str << "movq %rax, " << frame_size + 0 << "(%rsp)\n";
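As background for the x86_64 change: BuildFrame now pushes only the callee-save GPRs, lowers RSP by the rest of the frame with a single subq (the return address pushed by the call covers the remaining 8-byte slot), stores the XMM spills just below the pushed GPRs, and writes the 4-byte StackReference<mirror::ArtMethod> at (%rsp). The sketch below is a minimal standalone reconstruction of that arithmetic, not ART code: the Spill struct stands in for X86_64ManagedRegister, the register names in main() are arbitrary, and kFramePointerSize is assumed to be 8.

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr int64_t kFramePointerSize = 8;  // assumed x86_64 value

struct Spill {
  const char* name;  // AT&T-syntax register name, e.g. "%rbx" or "%xmm12"
  bool is_xmm;
};

void BuildFrameSketch(int64_t frame_size, const std::vector<Spill>& spills) {
  // 1) Push callee-save GPRs, highest index first, counting them as we go.
  int gpr_count = 0;
  for (auto it = spills.rbegin(); it != spills.rend(); ++it) {
    if (!it->is_xmm) {
      std::printf("pushq %s\n", it->name);
      ++gpr_count;
    }
  }
  // 2) One subq covers everything the pushes and the return address did not.
  int64_t rest_of_frame =
      frame_size - gpr_count * kFramePointerSize - kFramePointerSize;
  std::printf("subq $%lld, %%rsp\n", static_cast<long long>(rest_of_frame));
  // 3) XMM spills sit just below the pushed GPRs, 8 bytes apiece.
  int64_t offset = rest_of_frame;
  for (auto it = spills.rbegin(); it != spills.rend(); ++it) {
    if (it->is_xmm) {
      offset -= static_cast<int64_t>(sizeof(double));
      std::printf("movsd %s, %lld(%%rsp)\n", it->name,
                  static_cast<long long>(offset));
    }
  }
  // 4) The 4-byte method reference lands at the bottom of the frame.
  std::printf("movl <method>, (%%rsp)\n");
}

int main() {
  // Hypothetical example: a 64-byte frame spilling RBX, RBP and XMM12.
  BuildFrameSketch(64, {{"%rbx", false}, {"%rbp", false}, {"%xmm12", true}});
  return 0;
}

For that example the sketch prints pushq %rbp, pushq %rbx, subq $40, a movsd to 32(%rsp), and the movl of the method reference: 16 bytes of pushes plus the 40-byte subq plus the 8-byte return address account for the full 64-byte frame.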
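The teardown mirrors this. Continuing the sketch above (it reuses the Spill struct and kFramePointerSize), RemoveFrame reloads the XMM registers from the top of the frame, where a starting offset of frame_size - spill_regs.size() * 8 - 16 plus one 8-byte step per XMM lands exactly on the slots the prologue filled, then releases the non-GPR part of the frame and pops the GPRs in the reverse of their push order:

void RemoveFrameSketch(int64_t frame_size, const std::vector<Spill>& spills) {
  int gpr_count = 0;
  // 1) Reload XMM registers from the top of the frame; the starting offset is
  //    one 8-byte slot below the first XMM slot so the pre-increment lands on it.
  int64_t offset = frame_size -
      static_cast<int64_t>(spills.size()) * kFramePointerSize -
      2 * kFramePointerSize;
  for (const Spill& spill : spills) {
    if (spill.is_xmm) {
      offset += static_cast<int64_t>(sizeof(double));
      std::printf("movsd %lld(%%rsp), %s\n",
                  static_cast<long long>(offset), spill.name);
    } else {
      ++gpr_count;
    }
  }
  // 2) Release everything except the pushed GPR slots, then pop the GPRs.
  std::printf("addq $%lld, %%rsp\n",
              static_cast<long long>(frame_size -
                                     gpr_count * kFramePointerSize -
                                     kFramePointerSize));
  for (const Spill& spill : spills) {
    if (!spill.is_xmm) {
      std::printf("popq %s\n", spill.name);
    }
  }
  std::printf("ret\n");
}

Called with the same hypothetical arguments as above, this prints movsd 32(%rsp), %xmm12, addq $40, popq %rbx, popq %rbp and ret, matching the 32(%rsp) store and the push order of the prologue sketch.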