From 166db04e259ca51838c311891598664deeed85ad Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Fri, 26 Jul 2013 12:05:57 -0700 Subject: Move assembler out of runtime into compiler/utils. Other directory layout bits of clean up. There is still work to separate quick and portable in some files (e.g. argument visitor, proxy..). Change-Id: If8fecffda8ba5c4c47a035f0c622c538c6b58351 --- compiler/utils/mips/assembler_mips.cc | 999 ++++++++++++++++++++++++++++++++++ 1 file changed, 999 insertions(+) create mode 100644 compiler/utils/mips/assembler_mips.cc (limited to 'compiler/utils/mips/assembler_mips.cc') diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc new file mode 100644 index 0000000000..58815da1b8 --- /dev/null +++ b/compiler/utils/mips/assembler_mips.cc @@ -0,0 +1,999 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "assembler_mips.h" + +#include "base/casts.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "memory_region.h" +#include "thread.h" + +namespace art { +namespace mips { +#if 0 +class DirectCallRelocation : public AssemblerFixup { + public: + void Process(const MemoryRegion& region, int position) { + // Direct calls are relative to the following instruction on mips. 
+    int32_t pointer = region.Load<int32_t>(position);
+    int32_t start = reinterpret_cast<int32_t>(region.start());
+    int32_t delta = start + position + sizeof(int32_t);
+    region.Store<int32_t>(position, pointer - delta);
+  }
+};
+#endif
+
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
+  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
+    os << "d" << static_cast<int>(rhs);
+  } else {
+    os << "DRegister[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+void MipsAssembler::Emit(int32_t value) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  buffer_.Emit<int32_t>(value);
+}
+
+void MipsAssembler::EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct) {
+  CHECK_NE(rs, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rd, kNoRegister);
+  int32_t encoding = opcode << kOpcodeShift |
+                     static_cast<int32_t>(rs) << kRsShift |
+                     static_cast<int32_t>(rt) << kRtShift |
+                     static_cast<int32_t>(rd) << kRdShift |
+                     shamt << kShamtShift |
+                     funct;
+  Emit(encoding);
+}
+
+void MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) {
+  CHECK_NE(rs, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  int32_t encoding = opcode << kOpcodeShift |
+                     static_cast<int32_t>(rs) << kRsShift |
+                     static_cast<int32_t>(rt) << kRtShift |
+                     imm;
+  Emit(encoding);
+}
+
+void MipsAssembler::EmitJ(int opcode, int address) {
+  int32_t encoding = opcode << kOpcodeShift |
+                     address;
+  Emit(encoding);
+}
+
+void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct) {
+  CHECK_NE(ft, kNoFRegister);
+  CHECK_NE(fs, kNoFRegister);
+  CHECK_NE(fd, kNoFRegister);
+  int32_t encoding = opcode << kOpcodeShift |
+                     fmt << kFmtShift |
+                     static_cast<int32_t>(ft) << kFtShift |
+                     static_cast<int32_t>(fs) << kFsShift |
+                     static_cast<int32_t>(fd) << kFdShift |
+                     funct;
+  Emit(encoding);
+}
+
+void MipsAssembler::EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm) {
+  CHECK_NE(rt, kNoFRegister);
+  int32_t encoding = opcode << kOpcodeShift |
+                     fmt << kFmtShift |
+                     static_cast<int32_t>(rt) << kRtShift |
+                     imm;
+  Emit(encoding);
+}
+
+void 
MipsAssembler::EmitBranch(Register rt, Register rs, Label* label, bool equal) {
+  int offset;
+  if (label->IsBound()) {
+    offset = label->Position() - buffer_.Size();
+  } else {
+    // Use the offset field of the branch instruction for linking the sites.
+    offset = label->position_;
+    label->LinkTo(buffer_.Size());
+  }
+  if (equal) {
+    Beq(rt, rs, (offset >> 2) & kBranchOffsetMask);
+  } else {
+    Bne(rt, rs, (offset >> 2) & kBranchOffsetMask);
+  }
+}
+
+void MipsAssembler::EmitJump(Label* label, bool link) {
+  int offset;
+  if (label->IsBound()) {
+    offset = label->Position() - buffer_.Size();
+  } else {
+    // Use the offset field of the jump instruction for linking the sites.
+    offset = label->position_;
+    label->LinkTo(buffer_.Size());
+  }
+  if (link) {
+    Jal((offset >> 2) & kJumpOffsetMask);
+  } else {
+    J((offset >> 2) & kJumpOffsetMask);
+  }
+}
+
+int32_t MipsAssembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
+  CHECK_ALIGNED(offset, 4);
+  CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset;
+
+  // Properly preserve only the bits supported in the instruction.
+  offset >>= 2;
+  if (is_jump) {
+    offset &= kJumpOffsetMask;
+    return (inst & ~kJumpOffsetMask) | offset;
+  } else {
+    offset &= kBranchOffsetMask;
+    return (inst & ~kBranchOffsetMask) | offset;
+  }
+}
+
+int MipsAssembler::DecodeBranchOffset(int32_t inst, bool is_jump) {
+  // Sign-extend, then left-shift by 2.
+  if (is_jump) {
+    return (((inst & kJumpOffsetMask) << 6) >> 4);
+  } else {
+    return (((inst & kBranchOffsetMask) << 16) >> 14);
+  }
+}
+
+void MipsAssembler::Bind(Label* label, bool is_jump) {
+  CHECK(!label->IsBound());
+  int bound_pc = buffer_.Size();
+  while (label->IsLinked()) {
+    int32_t position = label->Position();
+    int32_t next = buffer_.Load<int32_t>(position);
+    int32_t offset = is_jump ?
bound_pc - position : bound_pc - position - 4;
+    int32_t encoded = MipsAssembler::EncodeBranchOffset(offset, next, is_jump);
+    buffer_.Store<int32_t>(position, encoded);
+    label->position_ = MipsAssembler::DecodeBranchOffset(next, is_jump);
+  }
+  label->BindTo(bound_pc);
+}
+
+void MipsAssembler::Add(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x20);
+}
+
+void MipsAssembler::Addu(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x21);
+}
+
+void MipsAssembler::Addi(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x8, rs, rt, imm16);
+}
+
+void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x9, rs, rt, imm16);
+}
+
+void MipsAssembler::Sub(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x22);
+}
+
+void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x23);
+}
+
+void MipsAssembler::Mult(Register rs, Register rt) {
+  EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x18);
+}
+
+void MipsAssembler::Multu(Register rs, Register rt) {
+  EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x19);
+}
+
+void MipsAssembler::Div(Register rs, Register rt) {
+  EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1a);
+}
+
+void MipsAssembler::Divu(Register rs, Register rt) {
+  EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1b);
+}
+
+void MipsAssembler::And(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x24);
+}
+
+void MipsAssembler::Andi(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0xc, rs, rt, imm16);
+}
+
+void MipsAssembler::Or(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x25);
+}
+
+void MipsAssembler::Ori(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0xd, rs, rt, imm16);
+}
+
+void MipsAssembler::Xor(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x26);
+}
+
+void MipsAssembler::Xori(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0xe, rs, rt, imm16);
+}
+
+void MipsAssembler::Nor(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x27);
+}
+
+void MipsAssembler::Sll(Register rd, Register rs, int shamt) {
+  EmitR(0, rs, static_cast<Register>(0), rd, shamt, 0x00);
+}
+
+void MipsAssembler::Srl(Register rd, Register rs, int shamt) {
+  EmitR(0, rs, static_cast<Register>(0), rd, shamt, 0x02);
+}
+
+void MipsAssembler::Sra(Register rd, Register rs, int shamt) {
+  EmitR(0, rs, static_cast<Register>(0), rd, shamt, 0x03);
+}
+
+void MipsAssembler::Sllv(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x04);
+}
+
+void MipsAssembler::Srlv(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x06);
+}
+
+void MipsAssembler::Srav(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x07);
+}
+
+void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x20, rs, rt, imm16);
+}
+
+void MipsAssembler::Lh(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x21, rs, rt, imm16);
+}
+
+void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x23, rs, rt, imm16);
+}
+
+void MipsAssembler::Lbu(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x24, rs, rt, imm16);
+}
+
+void MipsAssembler::Lhu(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x25, rs, rt, imm16);
+}
+
+void MipsAssembler::Lui(Register rt, uint16_t imm16) {
+  EmitI(0xf, static_cast<Register>(0), rt, imm16);
+}
+
+void MipsAssembler::Mfhi(Register rd) {
+  EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x10);
+}
+
+void MipsAssembler::Mflo(Register rd) {
+  EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rd, 0, 0x12);
+}
+
+void MipsAssembler::Sb(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x28, rs, rt, imm16);
+}
+
+void MipsAssembler::Sh(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x29, rs, rt, imm16);
+}
+
+void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x2b, rs, rt, imm16);
+}
+
+void MipsAssembler::Slt(Register rd, Register rs, Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x2a);
+}
+
+void MipsAssembler::Sltu(Register rd, Register rs,
Register rt) {
+  EmitR(0, rs, rt, rd, 0, 0x2b);
+}
+
+void MipsAssembler::Slti(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0xa, rs, rt, imm16);
+}
+
+void MipsAssembler::Sltiu(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0xb, rs, rt, imm16);
+}
+
+void MipsAssembler::Beq(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x4, rs, rt, imm16);
+  Nop();
+}
+
+void MipsAssembler::Bne(Register rt, Register rs, uint16_t imm16) {
+  EmitI(0x5, rs, rt, imm16);
+  Nop();
+}
+
+void MipsAssembler::J(uint32_t address) {
+  EmitJ(0x2, address);
+  Nop();
+}
+
+void MipsAssembler::Jal(uint32_t address) {
+  EmitJ(0x3, address);  // JAL opcode is 0x3; 0x2 is J (link-less jump).
+  Nop();
+}
+
+void MipsAssembler::Jr(Register rs) {
+  EmitR(0, rs, static_cast<Register>(0), static_cast<Register>(0), 0, 0x08);
+  Nop();
+}
+
+void MipsAssembler::Jalr(Register rs) {
+  EmitR(0, rs, static_cast<Register>(0), RA, 0, 0x09);
+  Nop();
+}
+
+void MipsAssembler::AddS(FRegister fd, FRegister fs, FRegister ft) {
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
+}
+
+void MipsAssembler::SubS(FRegister fd, FRegister fs, FRegister ft) {
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
+}
+
+void MipsAssembler::MulS(FRegister fd, FRegister fs, FRegister ft) {
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
+}
+
+void MipsAssembler::DivS(FRegister fd, FRegister fs, FRegister ft) {
+  EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
+}
+
+void MipsAssembler::AddD(DRegister fd, DRegister fs, DRegister ft) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+         static_cast<FRegister>(fd), 0x0);
+}
+
+void MipsAssembler::SubD(DRegister fd, DRegister fs, DRegister ft) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+         static_cast<FRegister>(fd), 0x1);
+}
+
+void MipsAssembler::MulD(DRegister fd, DRegister fs, DRegister ft) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+         static_cast<FRegister>(fd), 0x2);
+}
+
+void MipsAssembler::DivD(DRegister fd, DRegister fs, DRegister ft) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(ft), static_cast<FRegister>(fs),
+         static_cast<FRegister>(fd), 0x3);
+}
+
+void MipsAssembler::MovS(FRegister fd, FRegister fs) {
+  
EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x6);
+}
+
+void MipsAssembler::MovD(DRegister fd, DRegister fs) {
+  EmitFR(0x11, 0x11, static_cast<FRegister>(0), static_cast<FRegister>(fs),
+         static_cast<FRegister>(fd), 0x6);
+}
+
+void MipsAssembler::Mfc1(Register rt, FRegister fs) {
+  EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
+}
+
+void MipsAssembler::Mtc1(FRegister ft, Register rs) {
+  EmitFR(0x11, 0x04, ft, static_cast<FRegister>(rs), static_cast<FRegister>(0), 0x0);
+}
+
+void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) {
+  EmitI(0x31, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Ldc1(DRegister ft, Register rs, uint16_t imm16) {
+  EmitI(0x35, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) {
+  EmitI(0x39, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Sdc1(DRegister ft, Register rs, uint16_t imm16) {
+  EmitI(0x3d, rs, static_cast<Register>(ft), imm16);
+}
+
+void MipsAssembler::Break() {
+  EmitR(0, static_cast<Register>(0), static_cast<Register>(0),
+        static_cast<Register>(0), 0, 0xD);
+}
+
+void MipsAssembler::Nop() {
+  EmitR(0x0, static_cast<Register>(0), static_cast<Register>(0), static_cast<Register>(0), 0, 0x0);
+}
+
+void MipsAssembler::Move(Register rt, Register rs) {
+  EmitI(0x8, rs, rt, 0);
+}
+
+void MipsAssembler::Clear(Register rt) {
+  EmitR(0, static_cast<Register>(0), static_cast<Register>(0), rt, 0, 0x20);
+}
+
+void MipsAssembler::Not(Register rt, Register rs) {
+  EmitR(0, static_cast<Register>(0), rs, rt, 0, 0x27);
+}
+
+void MipsAssembler::Mul(Register rd, Register rs, Register rt) {
+  Mult(rs, rt);
+  Mflo(rd);
+}
+
+void MipsAssembler::Div(Register rd, Register rs, Register rt) {
+  Div(rs, rt);
+  Mflo(rd);
+}
+
+void MipsAssembler::Rem(Register rd, Register rs, Register rt) {
+  Div(rs, rt);
+  Mfhi(rd);
+}
+
+void MipsAssembler::AddConstant(Register rt, Register rs, int32_t value) {
+  Addi(rt, rs, value);
+}
+
+void MipsAssembler::LoadImmediate(Register rt, int32_t value) {
+  Addi(rt, ZERO, value);
+}
+
+void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t
src_offset, + size_t size) { + MipsManagedRegister dst = m_dst.AsMips(); + if (dst.IsNoRegister()) { + CHECK_EQ(0u, size) << dst; + } else if (dst.IsCoreRegister()) { + CHECK_EQ(4u, size) << dst; + LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset); + } else if (dst.IsRegisterPair()) { + CHECK_EQ(8u, size) << dst; + LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset); + LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4); + } else if (dst.IsFRegister()) { + LoadSFromOffset(dst.AsFRegister(), src_register, src_offset); + } else { + CHECK(dst.IsDRegister()) << dst; + LoadDFromOffset(dst.AsDRegister(), src_register, src_offset); + } +} + +void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base, + int32_t offset) { + switch (type) { + case kLoadSignedByte: + Lb(reg, base, offset); + break; + case kLoadUnsignedByte: + Lbu(reg, base, offset); + break; + case kLoadSignedHalfword: + Lh(reg, base, offset); + break; + case kLoadUnsignedHalfword: + Lhu(reg, base, offset); + break; + case kLoadWord: + Lw(reg, base, offset); + break; + case kLoadWordPair: + LOG(FATAL) << "UNREACHABLE"; + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } +} + +void MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) { + Lwc1(reg, base, offset); +} + +void MipsAssembler::LoadDFromOffset(DRegister reg, Register base, int32_t offset) { + Ldc1(reg, base, offset); +} + +void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register base, + int32_t offset) { + switch (type) { + case kStoreByte: + Sb(reg, base, offset); + break; + case kStoreHalfword: + Sh(reg, base, offset); + break; + case kStoreWord: + Sw(reg, base, offset); + break; + case kStoreWordPair: + LOG(FATAL) << "UNREACHABLE"; + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } +} + +void MipsAssembler::StoreFToOffset(FRegister reg, Register base, int32_t offset) { + Swc1(reg, base, 
offset); +} + +void MipsAssembler::StoreDToOffset(DRegister reg, Register base, int32_t offset) { + Sdc1(reg, base, offset); +} + +void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, + const std::vector& callee_save_regs, + const std::vector& entry_spills) { + CHECK_ALIGNED(frame_size, kStackAlignment); + + // Increase frame to required size. + IncreaseFrameSize(frame_size); + + // Push callee saves and return address + int stack_offset = frame_size - kPointerSize; + StoreToOffset(kStoreWord, RA, SP, stack_offset); + for (int i = callee_save_regs.size() - 1; i >= 0; --i) { + stack_offset -= kPointerSize; + Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister(); + StoreToOffset(kStoreWord, reg, SP, stack_offset); + } + + // Write out Method*. + StoreToOffset(kStoreWord, method_reg.AsMips().AsCoreRegister(), SP, 0); + + // Write out entry spills. + for (size_t i = 0; i < entry_spills.size(); ++i) { + Register reg = entry_spills.at(i).AsMips().AsCoreRegister(); + StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize)); + } +} + +void MipsAssembler::RemoveFrame(size_t frame_size, + const std::vector& callee_save_regs) { + CHECK_ALIGNED(frame_size, kStackAlignment); + + // Pop callee saves and return address + int stack_offset = frame_size - (callee_save_regs.size() * kPointerSize) - kPointerSize; + for (size_t i = 0; i < callee_save_regs.size(); ++i) { + Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister(); + LoadFromOffset(kLoadWord, reg, SP, stack_offset); + stack_offset += kPointerSize; + } + LoadFromOffset(kLoadWord, RA, SP, stack_offset); + + // Decrease frame to required size. + DecreaseFrameSize(frame_size); + + // Then jump to the return address. 
+ Jr(RA); +} + +void MipsAssembler::IncreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + AddConstant(SP, SP, -adjust); +} + +void MipsAssembler::DecreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + AddConstant(SP, SP, adjust); +} + +void MipsAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) { + MipsManagedRegister src = msrc.AsMips(); + if (src.IsNoRegister()) { + CHECK_EQ(0u, size); + } else if (src.IsCoreRegister()) { + CHECK_EQ(4u, size); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); + } else if (src.IsRegisterPair()) { + CHECK_EQ(8u, size); + StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value()); + StoreToOffset(kStoreWord, src.AsRegisterPairHigh(), + SP, dest.Int32Value() + 4); + } else if (src.IsFRegister()) { + StoreFToOffset(src.AsFRegister(), SP, dest.Int32Value()); + } else { + CHECK(src.IsDRegister()); + StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value()); + } +} + +void MipsAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { + MipsManagedRegister src = msrc.AsMips(); + CHECK(src.IsCoreRegister()); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { + MipsManagedRegister src = msrc.AsMips(); + CHECK(src.IsCoreRegister()); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadImmediate(scratch.AsCoreRegister(), imm); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << 
scratch; + LoadImmediate(scratch.AsCoreRegister(), imm); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value()); +} + +void MipsAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), + S1, thr_offs.Int32Value()); +} + +void MipsAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) { + StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value()); +} + +void MipsAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc, + FrameOffset in_off, ManagedRegister mscratch) { + MipsManagedRegister src = msrc.AsMips(); + MipsManagedRegister scratch = mscratch.AsMips(); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4); +} + +void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { + return EmitLoad(mdest, SP, src.Int32Value(), size); +} + +void MipsAssembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) { + return EmitLoad(mdest, S1, src.Int32Value(), size); +} + +void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister()); + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value()); +} + +void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, + MemberOffset offs) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()); + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), + base.AsMips().AsCoreRegister(), offs.Int32Value()); +} + +void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister 
base, + Offset offs) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()) << dest; + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), + base.AsMips().AsCoreRegister(), offs.Int32Value()); +} + +void MipsAssembler::LoadRawPtrFromThread(ManagedRegister mdest, + ThreadOffset offs) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister()); + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value()); +} + +void MipsAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips"; +} + +void MipsAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips"; +} + +void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*size*/) { + MipsManagedRegister dest = mdest.AsMips(); + MipsManagedRegister src = msrc.AsMips(); + if (!dest.Equals(src)) { + if (dest.IsCoreRegister()) { + CHECK(src.IsCoreRegister()) << src; + Move(dest.AsCoreRegister(), src.AsCoreRegister()); + } else if (dest.IsFRegister()) { + CHECK(src.IsFRegister()) << src; + MovS(dest.AsFRegister(), src.AsFRegister()); + } else if (dest.IsDRegister()) { + CHECK(src.IsDRegister()) << src; + MovD(dest.AsDRegister(), src.AsDRegister()); + } else { + CHECK(dest.IsRegisterPair()) << dest; + CHECK(src.IsRegisterPair()) << src; + // Ensure that the first move doesn't clobber the input of the second + if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) { + Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow()); + Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh()); + } else { + Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh()); + Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow()); + } + } + } +} + +void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + 
CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset thr_offs, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + S1, thr_offs.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), + SP, fr_offs.Int32Value()); +} + +void MipsAssembler::CopyRawPtrToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + SP, fr_offs.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), + S1, thr_offs.Int32Value()); +} + +void MipsAssembler::Copy(FrameOffset dest, FrameOffset src, + ManagedRegister mscratch, size_t size) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); + } else if (size == 8) { + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4); + } +} + +void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, + ManagedRegister mscratch, size_t size) { + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + LoadFromOffset(kLoadWord, scratch, 
src_base.AsMips().AsCoreRegister(), src_offset.Int32Value()); + StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value()); +} + +void MipsAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, + ManagedRegister mscratch, size_t size) { + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch, dest_base.AsMips().AsCoreRegister(), dest_offset.Int32Value()); +} + +void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/, + ManagedRegister /*mscratch*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no arm implementation"; +#if 0 + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + movl(scratch, Address(ESP, src_base)); + movl(scratch, Address(scratch, src_offset)); + movl(Address(ESP, dest), scratch); +#endif +} + +void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset, + ManagedRegister src, Offset src_offset, + ManagedRegister mscratch, size_t size) { + CHECK_EQ(size, 4u); + Register scratch = mscratch.AsMips().AsCoreRegister(); + LoadFromOffset(kLoadWord, scratch, src.AsMips().AsCoreRegister(), src_offset.Int32Value()); + StoreToOffset(kStoreWord, scratch, dest.AsMips().AsCoreRegister(), dest_offset.Int32Value()); +} + +void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/, + ManagedRegister /*mscratch*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no arm implementation"; +#if 0 + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + CHECK_EQ(dest.Int32Value(), src.Int32Value()); + movl(scratch, Address(ESP, src)); + pushl(Address(scratch, src_offset)); + popl(Address(scratch, dest_offset)); +#endif +} + +void MipsAssembler::MemoryBarrier(ManagedRegister) { + UNIMPLEMENTED(FATAL) << "NEEDS TO BE IMPLEMENTED"; +#if 0 +#if ANDROID_SMP != 0 + mfence(); +#endif +#endif 
+} + +void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg, + FrameOffset sirt_offset, + ManagedRegister min_reg, bool null_allowed) { + MipsManagedRegister out_reg = mout_reg.AsMips(); + MipsManagedRegister in_reg = min_reg.AsMips(); + CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg; + CHECK(out_reg.IsCoreRegister()) << out_reg; + if (null_allowed) { + Label null_arg; + // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is + // the address in the SIRT holding the reference. + // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset) + if (in_reg.IsNoRegister()) { + LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), + SP, sirt_offset.Int32Value()); + in_reg = out_reg; + } + if (!out_reg.Equals(in_reg)) { + LoadImmediate(out_reg.AsCoreRegister(), 0); + } + EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true); + AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value()); + Bind(&null_arg, false); + } else { + AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value()); + } +} + +void MipsAssembler::CreateSirtEntry(FrameOffset out_off, + FrameOffset sirt_offset, + ManagedRegister mscratch, + bool null_allowed) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + if (null_allowed) { + Label null_arg; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, + sirt_offset.Int32Value()); + // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is + // the address in the SIRT holding the reference. + // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset) + EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true); + AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value()); + Bind(&null_arg, false); + } else { + AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value()); + } + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value()); +} + +// Given a SIRT entry, load the associated reference. 
+void MipsAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg, + ManagedRegister min_reg) { + MipsManagedRegister out_reg = mout_reg.AsMips(); + MipsManagedRegister in_reg = min_reg.AsMips(); + CHECK(out_reg.IsCoreRegister()) << out_reg; + CHECK(in_reg.IsCoreRegister()) << in_reg; + Label null_arg; + if (!out_reg.Equals(in_reg)) { + LoadImmediate(out_reg.AsCoreRegister(), 0); + } + EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true); + LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), + in_reg.AsCoreRegister(), 0); + Bind(&null_arg, false); +} + +void MipsAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { + // TODO: not validating references +} + +void MipsAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { + // TODO: not validating references +} + +void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) { + MipsManagedRegister base = mbase.AsMips(); + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(base.IsCoreRegister()) << base; + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + base.AsCoreRegister(), offset.Int32Value()); + Jalr(scratch.AsCoreRegister()); + // TODO: place reference map on call +} + +void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + // Call *(*(SP + base) + offset) + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + SP, base.Int32Value()); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + scratch.AsCoreRegister(), offset.Int32Value()); + Jalr(scratch.AsCoreRegister()); + // TODO: place reference map on call +} + +void MipsAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*mscratch*/) { + UNIMPLEMENTED(FATAL) << "no arm implementation"; +#if 0 + fs()->call(Address::Absolute(offset)); +#endif +} + +void MipsAssembler::GetCurrentThread(ManagedRegister 
tr) {
+  Move(tr.AsMips().AsCoreRegister(), S1);
+}
+
+void MipsAssembler::GetCurrentThread(FrameOffset offset,
+                                     ManagedRegister /*mscratch*/) {
+  StoreToOffset(kStoreWord, S1, SP, offset.Int32Value());
+}
+
+void MipsAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
+  MipsManagedRegister scratch = mscratch.AsMips();
+  MipsExceptionSlowPath* slow = new MipsExceptionSlowPath(scratch, stack_adjust);
+  buffer_.EnqueueSlowPath(slow);
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 S1, Thread::ExceptionOffset().Int32Value());
+  EmitBranch(scratch.AsCoreRegister(), ZERO, slow->Entry(), false);
+}
+
+void MipsExceptionSlowPath::Emit(Assembler* sasm) {
+  MipsAssembler* sp_asm = down_cast<MipsAssembler*>(sasm);
+#define __ sp_asm->
+  __ Bind(&entry_, false);
+  if (stack_adjust_ != 0) {  // Fix up the frame.
+    __ DecreaseFrameSize(stack_adjust_);
+  }
+  // Pass exception object as argument
+  // Don't care about preserving A0 as this call won't return
+  __ Move(A0, scratch_.AsCoreRegister());
+  // Set up call to Thread::Current()->pDeliverException
+  __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pDeliverException));
+  __ Jr(T9);
+  // Call never returns
+  __ Break();
+#undef __
+}
+
+}  // namespace mips
+}  // namespace art
-- 
cgit v1.2.3-59-g8ed1b


From 7655f29fabc0a12765de828914a18314382e5a35 Mon Sep 17 00:00:00 2001
From: Ian Rogers <irogers@google.com>
Date: Mon, 29 Jul 2013 11:07:13 -0700
Subject: Portable refactorings.

Separate quick from portable entrypoints.
Move architectural dependencies into arch.
Change-Id: I9adbc0a9782e2959fdc3308215f01e3107632b7c --- build/Android.gtest.mk | 2 +- compiler/dex/quick/arm/call_arm.cc | 6 +- compiler/dex/quick/arm/fp_arm.cc | 16 +- compiler/dex/quick/arm/int_arm.cc | 4 +- compiler/dex/quick/gen_common.cc | 101 +-- compiler/dex/quick/gen_invoke.cc | 36 +- compiler/dex/quick/mips/call_mips.cc | 6 +- compiler/dex/quick/mips/fp_mips.cc | 26 +- compiler/dex/quick/mips/int_mips.cc | 2 +- compiler/dex/quick/x86/call_x86.cc | 6 +- compiler/dex/quick/x86/fp_x86.cc | 14 +- compiler/dex/quick/x86/int_x86.cc | 2 +- compiler/jni/quick/jni_compiler.cc | 12 +- compiler/stubs/portable/stubs.cc | 7 +- compiler/stubs/quick/stubs.cc | 18 +- compiler/utils/arm/assembler_arm.cc | 2 +- compiler/utils/mips/assembler_mips.cc | 2 +- compiler/utils/x86/assembler_x86.cc | 2 +- runtime/Android.mk | 62 +- runtime/arch/arm/asm_support_arm.S | 38 + runtime/arch/arm/asm_support_arm.h | 31 + runtime/arch/arm/entrypoints_init_arm.cc | 241 ++++++ runtime/arch/arm/jni_entrypoints_arm.S | 65 ++ runtime/arch/arm/portable_entrypoints_arm.S | 96 +++ runtime/arch/arm/quick_entrypoints_arm.S | 127 +-- runtime/arch/arm/quick_entrypoints_init_arm.cc | 237 ------ runtime/arch/arm/thread_arm.cc | 29 + runtime/arch/mips/asm_support_mips.S | 41 + runtime/arch/mips/asm_support_mips.h | 31 + runtime/arch/mips/entrypoints_init_mips.cc | 242 ++++++ runtime/arch/mips/jni_entrypoints_mips.S | 89 ++ runtime/arch/mips/portable_entrypoints_mips.S | 73 ++ runtime/arch/mips/quick_entrypoints_init_mips.cc | 238 ------ runtime/arch/mips/quick_entrypoints_mips.S | 115 +-- runtime/arch/mips/thread_mips.cc | 29 + runtime/arch/x86/asm_support_x86.S | 91 ++ runtime/arch/x86/asm_support_x86.h | 27 + runtime/arch/x86/entrypoints_init_x86.cc | 224 +++++ runtime/arch/x86/jni_entrypoints_x86.S | 35 + runtime/arch/x86/portable_entrypoints_x86.S | 109 +++ runtime/arch/x86/quick_entrypoints_init_x86.cc | 221 ----- runtime/arch/x86/quick_entrypoints_x86.S | 172 +--- runtime/arch/x86/thread_x86.cc | 
139 +++ runtime/asm_support.h | 25 - runtime/class_linker.cc | 5 +- runtime/class_linker_test.cc | 2 +- runtime/common_test.h | 2 +- runtime/entrypoints/entrypoint_utils.cc | 407 +++++++++ runtime/entrypoints/entrypoint_utils.h | 412 +++++++++ runtime/entrypoints/jni/jni_entrypoints.cc | 46 + runtime/entrypoints/math_entrypoints.cc | 89 ++ runtime/entrypoints/math_entrypoints.h | 29 + runtime/entrypoints/math_entrypoints_test.cc | 74 ++ .../portable/portable_alloc_entrypoints.cc | 69 ++ .../portable/portable_argument_visitor.h | 136 +++ .../portable/portable_cast_entrypoints.cc | 57 ++ .../portable/portable_dexcache_entrypoints.cc | 53 ++ .../entrypoints/portable/portable_entrypoints.h | 44 + .../portable/portable_field_entrypoints.cc | 241 ++++++ .../portable/portable_fillarray_entrypoints.cc | 50 ++ .../portable/portable_invoke_entrypoints.cc | 104 +++ .../portable/portable_jni_entrypoints.cc | 98 +++ .../portable/portable_lock_entrypoints.cc | 38 + .../portable/portable_proxy_entrypoints.cc | 109 +++ .../portable/portable_stub_entrypoints.cc | 145 ++++ .../portable/portable_thread_entrypoints.cc | 99 +++ .../portable/portable_throw_entrypoints.cc | 123 +++ .../entrypoints/quick/quick_alloc_entrypoints.cc | 2 +- runtime/entrypoints/quick/quick_argument_visitor.h | 110 --- .../entrypoints/quick/quick_cast_entrypoints.cc | 2 +- .../quick/quick_dexcache_entrypoints.cc | 4 +- runtime/entrypoints/quick/quick_entrypoints.h | 15 +- .../entrypoints/quick/quick_field_entrypoints.cc | 2 +- .../quick/quick_fillarray_entrypoints.cc | 2 +- .../entrypoints/quick/quick_invoke_entrypoints.cc | 2 +- runtime/entrypoints/quick/quick_jni_entrypoints.cc | 2 +- .../entrypoints/quick/quick_proxy_entrypoints.cc | 86 +- .../entrypoints/quick/quick_stub_entrypoints.cc | 143 ---- .../entrypoints/quick/quick_thread_entrypoints.cc | 2 +- .../entrypoints/quick/quick_throw_entrypoints.cc | 2 +- runtime/interpreter/interpreter.cc | 2 +- runtime/mirror/abstract_method-inl.h | 2 +- 
runtime/mirror/object_test.cc | 2 +- runtime/runtime_support.cc | 475 ----------- runtime/runtime_support.h | 419 ---------- runtime/runtime_support_llvm.cc | 930 --------------------- runtime/runtime_support_llvm.h | 27 - runtime/runtime_support_test.cc | 74 -- runtime/thread.cc | 177 ++-- runtime/thread.h | 8 +- runtime/thread_arm.cc | 29 - runtime/thread_mips.cc | 29 - runtime/thread_x86.cc | 139 --- 93 files changed, 4346 insertions(+), 3864 deletions(-) create mode 100644 runtime/arch/arm/asm_support_arm.S create mode 100644 runtime/arch/arm/asm_support_arm.h create mode 100644 runtime/arch/arm/entrypoints_init_arm.cc create mode 100644 runtime/arch/arm/jni_entrypoints_arm.S create mode 100644 runtime/arch/arm/portable_entrypoints_arm.S delete mode 100644 runtime/arch/arm/quick_entrypoints_init_arm.cc create mode 100644 runtime/arch/arm/thread_arm.cc create mode 100644 runtime/arch/mips/asm_support_mips.S create mode 100644 runtime/arch/mips/asm_support_mips.h create mode 100644 runtime/arch/mips/entrypoints_init_mips.cc create mode 100644 runtime/arch/mips/jni_entrypoints_mips.S create mode 100644 runtime/arch/mips/portable_entrypoints_mips.S delete mode 100644 runtime/arch/mips/quick_entrypoints_init_mips.cc create mode 100644 runtime/arch/mips/thread_mips.cc create mode 100644 runtime/arch/x86/asm_support_x86.S create mode 100644 runtime/arch/x86/asm_support_x86.h create mode 100644 runtime/arch/x86/entrypoints_init_x86.cc create mode 100644 runtime/arch/x86/jni_entrypoints_x86.S create mode 100644 runtime/arch/x86/portable_entrypoints_x86.S delete mode 100644 runtime/arch/x86/quick_entrypoints_init_x86.cc create mode 100644 runtime/arch/x86/thread_x86.cc create mode 100644 runtime/entrypoints/entrypoint_utils.cc create mode 100644 runtime/entrypoints/entrypoint_utils.h create mode 100644 runtime/entrypoints/jni/jni_entrypoints.cc create mode 100644 runtime/entrypoints/math_entrypoints.cc create mode 100644 runtime/entrypoints/math_entrypoints.h create mode 
100644 runtime/entrypoints/math_entrypoints_test.cc create mode 100644 runtime/entrypoints/portable/portable_alloc_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_argument_visitor.h create mode 100644 runtime/entrypoints/portable/portable_cast_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_dexcache_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_entrypoints.h create mode 100644 runtime/entrypoints/portable/portable_field_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_fillarray_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_invoke_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_jni_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_lock_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_proxy_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_stub_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_thread_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_throw_entrypoints.cc delete mode 100644 runtime/runtime_support.cc delete mode 100644 runtime/runtime_support.h delete mode 100644 runtime/runtime_support_llvm.cc delete mode 100644 runtime/runtime_support_llvm.h delete mode 100644 runtime/runtime_support_test.cc delete mode 100644 runtime/thread_arm.cc delete mode 100644 runtime/thread_mips.cc delete mode 100644 runtime/thread_x86.cc (limited to 'compiler/utils/mips/assembler_mips.cc') diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index 4648d44899..b9ebd83555 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -34,6 +34,7 @@ TEST_COMMON_SRC_FILES := \ runtime/dex_file_test.cc \ runtime/dex_instruction_visitor_test.cc \ runtime/dex_method_iterator_test.cc \ + runtime/entrypoints/math_entrypoints_test.cc \ runtime/exception_test.cc \ runtime/gc/accounting/space_bitmap_test.cc \ 
runtime/gc/heap_test.cc \ @@ -50,7 +51,6 @@ TEST_COMMON_SRC_FILES := \ runtime/oat_test.cc \ runtime/output_stream_test.cc \ runtime/reference_table_test.cc \ - runtime/runtime_support_test.cc \ runtime/runtime_test.cc \ runtime/thread_pool_test.cc \ runtime/utils_test.cc \ diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index 7c3ec14981..745e43dc38 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -432,7 +432,7 @@ void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { // Making a call - use explicit registers FlushAllRegs(); /* Everything to home location */ LoadValueDirectFixed(rl_src, r0); - LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rARM_LR); // Materialize a pointer to the fill data image NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast(tab_rec)); @@ -488,7 +488,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { OpRegImm(kOpCmp, r1, 0); OpIT(kCondNe, "T"); // Go expensive route - artLockObjectFromCode(self, obj); - LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR); + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, rARM_LR); MarkSafepointPC(call_inst); @@ -519,7 +519,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { OpIT(kCondEq, "EE"); StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3); // Go expensive route - UnlockObjectFromCode(obj); - LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR); + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, rARM_LR); MarkSafepointPC(call_inst); diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc index 
1bb08c45e3..08d6778129 100644 --- a/compiler/dex/quick/arm/fp_arm.cc +++ b/compiler/dex/quick/arm/fp_arm.cc @@ -49,7 +49,8 @@ void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, case Instruction::REM_FLOAT_2ADDR: case Instruction::REM_FLOAT: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, + false); rl_result = GetReturn(true); StoreValue(rl_dest, rl_result); return; @@ -91,7 +92,8 @@ void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode, case Instruction::REM_DOUBLE_2ADDR: case Instruction::REM_DOUBLE: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, + false); rl_result = GetReturnWide(true); StoreValueWide(rl_dest, rl_result); return; @@ -140,16 +142,16 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode, op = kThumb2VcvtDI; break; case Instruction::LONG_TO_DOUBLE: - GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); return; case Instruction::FLOAT_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); return; case Instruction::LONG_TO_FLOAT: - GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); return; default: LOG(FATAL) << "Unexpected opcode: " << opcode; @@ -315,7 +317,7 @@ bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) { branch = 
NewLIR2(kThumbBCond, 0, kArmCondEq); ClobberCalleeSave(); LockCallTemps(); // Using fixed registers - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pSqrt)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt)); NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg)); NewLIR1(kThumbBlxR, r_tgt); NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1); diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index 4bb507b9ea..9db1016efa 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -665,7 +665,7 @@ void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, */ RegLocation rl_result; if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) { - int func_offset = ENTRYPOINT_OFFSET(pLmul); + int func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul); FlushAllRegs(); CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false); rl_result = GetReturnWide(false); @@ -956,7 +956,7 @@ void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. 
LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 8934340d48..ebe10bb57e 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -211,9 +211,9 @@ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, int func_offset; if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCode); } else { - func_offset= ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck); + func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck); } CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true); RegLocation rl_result = GetReturn(false); @@ -233,9 +233,9 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { int func_offset; if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode); } else { - func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck); + func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck); } CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true); FreeTemp(TargetReg(kArg2)); @@ -375,7 +375,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do // TUNING: fast path should fall through LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL); LoadConstant(TargetReg(kArg0), ssb_index); - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); if (cu_->instruction_set == kMips) { // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy OpRegCopy(rBase, TargetReg(kRet0)); @@ 
-408,9 +408,9 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do FreeTemp(rBase); } else { FlushAllRegs(); // Everything to home locations - int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) : - (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic) - : ENTRYPOINT_OFFSET(pSet32Static)); + int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) : + (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic) + : QUICK_ENTRYPOINT_OFFSET(pSet32Static)); CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true); } } @@ -455,7 +455,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, // or NULL if not initialized. Check for NULL and call helper if NULL. // TUNING: fast path should fall through LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL); - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); if (cu_->instruction_set == kMips) { // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy OpRegCopy(rBase, TargetReg(kRet0)); @@ -483,9 +483,9 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, } } else { FlushAllRegs(); // Everything to home locations - int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) : - (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic) - : ENTRYPOINT_OFFSET(pGet32Static)); + int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) : + (is_object ? 
QUICK_ENTRYPOINT_OFFSET(pGetObjStatic) + : QUICK_ENTRYPOINT_OFFSET(pGet32Static)); CallRuntimeHelperImm(getterOffset, field_idx, true); if (is_long_or_double) { RegLocation rl_result = GetReturnWide(rl_dest.fp); @@ -499,7 +499,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, void Mir2Lir::HandleSuspendLaunchPads() { int num_elems = suspend_launchpads_.Size(); - int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode); + int helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspendFromCode); for (int i = 0; i < num_elems; i++) { ResetRegPool(); ResetDefTracking(); @@ -545,7 +545,7 @@ void Mir2Lir::HandleThrowLaunchPads() { bool target_x86 = (cu_->instruction_set == kX86); switch (lab->operands[0]) { case kThrowNullPointer: - func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointerFromCode); break; case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index // v1 holds the constant array index. Mips/Arm uses v2 for length, x86 reloads. @@ -557,7 +557,7 @@ void Mir2Lir::HandleThrowLaunchPads() { // Make sure the following LoadConstant doesn't mess with kArg1. 
LockTemp(TargetReg(kArg1)); LoadConstant(TargetReg(kArg0), v2); - func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); break; case kThrowArrayBounds: // Move v1 (array index) to kArg0 and v2 (array length) to kArg1 @@ -590,18 +590,18 @@ void Mir2Lir::HandleThrowLaunchPads() { OpRegCopy(TargetReg(kArg0), v1); } } - func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); break; case kThrowDivZero: - func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZeroFromCode); break; case kThrowNoSuchMethod: OpRegCopy(TargetReg(kArg0), v1); func_offset = - ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode); + QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode); break; case kThrowStackOverflow: - func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode); // Restore stack alignment if (target_x86) { OpRegImm(kOpAdd, TargetReg(kSp), frame_size_); @@ -664,9 +664,9 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, StoreValue(rl_dest, rl_result); } } else { - int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) : - (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance) - : ENTRYPOINT_OFFSET(pGet32Instance)); + int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) : + (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance) + : QUICK_ENTRYPOINT_OFFSET(pGet32Instance)); CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true); if (is_long_or_double) { RegLocation rl_result = GetReturnWide(rl_dest.fp); @@ -719,9 +719,9 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size, } } } else { - int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) : - (is_object ? 
ENTRYPOINT_OFFSET(pSetObjInstance) - : ENTRYPOINT_OFFSET(pSet32Instance)); + int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) : + (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance) + : QUICK_ENTRYPOINT_OFFSET(pSet32Instance)); CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true); } } @@ -735,7 +735,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { type_idx)) { // Call out to helper which resolves type and verifies access. // Resolved type returned in kRet0. - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), type_idx, rl_method.low_reg, true); RegLocation rl_result = GetReturn(false); StoreValue(rl_dest, rl_result); @@ -764,7 +764,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { // TUNING: move slow path to end & remove unconditional branch LIR* target1 = NewLIR0(kPseudoTargetLabel); // Call out to helper, which will return resolved type in kArg0 - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, rl_method.low_reg, true); RegLocation rl_result = GetReturn(false); StoreValue(rl_dest, rl_result); @@ -797,7 +797,7 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { LoadWordDisp(TargetReg(kArg2), mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0)); // Might call out to helper, which will return resolved string in kRet0 - int r_tgt = CallHelperSetup(ENTRYPOINT_OFFSET(pResolveStringFromCode)); + int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode)); LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0)); LoadConstant(TargetReg(kArg1), string_idx); if (cu_->instruction_set == kThumb2) { @@ -821,7 +821,8 @@ void 
Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { branch->target = target; } else { DCHECK_EQ(cu_->instruction_set, kX86); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), TargetReg(kArg1), true); + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), + TargetReg(kArg1), true); } GenBarrier(); StoreValue(rl_dest, GetReturn(false)); @@ -847,9 +848,9 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { int func_offset; if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks( cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCode); } else { - func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck); } CallRuntimeHelperImmMethod(func_offset, type_idx, true); RegLocation rl_result = GetReturn(false); @@ -858,7 +859,7 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { void Mir2Lir::GenThrow(RegLocation rl_src) { FlushAllRegs(); - CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true); + CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true); } // For final classes there are no sub-classes to check and so we can answer the instance-of @@ -928,7 +929,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know if (needs_access_check) { // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kArg0 - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), type_idx, true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref @@ 
-950,7 +951,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL); // Not resolved // Call out to helper, which will return resolved type in kRet0 - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true); + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true); OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* reload Ref */ // Rejoin code paths @@ -985,7 +986,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know } } else { if (cu_->instruction_set == kThumb2) { - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); if (!type_known_abstract) { /* Uses conditional nullification */ OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same? 
@@ -1002,13 +1003,13 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL); } if (cu_->instruction_set != kX86) { - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) FreeTemp(r_tgt); } else { OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); - OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); } } } @@ -1068,7 +1069,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kRet0 // InitializeTypeAndVerifyAccess(idx, method) - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), type_idx, TargetReg(kArg1), true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path } else if (use_declaring_class) { @@ -1088,8 +1089,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // Not resolved // Call out to helper, which will return resolved type in kArg0 // InitializeTypeFromCode(idx, method) - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1), - true); + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, + TargetReg(kArg1), true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path // Rejoin code paths LIR* hop_target = NewLIR0(kPseudoTargetLabel); @@ -1108,8 +1109,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ if 
(!type_known_abstract) { branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL); } - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2), - true); + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), + TargetReg(kArg2), true); /* branch target here */ LIR* target = NewLIR0(kPseudoTargetLabel); branch1->target = target; @@ -1172,15 +1173,15 @@ void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, switch (opcode) { case Instruction::SHL_LONG: case Instruction::SHL_LONG_2ADDR: - func_offset = ENTRYPOINT_OFFSET(pShlLong); + func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong); break; case Instruction::SHR_LONG: case Instruction::SHR_LONG_2ADDR: - func_offset = ENTRYPOINT_OFFSET(pShrLong); + func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong); break; case Instruction::USHR_LONG: case Instruction::USHR_LONG_2ADDR: - func_offset = ENTRYPOINT_OFFSET(pUshrLong); + func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong); break; default: LOG(FATAL) << "Unexpected case"; @@ -1302,7 +1303,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, } rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv); } else { - int func_offset = ENTRYPOINT_OFFSET(pIdivmod); + int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod); FlushAllRegs(); /* Send everything to home location */ LoadValueDirectFixed(rl_src2, TargetReg(kArg1)); int r_tgt = CallHelperSetup(func_offset); @@ -1557,7 +1558,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re FlushAllRegs(); /* Everything to home location */ LoadValueDirectFixed(rl_src, TargetReg(kArg0)); Clobber(TargetReg(kArg0)); - int func_offset = ENTRYPOINT_OFFSET(pIdivmod); + int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod); CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false); if (is_div) rl_result = GetReturn(false); @@ -1634,7 +1635,7 @@ void 
Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, } else { call_out = true; ret_reg = TargetReg(kRet0); - func_offset = ENTRYPOINT_OFFSET(pLmul); + func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul); } break; case Instruction::DIV_LONG: @@ -1642,13 +1643,13 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, call_out = true; check_zero = true; ret_reg = TargetReg(kRet0); - func_offset = ENTRYPOINT_OFFSET(pLdiv); + func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv); break; case Instruction::REM_LONG: case Instruction::REM_LONG_2ADDR: call_out = true; check_zero = true; - func_offset = ENTRYPOINT_OFFSET(pLdivmod); + func_offset = QUICK_ENTRYPOINT_OFFSET(pLdivmod); /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */ ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0); break; diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 91f250075a..1b34e99a72 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -471,7 +471,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, direct_method = 0; } int trampoline = (cu->instruction_set == kX86) ? 
0 - : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline); + : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline); if (direct_method != 0) { switch (state) { @@ -555,7 +555,7 @@ static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -563,7 +563,7 @@ static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -571,7 +571,7 @@ static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -579,7 +579,7 @@ static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -589,7 +589,7 @@ static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* 
cu, uint32_t unused, uintptr_t unused2, uintptr_t unused3, InvokeType unused4) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -773,14 +773,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, // Generate memcpy OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset); OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset); - CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), + CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), TargetReg(kArg1), (info->num_arg_words - 3) * 4, false); } else { if (info->num_arg_words >= 20) { // Generate memcpy OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset); OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset); - CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), + CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), TargetReg(kArg1), (info->num_arg_words - 3) * 4, false); } else { // Use vldm/vstm pair using kArg3 as a temp @@ -1047,7 +1047,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { } else { LoadValueDirectFixed(rl_start, reg_start); } - int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0; + int r_tgt = (cu_->instruction_set != kX86) ? 
LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0; GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags); LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast(info)); intrinsic_launchpads_.Insert(launch_pad); @@ -1056,7 +1056,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { if (cu_->instruction_set != kX86) { OpReg(kOpBlx, r_tgt); } else { - OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf)); + OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf)); } LIR* resume_tgt = NewLIR0(kPseudoTargetLabel); launch_pad->operands[2] = reinterpret_cast(resume_tgt); @@ -1084,7 +1084,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { LoadValueDirectFixed(rl_this, reg_this); LoadValueDirectFixed(rl_cmp, reg_cmp); int r_tgt = (cu_->instruction_set != kX86) ? - LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0; + LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0; GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags); // TUNING: check if rl_cmp.s_reg_low is already null checked LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast(info)); @@ -1094,7 +1094,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { if (cu_->instruction_set != kX86) { OpReg(kOpBlx, r_tgt); } else { - OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo)); + OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)); } launch_pad->operands[2] = 0; // No return possible // Record that we've already inlined & null checked @@ -1409,20 +1409,20 @@ void Mir2Lir::GenInvoke(CallInfo* info) { int trampoline = 0; switch (info->type) { case kInterface: - trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline) - : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); + trampoline = fast_path ? 
QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline) + : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); break; case kDirect: - trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); break; case kStatic: - trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); break; case kSuper: - trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); break; case kVirtual: - trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); break; default: LOG(FATAL) << "Unexpected invoke type"; diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index b6c200ca98..846c055ac2 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -247,7 +247,7 @@ void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { GenBarrier(); NewLIR0(kMipsCurrPC); // Really a jal to .+8 // Now, fill the branch delay slot with the helper load - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode)); GenBarrier(); // Scheduling barrier // Construct BaseLabel and set up table base register @@ -272,7 +272,7 @@ void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { LockCallTemps(); // Prepare for explicit register usage GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags); // Go expensive route - artLockObjectFromCode(self, obj); - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pLockObjectFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode)); ClobberCalleeSave(); LIR* call_inst = 
OpReg(kOpBlx, r_tgt); MarkSafepointPC(call_inst); @@ -287,7 +287,7 @@ void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { LockCallTemps(); // Prepare for explicit register usage GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags); // Go expensive route - UnlockObjectFromCode(obj); - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pUnlockObjectFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode)); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, r_tgt); MarkSafepointPC(call_inst); diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc index 620527e35b..320301726b 100644 --- a/compiler/dex/quick/mips/fp_mips.cc +++ b/compiler/dex/quick/mips/fp_mips.cc @@ -50,7 +50,8 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, case Instruction::REM_FLOAT_2ADDR: case Instruction::REM_FLOAT: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, + false); rl_result = GetReturn(true); StoreValue(rl_dest, rl_result); return; @@ -92,7 +93,8 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, case Instruction::REM_DOUBLE_2ADDR: case Instruction::REM_DOUBLE: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, + false); rl_result = GetReturnWide(true); StoreValueWide(rl_dest, rl_result); return; @@ -133,22 +135,22 @@ void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, op = kMipsFcvtdw; break; case Instruction::FLOAT_TO_INT: - GenConversionCall(ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_INT: 
- GenConversionCall(ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src); return; case Instruction::LONG_TO_DOUBLE: - GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); return; case Instruction::FLOAT_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); return; case Instruction::LONG_TO_FLOAT: - GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); return; default: LOG(FATAL) << "Unexpected opcode: " << opcode; @@ -178,18 +180,18 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, switch (opcode) { case Instruction::CMPL_FLOAT: - offset = ENTRYPOINT_OFFSET(pCmplFloat); + offset = QUICK_ENTRYPOINT_OFFSET(pCmplFloat); wide = false; break; case Instruction::CMPG_FLOAT: - offset = ENTRYPOINT_OFFSET(pCmpgFloat); + offset = QUICK_ENTRYPOINT_OFFSET(pCmpgFloat); wide = false; break; case Instruction::CMPL_DOUBLE: - offset = ENTRYPOINT_OFFSET(pCmplDouble); + offset = QUICK_ENTRYPOINT_OFFSET(pCmplDouble); break; case Instruction::CMPG_DOUBLE: - offset = ENTRYPOINT_OFFSET(pCmpgDouble); + offset = QUICK_ENTRYPOINT_OFFSET(pCmpgDouble); break; default: LOG(FATAL) << "Unexpected opcode: " << opcode; diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc index 7c8214b927..bd044c66bd 100644 --- a/compiler/dex/quick/mips/int_mips.cc +++ b/compiler/dex/quick/mips/int_mips.cc @@ -579,7 +579,7 @@ void MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. 
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index d530a1c644..1c395def55 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -148,7 +148,7 @@ void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { NewLIR1(kX86StartOfMethod, rX86_ARG2); NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast(tab_rec)); NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0, rX86_ARG1, true); } @@ -165,7 +165,7 @@ void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX); LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq); // If lock is held, go the expensive route - artLockObjectFromCode(self, obj); - CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true); + CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true); branch->target = NewLIR0(kPseudoTargetLabel); } @@ -185,7 +185,7 @@ void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { LIR* branch2 = NewLIR1(kX86Jmp8, 0); branch->target = NewLIR0(kPseudoTargetLabel); // Otherwise, go the expensive route - UnlockObjectFromCode(obj); - CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true); + CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true); branch2->target = NewLIR0(kPseudoTargetLabel); } diff --git 
a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc index cc6f374488..f736b5e28f 100644 --- a/compiler/dex/quick/x86/fp_x86.cc +++ b/compiler/dex/quick/x86/fp_x86.cc @@ -49,7 +49,8 @@ void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode, case Instruction::REM_FLOAT_2ADDR: case Instruction::REM_FLOAT: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, + false); rl_result = GetReturn(true); StoreValue(rl_dest, rl_result); return; @@ -99,7 +100,8 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode, case Instruction::REM_DOUBLE_2ADDR: case Instruction::REM_DOUBLE: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, + false); rl_result = GetReturnWide(true); StoreValueWide(rl_dest, rl_result); return; @@ -196,17 +198,17 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, return; } case Instruction::LONG_TO_DOUBLE: - GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); return; case Instruction::LONG_TO_FLOAT: // TODO: inline by using memory as a 64-bit source. Be careful about promoted registers. 
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); return; case Instruction::FLOAT_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); return; default: LOG(INFO) << "Unexpected opcode: " << opcode; diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index 3be24df565..0b4b4be04e 100644 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -532,7 +532,7 @@ void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index aeadb54a22..b069fbd4a1 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -172,8 +172,8 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler, // can occur. The result is the saved JNI local state that is restored by the exit call. We // abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer // arguments. - uintptr_t jni_start = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodStartSynchronized) - : ENTRYPOINT_OFFSET(pJniMethodStart); + uintptr_t jni_start = is_synchronized ? 
QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized) + : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); FrameOffset locked_object_sirt_offset(0); if (is_synchronized) { @@ -304,13 +304,13 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler, uintptr_t jni_end; if (reference_return) { // Pass result. - jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized) - : ENTRYPOINT_OFFSET(pJniMethodEndWithReference); + jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized) + : QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReference); SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister()); end_jni_conv->Next(); } else { - jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndSynchronized) - : ENTRYPOINT_OFFSET(pJniMethodEnd); + jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndSynchronized) + : QUICK_ENTRYPOINT_OFFSET(pJniMethodEnd); } // Pass saved local reference state. 
if (end_jni_conv->IsCurrentParamOnStack()) { diff --git a/compiler/stubs/portable/stubs.cc b/compiler/stubs/portable/stubs.cc index cee68478a4..def43e2bd2 100644 --- a/compiler/stubs/portable/stubs.cc +++ b/compiler/stubs/portable/stubs.cc @@ -34,7 +34,8 @@ const std::vector* CreatePortableResolutionTrampoline() { RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR); __ PushList(save); - __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); + __ LoadFromOffset(kLoadWord, R12, TR, + PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); __ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3 __ mov(R2, ShifterOperand(SP)); // Pass sp for Method** callee_addr __ IncreaseFrameSize(12); // 3 words of space for alignment @@ -69,7 +70,7 @@ const std::vector* CreatePortableResolutionTrampoline() { __ StoreToOffset(kStoreWord, A0, SP, 0); __ LoadFromOffset(kLoadWord, T9, S1, - ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); + PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); __ Move(A3, S1); // Pass Thread::Current() in A3 __ Move(A2, SP); // Pass SP for Method** callee_addr __ Jalr(T9); // Call to resolution trampoline (callee, receiver, callee_addr, Thread*) @@ -112,7 +113,7 @@ const std::vector* CreatePortableResolutionTrampoline() { __ pushl(ECX); // pass receiver __ pushl(EAX); // pass called // Call to resolve method. 
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)), + __ Call(ThreadOffset(PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)), X86ManagedRegister::FromCpuRegister(ECX)); __ leave(); diff --git a/compiler/stubs/quick/stubs.cc b/compiler/stubs/quick/stubs.cc index 598481f3f7..912f1c0746 100644 --- a/compiler/stubs/quick/stubs.cc +++ b/compiler/stubs/quick/stubs.cc @@ -46,7 +46,7 @@ const std::vector* CreateQuickResolutionTrampoline() { // TODO: enable when GetCalleeSaveMethod is available at stub generation time // DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask()); __ PushList(save); - __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); + __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); __ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3 __ IncreaseFrameSize(8); // 2 words of space for alignment __ mov(R2, ShifterOperand(SP)); // Pass SP @@ -71,7 +71,7 @@ const std::vector* CreateQuickResolutionTrampoline() { const std::vector* CreateInterpreterToInterpreterEntry() { UniquePtr assembler(static_cast(Assembler::Create(kArm))); - __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); + __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); __ bkpt(0); size_t cs = assembler->CodeSize(); @@ -85,7 +85,7 @@ const std::vector* CreateInterpreterToInterpreterEntry() { const std::vector* CreateInterpreterToQuickEntry() { UniquePtr assembler(static_cast(Assembler::Create(kArm))); - __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)); + __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)); __ bkpt(0); size_t cs = assembler->CodeSize(); @@ -123,7 +123,7 @@ const std::vector* CreateQuickResolutionTrampoline() { __ StoreToOffset(kStoreWord, A2, SP, 
8); __ StoreToOffset(kStoreWord, A1, SP, 4); - __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); + __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); __ Move(A3, S1); // Pass Thread::Current() in A3 __ Move(A2, SP); // Pass SP for Method** callee_addr __ Jalr(T9); // Call to resolution trampoline (method_idx, receiver, sp, Thread*) @@ -161,7 +161,7 @@ const std::vector* CreateQuickResolutionTrampoline() { const std::vector* CreateInterpreterToInterpreterEntry() { UniquePtr assembler(static_cast(Assembler::Create(kMips))); - __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); + __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); __ Jr(T9); __ Break(); @@ -176,7 +176,7 @@ const std::vector* CreateInterpreterToInterpreterEntry() { const std::vector* CreateInterpreterToQuickEntry() { UniquePtr assembler(static_cast(Assembler::Create(kMips))); - __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); + __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); __ Jr(T9); __ Break(); @@ -208,7 +208,7 @@ const std::vector* CreateQuickResolutionTrampoline() { __ pushl(EAX); // pass Method* // Call to resolve method. 
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)), + __ Call(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)), X86ManagedRegister::FromCpuRegister(ECX)); __ movl(EDI, EAX); // save code pointer in EDI @@ -236,7 +236,7 @@ const std::vector* CreateQuickResolutionTrampoline() { const std::vector* CreateInterpreterToInterpreterEntry() { UniquePtr assembler(static_cast(Assembler::Create(kX86))); - __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)))); + __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)))); size_t cs = assembler->CodeSize(); UniquePtr > entry_stub(new std::vector(cs)); @@ -249,7 +249,7 @@ const std::vector* CreateInterpreterToInterpreterEntry() { const std::vector* CreateInterpreterToQuickEntry() { UniquePtr assembler(static_cast(Assembler::Create(kX86))); - __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)))); + __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)))); size_t cs = assembler->CodeSize(); UniquePtr > entry_stub(new std::vector(cs)); diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index 0778cd3bbc..fa202c3017 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -1884,7 +1884,7 @@ void ArmExceptionSlowPath::Emit(Assembler* sasm) { // Don't care about preserving R0 as this call won't return __ mov(R0, ShifterOperand(scratch_.AsCoreRegister())); // Set up call to Thread::Current()->pDeliverException - __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pDeliverException)); + __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException)); __ blx(R12); // Call never returns __ bkpt(0); diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc index 58815da1b8..931d7ab0f7 100644 --- 
a/compiler/utils/mips/assembler_mips.cc +++ b/compiler/utils/mips/assembler_mips.cc @@ -988,7 +988,7 @@ void MipsExceptionSlowPath::Emit(Assembler* sasm) { // Don't care about preserving A0 as this call won't return __ Move(A0, scratch_.AsCoreRegister()); // Set up call to Thread::Current()->pDeliverException - __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pDeliverException)); + __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException)); __ Jr(T9); // Call never returns __ Break(); diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index 89bfeb5917..9095180246 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -1837,7 +1837,7 @@ void X86ExceptionSlowPath::Emit(Assembler *sasm) { } // Pass exception as argument in EAX __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset())); - __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException))); + __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(pDeliverException))); // this call should never return __ int3(); #undef __ diff --git a/runtime/Android.mk b/runtime/Android.mk index bc6a2ed2f8..c686128418 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -118,8 +118,6 @@ LIBART_COMMON_SRC_FILES := \ reference_table.cc \ reflection.cc \ runtime.cc \ - runtime_support.cc \ - runtime_support_llvm.cc \ signal_catcher.cc \ stack.cc \ thread.cc \ @@ -143,6 +141,21 @@ LIBART_COMMON_SRC_FILES += \ arch/arm/registers_arm.cc \ arch/x86/registers_x86.cc \ arch/mips/registers_mips.cc \ + entrypoints/entrypoint_utils.cc \ + entrypoints/jni/jni_entrypoints.cc \ + entrypoints/math_entrypoints.cc \ + entrypoints/portable/portable_alloc_entrypoints.cc \ + entrypoints/portable/portable_cast_entrypoints.cc \ + entrypoints/portable/portable_dexcache_entrypoints.cc \ + entrypoints/portable/portable_field_entrypoints.cc \ + entrypoints/portable/portable_fillarray_entrypoints.cc \ + 
entrypoints/portable/portable_invoke_entrypoints.cc \ + entrypoints/portable/portable_jni_entrypoints.cc \ + entrypoints/portable/portable_lock_entrypoints.cc \ + entrypoints/portable/portable_proxy_entrypoints.cc \ + entrypoints/portable/portable_stub_entrypoints.cc \ + entrypoints/portable/portable_thread_entrypoints.cc \ + entrypoints/portable/portable_throw_entrypoints.cc \ entrypoints/quick/quick_alloc_entrypoints.cc \ entrypoints/quick/quick_cast_entrypoints.cc \ entrypoints/quick/quick_deoptimization_entrypoints.cc \ @@ -171,39 +184,35 @@ LIBART_TARGET_SRC_FILES := \ ifeq ($(TARGET_ARCH),arm) LIBART_TARGET_SRC_FILES += \ arch/arm/context_arm.cc.arm \ + arch/arm/entrypoints_init_arm.cc \ + arch/arm/jni_entrypoints_arm.S \ + arch/arm/portable_entrypoints_arm.S \ arch/arm/quick_entrypoints_arm.S \ - arch/arm/quick_entrypoints_init_arm.cc + arch/arm/thread_arm.cc else # TARGET_ARCH != arm ifeq ($(TARGET_ARCH),x86) LIBART_TARGET_SRC_FILES += \ arch/x86/context_x86.cc \ - arch/x86/quick_entrypoints_init_x86.cc \ - arch/x86/quick_entrypoints_x86.S + arch/x86/entrypoints_init_x86.cc \ + arch/x86/jni_entrypoints_x86.S \ + arch/x86/portable_entrypoints_x86.S \ + arch/x86/quick_entrypoints_x86.S \ + arch/x86/thread_x86.cc else # TARGET_ARCH != x86 ifeq ($(TARGET_ARCH),mips) LIBART_TARGET_SRC_FILES += \ arch/mips/context_mips.cc \ - arch/mips/quick_entrypoints_init_mips.cc \ - arch/mips/quick_entrypoints_mips.S + arch/mips/entrypoints_init_mips.cc \ + arch/mips/jni_entrypoints_mips.S \ + arch/mips/portable_entrypoints_mips.S \ + arch/mips/quick_entrypoints_mips.S \ + arch/mips/thread_mips.cc else # TARGET_ARCH != mips $(error unsupported TARGET_ARCH=$(TARGET_ARCH)) endif # TARGET_ARCH != mips endif # TARGET_ARCH != x86 endif # TARGET_ARCH != arm -ifeq ($(TARGET_ARCH),arm) -LIBART_TARGET_SRC_FILES += thread_arm.cc -else # TARGET_ARCH != arm -ifeq ($(TARGET_ARCH),x86) -LIBART_TARGET_SRC_FILES += thread_x86.cc -else # TARGET_ARCH != x86 -ifeq ($(TARGET_ARCH),mips) 
-LIBART_TARGET_SRC_FILES += thread_mips.cc -else # TARGET_ARCH != mips -$(error unsupported TARGET_ARCH=$(TARGET_ARCH)) -endif # TARGET_ARCH != mips -endif # TARGET_ARCH != x86 -endif # TARGET_ARCH != arm LIBART_HOST_SRC_FILES := \ $(LIBART_COMMON_SRC_FILES) \ @@ -215,14 +224,11 @@ LIBART_HOST_SRC_FILES := \ ifeq ($(HOST_ARCH),x86) LIBART_HOST_SRC_FILES += \ arch/x86/context_x86.cc \ - arch/x86/quick_entrypoints_init_x86.cc \ - arch/x86/quick_entrypoints_x86.S -else # HOST_ARCH != x86 -$(error unsupported HOST_ARCH=$(HOST_ARCH)) -endif # HOST_ARCH != x86 - -ifeq ($(HOST_ARCH),x86) -LIBART_HOST_SRC_FILES += thread_x86.cc + arch/x86/entrypoints_init_x86.cc \ + arch/x86/jni_entrypoints_x86.S \ + arch/x86/portable_entrypoints_x86.S \ + arch/x86/quick_entrypoints_x86.S \ + arch/x86/thread_x86.cc else # HOST_ARCH != x86 $(error unsupported HOST_ARCH=$(HOST_ARCH)) endif # HOST_ARCH != x86 diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S new file mode 100644 index 0000000000..ed655e95b1 --- /dev/null +++ b/runtime/arch/arm/asm_support_arm.S @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ +#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ + +#include "asm_support_arm.h" + +.macro ENTRY name + .type \name, #function + .global \name + /* Cache alignment for function entry */ + .balign 16 +\name: + .cfi_startproc + .fnstart +.endm + +.macro END name + .fnend + .cfi_endproc + .size \name, .-\name +.endm + +#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h new file mode 100644 index 0000000000..ed3d476b24 --- /dev/null +++ b/runtime/arch/arm/asm_support_arm.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_ +#define ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_ + +#include "asm_support.h" + +// Register holding suspend check count down. +#define rSUSPEND r4 +// Register holding Thread::Current(). 
+#define rSELF r9 +// Offset of field Thread::suspend_count_ verified in InitCpu +#define THREAD_FLAGS_OFFSET 0 +// Offset of field Thread::exception_ verified in InitCpu +#define THREAD_EXCEPTION_OFFSET 12 + +#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_ diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc new file mode 100644 index 0000000000..b71a158289 --- /dev/null +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -0,0 +1,241 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/entrypoint_utils.h" +#include "entrypoints/math_entrypoints.h" + +namespace art { + +// Alloc entrypoints. +extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); + +// Cast entrypoints. 
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, + const mirror::Class* ref_class); +extern "C" void art_quick_can_put_array_element_from_code(void*, void*); +extern "C" void art_quick_check_cast_from_code(void*, void*); + +// DexCache entrypoints. +extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); +extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); + +// Exception entrypoints. +extern "C" void* GetAndClearException(Thread*); + +// Field entrypoints. +extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); +extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_static_from_code(uint32_t); +extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); +extern "C" int64_t art_quick_get64_static_from_code(uint32_t); +extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); +extern "C" void* art_quick_get_obj_static_from_code(uint32_t); + +// FillArray entrypoint. +extern "C" void art_quick_handle_fill_data_from_code(void*, void*); + +// Lock entrypoints. +extern "C" void art_quick_lock_object_from_code(void*); +extern "C" void art_quick_unlock_object_from_code(void*); + +// Math entrypoints. 
+extern int32_t CmpgDouble(double a, double b); +extern int32_t CmplDouble(double a, double b); +extern int32_t CmpgFloat(float a, float b); +extern int32_t CmplFloat(float a, float b); + +// Math conversions. +extern "C" int32_t __aeabi_f2iz(float op1); // FLOAT_TO_INT +extern "C" int32_t __aeabi_d2iz(double op1); // DOUBLE_TO_INT +extern "C" float __aeabi_l2f(int64_t op1); // LONG_TO_FLOAT +extern "C" double __aeabi_l2d(int64_t op1); // LONG_TO_DOUBLE + +// Single-precision FP arithmetics. +extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] + +// Double-precision FP arithmetics. +extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] + +// Integer arithmetics. +extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8|_LIT16] + +// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] +extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t); +extern "C" int64_t art_quick_mul_long(int64_t, int64_t); +extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); + +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Intrinsic entrypoints. +extern "C" int32_t __memcmp16(void*, void*, int32_t); +extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); +extern "C" int32_t art_quick_string_compareto(void*, void*); + +// Invoke entrypoints. 
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); + +// Thread entrypoints. +extern void CheckSuspendFromCode(Thread* thread); +extern "C" void art_quick_test_suspend(); + +// Throw entrypoints. +extern "C" void art_quick_deliver_exception_from_code(void*); +extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero_from_code(); +extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception_from_code(); +extern "C" void art_quick_throw_stack_overflow_from_code(void*); + +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { + // Alloc + qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; + qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; + qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; + qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; + qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; + qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = 
art_quick_check_and_alloc_array_from_code_with_access_check; + + // Cast + qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; + qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; + qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; + qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; + qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; + qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + + // Field + qpoints->pSet32Instance = art_quick_set32_instance_from_code; + qpoints->pSet32Static = art_quick_set32_static_from_code; + qpoints->pSet64Instance = art_quick_set64_instance_from_code; + qpoints->pSet64Static = art_quick_set64_static_from_code; + qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; + qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; + qpoints->pGet32Instance = art_quick_get32_instance_from_code; + qpoints->pGet64Instance = art_quick_get64_instance_from_code; + qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; + qpoints->pGet32Static = art_quick_get32_static_from_code; + qpoints->pGet64Static = art_quick_get64_static_from_code; + qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + + // FillArray + qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + + // Locks + qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; + 
qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + + // Math + qpoints->pCmpgDouble = CmpgDouble; + qpoints->pCmpgFloat = CmpgFloat; + qpoints->pCmplDouble = CmplDouble; + qpoints->pCmplFloat = CmplFloat; + qpoints->pFmod = fmod; + qpoints->pSqrt = sqrt; + qpoints->pL2d = __aeabi_l2d; + qpoints->pFmodf = fmodf; + qpoints->pL2f = __aeabi_l2f; + qpoints->pD2iz = __aeabi_d2iz; + qpoints->pF2iz = __aeabi_f2iz; + qpoints->pIdivmod = __aeabi_idivmod; + qpoints->pD2l = art_d2l; + qpoints->pF2l = art_f2l; + qpoints->pLdiv = __aeabi_ldivmod; + qpoints->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3 + qpoints->pLmul = art_quick_mul_long; + qpoints->pShlLong = art_quick_shl_long; + qpoints->pShrLong = art_quick_shr_long; + qpoints->pUshrLong = art_quick_ushr_long; + + // Interpreter + qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; + qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; + + // Intrinsics + qpoints->pIndexOf = art_quick_indexof; + qpoints->pMemcmp16 = __memcmp16; + qpoints->pStringCompareTo = art_quick_string_compareto; + qpoints->pMemcpy = memcpy; + + // Invocation + qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; + qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; + qpoints->pTestSuspendFromCode = art_quick_test_suspend; + + // Throws + 
qpoints->pDeliverException = art_quick_deliver_exception_from_code; + qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; + qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; + qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; + qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; + qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; + + // Portable + ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; +}; + +} // namespace art diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S new file mode 100644 index 0000000000..0a0d06a22a --- /dev/null +++ b/runtime/arch/arm/jni_entrypoints_arm.S @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_arm.S" + + /* + * Jni dlsym lookup stub. 
+ */ + .extern artFindNativeMethod +ENTRY art_jni_dlsym_lookup_stub + push {r0, r1, r2, r3, lr} @ spill regs + .save {r0, r1, r2, r3, lr} + .pad #20 + .cfi_adjust_cfa_offset 20 + sub sp, #12 @ pad stack pointer to align frame + .pad #12 + .cfi_adjust_cfa_offset 12 + mov r0, r9 @ pass Thread::Current + blx artFindNativeMethod @ (Thread*) + mov r12, r0 @ save result in r12 + add sp, #12 @ restore stack pointer + .cfi_adjust_cfa_offset -12 + pop {r0, r1, r2, r3, lr} @ restore regs + .cfi_adjust_cfa_offset -20 + cmp r12, #0 @ is method code null? + bxne r12 @ if non-null, tail call to method's code + bx lr @ otherwise, return to caller to handle exception +END art_jni_dlsym_lookup_stub + + /* + * Entry point of native methods when JNI bug compatibility is enabled. + */ + .extern artWorkAroundAppJniBugs +ENTRY art_quick_work_around_app_jni_bugs + @ save registers that may contain arguments and LR that will be crushed by a call + push {r0-r3, lr} + .save {r0-r3, lr} + .cfi_adjust_cfa_offset 16 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r1, 4 + .cfi_rel_offset r2, 8 + .cfi_rel_offset r3, 12 + sub sp, #12 @ 3 words of space for alignment + mov r0, r9 @ pass Thread::Current + mov r1, sp @ pass SP + bl artWorkAroundAppJniBugs @ (Thread*, SP) + add sp, #12 @ rewind stack + mov r12, r0 @ save target address + pop {r0-r3, lr} @ restore possibly modified argument registers + .cfi_adjust_cfa_offset -16 + bx r12 @ tail call into JNI routine +END art_quick_work_around_app_jni_bugs diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S new file mode 100644 index 0000000000..4cc6654ebb --- /dev/null +++ b/runtime/arch/arm/portable_entrypoints_arm.S @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_arm.S" + + /* + * Portable invocation stub. + * On entry: + * r0 = method pointer + * r1 = argument array or NULL for no argument methods + * r2 = size of argument array in bytes + * r3 = (managed) thread pointer + * [sp] = JValue* result + * [sp + 4] = result type char + */ +ENTRY art_portable_invoke_stub + push {r0, r4, r5, r9, r11, lr} @ spill regs + .save {r0, r4, r5, r9, r11, lr} + .pad #24 + .cfi_adjust_cfa_offset 24 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r4, 4 + .cfi_rel_offset r5, 8 + .cfi_rel_offset r9, 12 + .cfi_rel_offset r11, 16 + .cfi_rel_offset lr, 20 + mov r11, sp @ save the stack pointer + .cfi_def_cfa_register r11 + mov r9, r3 @ move managed thread pointer into r9 + mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval + add r5, r2, #16 @ create space for method pointer in frame + and r5, #0xFFFFFFF0 @ align frame size to 16 bytes + sub sp, r5 @ reserve stack space for argument array + add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy + bl memcpy @ memcpy (dest, src, bytes) + ldr r0, [r11] @ restore method* + ldr r1, [sp, #4] @ copy arg value for r1 + ldr r2, [sp, #8] @ copy arg value for r2 + ldr r3, [sp, #12] @ copy arg value for r3 + mov ip, #0 @ set ip to 0 + str ip, [sp] @ store NULL for method* at bottom of frame + add sp, #16 @ first 4 args are not passed on stack for portable + ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code + blx ip @ call the method + mov sp, r11 @ restore the stack pointer + ldr ip, [sp, #24] @ load the result pointer + strd 
r0, [ip] @ store r0/r1 into result pointer + pop {r0, r4, r5, r9, r11, lr} @ restore spill regs + .cfi_adjust_cfa_offset -24 + bx lr +END art_portable_invoke_stub + + .extern artPortableProxyInvokeHandler +ENTRY art_portable_proxy_invoke_handler + @ Fake callee save ref and args frame set up, note portable doesn't use callee save frames. + @ TODO: just save the registers that are needed in artPortableProxyInvokeHandler. + push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves + .save {r1-r3, r5-r8, r10-r11, lr} + .cfi_adjust_cfa_offset 40 + .cfi_rel_offset r1, 0 + .cfi_rel_offset r2, 4 + .cfi_rel_offset r3, 8 + .cfi_rel_offset r5, 12 + .cfi_rel_offset r6, 16 + .cfi_rel_offset r7, 20 + .cfi_rel_offset r8, 24 + .cfi_rel_offset r10, 28 + .cfi_rel_offset r11, 32 + .cfi_rel_offset lr, 36 + sub sp, #8 @ 2 words of space, bottom word will hold Method* + .pad #8 + .cfi_adjust_cfa_offset 8 + @ Begin argument set up. + str r0, [sp, #0] @ place proxy method at bottom of frame + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP) + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + ldr lr, [sp, #44] @ restore lr + add sp, #48 @ pop frame + .cfi_adjust_cfa_offset -48 + bx lr @ return +END art_portable_proxy_invoke_handler diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index f19e8bada0..9b8d238ab8 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -14,29 +14,13 @@ * limitations under the License. 
*/ -#include "asm_support.h" +#include "asm_support_arm.S" /* Deliver the given exception */ .extern artDeliverExceptionFromCode /* Deliver an exception pending on a thread */ .extern artDeliverPendingException -.macro ENTRY name - .type \name, #function - .global \name - /* Cache alignment for function entry */ - .balign 16 -\name: - .cfi_startproc - .fnstart -.endm - -.macro END name - .fnend - .cfi_endproc - .size \name, .-\name -.endm - /* * Macro that sets up the callee save frame to conform with * Runtime::CreateCalleeSaveMethod(kSaveAll) @@ -246,53 +230,6 @@ INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvok INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck - /* - * Portable invocation stub. - * On entry: - * r0 = method pointer - * r1 = argument array or NULL for no argument methods - * r2 = size of argument array in bytes - * r3 = (managed) thread pointer - * [sp] = JValue* result - * [sp + 4] = result type char - */ -ENTRY art_portable_invoke_stub - push {r0, r4, r5, r9, r11, lr} @ spill regs - .save {r0, r4, r5, r9, r11, lr} - .pad #24 - .cfi_adjust_cfa_offset 24 - .cfi_rel_offset r0, 0 - .cfi_rel_offset r4, 4 - .cfi_rel_offset r5, 8 - .cfi_rel_offset r9, 12 - .cfi_rel_offset r11, 16 - .cfi_rel_offset lr, 20 - mov r11, sp @ save the stack pointer - .cfi_def_cfa_register r11 - mov r9, r3 @ move managed thread pointer into r9 - mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval - add r5, r2, #16 @ create space for method pointer in frame - and r5, #0xFFFFFFF0 @ align frame size to 16 bytes - sub sp, r5 @ reserve stack space for argument array - add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy - bl memcpy @ memcpy (dest, src, bytes) - ldr r0, [r11] @ restore method* - ldr r1, [sp, #4] @ copy arg value for r1 - ldr r2, 
[sp, #8] @ copy arg value for r2 - ldr r3, [sp, #12] @ copy arg value for r3 - mov ip, #0 @ set ip to 0 - str ip, [sp] @ store NULL for method* at bottom of frame - add sp, #16 @ first 4 args are not passed on stack for portable - ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code - blx ip @ call the method - mov sp, r11 @ restore the stack pointer - ldr ip, [sp, #24] @ load the result pointer - strd r0, [ip] @ store r0/r1 into result pointer - pop {r0, r4, r5, r9, r11, lr} @ restore spill regs - .cfi_adjust_cfa_offset -24 - bx lr -END art_portable_invoke_stub - /* * Quick invocation stub. * On entry: @@ -352,30 +289,6 @@ ENTRY art_quick_do_long_jump bx r2 @ do long jump END art_quick_do_long_jump - /* - * Entry point of native methods when JNI bug compatibility is enabled. - */ - .extern artWorkAroundAppJniBugs -ENTRY art_quick_work_around_app_jni_bugs - @ save registers that may contain arguments and LR that will be crushed by a call - push {r0-r3, lr} - .save {r0-r3, lr} - .cfi_adjust_cfa_offset 16 - .cfi_rel_offset r0, 0 - .cfi_rel_offset r1, 4 - .cfi_rel_offset r2, 8 - .cfi_rel_offset r3, 12 - sub sp, #12 @ 3 words of space for alignment - mov r0, r9 @ pass Thread::Current - mov r1, sp @ pass SP - bl artWorkAroundAppJniBugs @ (Thread*, SP) - add sp, #12 @ rewind stack - mov r12, r0 @ save target address - pop {r0-r3, lr} @ restore possibly modified argument registers - .cfi_adjust_cfa_offset -16 - bx r12 @ tail call into JNI routine -END art_quick_work_around_app_jni_bugs - /* * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on * failure. 
@@ -906,20 +819,6 @@ ENTRY art_quick_test_suspend RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN END art_quick_test_suspend - .extern artPortableProxyInvokeHandler -ENTRY art_portable_proxy_invoke_handler - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - str r0, [sp, #0] @ place proxy method at bottom of frame - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP) - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - ldr lr, [sp, #44] @ restore lr - add sp, #48 @ pop frame - .cfi_adjust_cfa_offset -48 - bx lr @ return -END art_portable_proxy_invoke_handler - /* * Called by managed code that is attempting to call a method on a proxy class. On entry * r0 holds the proxy method and r1 holds the receiver; r2 and r3 may contain arguments. The @@ -1044,30 +943,6 @@ ENTRY art_quick_abstract_method_error_stub b artThrowAbstractMethodErrorFromCode @ (Method*, Thread*, SP) END art_quick_abstract_method_error_stub - /* - * Jni dlsym lookup stub. - */ - .extern artFindNativeMethod -ENTRY art_jni_dlsym_lookup_stub - push {r0, r1, r2, r3, lr} @ spill regs - .save {r0, r1, r2, r3, lr} - .pad #20 - .cfi_adjust_cfa_offset 20 - sub sp, #12 @ pad stack pointer to align frame - .pad #12 - .cfi_adjust_cfa_offset 12 - mov r0, r9 @ pass Thread::Current - blx artFindNativeMethod @ (Thread*) - mov r12, r0 @ save result in r12 - add sp, #12 @ restore stack pointer - .cfi_adjust_cfa_offset -12 - pop {r0, r1, r2, r3, lr} @ restore regs - .cfi_adjust_cfa_offset -20 - cmp r12, #0 @ is method code null? - bxne r12 @ if non-null, tail call to method's code - bx lr @ otherwise, return to caller to handle exception -END art_jni_dlsym_lookup_stub - /* * Signed 64-bit integer multiply. 
* diff --git a/runtime/arch/arm/quick_entrypoints_init_arm.cc b/runtime/arch/arm/quick_entrypoints_init_arm.cc deleted file mode 100644 index 2f66b361ee..0000000000 --- a/runtime/arch/arm/quick_entrypoints_init_arm.cc +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "entrypoints/quick/quick_entrypoints.h" -#include "runtime_support.h" - -namespace art { - -// Alloc entrypoints. -extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); - -// Cast entrypoints. -extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); - -// DexCache entrypoints. 
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); - -// Exception entrypoints. -extern "C" void* GetAndClearException(Thread*); - -// Field entrypoints. -extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); - -// FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); - -// Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); - -// Math entrypoints. -extern int32_t CmpgDouble(double a, double b); -extern int32_t CmplDouble(double a, double b); -extern int32_t CmpgFloat(float a, float b); -extern int32_t CmplFloat(float a, float b); - -// Math conversions. 
-extern "C" int32_t __aeabi_f2iz(float op1); // FLOAT_TO_INT -extern "C" int32_t __aeabi_d2iz(double op1); // DOUBLE_TO_INT -extern "C" float __aeabi_l2f(int64_t op1); // LONG_TO_FLOAT -extern "C" double __aeabi_l2d(int64_t op1); // LONG_TO_DOUBLE - -// Single-precision FP arithmetics. -extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] - -// Double-precision FP arithmetics. -extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] - -// Integer arithmetics. -extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8|_LIT16] - -// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] -extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t); -extern "C" int64_t art_quick_mul_long(int64_t, int64_t); -extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); - -// Interpreter entrypoints. -extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - -// Intrinsic entrypoints. -extern "C" int32_t __memcmp16(void*, void*, int32_t); -extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); -extern "C" int32_t art_quick_string_compareto(void*, void*); - -// Invoke entrypoints. 
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); - -// Thread entrypoints. -extern void CheckSuspendFromCode(Thread* thread); -extern "C" void art_quick_test_suspend(); - -// Throw entrypoints. -extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(QuickEntryPoints* points) { - // Alloc - points->pAllocArrayFromCode = art_quick_alloc_array_from_code; - points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - points->pAllocObjectFromCode = art_quick_alloc_object_from_code; - points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; - 
- // Cast - points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; - points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - points->pCheckCastFromCode = art_quick_check_cast_from_code; - - // DexCache - points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - points->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - points->pResolveStringFromCode = art_quick_resolve_string_from_code; - - // Field - points->pSet32Instance = art_quick_set32_instance_from_code; - points->pSet32Static = art_quick_set32_static_from_code; - points->pSet64Instance = art_quick_set64_instance_from_code; - points->pSet64Static = art_quick_set64_static_from_code; - points->pSetObjInstance = art_quick_set_obj_instance_from_code; - points->pSetObjStatic = art_quick_set_obj_static_from_code; - points->pGet32Instance = art_quick_get32_instance_from_code; - points->pGet64Instance = art_quick_get64_instance_from_code; - points->pGetObjInstance = art_quick_get_obj_instance_from_code; - points->pGet32Static = art_quick_get32_static_from_code; - points->pGet64Static = art_quick_get64_static_from_code; - points->pGetObjStatic = art_quick_get_obj_static_from_code; - - // FillArray - points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; - - // JNI - points->pJniMethodStart = JniMethodStart; - points->pJniMethodStartSynchronized = JniMethodStartSynchronized; - points->pJniMethodEnd = JniMethodEnd; - points->pJniMethodEndSynchronized = JniMethodEndSynchronized; - points->pJniMethodEndWithReference = JniMethodEndWithReference; - points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - - // Locks - points->pLockObjectFromCode = art_quick_lock_object_from_code; - points->pUnlockObjectFromCode = art_quick_unlock_object_from_code; - - // Math - points->pCmpgDouble = 
CmpgDouble; - points->pCmpgFloat = CmpgFloat; - points->pCmplDouble = CmplDouble; - points->pCmplFloat = CmplFloat; - points->pFmod = fmod; - points->pSqrt = sqrt; - points->pL2d = __aeabi_l2d; - points->pFmodf = fmodf; - points->pL2f = __aeabi_l2f; - points->pD2iz = __aeabi_d2iz; - points->pF2iz = __aeabi_f2iz; - points->pIdivmod = __aeabi_idivmod; - points->pD2l = art_d2l; - points->pF2l = art_f2l; - points->pLdiv = __aeabi_ldivmod; - points->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3 - points->pLmul = art_quick_mul_long; - points->pShlLong = art_quick_shl_long; - points->pShrLong = art_quick_shr_long; - points->pUshrLong = art_quick_ushr_long; - - // Interpreter - points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - points->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - - // Intrinsics - points->pIndexOf = art_quick_indexof; - points->pMemcmp16 = __memcmp16; - points->pStringCompareTo = art_quick_string_compareto; - points->pMemcpy = memcpy; - - // Invocation - points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; - points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; - points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; - points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; - points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; - points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; - points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; - points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - points->pCheckSuspendFromCode = CheckSuspendFromCode; - points->pTestSuspendFromCode = art_quick_test_suspend; - - // Throws - points->pDeliverException = 
art_quick_deliver_exception_from_code; - points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; - points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; -}; - -} // namespace art diff --git a/runtime/arch/arm/thread_arm.cc b/runtime/arch/arm/thread_arm.cc new file mode 100644 index 0000000000..ea908be22c --- /dev/null +++ b/runtime/arch/arm/thread_arm.cc @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "thread.h" + +#include "asm_support_arm.h" +#include "base/logging.h" + +namespace art { + +void Thread::InitCpu() { + CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); + CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); +} + +} // namespace art diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S new file mode 100644 index 0000000000..8a34b9dbd0 --- /dev/null +++ b/runtime/arch/mips/asm_support_mips.S @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ +#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ + +#include "asm_support_mips.h" + + /* Cache alignment for function entry */ +.macro ENTRY name + .type \name, %function + .global \name + .balign 16 +\name: + .cfi_startproc +.endm + +.macro END name + .cfi_endproc + .size \name, .-\name +.endm + + /* Generates $gp for function calls */ +.macro GENERATE_GLOBAL_POINTER + .cpload $t9 +.endm + +#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h new file mode 100644 index 0000000000..9a66352ad1 --- /dev/null +++ b/runtime/arch/mips/asm_support_mips.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_ +#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_ + +#include "asm_support.h" + +// Register holding suspend check count down. +#define rSUSPEND $s0 +// Register holding Thread::Current(). 
+#define rSELF $s1 +// Offset of field Thread::state_and_flags_ verified in InitCpu +#define THREAD_FLAGS_OFFSET 0 +// Offset of field Thread::exception_ verified in InitCpu +#define THREAD_EXCEPTION_OFFSET 12 + +#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_ diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc new file mode 100644 index 0000000000..0a62a4096d --- /dev/null +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/entrypoint_utils.h" +#include "entrypoints/math_entrypoints.h" + +namespace art { + +// Alloc entrypoints. +extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); + +// Cast entrypoints. 
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, + const mirror::Class* ref_class); +extern "C" void art_quick_can_put_array_element_from_code(void*, void*); +extern "C" void art_quick_check_cast_from_code(void*, void*); + +// DexCache entrypoints. +extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); +extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); + +// Exception entrypoints. +extern "C" void* GetAndClearException(Thread*); + +// Field entrypoints. +extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); +extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_static_from_code(uint32_t); +extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); +extern "C" int64_t art_quick_get64_static_from_code(uint32_t); +extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); +extern "C" void* art_quick_get_obj_static_from_code(uint32_t); + +// FillArray entrypoint. +extern "C" void art_quick_handle_fill_data_from_code(void*, void*); + +// Lock entrypoints. +extern "C" void art_quick_lock_object_from_code(void*); +extern "C" void art_quick_unlock_object_from_code(void*); + +// Math entrypoints. 
+extern int32_t CmpgDouble(double a, double b); +extern int32_t CmplDouble(double a, double b); +extern int32_t CmpgFloat(float a, float b); +extern int32_t CmplFloat(float a, float b); +extern "C" int64_t artLmulFromCode(int64_t a, int64_t b); +extern "C" int64_t artLdivFromCode(int64_t a, int64_t b); +extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b); + +// Math conversions. +extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT +extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT +extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT +extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE +extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG +extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG + +// Single-precision FP arithmetics. +extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] + +// Double-precision FP arithmetics. +extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] + +// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] +extern "C" int64_t __divdi3(int64_t, int64_t); +extern "C" int64_t __moddi3(int64_t, int64_t); +extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); + +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Intrinsic entrypoints. +extern "C" int32_t __memcmp16(void*, void*, int32_t); +extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); +extern "C" int32_t art_quick_string_compareto(void*, void*); + +// Invoke entrypoints. 
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); + +// Thread entrypoints. +extern void CheckSuspendFromCode(Thread* thread); +extern "C" void art_quick_test_suspend(); + +// Throw entrypoints. +extern "C" void art_quick_deliver_exception_from_code(void*); +extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero_from_code(); +extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception_from_code(); +extern "C" void art_quick_throw_stack_overflow_from_code(void*); + +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { + // Alloc + qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; + qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; + qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; + qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; + qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; + qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = 
art_quick_check_and_alloc_array_from_code_with_access_check; + + // Cast + qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; + qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; + qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; + qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; + qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; + qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + + // Field + qpoints->pSet32Instance = art_quick_set32_instance_from_code; + qpoints->pSet32Static = art_quick_set32_static_from_code; + qpoints->pSet64Instance = art_quick_set64_instance_from_code; + qpoints->pSet64Static = art_quick_set64_static_from_code; + qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; + qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; + qpoints->pGet32Instance = art_quick_get32_instance_from_code; + qpoints->pGet64Instance = art_quick_get64_instance_from_code; + qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; + qpoints->pGet32Static = art_quick_get32_static_from_code; + qpoints->pGet64Static = art_quick_get64_static_from_code; + qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + + // FillArray + qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + + // Locks + qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; + 
qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + + // Math + qpoints->pCmpgDouble = CmpgDouble; + qpoints->pCmpgFloat = CmpgFloat; + qpoints->pCmplDouble = CmplDouble; + qpoints->pCmplFloat = CmplFloat; + qpoints->pFmod = fmod; + qpoints->pL2d = __floatdidf; + qpoints->pFmodf = fmodf; + qpoints->pL2f = __floatdisf; + qpoints->pD2iz = __fixdfsi; + qpoints->pF2iz = __fixsfsi; + qpoints->pIdivmod = NULL; + qpoints->pD2l = art_d2l; + qpoints->pF2l = art_f2l; + qpoints->pLdiv = artLdivFromCode; + qpoints->pLdivmod = artLdivmodFromCode; + qpoints->pLmul = artLmulFromCode; + qpoints->pShlLong = art_quick_shl_long; + qpoints->pShrLong = art_quick_shr_long; + qpoints->pUshrLong = art_quick_ushr_long; + + // Interpreter + qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; + qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; + + // Intrinsics + qpoints->pIndexOf = art_quick_indexof; + qpoints->pMemcmp16 = __memcmp16; + qpoints->pStringCompareTo = art_quick_string_compareto; + qpoints->pMemcpy = memcpy; + + // Invocation + qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; + qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; + qpoints->pTestSuspendFromCode = art_quick_test_suspend; + + // Throws + qpoints->pDeliverException = art_quick_deliver_exception_from_code; + 
qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; + qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; + qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; + qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; + qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; + + // Portable + ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; +}; + +} // namespace art diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S new file mode 100644 index 0000000000..fca6d777ab --- /dev/null +++ b/runtime/arch/mips/jni_entrypoints_mips.S @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_mips.S" + + .set noreorder + .balign 4 + + /* + * Jni dlsym lookup stub. 
+ */ + .extern artFindNativeMethod +ENTRY art_jni_dlsym_lookup_stub + GENERATE_GLOBAL_POINTER + addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra + .cfi_adjust_cfa_offset 32 + sw $ra, 16($sp) + .cfi_rel_offset 31, 16 + sw $a3, 12($sp) + .cfi_rel_offset 7, 12 + sw $a2, 8($sp) + .cfi_rel_offset 6, 8 + sw $a1, 4($sp) + .cfi_rel_offset 5, 4 + sw $a0, 0($sp) + .cfi_rel_offset 4, 0 + jal artFindNativeMethod # (Thread*) + move $a0, $s1 # pass Thread::Current() + lw $a0, 0($sp) # restore registers from stack + lw $a1, 4($sp) + lw $a2, 8($sp) + lw $a3, 12($sp) + lw $ra, 16($sp) + beq $v0, $zero, no_native_code_found + addiu $sp, $sp, 32 # restore the stack + .cfi_adjust_cfa_offset -32 + move $t9, $v0 # put method code result in $t9 + jr $t9 # leaf call to method's code + nop +no_native_code_found: + jr $ra + nop +END art_jni_dlsym_lookup_stub + + /* + * Entry point of native methods when JNI bug compatibility is enabled. + */ + .extern artWorkAroundAppJniBugs +ENTRY art_quick_work_around_app_jni_bugs + GENERATE_GLOBAL_POINTER + # save registers that may contain arguments and LR that will be crushed by a call + addiu $sp, $sp, -32 + .cfi_adjust_cfa_offset 32 + sw $ra, 28($sp) + .cfi_rel_offset 31, 28 + sw $a3, 24($sp) + .cfi_rel_offset 7, 24 + sw $a2, 20($sp) + .cfi_rel_offset 6, 20 + sw $a1, 16($sp) + .cfi_rel_offset 5, 16 + sw $a0, 12($sp) + .cfi_rel_offset 4, 12 + move $a0, rSELF # pass Thread::Current + jal artWorkAroundAppJniBugs # (Thread*, $sp) + move $a1, $sp # pass $sp + move $t9, $v0 # save target address + lw $a0, 12($sp) + lw $a1, 16($sp) + lw $a2, 20($sp) + lw $a3, 24($sp) + lw $ra, 28($sp) + jr $t9 # tail call into JNI routine + addiu $sp, $sp, 32 + .cfi_adjust_cfa_offset -32 +END art_quick_work_around_app_jni_bugs diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S new file mode 100644 index 0000000000..e7a9b0fb60 --- /dev/null +++ b/runtime/arch/mips/portable_entrypoints_mips.S @@ -0,0 
+1,73 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_mips.S" + + .set noreorder + .balign 4 + + .extern artPortableProxyInvokeHandler +ENTRY art_portable_proxy_invoke_handler + GENERATE_GLOBAL_POINTER + # Fake callee save ref and args frame set up, note portable doesn't use callee save frames. + # TODO: just save the registers that are needed in artPortableProxyInvokeHandler. + addiu $sp, $sp, -64 + .cfi_adjust_cfa_offset 64 + sw $ra, 60($sp) + .cfi_rel_offset 31, 60 + sw $s8, 56($sp) + .cfi_rel_offset 30, 56 + sw $gp, 52($sp) + .cfi_rel_offset 28, 52 + sw $s7, 48($sp) + .cfi_rel_offset 23, 48 + sw $s6, 44($sp) + .cfi_rel_offset 22, 44 + sw $s5, 40($sp) + .cfi_rel_offset 21, 40 + sw $s4, 36($sp) + .cfi_rel_offset 20, 36 + sw $s3, 32($sp) + .cfi_rel_offset 19, 32 + sw $s2, 28($sp) + .cfi_rel_offset 18, 28 + sw $a3, 12($sp) + .cfi_rel_offset 7, 12 + sw $a2, 8($sp) + .cfi_rel_offset 6, 8 + sw $a1, 4($sp) + .cfi_rel_offset 5, 4 + # Begin argument set up. + sw $a0, 0($sp) # place proxy method at bottom of frame + move $a2, rSELF # pass Thread::Current + jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP) + move $a3, $sp # pass $sp + lw $ra, 60($sp) # restore $ra + jr $ra + addiu $sp, $sp, 64 # pop frame + .cfi_adjust_cfa_offset -64 +END art_portable_proxy_invoke_handler + + /* + * Portable abstract method error stub. 
$a0 contains method* on entry. SP unused in portable. + */ + .extern artThrowAbstractMethodErrorFromCode +ENTRY art_portable_abstract_method_error_stub + GENERATE_GLOBAL_POINTER + la $t9, artThrowAbstractMethodErrorFromCode + jr $t9 # (Method*, Thread*, SP) + move $a1, $s1 # pass Thread::Current +END art_portable_abstract_method_error_stub diff --git a/runtime/arch/mips/quick_entrypoints_init_mips.cc b/runtime/arch/mips/quick_entrypoints_init_mips.cc deleted file mode 100644 index d494c65615..0000000000 --- a/runtime/arch/mips/quick_entrypoints_init_mips.cc +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "entrypoints/quick/quick_entrypoints.h" -#include "runtime_support.h" - -namespace art { - -// Alloc entrypoints. -extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); - -// Cast entrypoints. 
-extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); - -// DexCache entrypoints. -extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); - -// Exception entrypoints. -extern "C" void* GetAndClearException(Thread*); - -// Field entrypoints. -extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); - -// FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); - -// Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); - -// Math entrypoints. 
-extern int32_t CmpgDouble(double a, double b); -extern int32_t CmplDouble(double a, double b); -extern int32_t CmpgFloat(float a, float b); -extern int32_t CmplFloat(float a, float b); -extern "C" int64_t artLmulFromCode(int64_t a, int64_t b); -extern "C" int64_t artLdivFromCode(int64_t a, int64_t b); -extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b); - -// Math conversions. -extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT -extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT -extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT -extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE -extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG -extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG - -// Single-precision FP arithmetics. -extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] - -// Double-precision FP arithmetics. -extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] - -// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] -extern "C" int64_t __divdi3(int64_t, int64_t); -extern "C" int64_t __moddi3(int64_t, int64_t); -extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); - -// Interpreter entrypoints. -extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - -// Intrinsic entrypoints. -extern "C" int32_t __memcmp16(void*, void*, int32_t); -extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); -extern "C" int32_t art_quick_string_compareto(void*, void*); - -// Invoke entrypoints. 
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); - -// Thread entrypoints. -extern void CheckSuspendFromCode(Thread* thread); -extern "C" void art_quick_test_suspend(); - -// Throw entrypoints. -extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(QuickEntryPoints* points) { - // Alloc - points->pAllocArrayFromCode = art_quick_alloc_array_from_code; - points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - points->pAllocObjectFromCode = art_quick_alloc_object_from_code; - points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; - 
- // Cast - points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; - points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - points->pCheckCastFromCode = art_quick_check_cast_from_code; - - // DexCache - points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - points->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - points->pResolveStringFromCode = art_quick_resolve_string_from_code; - - // Field - points->pSet32Instance = art_quick_set32_instance_from_code; - points->pSet32Static = art_quick_set32_static_from_code; - points->pSet64Instance = art_quick_set64_instance_from_code; - points->pSet64Static = art_quick_set64_static_from_code; - points->pSetObjInstance = art_quick_set_obj_instance_from_code; - points->pSetObjStatic = art_quick_set_obj_static_from_code; - points->pGet32Instance = art_quick_get32_instance_from_code; - points->pGet64Instance = art_quick_get64_instance_from_code; - points->pGetObjInstance = art_quick_get_obj_instance_from_code; - points->pGet32Static = art_quick_get32_static_from_code; - points->pGet64Static = art_quick_get64_static_from_code; - points->pGetObjStatic = art_quick_get_obj_static_from_code; - - // FillArray - points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; - - // JNI - points->pJniMethodStart = JniMethodStart; - points->pJniMethodStartSynchronized = JniMethodStartSynchronized; - points->pJniMethodEnd = JniMethodEnd; - points->pJniMethodEndSynchronized = JniMethodEndSynchronized; - points->pJniMethodEndWithReference = JniMethodEndWithReference; - points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - - // Locks - points->pLockObjectFromCode = art_quick_lock_object_from_code; - points->pUnlockObjectFromCode = art_quick_unlock_object_from_code; - - // Math - points->pCmpgDouble = 
CmpgDouble; - points->pCmpgFloat = CmpgFloat; - points->pCmplDouble = CmplDouble; - points->pCmplFloat = CmplFloat; - points->pFmod = fmod; - points->pL2d = __floatdidf; - points->pFmodf = fmodf; - points->pL2f = __floatdisf; - points->pD2iz = __fixdfsi; - points->pF2iz = __fixsfsi; - points->pIdivmod = NULL; - points->pD2l = art_d2l; - points->pF2l = art_f2l; - points->pLdiv = artLdivFromCode; - points->pLdivmod = artLdivmodFromCode; - points->pLmul = artLmulFromCode; - points->pShlLong = art_quick_shl_long; - points->pShrLong = art_quick_shr_long; - points->pUshrLong = art_quick_ushr_long; - - // Interpreter - points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - points->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - - // Intrinsics - points->pIndexOf = art_quick_indexof; - points->pMemcmp16 = __memcmp16; - points->pStringCompareTo = art_quick_string_compareto; - points->pMemcpy = memcpy; - - // Invocation - points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; - points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; - points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; - points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; - points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; - points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; - points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; - points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - points->pCheckSuspendFromCode = CheckSuspendFromCode; - points->pTestSuspendFromCode = art_quick_test_suspend; - - // Throws - points->pDeliverException = art_quick_deliver_exception_from_code; - points->pThrowArrayBoundsFromCode = 
art_quick_throw_array_bounds_from_code; - points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; -}; - -} // namespace art diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index 45d583e097..d32a2b4a15 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "asm_support.h" +#include "asm_support_mips.S" .set noreorder .balign 4 @@ -24,25 +24,6 @@ /* Deliver an exception pending on a thread */ .extern artDeliverPendingExceptionFromCode - /* Cache alignment for function entry */ -.macro ENTRY name - .type \name, %function - .global \name - .balign 16 -\name: - .cfi_startproc -.endm - -.macro END name - .cfi_endproc - .size \name, .-\name -.endm - - /* Generates $gp for function calls */ -.macro GENERATE_GLOBAL_POINTER - .cpload $t9 -.endm - /* * Macro that sets up the callee save frame to conform with * Runtime::CreateCalleeSaveMethod(kSaveAll) @@ -480,39 +461,6 @@ ENTRY art_quick_invoke_stub END art_quick_invoke_stub .size art_portable_invoke_stub, .-art_portable_invoke_stub - /* - * Entry point of native methods when JNI bug compatibility is enabled. 
- */ - .extern artWorkAroundAppJniBugs -ENTRY art_quick_work_around_app_jni_bugs - GENERATE_GLOBAL_POINTER - # save registers that may contain arguments and LR that will be crushed by a call - addiu $sp, $sp, -32 - .cfi_adjust_cfa_offset 32 - sw $ra, 28($sp) - .cfi_rel_offset 31, 28 - sw $a3, 24($sp) - .cfi_rel_offset 7, 28 - sw $a2, 20($sp) - .cfi_rel_offset 6, 28 - sw $a1, 16($sp) - .cfi_rel_offset 5, 28 - sw $a0, 12($sp) - .cfi_rel_offset 4, 28 - move $a0, rSELF # pass Thread::Current - jal artWorkAroundAppJniBugs # (Thread*, $sp) - move $a1, $sp # pass $sp - move $t9, $v0 # save target address - lw $a0, 12($sp) - lw $a1, 16($sp) - lw $a2, 20($sp) - lw $a3, 24($sp) - lw $ra, 28($sp) - jr $t9 # tail call into JNI routine - addiu $sp, $sp, 32 - .cfi_adjust_cfa_offset -32 -END art_quick_work_around_app_jni_bugs - /* * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on * failure. @@ -912,20 +860,6 @@ ENTRY art_quick_test_suspend RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN END art_quick_test_suspend - .extern artPortableProxyInvokeHandler -ENTRY art_portable_proxy_invoke_handler - GENERATE_GLOBAL_POINTER - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - sw $a0, 0($sp) # place proxy method at bottom of frame - move $a2, rSELF # pass Thread::Current - jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP) - move $a3, $sp # pass $sp - lw $ra, 60($sp) # restore $ra - jr $ra - addiu $sp, $sp, 64 # pop frame - .cfi_adjust_cfa_offset -64 -END art_portable_proxy_invoke_handler - /* * Called by managed code that is attempting to call a method on a proxy class. On entry * r0 holds the proxy method; r1, r2 and r3 may contain arguments. @@ -1043,17 +977,6 @@ ENTRY art_quick_deoptimize move $a1, $sp # pass $sp END art_quick_deoptimize - /* - * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable. 
- */ - .extern artThrowAbstractMethodErrorFromCode -ENTRY art_portable_abstract_method_error_stub - GENERATE_GLOBAL_POINTER - la $t9, artThrowAbstractMethodErrorFromCode - jr $t9 # (Method*, Thread*, SP) - move $a1, $s1 # pass Thread::Current -END art_portable_abstract_method_error_stub - /* * Quick abstract method error stub. $a0 contains method* on entry. */ @@ -1066,42 +989,6 @@ ENTRY art_quick_abstract_method_error_stub move $a2, $sp # pass SP END art_quick_abstract_method_error_stub - /* - * Jni dlsym lookup stub. - */ - .extern artFindNativeMethod -ENTRY art_jni_dlsym_lookup_stub - GENERATE_GLOBAL_POINTER - addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra - .cfi_adjust_cfa_offset 32 - sw $ra, 16($sp) - .cfi_rel_offset 31, 16 - sw $a3, 12($sp) - .cfi_rel_offset 7, 12 - sw $a2, 8($sp) - .cfi_rel_offset 6, 8 - sw $a1, 4($sp) - .cfi_rel_offset 5, 4 - sw $a0, 0($sp) - .cfi_rel_offset 4, 0 - jal artFindNativeMethod # (Thread*) - move $a0, $s1 # pass Thread::Current() - lw $a0, 0($sp) # restore registers from stack - lw $a1, 4($sp) - lw $a2, 8($sp) - lw $a3, 12($sp) - lw $ra, 16($sp) - beq $v0, $zero, no_native_code_found - addiu $sp, $sp, 32 # restore the stack - .cfi_adjust_cfa_offset -32 - move $t9, $v0 # put method code result in $t9 - jr $t9 # leaf call to method's code - nop -no_native_code_found: - jr $ra - nop -END art_jni_dlsym_lookup_stub - /* * Long integer shift. This is different from the generic 32/64-bit * binary operations because vAA/vBB are 64-bit but vCC (the shift diff --git a/runtime/arch/mips/thread_mips.cc b/runtime/arch/mips/thread_mips.cc new file mode 100644 index 0000000000..7364de067e --- /dev/null +++ b/runtime/arch/mips/thread_mips.cc @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "thread.h" + +#include "asm_support_mips.h" +#include "base/logging.h" + +namespace art { + +void Thread::InitCpu() { + CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); + CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); +} + +} // namespace art diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S new file mode 100644 index 0000000000..7e6dce9c6a --- /dev/null +++ b/runtime/arch/x86/asm_support_x86.S @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ +#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ + +#include "asm_support_x86.h" + +#if defined(__APPLE__) + // Mac OS' as(1) doesn't let you name macro parameters. 
+ #define MACRO0(macro_name) .macro macro_name + #define MACRO1(macro_name, macro_arg1) .macro macro_name + #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name + #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name + #define END_MACRO .endmacro + + // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names + // are mangled with an extra underscore prefix. The use of $x for arguments + // mean that literals need to be represented with $$x in macros. + #define SYMBOL(name) _ ## name + #define VAR(name,index) SYMBOL($index) + #define REG_VAR(name,index) %$index + #define CALL_MACRO(name,index) $index + #define LITERAL(value) $value + #define MACRO_LITERAL(value) $$value +#else + // Regular gas(1) lets you name macro parameters. + #define MACRO0(macro_name) .macro macro_name + #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 + #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 + #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 + #define END_MACRO .endm + + // Regular gas(1) uses \argument_name for macro arguments. + // We need to turn on alternate macro syntax so we can use & instead or the preprocessor + // will screw us by inserting a space between the \ and the name. Even in this mode there's + // no special meaning to $, so literals are still just $x. The use of altmacro means % is a + // special character meaning care needs to be taken when passing registers as macro arguments. 
+ .altmacro + #define SYMBOL(name) name + #define VAR(name,index) name& + #define REG_VAR(name,index) %name + #define CALL_MACRO(name,index) name& + #define LITERAL(value) $value + #define MACRO_LITERAL(value) $value +#endif + + /* Cache alignment for function entry */ +MACRO0(ALIGN_FUNCTION_ENTRY) + .balign 16 +END_MACRO + +MACRO1(DEFINE_FUNCTION, c_name) + .type VAR(c_name, 0), @function + .globl VAR(c_name, 0) + ALIGN_FUNCTION_ENTRY +VAR(c_name, 0): + .cfi_startproc +END_MACRO + +MACRO1(END_FUNCTION, c_name) + .cfi_endproc + .size \c_name, .-\c_name +END_MACRO + +MACRO1(PUSH, reg) + pushl REG_VAR(reg, 0) + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset REG_VAR(reg, 0), 0 +END_MACRO + +MACRO1(POP, reg) + popl REG_VAR(reg,0) + .cfi_adjust_cfa_offset -4 + .cfi_restore REG_VAR(reg,0) +END_MACRO + +#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h new file mode 100644 index 0000000000..1092910d78 --- /dev/null +++ b/runtime/arch/x86/asm_support_x86.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_ +#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_ + +#include "asm_support.h" + +// Offset of field Thread::self_ verified in InitCpu +#define THREAD_SELF_OFFSET 40 +// Offset of field Thread::exception_ verified in InitCpu +#define THREAD_EXCEPTION_OFFSET 12 + +#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_ diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc new file mode 100644 index 0000000000..d47dfef047 --- /dev/null +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/entrypoint_utils.h" + +namespace art { + +// Alloc entrypoints. +extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); + +// Cast entrypoints. 
+extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass, + const mirror::Class* ref_class); +extern "C" void art_quick_can_put_array_element_from_code(void*, void*); +extern "C" void art_quick_check_cast_from_code(void*, void*); + +// DexCache entrypoints. +extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); +extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); + +// Field entrypoints. +extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); +extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_static_from_code(uint32_t); +extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); +extern "C" int64_t art_quick_get64_static_from_code(uint32_t); +extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); +extern "C" void* art_quick_get_obj_static_from_code(uint32_t); + +// FillArray entrypoint. +extern "C" void art_quick_handle_fill_data_from_code(void*, void*); + +// Lock entrypoints. +extern "C" void art_quick_lock_object_from_code(void*); +extern "C" void art_quick_unlock_object_from_code(void*); + +// Math entrypoints. 
+extern "C" double art_quick_fmod_from_code(double, double); +extern "C" float art_quick_fmodf_from_code(float, float); +extern "C" double art_quick_l2d_from_code(int64_t); +extern "C" float art_quick_l2f_from_code(int64_t); +extern "C" int64_t art_quick_d2l_from_code(double); +extern "C" int64_t art_quick_f2l_from_code(float); +extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t); +extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t); +extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t); +extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t); +extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t); +extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t); +extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t); + +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Intrinsic entrypoints. +extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t); +extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); +extern "C" int32_t art_quick_string_compareto(void*, void*); +extern "C" void* art_quick_memcpy(void*, const void*, size_t); + +// Invoke entrypoints. 
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); + +// Thread entrypoints. +extern void CheckSuspendFromCode(Thread* thread); +extern "C" void art_quick_test_suspend(); + +// Throw entrypoints. +extern "C" void art_quick_deliver_exception_from_code(void*); +extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero_from_code(); +extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception_from_code(); +extern "C" void art_quick_throw_stack_overflow_from_code(void*); + +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { + // Alloc + qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; + qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; + qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; + qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; + qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; + qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = 
art_quick_check_and_alloc_array_from_code_with_access_check; + + // Cast + qpoints->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code; + qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; + qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; + qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; + qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; + qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + + // Field + qpoints->pSet32Instance = art_quick_set32_instance_from_code; + qpoints->pSet32Static = art_quick_set32_static_from_code; + qpoints->pSet64Instance = art_quick_set64_instance_from_code; + qpoints->pSet64Static = art_quick_set64_static_from_code; + qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; + qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; + qpoints->pGet32Instance = art_quick_get32_instance_from_code; + qpoints->pGet64Instance = art_quick_get64_instance_from_code; + qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; + qpoints->pGet32Static = art_quick_get32_static_from_code; + qpoints->pGet64Static = art_quick_get64_static_from_code; + qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + + // FillArray + qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + + // Locks + qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; + 
qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + + // Math + // points->pCmpgDouble = NULL; // Not needed on x86. + // points->pCmpgFloat = NULL; // Not needed on x86. + // points->pCmplDouble = NULL; // Not needed on x86. + // points->pCmplFloat = NULL; // Not needed on x86. + qpoints->pFmod = art_quick_fmod_from_code; + qpoints->pL2d = art_quick_l2d_from_code; + qpoints->pFmodf = art_quick_fmodf_from_code; + qpoints->pL2f = art_quick_l2f_from_code; + // points->pD2iz = NULL; // Not needed on x86. + // points->pF2iz = NULL; // Not needed on x86. + qpoints->pIdivmod = art_quick_idivmod_from_code; + qpoints->pD2l = art_quick_d2l_from_code; + qpoints->pF2l = art_quick_f2l_from_code; + qpoints->pLdiv = art_quick_ldiv_from_code; + qpoints->pLdivmod = art_quick_ldivmod_from_code; + qpoints->pLmul = art_quick_lmul_from_code; + qpoints->pShlLong = art_quick_lshl_from_code; + qpoints->pShrLong = art_quick_lshr_from_code; + qpoints->pUshrLong = art_quick_lushr_from_code; + + // Interpreter + qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; + qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; + + // Intrinsics + qpoints->pIndexOf = art_quick_indexof; + qpoints->pMemcmp16 = art_quick_memcmp16; + qpoints->pStringCompareTo = art_quick_string_compareto; + qpoints->pMemcpy = art_quick_memcpy; + + // Invocation + qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; + 
qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; + qpoints->pTestSuspendFromCode = art_quick_test_suspend; + + // Throws + qpoints->pDeliverException = art_quick_deliver_exception_from_code; + qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; + qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; + qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; + qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; + qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; + + // Portable + ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; +}; + +} // namespace art diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S new file mode 100644 index 0000000000..e9c88fec02 --- /dev/null +++ b/runtime/arch/x86/jni_entrypoints_x86.S @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_x86.S" + + /* + * Portable resolution trampoline. 
+ */ +DEFINE_FUNCTION art_jni_dlsym_lookup_stub + subl LITERAL(8), %esp // align stack + .cfi_adjust_cfa_offset 8 + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + call SYMBOL(artFindNativeMethod) // (Thread*) + addl LITERAL(12), %esp // restore the stack + .cfi_adjust_cfa_offset -12 + cmpl LITERAL(0), %eax // check if returned method code is null + je no_native_code_found // if null, jump to return to handle + jmp *%eax // otherwise, tail call to intended method +no_native_code_found: + ret +END_FUNCTION art_jni_dlsym_lookup_stub diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S new file mode 100644 index 0000000000..a0fca6cee3 --- /dev/null +++ b/runtime/arch/x86/portable_entrypoints_x86.S @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_x86.S" + + /* + * Portable invocation stub. 
+ * On entry: + * [sp] = return address + * [sp + 4] = method pointer + * [sp + 8] = argument array or NULL for no argument methods + * [sp + 12] = size of argument array in bytes + * [sp + 16] = (managed) thread pointer + * [sp + 20] = JValue* result + * [sp + 24] = result type char + */ +DEFINE_FUNCTION art_portable_invoke_stub + PUSH ebp // save ebp + PUSH ebx // save ebx + mov %esp, %ebp // copy value of stack pointer into base pointer + .cfi_def_cfa_register ebp + mov 20(%ebp), %ebx // get arg array size + addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame + andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes + subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp + subl %ebx, %esp // reserve stack space for argument array + lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy + pushl 20(%ebp) // push size of region to memcpy + pushl 16(%ebp) // push arg array as source of memcpy + pushl %eax // push stack pointer as destination of memcpy + call SYMBOL(memcpy) // (void*, const void*, size_t) + addl LITERAL(12), %esp // pop arguments to memcpy + mov 12(%ebp), %eax // move method pointer into eax + mov %eax, (%esp) // push method pointer onto stack + call *METHOD_CODE_OFFSET(%eax) // call the method + mov %ebp, %esp // restore stack pointer + POP ebx // pop ebx + POP ebp // pop ebp + mov 20(%esp), %ecx // get result pointer + cmpl LITERAL(68), 24(%esp) // test if result type char == 'D' + je return_double_portable + cmpl LITERAL(70), 24(%esp) // test if result type char == 'F' + je return_float_portable + mov %eax, (%ecx) // store the result + mov %edx, 4(%ecx) // store the other half of the result + ret +return_double_portable: + fstpl (%ecx) // store the floating point result as double + ret +return_float_portable: + fstps (%ecx) // store the floating point result as float + ret +END_FUNCTION art_portable_invoke_stub + +DEFINE_FUNCTION art_portable_proxy_invoke_handler + // 
Fake callee save ref and args frame set up, note portable doesn't use callee save frames. + // TODO: just save the registers that are needed in artPortableProxyInvokeHandler. + PUSH edi // Save callee saves + PUSH esi + PUSH ebp + PUSH ebx // Save args + PUSH edx + PUSH ecx + PUSH eax // Align stack, eax will be clobbered by Method* + // Begin argument set up. + PUSH esp // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass receiver + PUSH eax // pass proxy method + call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP) + movd %eax, %xmm0 // place return value also into floating point return value + movd %edx, %xmm1 + punpckldq %xmm1, %xmm0 + addl LITERAL(44), %esp // pop arguments + .cfi_adjust_cfa_offset -44 + ret +END_FUNCTION art_portable_proxy_invoke_handler + + /* + * Portable abstract method error stub. method* is at %esp + 4 on entry. + */ +DEFINE_FUNCTION art_portable_abstract_method_error_stub + PUSH ebp + movl %esp, %ebp // Remember SP. + .cfi_def_cfa_register ebp + subl LITERAL(12), %esp // Align stack. + PUSH esp // Pass sp (not used). + pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). + pushl 8(%ebp) // Pass Method*. + call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP) + leave // Restore the stack and %ebp. + .cfi_def_cfa esp, 4 + .cfi_restore ebp + ret // Return to caller to handle pending exception. +END_FUNCTION art_portable_abstract_method_error_stub diff --git a/runtime/arch/x86/quick_entrypoints_init_x86.cc b/runtime/arch/x86/quick_entrypoints_init_x86.cc deleted file mode 100644 index cced91619c..0000000000 --- a/runtime/arch/x86/quick_entrypoints_init_x86.cc +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "entrypoints/quick/quick_entrypoints.h" -#include "runtime_support.h" - -namespace art { - -// Alloc entrypoints. -extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); - -// Cast entrypoints. -extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass, - const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); - -// DexCache entrypoints. -extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); - -// Field entrypoints. 
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); - -// FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); - -// Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); - -// Math entrypoints. -extern "C" double art_quick_fmod_from_code(double, double); -extern "C" float art_quick_fmodf_from_code(float, float); -extern "C" double art_quick_l2d_from_code(int64_t); -extern "C" float art_quick_l2f_from_code(int64_t); -extern "C" int64_t art_quick_d2l_from_code(double); -extern "C" int64_t art_quick_f2l_from_code(float); -extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t); -extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t); -extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t); -extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t); -extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t); -extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t); -extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t); - -// Interpreter entrypoints. 
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - -// Intrinsic entrypoints. -extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t); -extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); -extern "C" int32_t art_quick_string_compareto(void*, void*); -extern "C" void* art_quick_memcpy(void*, const void*, size_t); - -// Invoke entrypoints. -extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); - -// Thread entrypoints. -extern void CheckSuspendFromCode(Thread* thread); -extern "C" void art_quick_test_suspend(); - -// Throw entrypoints. 
-extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(QuickEntryPoints* points) { - // Alloc - points->pAllocArrayFromCode = art_quick_alloc_array_from_code; - points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - points->pAllocObjectFromCode = art_quick_alloc_object_from_code; - points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; - - // Cast - points->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code; - points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - points->pCheckCastFromCode = art_quick_check_cast_from_code; - - // DexCache - points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - points->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - points->pResolveStringFromCode = art_quick_resolve_string_from_code; - - // Field - points->pSet32Instance = art_quick_set32_instance_from_code; - points->pSet32Static = art_quick_set32_static_from_code; - points->pSet64Instance = art_quick_set64_instance_from_code; - points->pSet64Static = art_quick_set64_static_from_code; - points->pSetObjInstance = art_quick_set_obj_instance_from_code; - points->pSetObjStatic = art_quick_set_obj_static_from_code; - 
points->pGet32Instance = art_quick_get32_instance_from_code; - points->pGet64Instance = art_quick_get64_instance_from_code; - points->pGetObjInstance = art_quick_get_obj_instance_from_code; - points->pGet32Static = art_quick_get32_static_from_code; - points->pGet64Static = art_quick_get64_static_from_code; - points->pGetObjStatic = art_quick_get_obj_static_from_code; - - // FillArray - points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; - - // JNI - points->pJniMethodStart = JniMethodStart; - points->pJniMethodStartSynchronized = JniMethodStartSynchronized; - points->pJniMethodEnd = JniMethodEnd; - points->pJniMethodEndSynchronized = JniMethodEndSynchronized; - points->pJniMethodEndWithReference = JniMethodEndWithReference; - points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - - // Locks - points->pLockObjectFromCode = art_quick_lock_object_from_code; - points->pUnlockObjectFromCode = art_quick_unlock_object_from_code; - - // Math - // points->pCmpgDouble = NULL; // Not needed on x86. - // points->pCmpgFloat = NULL; // Not needed on x86. - // points->pCmplDouble = NULL; // Not needed on x86. - // points->pCmplFloat = NULL; // Not needed on x86. - points->pFmod = art_quick_fmod_from_code; - points->pL2d = art_quick_l2d_from_code; - points->pFmodf = art_quick_fmodf_from_code; - points->pL2f = art_quick_l2f_from_code; - // points->pD2iz = NULL; // Not needed on x86. - // points->pF2iz = NULL; // Not needed on x86. 
- points->pIdivmod = art_quick_idivmod_from_code; - points->pD2l = art_quick_d2l_from_code; - points->pF2l = art_quick_f2l_from_code; - points->pLdiv = art_quick_ldiv_from_code; - points->pLdivmod = art_quick_ldivmod_from_code; - points->pLmul = art_quick_lmul_from_code; - points->pShlLong = art_quick_lshl_from_code; - points->pShrLong = art_quick_lshr_from_code; - points->pUshrLong = art_quick_lushr_from_code; - - // Interpreter - points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - points->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - - // Intrinsics - points->pIndexOf = art_quick_indexof; - points->pMemcmp16 = art_quick_memcmp16; - points->pStringCompareTo = art_quick_string_compareto; - points->pMemcpy = art_quick_memcpy; - - // Invocation - points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; - points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; - points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; - points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; - points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; - points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; - points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; - points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - points->pCheckSuspendFromCode = CheckSuspendFromCode; - points->pTestSuspendFromCode = art_quick_test_suspend; - - // Throws - points->pDeliverException = art_quick_deliver_exception_from_code; - points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; - points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - 
points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; -}; - -} // namespace art diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index ee6db0c3f8..89ea71a902 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -14,76 +14,7 @@ * limitations under the License. */ -#include "asm_support.h" - -#if defined(__APPLE__) - // Mac OS' as(1) doesn't let you name macro parameters. - #define MACRO0(macro_name) .macro macro_name - #define MACRO1(macro_name, macro_arg1) .macro macro_name - #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name - #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name - #define END_MACRO .endmacro - - // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names - // are mangled with an extra underscore prefix. The use of $x for arguments - // mean that literals need to be represented with $$x in macros. - #define SYMBOL(name) _ ## name - #define VAR(name,index) SYMBOL($index) - #define REG_VAR(name,index) %$index - #define CALL_MACRO(name,index) $index - #define LITERAL(value) $value - #define MACRO_LITERAL(value) $$value -#else - // Regular gas(1) lets you name macro parameters. - #define MACRO0(macro_name) .macro macro_name - #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 - #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 - #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 - #define END_MACRO .endm - - // Regular gas(1) uses \argument_name for macro arguments. - // We need to turn on alternate macro syntax so we can use & instead or the preprocessor - // will screw us by inserting a space between the \ and the name. 
Even in this mode there's - // no special meaning to $, so literals are still just $x. The use of altmacro means % is a - // special character meaning care needs to be taken when passing registers as macro arguments. - .altmacro - #define SYMBOL(name) name - #define VAR(name,index) name& - #define REG_VAR(name,index) %name - #define CALL_MACRO(name,index) name& - #define LITERAL(value) $value - #define MACRO_LITERAL(value) $value -#endif - - /* Cache alignment for function entry */ -MACRO0(ALIGN_FUNCTION_ENTRY) - .balign 16 -END_MACRO - -MACRO1(DEFINE_FUNCTION, c_name) - .type VAR(c_name, 0), @function - .globl VAR(c_name, 0) - ALIGN_FUNCTION_ENTRY -VAR(c_name, 0): - .cfi_startproc -END_MACRO - -MACRO1(END_FUNCTION, c_name) - .cfi_endproc - .size \c_name, .-\c_name -END_MACRO - -MACRO1(PUSH, reg) - pushl REG_VAR(reg, 0) - .cfi_adjust_cfa_offset 4 - .cfi_rel_offset REG_VAR(reg, 0), 0 -END_MACRO - -MACRO1(POP, reg) - popl REG_VAR(reg,0) - .cfi_adjust_cfa_offset -4 - .cfi_restore REG_VAR(reg,0) -END_MACRO +#include "asm_support_x86.S" /* * Macro that sets up the callee save frame to conform with @@ -301,55 +232,6 @@ INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvok INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck - /* - * Portable invocation stub. 
- * On entry: - * [sp] = return address - * [sp + 4] = method pointer - * [sp + 8] = argument array or NULL for no argument methods - * [sp + 12] = size of argument array in bytes - * [sp + 16] = (managed) thread pointer - * [sp + 20] = JValue* result - * [sp + 24] = result type char - */ -DEFINE_FUNCTION art_portable_invoke_stub - PUSH ebp // save ebp - PUSH ebx // save ebx - mov %esp, %ebp // copy value of stack pointer into base pointer - .cfi_def_cfa_register ebp - mov 20(%ebp), %ebx // get arg array size - addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame - andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes - subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp - subl %ebx, %esp // reserve stack space for argument array - lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy - pushl 20(%ebp) // push size of region to memcpy - pushl 16(%ebp) // push arg array as source of memcpy - pushl %eax // push stack pointer as destination of memcpy - call SYMBOL(memcpy) // (void*, const void*, size_t) - addl LITERAL(12), %esp // pop arguments to memcpy - mov 12(%ebp), %eax // move method pointer into eax - mov %eax, (%esp) // push method pointer onto stack - call *METHOD_CODE_OFFSET(%eax) // call the method - mov %ebp, %esp // restore stack pointer - POP ebx // pop ebx - POP ebp // pop ebp - mov 20(%esp), %ecx // get result pointer - cmpl LITERAL(68), 24(%esp) // test if result type char == 'D' - je return_double_portable - cmpl LITERAL(70), 24(%esp) // test if result type char == 'F' - je return_float_portable - mov %eax, (%ecx) // store the result - mov %edx, 4(%ecx) // store the other half of the result - ret -return_double_portable: - fstpl (%ecx) // store the floating point result as double - ret -return_float_portable: - fstps (%ecx) // store the floating point result as float - ret -END_FUNCTION art_portable_invoke_stub - /* * Quick invocation stub. 
* On entry: @@ -920,22 +802,6 @@ DEFINE_FUNCTION art_quick_get_obj_static_from_code RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception END_FUNCTION art_quick_get_obj_static_from_code -DEFINE_FUNCTION art_portable_proxy_invoke_handler - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method* - PUSH esp // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH ecx // pass receiver - PUSH eax // pass proxy method - call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP) - movd %eax, %xmm0 // place return value also into floating point return value - movd %edx, %xmm1 - punpckldq %xmm1, %xmm0 - addl LITERAL(44), %esp // pop arguments - .cfi_adjust_cfa_offset -44 - ret -END_FUNCTION art_portable_proxy_invoke_handler - DEFINE_FUNCTION art_quick_proxy_invoke_handler SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method* PUSH esp // pass SP @@ -1053,24 +919,6 @@ DEFINE_FUNCTION art_quick_deoptimize int3 // Unreachable. END_FUNCTION art_quick_deoptimize - /* - * Portable abstract method error stub. method* is at %esp + 4 on entry. - */ -DEFINE_FUNCTION art_portable_abstract_method_error_stub - PUSH ebp - movl %esp, %ebp // Remember SP. - .cfi_def_cfa_register ebp - subl LITERAL(12), %esp // Align stack. - PUSH esp // Pass sp (not used). - pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). - pushl 8(%ebp) // Pass Method*. - call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP) - leave // Restore the stack and %ebp. - .cfi_def_cfa esp, 4 - .cfi_restore ebp - ret // Return to caller to handle pending exception. -END_FUNCTION art_portable_abstract_method_error_stub - /* * Quick abstract method error stub. %eax contains method* on entry. */ @@ -1086,24 +934,6 @@ DEFINE_FUNCTION art_quick_abstract_method_error_stub int3 // Unreachable. END_FUNCTION art_quick_abstract_method_error_stub - /* - * Portable resolution trampoline. 
- */ -DEFINE_FUNCTION art_jni_dlsym_lookup_stub - subl LITERAL(8), %esp // align stack - .cfi_adjust_cfa_offset 8 - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - call SYMBOL(artFindNativeMethod) // (Thread*) - addl LITERAL(12), %esp // restore the stack - .cfi_adjust_cfa_offset -12 - cmpl LITERAL(0), %eax // check if returned method code is null - je no_native_code_found // if null, jump to return to handle - jmp *%eax // otherwise, tail call to intended method -no_native_code_found: - ret -END_FUNCTION art_jni_dlsym_lookup_stub - /* * String's indexOf. * diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc new file mode 100644 index 0000000000..dd3e7dd137 --- /dev/null +++ b/runtime/arch/x86/thread_x86.cc @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "thread.h" + +#include +#include + +#include "asm_support_x86.h" +#include "base/macros.h" +#include "thread.h" +#include "thread_list.h" + +#if defined(__APPLE__) +#include +#include +struct descriptor_table_entry_t { + uint16_t limit0; + uint16_t base0; + unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; + unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; +} __attribute__((packed)); +#define MODIFY_LDT_CONTENTS_DATA 0 +#else +#include +#endif + +namespace art { + +void Thread::InitCpu() { + static Mutex modify_ldt_lock("modify_ldt lock"); + MutexLock mu(Thread::Current(), modify_ldt_lock); + + const uintptr_t base = reinterpret_cast(this); + const size_t limit = kPageSize; + + const int contents = MODIFY_LDT_CONTENTS_DATA; + const int seg_32bit = 1; + const int read_exec_only = 0; + const int limit_in_pages = 0; + const int seg_not_present = 0; + const int useable = 1; + + int entry_number = -1; + +#if defined(__APPLE__) + descriptor_table_entry_t entry; + memset(&entry, 0, sizeof(entry)); + entry.limit0 = (limit & 0x0ffff); + entry.limit = (limit & 0xf0000) >> 16; + entry.base0 = (base & 0x0000ffff); + entry.base1 = (base & 0x00ff0000) >> 16; + entry.base2 = (base & 0xff000000) >> 24; + entry.type = ((read_exec_only ^ 1) << 1) | (contents << 2); + entry.s = 1; + entry.dpl = 0x3; + entry.p = seg_not_present ^ 1; + entry.avl = useable; + entry.l = 0; + entry.d = seg_32bit; + entry.g = limit_in_pages; + + entry_number = i386_set_ldt(LDT_AUTO_ALLOC, reinterpret_cast(&entry), 1); + if (entry_number == -1) { + PLOG(FATAL) << "i386_set_ldt failed"; + } +#else + // Read current LDT entries. + CHECK_EQ((size_t)LDT_ENTRY_SIZE, sizeof(uint64_t)); + std::vector ldt(LDT_ENTRIES); + size_t ldt_size(sizeof(uint64_t) * ldt.size()); + memset(&ldt[0], 0, ldt_size); + // TODO: why doesn't this return LDT_ENTRY_SIZE * LDT_ENTRIES for the main thread? + syscall(__NR_modify_ldt, 0, &ldt[0], ldt_size); + + // Find the first empty slot. 
+ for (entry_number = 0; entry_number < LDT_ENTRIES && ldt[entry_number] != 0; ++entry_number) { + } + if (entry_number >= LDT_ENTRIES) { + LOG(FATAL) << "Failed to find a free LDT slot"; + } + + // Update LDT entry. + user_desc ldt_entry; + memset(&ldt_entry, 0, sizeof(ldt_entry)); + ldt_entry.entry_number = entry_number; + ldt_entry.base_addr = base; + ldt_entry.limit = limit; + ldt_entry.seg_32bit = seg_32bit; + ldt_entry.contents = contents; + ldt_entry.read_exec_only = read_exec_only; + ldt_entry.limit_in_pages = limit_in_pages; + ldt_entry.seg_not_present = seg_not_present; + ldt_entry.useable = useable; + CHECK_EQ(0, syscall(__NR_modify_ldt, 1, &ldt_entry, sizeof(ldt_entry))); + entry_number = ldt_entry.entry_number; +#endif + + // Change %fs to be new LDT entry. + uint16_t table_indicator = 1 << 2; // LDT + uint16_t rpl = 3; // Requested privilege level + uint16_t selector = (entry_number << 3) | table_indicator | rpl; + // TODO: use our assembler to generate code + __asm__ __volatile__("movw %w0, %%fs" + : // output + : "q"(selector) // input + :); // clobber + + // Allow easy indirection back to Thread*. + self_ = this; + + // Sanity check that reads from %fs point to this Thread*. + Thread* self_check; + // TODO: use our assembler to generate code + CHECK_EQ(THREAD_SELF_OFFSET, OFFSETOF_MEMBER(Thread, self_)); + __asm__ __volatile__("movl %%fs:(%1), %0" + : "=r"(self_check) // output + : "r"(THREAD_SELF_OFFSET) // input + :); // clobber + CHECK_EQ(self_check, this); + + // Sanity check other offsets. + CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); +} + +} // namespace art diff --git a/runtime/asm_support.h b/runtime/asm_support.h index 7b20c7aee0..aca93a5552 100644 --- a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -30,29 +30,4 @@ // Offset of field Method::entry_point_from_compiled_code_ #define METHOD_CODE_OFFSET 40 -#if defined(__arm__) -// Register holding suspend check count down. 
-#define rSUSPEND r4 -// Register holding Thread::Current(). -#define rSELF r9 -// Offset of field Thread::suspend_count_ verified in InitCpu -#define THREAD_FLAGS_OFFSET 0 -// Offset of field Thread::exception_ verified in InitCpu -#define THREAD_EXCEPTION_OFFSET 12 -#elif defined(__mips__) -// Register holding suspend check count down. -#define rSUSPEND $s0 -// Register holding Thread::Current(). -#define rSELF $s1 -// Offset of field Thread::suspend_count_ verified in InitCpu -#define THREAD_FLAGS_OFFSET 0 -// Offset of field Thread::exception_ verified in InitCpu -#define THREAD_EXCEPTION_OFFSET 12 -#elif defined(__i386__) -// Offset of field Thread::self_ verified in InitCpu -#define THREAD_SELF_OFFSET 40 -// Offset of field Thread::exception_ verified in InitCpu -#define THREAD_EXCEPTION_OFFSET 12 -#endif - #endif // ART_RUNTIME_ASM_SUPPORT_H_ diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 72e0f48b3d..84f186d4b3 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -58,10 +58,7 @@ #include "object_utils.h" #include "os.h" #include "runtime.h" -#include "runtime_support.h" -#if defined(ART_USE_PORTABLE_COMPILER) -#include "runtime_support_llvm.h" -#endif +#include "entrypoints/entrypoint_utils.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" #include "sirt_ref.h" diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index 75886cf7f0..4659fd1982 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -22,6 +22,7 @@ #include "class_linker-inl.h" #include "common_test.h" #include "dex_file.h" +#include "entrypoints/entrypoint_utils.h" #include "gc/heap.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" @@ -32,7 +33,6 @@ #include "mirror/object_array-inl.h" #include "mirror/proxy.h" #include "mirror/stack_trace_element.h" -#include "runtime_support.h" #include "sirt_ref.h" using ::art::mirror::AbstractMethod; diff --git a/runtime/common_test.h 
b/runtime/common_test.h index 2c233401d2..7ee6fe20b2 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -31,6 +31,7 @@ #include "class_linker.h" #include "compiler/driver/compiler_driver.h" #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "gc/heap.h" #include "gtest/gtest.h" #include "instruction_set.h" @@ -39,7 +40,6 @@ #include "object_utils.h" #include "os.h" #include "runtime.h" -#include "runtime_support.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "thread.h" diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc new file mode 100644 index 0000000000..c29784151c --- /dev/null +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -0,0 +1,407 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" + +#include "class_linker-inl.h" +#include "dex_file-inl.h" +#include "gc/accounting/card_table-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/proxy.h" +#include "reflection.h" +#include "scoped_thread_state_change.h" +#include "ScopedLocalRef.h" +#include "well_known_classes.h" + +namespace art { + +// Helper function to allocate array for FILLED_NEW_ARRAY. 
+mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* referrer, + int32_t component_count, Thread* self, + bool access_check) { + if (UNLIKELY(component_count < 0)) { + ThrowNegativeArraySizeException(component_count); + return NULL; // Failure + } + mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->Get(type_idx); + if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve + klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer); + if (klass == NULL) { // Error + DCHECK(self->IsExceptionPending()); + return NULL; // Failure + } + } + if (UNLIKELY(klass->IsPrimitive() && !klass->IsPrimitiveInt())) { + if (klass->IsPrimitiveLong() || klass->IsPrimitiveDouble()) { + ThrowRuntimeException("Bad filled array request for type %s", + PrettyDescriptor(klass).c_str()); + } else { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(throw_location.GetMethod() == referrer); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/InternalError;", + "Found type %s; filled-new-array not implemented for anything but \'int\'", + PrettyDescriptor(klass).c_str()); + } + return NULL; // Failure + } else { + if (access_check) { + mirror::Class* referrer_klass = referrer->GetDeclaringClass(); + if (UNLIKELY(!referrer_klass->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer_klass, klass); + return NULL; // Failure + } + } + DCHECK(klass->IsArrayClass()) << PrettyClass(klass); + return mirror::Array::Alloc(self, klass, component_count); + } +} + +mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, + Thread* self, FindFieldType type, size_t expected_size, + bool access_check) { + bool is_primitive; + bool is_set; + bool is_static; + switch (type) { + case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; + case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; + case 
InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; + case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; + case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; + case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; + case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; + case StaticPrimitiveWrite: // Keep GCC happy by having a default handler, fall-through. + default: is_primitive = true; is_set = true; is_static = true; break; + } + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + mirror::Field* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static); + if (UNLIKELY(resolved_field == NULL)) { + DCHECK(self->IsExceptionPending()); // Throw exception and unwind. + return NULL; // Failure. + } + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + if (access_check) { + if (UNLIKELY(resolved_field->IsStatic() != is_static)) { + ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer); + return NULL; + } + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(fields_class) || + !referring_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()))) { + // The referring class can't access the resolved field, this may occur as a result of a + // protected field being made public by a sub-class. Resort to the dex file to determine + // the correct class for the access check. 
+ const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); + fields_class = class_linker->ResolveType(dex_file, + dex_file.GetFieldId(field_idx).class_idx_, + referring_class); + if (UNLIKELY(!referring_class->CanAccess(fields_class))) { + ThrowIllegalAccessErrorClass(referring_class, fields_class); + return NULL; // failure + } else if (UNLIKELY(!referring_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()))) { + ThrowIllegalAccessErrorField(referring_class, resolved_field); + return NULL; // failure + } + } + if (UNLIKELY(is_set && resolved_field->IsFinal() && (fields_class != referring_class))) { + ThrowIllegalAccessErrorFinalField(referrer, resolved_field); + return NULL; // failure + } else { + FieldHelper fh(resolved_field); + if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || + fh.FieldSize() != expected_size)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(throw_location.GetMethod() == referrer); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;", + "Attempted read of %zd-bit %s on field '%s'", + expected_size * (32 / sizeof(int32_t)), + is_primitive ? "primitive" : "non-primitive", + PrettyField(resolved_field, true).c_str()); + return NULL; // failure + } + } + } + if (!is_static) { + // instance fields must be being accessed on an initialized class + return resolved_field; + } else { + // If the class is initialized we're done. + if (fields_class->IsInitialized()) { + return resolved_field; + } else if (Runtime::Current()->GetClassLinker()->EnsureInitialized(fields_class, true, true)) { + // Otherwise let's ensure the class is initialized before resolving the field. 
+ return resolved_field; + } else { + DCHECK(self->IsExceptionPending()); // Throw exception and unwind + return NULL; // failure + } + } +} + +// Slow path method resolution +mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* self, bool access_check, InvokeType type) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + bool is_direct = type == kStatic || type == kDirect; + mirror::AbstractMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type); + if (UNLIKELY(resolved_method == NULL)) { + DCHECK(self->IsExceptionPending()); // Throw exception and unwind. + return NULL; // Failure. + } else if (UNLIKELY(this_object == NULL && type != kStatic)) { + // Maintain interpreter-like semantics where NullPointerException is thrown + // after potential NoSuchMethodError from class linker. + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(referrer == throw_location.GetMethod()); + ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type); + return NULL; // Failure. + } else { + if (!access_check) { + if (is_direct) { + return resolved_method; + } else if (type == kInterface) { + mirror::AbstractMethod* interface_method = + this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); + if (UNLIKELY(interface_method == NULL)) { + ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, + referrer); + return NULL; // Failure. + } else { + return interface_method; + } + } else { + mirror::ObjectArray* vtable; + uint16_t vtable_index = resolved_method->GetMethodIndex(); + if (type == kSuper) { + vtable = referrer->GetDeclaringClass()->GetSuperClass()->GetVTable(); + } else { + vtable = this_object->GetClass()->GetVTable(); + } + // TODO: eliminate bounds check? 
+ return vtable->Get(vtable_index); + } + } else { + // Incompatible class change should have been handled in resolve method. + if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) { + ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method, + referrer); + return NULL; // Failure. + } + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(methods_class) || + !referring_class->CanAccessMember(methods_class, + resolved_method->GetAccessFlags()))) { + // The referring class can't access the resolved method, this may occur as a result of a + // protected method being made public by implementing an interface that re-declares the + // method public. Resort to the dex file to determine the correct class for the access check + const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); + methods_class = class_linker->ResolveType(dex_file, + dex_file.GetMethodId(method_idx).class_idx_, + referring_class); + if (UNLIKELY(!referring_class->CanAccess(methods_class))) { + ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class, + referrer, resolved_method, type); + return NULL; // Failure. + } else if (UNLIKELY(!referring_class->CanAccessMember(methods_class, + resolved_method->GetAccessFlags()))) { + ThrowIllegalAccessErrorMethod(referring_class, resolved_method); + return NULL; // Failure. + } + } + if (is_direct) { + return resolved_method; + } else if (type == kInterface) { + mirror::AbstractMethod* interface_method = + this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); + if (UNLIKELY(interface_method == NULL)) { + ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, + referrer); + return NULL; // Failure. 
+ } else { + return interface_method; + } + } else { + mirror::ObjectArray* vtable; + uint16_t vtable_index = resolved_method->GetMethodIndex(); + if (type == kSuper) { + mirror::Class* super_class = referring_class->GetSuperClass(); + if (LIKELY(super_class != NULL)) { + vtable = referring_class->GetSuperClass()->GetVTable(); + } else { + vtable = NULL; + } + } else { + vtable = this_object->GetClass()->GetVTable(); + } + if (LIKELY(vtable != NULL && + vtable_index < static_cast(vtable->GetLength()))) { + return vtable->GetWithoutChecks(vtable_index); + } else { + // Behavior to agree with that of the verifier. + MethodHelper mh(resolved_method); + ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), mh.GetName(), + mh.GetSignature()); + return NULL; // Failure. + } + } + } + } +} + +void ThrowStackOverflowError(Thread* self) { + CHECK(!self->IsHandlingStackOverflow()) << "Recursive stack overflow."; + + if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) { + // Remove extra entry pushed onto second stack during method tracing. + Runtime::Current()->GetInstrumentation()->PopMethodForUnwind(self, false); + } + + self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute. + JNIEnvExt* env = self->GetJniEnv(); + std::string msg("stack size "); + msg += PrettySize(self->GetStackSize()); + // Use low-level JNI routine and pre-baked error class to avoid class linking operations that + // would consume more stack. + int rc = ::art::ThrowNewException(env, WellKnownClasses::java_lang_StackOverflowError, + msg.c_str(), NULL); + if (rc != JNI_OK) { + // TODO: ThrowNewException failed presumably because of an OOME, we continue to throw the OOME + // or die in the CHECK below. We may want to throw a pre-baked StackOverflowError + // instead. 
+ LOG(ERROR) << "Couldn't throw new StackOverflowError because JNI ThrowNew failed.";
+ CHECK(self->IsExceptionPending());
+ }
+ self->ResetDefaultStackEnd(); // Return to default stack size.
+}
+
+JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty,
+ jobject rcvr_jobj, jobject interface_method_jobj,
+ std::vector<jvalue>& args) {
+ DCHECK(soa.Env()->IsInstanceOf(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy));
+
+ // Build argument array possibly triggering GC.
+ soa.Self()->AssertThreadSuspensionIsAllowable();
+ jobjectArray args_jobj = NULL;
+ const JValue zero;
+ if (args.size() > 0) {
+ args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL);
+ if (args_jobj == NULL) {
+ CHECK(soa.Self()->IsExceptionPending());
+ return zero;
+ }
+ for (size_t i = 0; i < args.size(); ++i) {
+ if (shorty[i + 1] == 'L') {
+ jobject val = args.at(i).l;
+ soa.Env()->SetObjectArrayElement(args_jobj, i, val);
+ } else {
+ JValue jv;
+ jv.SetJ(args.at(i).j);
+ mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv);
+ if (val == NULL) {
+ CHECK(soa.Self()->IsExceptionPending());
+ return zero;
+ }
+ soa.Decode<mirror::ObjectArray<mirror::Object>* >(args_jobj)->Set(i, val);
+ }
+ }
+ }
+
+ // Call InvocationHandler.invoke(Object proxy, Method method, Object[] args).
+ jobject inv_hand = soa.Env()->GetObjectField(rcvr_jobj,
+ WellKnownClasses::java_lang_reflect_Proxy_h);
+ jvalue invocation_args[3];
+ invocation_args[0].l = rcvr_jobj;
+ invocation_args[1].l = interface_method_jobj;
+ invocation_args[2].l = args_jobj;
+ jobject result =
+ soa.Env()->CallObjectMethodA(inv_hand,
+ WellKnownClasses::java_lang_reflect_InvocationHandler_invoke,
+ invocation_args);
+
+ // Unbox result and handle error conditions.
+ if (LIKELY(!soa.Self()->IsExceptionPending())) {
+ if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) {
+ // Do nothing.
+ return zero;
+ } else {
+ mirror::Object* result_ref = soa.Decode<mirror::Object*>(result);
+ mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj);
+ mirror::AbstractMethod* interface_method =
+ soa.Decode<mirror::AbstractMethod*>(interface_method_jobj);
+ mirror::Class* result_type = MethodHelper(interface_method).GetReturnType();
+ mirror::AbstractMethod* proxy_method;
+ if (interface_method->GetDeclaringClass()->IsInterface()) {
+ proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(interface_method);
+ } else {
+ // Proxy dispatch to a method defined in Object.
+ DCHECK(interface_method->GetDeclaringClass()->IsObjectClass());
+ proxy_method = interface_method;
+ }
+ ThrowLocation throw_location(rcvr, proxy_method, -1);
+ JValue result_unboxed;
+ if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, result_unboxed)) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return zero;
+ }
+ return result_unboxed;
+ }
+ } else {
+ // In the case of checked exceptions that aren't declared, the exception must be wrapped by
+ // a UndeclaredThrowableException.
+ mirror::Throwable* exception = soa.Self()->GetException(NULL);
+ if (exception->IsCheckedException()) {
+ mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj);
+ mirror::SynthesizedProxyClass* proxy_class =
+ down_cast<mirror::SynthesizedProxyClass*>(rcvr->GetClass());
+ mirror::AbstractMethod* interface_method =
+ soa.Decode<mirror::AbstractMethod*>(interface_method_jobj);
+ mirror::AbstractMethod* proxy_method =
+ rcvr->GetClass()->FindVirtualMethodForInterface(interface_method);
+ int throws_index = -1;
+ size_t num_virt_methods = proxy_class->NumVirtualMethods();
+ for (size_t i = 0; i < num_virt_methods; i++) {
+ if (proxy_class->GetVirtualMethod(i) == proxy_method) {
+ throws_index = i;
+ break;
+ }
+ }
+ CHECK_NE(throws_index, -1);
+ mirror::ObjectArray<mirror::Class>* declared_exceptions = proxy_class->GetThrows()->Get(throws_index);
+ mirror::Class* exception_class = exception->GetClass();
+ bool declares_exception = false;
+ for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) {
+ mirror::Class* declared_exception = declared_exceptions->Get(i);
+ declares_exception = declared_exception->IsAssignableFrom(exception_class);
+ }
+ if (!declares_exception) {
+ ThrowLocation throw_location(rcvr, proxy_method, -1);
+ soa.Self()->ThrowNewWrappedException(throw_location,
+ "Ljava/lang/reflect/UndeclaredThrowableException;",
+ NULL);
+ }
+ }
+ return zero;
+ }
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
new file mode 100644
index 0000000000..3f28b5e41f
--- /dev/null
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_ +#define ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_ + +#include "class_linker.h" +#include "common_throws.h" +#include "dex_file.h" +#include "indirect_reference_table.h" +#include "invoke_type.h" +#include "jni_internal.h" +#include "mirror/abstract_method.h" +#include "mirror/array.h" +#include "mirror/class-inl.h" +#include "mirror/throwable.h" +#include "object_utils.h" +#include "thread.h" + +extern "C" void art_interpreter_invoke_handler(); +extern "C" void art_jni_dlsym_lookup_stub(); +extern "C" void art_portable_abstract_method_error_stub(); +extern "C" void art_portable_proxy_invoke_handler(); +extern "C" void art_quick_abstract_method_error_stub(); +extern "C" void art_quick_deoptimize(); +extern "C" void art_quick_instrumentation_entry_from_code(void*); +extern "C" void art_quick_instrumentation_exit_from_code(); +extern "C" void art_quick_interpreter_entry(void*); +extern "C" void art_quick_proxy_invoke_handler(); +extern "C" void art_work_around_app_jni_bugs(); + +namespace art { +namespace mirror { +class Class; +class Field; +class Object; +} + +// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it +// cannot be resolved, throw an error. If it can, use it to create an instance. +// When verification/compiler hasn't been able to verify access, optionally perform an access +// check. 
+static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + Thread* self, + bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); + Runtime* runtime = Runtime::Current(); + if (UNLIKELY(klass == NULL)) { + klass = runtime->GetClassLinker()->ResolveType(type_idx, method); + if (klass == NULL) { + DCHECK(self->IsExceptionPending()); + return NULL; // Failure + } + } + if (access_check) { + if (UNLIKELY(!klass->IsInstantiable())) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;", + PrettyDescriptor(klass).c_str()); + return NULL; // Failure + } + mirror::Class* referrer = method->GetDeclaringClass(); + if (UNLIKELY(!referrer->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer, klass); + return NULL; // Failure + } + } + if (!klass->IsInitialized() && + !runtime->GetClassLinker()->EnsureInitialized(klass, true, true)) { + DCHECK(self->IsExceptionPending()); + return NULL; // Failure + } + return klass->AllocObject(self); +} + +// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If +// it cannot be resolved, throw an error. If it can, use it to create an array. +// When verification/compiler hasn't been able to verify access, optionally perform an access +// check. 
+static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (UNLIKELY(component_count < 0)) { + ThrowNegativeArraySizeException(component_count); + return NULL; // Failure + } + mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); + if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve + klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); + if (klass == NULL) { // Error + DCHECK(Thread::Current()->IsExceptionPending()); + return NULL; // Failure + } + CHECK(klass->IsArrayClass()) << PrettyClass(klass); + } + if (access_check) { + mirror::Class* referrer = method->GetDeclaringClass(); + if (UNLIKELY(!referrer->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer, klass); + return NULL; // Failure + } + } + return mirror::Array::Alloc(self, klass, component_count); +} + +extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// Type of find field operation for fast and slow case. +enum FindFieldType { + InstanceObjectRead, + InstanceObjectWrite, + InstancePrimitiveRead, + InstancePrimitiveWrite, + StaticObjectRead, + StaticObjectWrite, + StaticPrimitiveRead, + StaticPrimitiveWrite, +}; + +// Slow field find that can initialize classes and may throw exceptions. +extern mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, + Thread* self, FindFieldType type, size_t expected_size, + bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// Fast path field resolution that can't initialize classes or throw exceptions. 
+static inline mirror::Field* FindFieldFast(uint32_t field_idx, + const mirror::AbstractMethod* referrer, + FindFieldType type, size_t expected_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* resolved_field = + referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); + if (UNLIKELY(resolved_field == NULL)) { + return NULL; + } + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + // Check class is initiliazed or initializing. + if (UNLIKELY(!fields_class->IsInitializing())) { + return NULL; + } + // Check for incompatible class change. + bool is_primitive; + bool is_set; + bool is_static; + switch (type) { + case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; + case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; + case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; + case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; + case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; + case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; + case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; + case StaticPrimitiveWrite: is_primitive = true; is_set = true; is_static = true; break; + default: + LOG(FATAL) << "UNREACHABLE"; // Assignment below to avoid GCC warnings. + is_primitive = true; + is_set = true; + is_static = true; + break; + } + if (UNLIKELY(resolved_field->IsStatic() != is_static)) { + // Incompatible class change. + return NULL; + } + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(fields_class) || + !referring_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()) || + (is_set && resolved_field->IsFinal() && (fields_class != referring_class)))) { + // Illegal access. 
+ return NULL; + } + FieldHelper fh(resolved_field); + if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || + fh.FieldSize() != expected_size)) { + return NULL; + } + return resolved_field; +} + +// Fast path method resolution that can't throw exceptions. +static inline mirror::AbstractMethod* FindMethodFast(uint32_t method_idx, + mirror::Object* this_object, + const mirror::AbstractMethod* referrer, + bool access_check, InvokeType type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool is_direct = type == kStatic || type == kDirect; + if (UNLIKELY(this_object == NULL && !is_direct)) { + return NULL; + } + mirror::AbstractMethod* resolved_method = + referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx); + if (UNLIKELY(resolved_method == NULL)) { + return NULL; + } + if (access_check) { + // Check for incompatible class change errors and access. + bool icce = resolved_method->CheckIncompatibleClassChange(type); + if (UNLIKELY(icce)) { + return NULL; + } + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(methods_class) || + !referring_class->CanAccessMember(methods_class, + resolved_method->GetAccessFlags()))) { + // Potential illegal access, may need to refine the method's class. + return NULL; + } + } + if (type == kInterface) { // Most common form of slow path dispatch. 
+ return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
+ } else if (is_direct) {
+ return resolved_method;
+ } else if (type == kSuper) {
+ return referrer->GetDeclaringClass()->GetSuperClass()->GetVTable()->
+ Get(resolved_method->GetMethodIndex());
+ } else {
+ DCHECK(type == kVirtual);
+ return this_object->GetClass()->GetVTable()->Get(resolved_method->GetMethodIndex());
+ }
+}
+
+extern mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* self, bool access_check, InvokeType type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
+ const mirror::AbstractMethod* referrer,
+ Thread* self, bool can_run_clinit,
+ bool verify_access)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ mirror::Class* klass = class_linker->ResolveType(type_idx, referrer);
+ if (UNLIKELY(klass == NULL)) {
+ CHECK(self->IsExceptionPending());
+ return NULL; // Failure - Indicate to caller to deliver exception
+ }
+ // Perform access check if necessary.
+ mirror::Class* referring_class = referrer->GetDeclaringClass();
+ if (verify_access && UNLIKELY(!referring_class->CanAccess(klass))) {
+ ThrowIllegalAccessErrorClass(referring_class, klass);
+ return NULL; // Failure - Indicate to caller to deliver exception
+ }
+ // If we're just implementing const-class, we shouldn't call <clinit>.
+ if (!can_run_clinit) {
+ return klass;
+ }
+ // If we are the <clinit> of this class, just return our storage.
+ //
+ // Do not set the DexCache InitializedStaticStorage, since that implies <clinit> has finished
+ // running.
+ if (klass == referring_class && MethodHelper(referrer).IsClassInitializer()) { + return klass; + } + if (!class_linker->EnsureInitialized(klass, true, true)) { + CHECK(self->IsExceptionPending()); + return NULL; // Failure - Indicate to caller to deliver exception + } + referrer->GetDexCacheInitializedStaticStorage()->Set(type_idx, klass); + return klass; +} + +extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +static inline mirror::String* ResolveStringFromCode(const mirror::AbstractMethod* referrer, + uint32_t string_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + return class_linker->ResolveString(string_idx, referrer); +} + +static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + UNLOCK_FUNCTION(monitor_lock_) { + // Save any pending exception over monitor exit call. + mirror::Throwable* saved_exception = NULL; + ThrowLocation saved_throw_location; + if (UNLIKELY(self->IsExceptionPending())) { + saved_exception = self->GetException(&saved_throw_location); + self->ClearException(); + } + // Decode locked object and unlock, before popping local references. + self->DecodeJObject(locked)->MonitorExit(self); + if (UNLIKELY(self->IsExceptionPending())) { + LOG(FATAL) << "Synchronized JNI code returning with an exception:\n" + << saved_exception->Dump() + << "\nEncountered second exception during implicit MonitorExit:\n" + << self->GetException(NULL)->Dump(); + } + // Restore pending exception. 
+ if (saved_exception != NULL) {
+ self->SetException(saved_throw_location, saved_exception);
+ }
+}
+
+static inline void CheckReferenceResult(mirror::Object* o, Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (o == NULL) {
+ return;
+ }
+ mirror::AbstractMethod* m = self->GetCurrentMethod(NULL);
+ if (o == kInvalidIndirectRefObject) {
+ JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str());
+ }
+ // Make sure that the result is an instance of the type this method was expected to return.
+ mirror::Class* return_type = MethodHelper(m).GetReturnType();
+
+ if (!o->InstanceOf(return_type)) {
+ JniAbortF(NULL, "attempt to return an instance of %s from %s",
+ PrettyTypeOf(o).c_str(), PrettyMethod(m).c_str());
+ }
+}
+
+static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (;;) {
+ if (thread->ReadFlag(kCheckpointRequest)) {
+ thread->RunCheckpointFunction();
+ thread->AtomicClearFlag(kCheckpointRequest);
+ } else if (thread->ReadFlag(kSuspendRequest)) {
+ thread->FullSuspendCheck();
+ } else {
+ break;
+ }
+ }
+}
+
+JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty,
+ jobject rcvr_jobj, jobject interface_method_jobj,
+ std::vector<jvalue>& args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+// Entry point for deoptimization.
+static inline uintptr_t GetDeoptimizationEntryPoint() {
+ return reinterpret_cast<uintptr_t>(art_quick_deoptimize);
+}
+
+// Return address of instrumentation stub.
+static inline void* GetInstrumentationEntryPoint() {
+ return reinterpret_cast<void*>(art_quick_instrumentation_entry_from_code);
+}
+
+// The return_pc of instrumentation exit stub.
+static inline uintptr_t GetInstrumentationExitPc() {
+ return reinterpret_cast<uintptr_t>(art_quick_instrumentation_exit_from_code);
+}
+
+// Return address of interpreter stub.
+static inline void* GetInterpreterEntryPoint() {
+ return reinterpret_cast<void*>(art_quick_interpreter_entry);
+}
+
+static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) {
+ return class_linker->GetPortableResolutionTrampoline();
+}
+
+static inline const void* GetQuickResolutionTrampoline(ClassLinker* class_linker) {
+ return class_linker->GetQuickResolutionTrampoline();
+}
+
+// Return address of resolution trampoline stub for defined compiler.
+static inline const void* GetResolutionTrampoline(ClassLinker* class_linker) {
+#if defined(ART_USE_PORTABLE_COMPILER)
+ return GetPortableResolutionTrampoline(class_linker);
+#else
+ return GetQuickResolutionTrampoline(class_linker);
+#endif
+}
+
+static inline void* GetPortableAbstractMethodErrorStub() {
+ return reinterpret_cast<void*>(art_portable_abstract_method_error_stub);
+}
+
+static inline void* GetQuickAbstractMethodErrorStub() {
+ return reinterpret_cast<void*>(art_quick_abstract_method_error_stub);
+}
+
+// Return address of abstract method error stub for defined compiler.
+static inline void* GetAbstractMethodErrorStub() {
+#if defined(ART_USE_PORTABLE_COMPILER)
+ return GetPortableAbstractMethodErrorStub();
+#else
+ return GetQuickAbstractMethodErrorStub();
+#endif
+}
+
+static inline void* GetJniDlsymLookupStub() {
+ return reinterpret_cast<void*>(art_jni_dlsym_lookup_stub);
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
new file mode 100644
index 0000000000..98f7b1283c
--- /dev/null
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "base/logging.h" +#include "mirror/abstract_method.h" +#include "scoped_thread_state_change.h" +#include "thread.h" + +namespace art { + +// Used by the JNI dlsym stub to find the native method to invoke if none is registered. +extern "C" void* artFindNativeMethod(Thread* self) { + Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native. + DCHECK(Thread::Current() == self); + ScopedObjectAccess soa(self); + + mirror::AbstractMethod* method = self->GetCurrentMethod(NULL); + DCHECK(method != NULL); + + // Lookup symbol address for method, on failure we'll return NULL with an + // exception set, otherwise we return the address of the method we found. + void* native_code = soa.Vm()->FindCodeForNativeMethod(method); + if (native_code == NULL) { + DCHECK(self->IsExceptionPending()); + return NULL; + } else { + // Register so that future calls don't come here + method->RegisterNative(self, native_code); + return native_code; + } +} + +} // namespace art diff --git a/runtime/entrypoints/math_entrypoints.cc b/runtime/entrypoints/math_entrypoints.cc new file mode 100644 index 0000000000..31d13c8cd5 --- /dev/null +++ b/runtime/entrypoints/math_entrypoints.cc @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "math_entrypoints.h"
+
+namespace art {
+
+extern "C" double art_l2d(int64_t l) {
+ return static_cast<double>(l);
+}
+
+extern "C" float art_l2f(int64_t l) {
+ return static_cast<float>(l);
+}
+
+/*
+ * Float/double conversion requires clamping to min and max of integer form. If
+ * target doesn't support this normally, use these.
+ */
+extern "C" int64_t art_d2l(double d) {
+ static const double kMaxLong = static_cast<double>(static_cast<int64_t>(0x7fffffffffffffffULL));
+ static const double kMinLong = static_cast<double>(static_cast<int64_t>(0x8000000000000000ULL));
+ if (d >= kMaxLong) {
+ return static_cast<int64_t>(0x7fffffffffffffffULL);
+ } else if (d <= kMinLong) {
+ return static_cast<int64_t>(0x8000000000000000ULL);
+ } else if (d != d) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int64_t>(d);
+ }
+}
+
+extern "C" int64_t art_f2l(float f) {
+ static const float kMaxLong = static_cast<float>(static_cast<int64_t>(0x7fffffffffffffffULL));
+ static const float kMinLong = static_cast<float>(static_cast<int64_t>(0x8000000000000000ULL));
+ if (f >= kMaxLong) {
+ return static_cast<int64_t>(0x7fffffffffffffffULL);
+ } else if (f <= kMinLong) {
+ return static_cast<int64_t>(0x8000000000000000ULL);
+ } else if (f != f) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int64_t>(f);
+ }
+}
+
+extern "C" int32_t art_d2i(double d) {
+ static const double kMaxInt = static_cast<double>(static_cast<int32_t>(0x7fffffffUL));
+ static const double kMinInt = static_cast<double>(static_cast<int32_t>(0x80000000UL));
+ if (d >= kMaxInt) {
+ return static_cast<int32_t>(0x7fffffffUL);
+ } else if (d <= kMinInt) {
+ return static_cast<int32_t>(0x80000000UL);
+ } else if (d != d) { // NaN case
+ return 0;
+ }
 else {
+ return static_cast<int32_t>(d);
+ }
+}
+
+extern "C" int32_t art_f2i(float f) {
+ static const float kMaxInt = static_cast<float>(static_cast<int32_t>(0x7fffffffUL));
+ static const float kMinInt = static_cast<float>(static_cast<int32_t>(0x80000000UL));
+ if (f >= kMaxInt) {
+ return static_cast<int32_t>(0x7fffffffUL);
+ } else if (f <= kMinInt) {
+ return static_cast<int32_t>(0x80000000UL);
+ } else if (f != f) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int32_t>(f);
+ }
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/math_entrypoints.h b/runtime/entrypoints/math_entrypoints.h
new file mode 100644
index 0000000000..717c7349bd
--- /dev/null
+++ b/runtime/entrypoints/math_entrypoints.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
+
+#include <stdint.h>
+
+extern "C" double art_l2d(int64_t l);
+extern "C" float art_l2f(int64_t l);
+extern "C" int64_t art_d2l(double d);
+extern "C" int32_t art_d2i(double d);
+extern "C" int64_t art_f2l(float f);
+extern "C" int32_t art_f2i(float f);
+
+#endif // ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/math_entrypoints_test.cc b/runtime/entrypoints/math_entrypoints_test.cc
new file mode 100644
index 0000000000..ca8b931309
--- /dev/null
+++ b/runtime/entrypoints/math_entrypoints_test.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "math_entrypoints.h"
+
+#include "common_test.h"
+#include <limits>
+
+namespace art {
+
+class MathEntrypointsTest : public CommonTest {};
+
+TEST_F(MathEntrypointsTest, DoubleToLong) {
+ EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_d2l(1.85e19));
+ EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_d2l(-1.85e19));
+ EXPECT_EQ(0LL, art_d2l(0));
+ EXPECT_EQ(1LL, art_d2l(1.0));
+ EXPECT_EQ(10LL, art_d2l(10.0));
+ EXPECT_EQ(100LL, art_d2l(100.0));
+ EXPECT_EQ(-1LL, art_d2l(-1.0));
+ EXPECT_EQ(-10LL, art_d2l(-10.0));
+ EXPECT_EQ(-100LL, art_d2l(-100.0));
+}
+
+TEST_F(MathEntrypointsTest, FloatToLong) {
+ EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_f2l(1.85e19));
+ EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_f2l(-1.85e19));
+ EXPECT_EQ(0LL, art_f2l(0));
+ EXPECT_EQ(1LL, art_f2l(1.0));
+ EXPECT_EQ(10LL, art_f2l(10.0));
+ EXPECT_EQ(100LL, art_f2l(100.0));
+ EXPECT_EQ(-1LL, art_f2l(-1.0));
+ EXPECT_EQ(-10LL, art_f2l(-10.0));
+ EXPECT_EQ(-100LL, art_f2l(-100.0));
+}
+
+TEST_F(MathEntrypointsTest, DoubleToInt) {
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_d2i(4.3e9));
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_d2i(-4.3e9));
+ EXPECT_EQ(0L, art_d2i(0));
+ EXPECT_EQ(1L, art_d2i(1.0));
+ EXPECT_EQ(10L, art_d2i(10.0));
+ EXPECT_EQ(100L, art_d2i(100.0));
+ EXPECT_EQ(-1L, art_d2i(-1.0));
+ EXPECT_EQ(-10L, art_d2i(-10.0));
+ EXPECT_EQ(-100L, art_d2i(-100.0));
+}
+
+TEST_F(MathEntrypointsTest, FloatToInt) {
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_f2i(4.3e9));
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_f2i(-4.3e9));
+ EXPECT_EQ(0L, art_f2i(0));
+ EXPECT_EQ(1L, art_f2i(1.0));
+ EXPECT_EQ(10L, art_f2i(10.0));
+ EXPECT_EQ(100L, art_f2i(100.0));
+ EXPECT_EQ(-1L, art_f2i(-1.0));
+ EXPECT_EQ(-10L, art_f2i(-10.0));
+ EXPECT_EQ(-100L, art_f2i(-100.0));
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_alloc_entrypoints.cc b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
new file mode 100644
index 0000000000..286926909c
--- /dev/null
+++
b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" mirror::Object* art_portable_alloc_object_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocObjectFromCode(type_idx, referrer, thread, false); +} + +extern "C" mirror::Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocObjectFromCode(type_idx, referrer, thread, true); +} + +extern "C" mirror::Object* art_portable_alloc_array_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocArrayFromCode(type_idx, referrer, length, self, false); +} + +extern "C" mirror::Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocArrayFromCode(type_idx, referrer, length, self, true); +} + +extern "C" mirror::Object* 
art_portable_check_and_alloc_array_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false); +} + +extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_argument_visitor.h b/runtime/entrypoints/portable/portable_argument_visitor.h new file mode 100644 index 0000000000..f268baf790 --- /dev/null +++ b/runtime/entrypoints/portable/portable_argument_visitor.h @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_ +#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_ + +#include "object_utils.h" + +namespace art { + +// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. +class PortableArgumentVisitor { + public: +// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame. +// Size of Runtime::kRefAndArgs callee save frame. 
+// Size of Method* and register parameters in out stack arguments. +#if defined(__arm__) +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8 +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48 +#define PORTABLE_STACK_ARG_SKIP 0 +#elif defined(__mips__) +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64 +#define PORTABLE_STACK_ARG_SKIP 16 +#elif defined(__i386__) +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32 +#define PORTABLE_STACK_ARG_SKIP 4 +#else +#error "Unsupported architecture" +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0 +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0 +#define PORTABLE_STACK_ARG_SKIP 0 +#endif + + PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + caller_mh_(caller_mh), + args_in_regs_(ComputeArgsInRegs(caller_mh)), + num_params_(caller_mh.NumArgs()), + reg_args_(reinterpret_cast(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET), + stack_args_(reinterpret_cast(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE + + PORTABLE_STACK_ARG_SKIP), + cur_args_(reg_args_), + cur_arg_index_(0), + param_index_(0) { + } + + virtual ~PortableArgumentVisitor() {} + + virtual void Visit() = 0; + + bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return caller_mh_.IsParamAReference(param_index_); + } + + bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return caller_mh_.IsParamALongOrDouble(param_index_); + } + + Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return caller_mh_.GetParamPrimitiveType(param_index_); + } + + byte* GetParamAddress() const { + return cur_args_ + (cur_arg_index_ * kPointerSize); + } + + void VisitArguments() 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) { +#if (defined(__arm__) || defined(__mips__)) + if (IsParamALongOrDouble() && cur_arg_index_ == 2) { + break; + } +#endif + Visit(); + cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); + param_index_++; + } + cur_args_ = stack_args_; + cur_arg_index_ = 0; + while (param_index_ < num_params_) { +#if (defined(__arm__) || defined(__mips__)) + if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) { + cur_arg_index_++; + } +#endif + Visit(); + cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); + param_index_++; + } + } + + private: + static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if (defined(__i386__)) + return 0; +#else + size_t args_in_regs = 0; + size_t num_params = mh.NumArgs(); + for (size_t i = 0; i < num_params; i++) { + args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1); + if (args_in_regs > 3) { + args_in_regs = 3; + break; + } + } + return args_in_regs; +#endif + } + MethodHelper& caller_mh_; + const size_t args_in_regs_; + const size_t num_params_; + byte* const reg_args_; + byte* const stack_args_; + byte* cur_args_; + size_t cur_arg_index_; + size_t param_index_; +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_ diff --git a/runtime/entrypoints/portable/portable_cast_entrypoints.cc b/runtime/entrypoints/portable/portable_cast_entrypoints.cc new file mode 100644 index 0000000000..d343c5dc1f --- /dev/null +++ b/runtime/entrypoints/portable/portable_cast_entrypoints.cc @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_throws.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" int32_t art_portable_is_assignable_from_code(const mirror::Class* dest_type, + const mirror::Class* src_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(dest_type != NULL); + DCHECK(src_type != NULL); + return dest_type->IsAssignableFrom(src_type) ? 1 : 0; +} + +extern "C" void art_portable_check_cast_from_code(const mirror::Class* dest_type, + const mirror::Class* src_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); + DCHECK(src_type->IsClass()) << PrettyClass(src_type); + if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) { + ThrowClassCastException(dest_type, src_type); + } +} + +extern "C" void art_portable_check_put_array_element_from_code(const mirror::Object* element, + const mirror::Object* array) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (element == NULL) { + return; + } + DCHECK(array != NULL); + mirror::Class* array_class = array->GetClass(); + DCHECK(array_class != NULL); + mirror::Class* component_type = array_class->GetComponentType(); + mirror::Class* element_class = element->GetClass(); + if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) { + ThrowArrayStoreException(element_class, array_class); + } +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc new file mode 100644 index 
0000000000..bdab587797 --- /dev/null +++ b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "gc/accounting/card_table-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" mirror::Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false); +} + +extern "C" mirror::Object* art_portable_initialize_type_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false); +} + +extern "C" mirror::Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Called when caller isn't guaranteed to have access to a type and the dex cache may be + // unpopulated + return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true); +} + +extern "C" mirror::Object* art_portable_resolve_string_from_code(mirror::AbstractMethod* referrer, + uint32_t string_idx) + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return ResolveStringFromCode(referrer, string_idx); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_entrypoints.h b/runtime/entrypoints/portable/portable_entrypoints.h new file mode 100644 index 0000000000..a229c76dbd --- /dev/null +++ b/runtime/entrypoints/portable/portable_entrypoints.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_ +#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_ + +#include "dex_file-inl.h" +#include "runtime.h" + +namespace art { +namespace mirror { + class AbstractMethod; + class Object; +} // namespace mirror +class Thread; + +#define PORTABLE_ENTRYPOINT_OFFSET(x) \ + (static_cast(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \ + static_cast(OFFSETOF_MEMBER(PortableEntryPoints, x))) + +// Pointers to functions that are called by code generated by compiler's adhering to the portable +// compiler ABI. 
+struct PACKED(4) PortableEntryPoints { + // Invocation + const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, + mirror::AbstractMethod**, Thread*); +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/portable/portable_field_entrypoints.cc b/runtime/entrypoints/portable/portable_field_entrypoints.cc new file mode 100644 index 0000000000..aa0f03ce8b --- /dev/null +++ b/runtime/entrypoints/portable/portable_field_entrypoints.cc @@ -0,0 +1,241 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" int32_t art_portable_set32_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + int32_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, + referrer, + StaticPrimitiveWrite, + sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + field->Set32(field->GetDeclaringClass(), new_value); + return 0; + } + field = FindFieldFromCode(field_idx, + referrer, + Thread::Current(), + StaticPrimitiveWrite, + sizeof(uint32_t), + true); + if (LIKELY(field != NULL)) { + field->Set32(field->GetDeclaringClass(), new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set64_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + int64_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + field->Set64(field->GetDeclaringClass(), new_value); + return 0; + } + field = FindFieldFromCode(field_idx, + referrer, + Thread::Current(), + StaticPrimitiveWrite, + sizeof(uint64_t), + true); + if (LIKELY(field != NULL)) { + field->Set64(field->GetDeclaringClass(), new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set_obj_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + field->SetObj(field->GetDeclaringClass(), new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticObjectWrite, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + 
field->SetObj(field->GetDeclaringClass(), new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_get32_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + return field->Get32(field->GetDeclaringClass()); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticPrimitiveRead, sizeof(uint32_t), true); + if (LIKELY(field != NULL)) { + return field->Get32(field->GetDeclaringClass()); + } + return 0; +} + +extern "C" int64_t art_portable_get64_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + return field->Get64(field->GetDeclaringClass()); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticPrimitiveRead, sizeof(uint64_t), true); + if (LIKELY(field != NULL)) { + return field->Get64(field->GetDeclaringClass()); + } + return 0; +} + +extern "C" mirror::Object* art_portable_get_obj_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + return field->GetObj(field->GetDeclaringClass()); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticObjectRead, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + return field->GetObj(field->GetDeclaringClass()); + } + return 0; +} + +extern "C" int32_t art_portable_set32_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj, uint32_t new_value) + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + field->Set32(obj, new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveWrite, sizeof(uint32_t), true); + if (LIKELY(field != NULL)) { + field->Set32(obj, new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set64_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj, int64_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + field->Set64(obj, new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveWrite, sizeof(uint64_t), true); + if (LIKELY(field != NULL)) { + field->Set64(obj, new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj, + mirror::Object* new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + field->SetObj(obj, new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstanceObjectWrite, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + field->SetObj(obj, new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_get32_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + return 
field->Get32(obj); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveRead, sizeof(uint32_t), true); + if (LIKELY(field != NULL)) { + return field->Get32(obj); + } + return 0; +} + +extern "C" int64_t art_portable_get64_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + return field->Get64(obj); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveRead, sizeof(uint64_t), true); + if (LIKELY(field != NULL)) { + return field->Get64(obj); + } + return 0; +} + +extern "C" mirror::Object* art_portable_get_obj_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + return field->GetObj(obj); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstanceObjectRead, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + return field->GetObj(obj); + } + return 0; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc new file mode 100644 index 0000000000..771608b604 --- /dev/null +++ b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dex_instruction.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" void art_portable_fill_array_data_from_code(mirror::AbstractMethod* method, + uint32_t dex_pc, + mirror::Array* array, + uint32_t payload_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DexFile::CodeItem* code_item = MethodHelper(method).GetCodeItem(); + const Instruction::ArrayDataPayload* payload = + reinterpret_cast(code_item->insns_ + payload_offset); + DCHECK_EQ(payload->ident, static_cast(Instruction::kArrayDataSignature)); + if (UNLIKELY(array == NULL)) { + ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA"); + return; // Error + } + DCHECK(array->IsArrayInstance() && !array->IsObjectArray()); + if (UNLIKELY(static_cast(payload->element_count) > array->GetLength())) { + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;", + "failed FILL_ARRAY_DATA; length=%d, index=%d", + array->GetLength(), payload->element_count - 1); + return; // Error + } + uint32_t size_in_bytes = payload->element_count * payload->element_width; + memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc new file mode 100644 index 
0000000000..5911ba3d8b --- /dev/null +++ b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/dex_cache-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +static mirror::AbstractMethod* FindMethodHelper(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + bool access_check, + InvokeType type, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* method = FindMethodFast(method_idx, + this_object, + caller_method, + access_check, + type); + if (UNLIKELY(method == NULL)) { + method = FindMethodFromCode(method_idx, this_object, caller_method, + thread, access_check, type); + if (UNLIKELY(method == NULL)) { + CHECK(thread->IsExceptionPending()); + return 0; // failure + } + } + DCHECK(!thread->IsExceptionPending()); + const void* code = method->GetEntryPointFromCompiledCode(); + + // When we return, the caller will branch to this address, so it had better not be 0! 
+ if (UNLIKELY(code == NULL)) { + MethodHelper mh(method); + LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method) + << " location: " << mh.GetDexFile().GetLocation(); + } + return method; +} + +extern "C" mirror::Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread); +} + +extern "C" mirror::Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread); +} + +extern "C" mirror::Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread); +} + +extern "C" mirror::Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread); +} + +extern "C" mirror::Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread); +} + +extern "C" mirror::Object* art_portable_find_interface_method_from_code(uint32_t method_idx, + mirror::Object* this_object, + 
mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_jni_entrypoints.cc b/runtime/entrypoints/portable/portable_jni_entrypoints.cc new file mode 100644 index 0000000000..8df16ae931 --- /dev/null +++ b/runtime/entrypoints/portable/portable_jni_entrypoints.cc @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "thread-inl.h" + +namespace art { + +// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_. 
+extern "C" uint32_t art_portable_jni_method_start(Thread* self) + UNLOCK_FUNCTION(GlobalSynchronizatio::mutator_lock_) { + JNIEnvExt* env = self->GetJniEnv(); + uint32_t saved_local_ref_cookie = env->local_ref_cookie; + env->local_ref_cookie = env->locals.GetSegmentState(); + self->TransitionFromRunnableToSuspended(kNative); + return saved_local_ref_cookie; +} + +extern "C" uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self) + UNLOCK_FUNCTION(Locks::mutator_lock_) { + self->DecodeJObject(to_lock)->MonitorEnter(self); + return art_portable_jni_method_start(self); +} + +static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { + JNIEnvExt* env = self->GetJniEnv(); + env->locals.SetSegmentState(env->local_ref_cookie); + env->local_ref_cookie = saved_local_ref_cookie; +} + +extern "C" void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + PopLocalReferences(saved_local_ref_cookie, self); +} + + +extern "C" void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, + jobject locked, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. + PopLocalReferences(saved_local_ref_cookie, self); +} + +extern "C" mirror::Object* art_portable_jni_method_end_with_reference(jobject result, + uint32_t saved_local_ref_cookie, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + mirror::Object* o = self->DecodeJObject(result); // Must decode before pop. + PopLocalReferences(saved_local_ref_cookie, self); + // Process result. 
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) { + if (self->IsExceptionPending()) { + return NULL; + } + CheckReferenceResult(o, self); + } + return o; +} + +extern "C" mirror::Object* art_portable_jni_method_end_with_reference_synchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. + mirror::Object* o = self->DecodeJObject(result); + PopLocalReferences(saved_local_ref_cookie, self); + // Process result. + if (UNLIKELY(self->GetJniEnv()->check_jni)) { + if (self->IsExceptionPending()) { + return NULL; + } + CheckReferenceResult(o, self); + } + return o; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_lock_entrypoints.cc b/runtime/entrypoints/portable/portable_lock_entrypoints.cc new file mode 100644 index 0000000000..44d3da9897 --- /dev/null +++ b/runtime/entrypoints/portable/portable_lock_entrypoints.cc @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" void art_portable_lock_object_from_code(mirror::Object* obj, Thread* thread) + EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { + DCHECK(obj != NULL); // Assumed to have been checked before entry. 
+ obj->MonitorEnter(thread); // May block. + DCHECK(thread->HoldsLock(obj)); + // Only possible exception is NPE and is handled before entry. + DCHECK(!thread->IsExceptionPending()); +} + +extern "C" void art_portable_unlock_object_from_code(mirror::Object* obj, Thread* thread) + UNLOCK_FUNCTION(monitor_lock_) { + DCHECK(obj != NULL); // Assumed to have been checked before entry. + // MonitorExit may throw exception. + obj->MonitorExit(thread); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_proxy_entrypoints.cc b/runtime/entrypoints/portable/portable_proxy_entrypoints.cc new file mode 100644 index 0000000000..3db39cd0bd --- /dev/null +++ b/runtime/entrypoints/portable/portable_proxy_entrypoints.cc @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "portable_argument_visitor.h" +#include "scoped_thread_state_change.h" + +namespace art { + +// Visits arguments on the stack placing them into the args vector, Object* arguments are converted +// to jobjects. 
+class BuildPortableArgumentVisitor : public PortableArgumentVisitor { + public: + BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, + ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) : + PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {} + + virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jvalue val; + Primitive::Type type = GetParamPrimitiveType(); + switch (type) { + case Primitive::kPrimNot: { + mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress()); + val.l = soa_.AddLocalReference<jobject>(obj); + break; + } + case Primitive::kPrimLong: // Fall-through. + case Primitive::kPrimDouble: + val.j = *reinterpret_cast<jlong*>(GetParamAddress()); + break; + case Primitive::kPrimBoolean: // Fall-through. + case Primitive::kPrimByte: // Fall-through. + case Primitive::kPrimChar: // Fall-through. + case Primitive::kPrimShort: // Fall-through. + case Primitive::kPrimInt: // Fall-through. + case Primitive::kPrimFloat: + val.i = *reinterpret_cast<jint*>(GetParamAddress()); + break; + case Primitive::kPrimVoid: + LOG(FATAL) << "UNREACHABLE"; + val.j = 0; + break; + } + args_.push_back(val); + } + + private: + ScopedObjectAccessUnchecked& soa_; + std::vector<jvalue>& args_; + + DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor); +}; + +// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method +// which is responsible for recording callee save registers. We explicitly place into jobjects the +// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a +// field within the proxy object, which will box the primitive arguments and deal with error cases. +extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method, + mirror::Object* receiver, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 
+ const char* old_cause = + self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); + self->VerifyStack(); + // Start new JNI local reference state. + JNIEnvExt* env = self->GetJniEnv(); + ScopedObjectAccessUnchecked soa(env); + ScopedJniEnvLocalRefState env_state(env); + // Create local ref. copies of proxy method and the receiver. + jobject rcvr_jobj = soa.AddLocalReference(receiver); + + // Placing arguments into args vector and remove the receiver. + MethodHelper proxy_mh(proxy_method); + std::vector args; + BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args); + local_ref_visitor.VisitArguments(); + args.erase(args.begin()); + + // Convert proxy method into expected interface method. + mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); + DCHECK(interface_method != NULL); + DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); + jobject interface_method_jobj = soa.AddLocalReference(interface_method); + + // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code + // that performs allocations. + self->EndAssertNoThreadSuspension(old_cause); + JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(), + rcvr_jobj, interface_method_jobj, args); + return result.GetJ(); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_stub_entrypoints.cc b/runtime/entrypoints/portable/portable_stub_entrypoints.cc new file mode 100644 index 0000000000..c510c653ba --- /dev/null +++ b/runtime/entrypoints/portable/portable_stub_entrypoints.cc @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dex_instruction-inl.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +// Lazily resolve a method for portable. Called by stub code. +extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** called_addr, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t dex_pc; + mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc); + + ClassLinker* linker = Runtime::Current()->GetClassLinker(); + InvokeType invoke_type; + bool is_range; + if (called->IsRuntimeMethod()) { + const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem(); + CHECK_LT(dex_pc, code->insns_size_in_code_units_); + const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); + Instruction::Code instr_code = instr->Opcode(); + switch (instr_code) { + case Instruction::INVOKE_DIRECT: + invoke_type = kDirect; + is_range = false; + break; + case Instruction::INVOKE_DIRECT_RANGE: + invoke_type = kDirect; + is_range = true; + break; + case Instruction::INVOKE_STATIC: + invoke_type = kStatic; + is_range = false; + break; + case Instruction::INVOKE_STATIC_RANGE: + invoke_type = kStatic; + is_range = true; + break; + case Instruction::INVOKE_SUPER: + invoke_type = kSuper; + is_range = false; + break; + case Instruction::INVOKE_SUPER_RANGE: + invoke_type = kSuper; + is_range = true; + break; + case Instruction::INVOKE_VIRTUAL: + invoke_type = kVirtual; + 
is_range = false; + break; + case Instruction::INVOKE_VIRTUAL_RANGE: + invoke_type = kVirtual; + is_range = true; + break; + case Instruction::INVOKE_INTERFACE: + invoke_type = kInterface; + is_range = false; + break; + case Instruction::INVOKE_INTERFACE_RANGE: + invoke_type = kInterface; + is_range = true; + break; + default: + LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); + // Avoid used uninitialized warnings. + invoke_type = kDirect; + is_range = true; + } + uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); + called = linker->ResolveMethod(dex_method_idx, caller, invoke_type); + // Refine called method based on receiver. + if (invoke_type == kVirtual) { + called = receiver->GetClass()->FindVirtualMethodForVirtual(called); + } else if (invoke_type == kInterface) { + called = receiver->GetClass()->FindVirtualMethodForInterface(called); + } + } else { + CHECK(called->IsStatic()) << PrettyMethod(called); + invoke_type = kStatic; + } + const void* code = NULL; + if (LIKELY(!thread->IsExceptionPending())) { + // Incompatible class change should have been handled in resolve method. + CHECK(!called->CheckIncompatibleClassChange(invoke_type)); + // Ensure that the called method's class is initialized. + mirror::Class* called_class = called->GetDeclaringClass(); + linker->EnsureInitialized(called_class, true, true); + if (LIKELY(called_class->IsInitialized())) { + code = called->GetEntryPointFromCompiledCode(); + // TODO: remove this after we solve the link issue. + { // for lazy link. + if (code == NULL) { + code = linker->GetOatCodeFor(called); + } + } + } else if (called_class->IsInitializing()) { + if (invoke_type == kStatic) { + // Class is still initializing, go to oat and grab code (trampoline must be left in place + // until class is initialized to stop races between threads). + code = linker->GetOatCodeFor(called); + } else { + // No trampoline for non-static methods. 
+ code = called->GetEntryPointFromCompiledCode(); + // TODO: remove this after we solve the link issue. + { // for lazy link. + if (code == NULL) { + code = linker->GetOatCodeFor(called); + } + } + } + } else { + DCHECK(called_class->IsErroneous()); + } + } + if (LIKELY(code != NULL)) { + // Expect class to at least be initializing. + DCHECK(called->GetDeclaringClass()->IsInitializing()); + // Don't want infinite recursion. + DCHECK(code != GetResolutionTrampoline(linker)); + // Set up entry into main method + *called_addr = called; + } + return code; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc new file mode 100644 index 0000000000..dac73885a5 --- /dev/null +++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method.h" +#include "mirror/object-inl.h" +#include "verifier/dex_gc_map.h" +#include "stack.h" + +namespace art { + +class ShadowFrameCopyVisitor : public StackVisitor { + public: + explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL), + top_frame_(NULL) {} + + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (IsShadowFrame()) { + ShadowFrame* cur_frame = GetCurrentShadowFrame(); + size_t num_regs = cur_frame->NumberOfVRegs(); + mirror::AbstractMethod* method = cur_frame->GetMethod(); + uint32_t dex_pc = cur_frame->GetDexPC(); + ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc); + + const uint8_t* gc_map = method->GetNativeGcMap(); + uint32_t gc_map_length = static_cast((gc_map[0] << 24) | + (gc_map[1] << 16) | + (gc_map[2] << 8) | + (gc_map[3] << 0)); + verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length); + const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc); + for (size_t reg = 0; reg < num_regs; ++reg) { + if (TestBitmap(reg, reg_bitmap)) { + new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg)); + } else { + new_frame->SetVReg(reg, cur_frame->GetVReg(reg)); + } + } + + if (prev_frame_ != NULL) { + prev_frame_->SetLink(new_frame); + } else { + top_frame_ = new_frame; + } + prev_frame_ = new_frame; + } + return true; + } + + ShadowFrame* GetShadowFrameCopy() { + return top_frame_; + } + + private: + static bool TestBitmap(int reg, const uint8_t* reg_vector) { + return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0; + } + + ShadowFrame* prev_frame_; + ShadowFrame* top_frame_; +}; + +extern "C" void art_portable_test_suspend_from_code(Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + CheckSuspend(self); + if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) { + // Save out the shadow frame to the heap + ShadowFrameCopyVisitor 
visitor(self); + visitor.WalkStack(true); + self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy()); + self->SetDeoptimizationReturnValue(JValue()); + self->SetException(ThrowLocation(), reinterpret_cast(-1)); + } +} + +extern "C" ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread, + ShadowFrame* new_shadow_frame, + mirror::AbstractMethod* method, + uint32_t num_vregs) { + ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame); + new_shadow_frame->SetMethod(method); + new_shadow_frame->SetNumberOfVRegs(num_vregs); + return old_frame; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_throw_entrypoints.cc b/runtime/entrypoints/portable/portable_throw_entrypoints.cc new file mode 100644 index 0000000000..4b2b46b25f --- /dev/null +++ b/runtime/entrypoints/portable/portable_throw_entrypoints.cc @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowArithmeticExceptionDivideByZero(); +} + +extern "C" void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowArrayIndexOutOfBoundsException(index, length); +} + +extern "C" void art_portable_throw_no_such_method_from_code(int32_t method_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowNoSuchMethodError(method_idx); +} + +extern "C" void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // TODO: remove dex_pc argument from caller. + UNUSED(dex_pc); + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionFromDexPC(throw_location); +} + +extern "C" void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowStackOverflowError(Thread::Current()); +} + +extern "C" void art_portable_throw_exception_from_code(mirror::Throwable* exception) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + if (exception == NULL) { + ThrowNullPointerException(NULL, "throw with null exception"); + } else { + self->SetException(throw_location, exception); + } +} + +extern "C" void* art_portable_get_and_clear_exception(Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(self->IsExceptionPending()); + // TODO: make this inline. 
+ mirror::Throwable* exception = self->GetException(NULL); + self->ClearException(); + return exception; +} + +extern "C" int32_t art_portable_find_catch_block_from_code(mirror::AbstractMethod* current_method, + uint32_t ti_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self = Thread::Current(); // TODO: make an argument. + ThrowLocation throw_location; + mirror::Throwable* exception = self->GetException(&throw_location); + // Check for special deoptimization exception. + if (UNLIKELY(reinterpret_cast(exception) == -1)) { + return -1; + } + mirror::Class* exception_type = exception->GetClass(); + MethodHelper mh(current_method); + const DexFile::CodeItem* code_item = mh.GetCodeItem(); + DCHECK_LT(ti_offset, code_item->tries_size_); + const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset); + + int iter_index = 0; + int result = -1; + uint32_t catch_dex_pc = -1; + // Iterate over the catch handlers associated with dex_pc + for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) { + uint16_t iter_type_idx = it.GetHandlerTypeIndex(); + // Catch all case + if (iter_type_idx == DexFile::kDexNoIndex16) { + catch_dex_pc = it.GetHandlerAddress(); + result = iter_index; + break; + } + // Does this catch exception type apply? + mirror::Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); + if (UNLIKELY(iter_exception_type == NULL)) { + // TODO: check, the verifier (class linker?) should take care of resolving all exception + // classes early. + LOG(WARNING) << "Unresolved exception class when finding catch block: " + << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); + } else if (iter_exception_type->IsAssignableFrom(exception_type)) { + catch_dex_pc = it.GetHandlerAddress(); + result = iter_index; + break; + } + ++iter_index; + } + if (result != -1) { + // Handler found. 
+ Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self, + throw_location, + current_method, + catch_dex_pc, + exception); + } + return result; +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc index f66fc848d5..9ed802a2bb 100644 --- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc @@ -15,11 +15,11 @@ */ #include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/class-inl.h" #include "mirror/abstract_method-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" -#include "runtime_support.h" namespace art { diff --git a/runtime/entrypoints/quick/quick_argument_visitor.h b/runtime/entrypoints/quick/quick_argument_visitor.h index 4f81151cd1..35fa97269c 100644 --- a/runtime/entrypoints/quick/quick_argument_visitor.h +++ b/runtime/entrypoints/quick/quick_argument_visitor.h @@ -21,116 +21,6 @@ namespace art { -// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. -class PortableArgumentVisitor { - public: -// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame. -// Size of Runtime::kRefAndArgs callee save frame. -// Size of Method* and register parameters in out stack arguments. 
-#if defined(__arm__) -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48 -#define PORTABLE_STACK_ARG_SKIP 0 -#elif defined(__mips__) -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64 -#define PORTABLE_STACK_ARG_SKIP 16 -#elif defined(__i386__) -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32 -#define PORTABLE_STACK_ARG_SKIP 4 -#else -#error "Unsupported architecture" -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0 -#define PORTABLE_STACK_ARG_SKIP 0 -#endif - - PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : - caller_mh_(caller_mh), - args_in_regs_(ComputeArgsInRegs(caller_mh)), - num_params_(caller_mh.NumArgs()), - reg_args_(reinterpret_cast(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET), - stack_args_(reinterpret_cast(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE - + PORTABLE_STACK_ARG_SKIP), - cur_args_(reg_args_), - cur_arg_index_(0), - param_index_(0) { - } - - virtual ~PortableArgumentVisitor() {} - - virtual void Visit() = 0; - - bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.IsParamAReference(param_index_); - } - - bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.IsParamALongOrDouble(param_index_); - } - - Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.GetParamPrimitiveType(param_index_); - } - - byte* GetParamAddress() const { - return cur_args_ + (cur_arg_index_ * kPointerSize); - } - - void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - for (cur_arg_index_ = 0; 
cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) { -#if (defined(__arm__) || defined(__mips__)) - if (IsParamALongOrDouble() && cur_arg_index_ == 2) { - break; - } -#endif - Visit(); - cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); - param_index_++; - } - cur_args_ = stack_args_; - cur_arg_index_ = 0; - while (param_index_ < num_params_) { -#if (defined(__arm__) || defined(__mips__)) - if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) { - cur_arg_index_++; - } -#endif - Visit(); - cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); - param_index_++; - } - } - - private: - static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { -#if (defined(__i386__)) - return 0; -#else - size_t args_in_regs = 0; - size_t num_params = mh.NumArgs(); - for (size_t i = 0; i < num_params; i++) { - args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1); - if (args_in_regs > 3) { - args_in_regs = 3; - break; - } - } - return args_in_regs; -#endif - } - MethodHelper& caller_mh_; - const size_t args_in_regs_; - const size_t num_params_; - byte* const reg_args_; - byte* const stack_args_; - byte* cur_args_; - size_t cur_arg_index_; - size_t param_index_; -}; - // Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. 
class QuickArgumentVisitor { public: diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc index fe91e617bb..b810bb70a6 100644 --- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc @@ -15,10 +15,10 @@ */ #include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "runtime_support.h" namespace art { diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc index 0af7a6281d..6400161b3e 100644 --- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc @@ -15,13 +15,13 @@ */ #include "callee_save_frame.h" -#include "gc/accounting/card_table-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "class_linker-inl.h" #include "dex_file-inl.h" +#include "gc/accounting/card_table-inl.h" #include "mirror/abstract_method-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" -#include "runtime_support.h" namespace art { diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h index 8692e9267e..74b8cfd09b 100644 --- a/runtime/entrypoints/quick/quick_entrypoints.h +++ b/runtime/entrypoints/quick/quick_entrypoints.h @@ -20,15 +20,15 @@ #include "dex_file-inl.h" #include "runtime.h" -#define ENTRYPOINT_OFFSET(x) \ - (static_cast(OFFSETOF_MEMBER(Thread, entrypoints_)) + \ +#define QUICK_ENTRYPOINT_OFFSET(x) \ + (static_cast(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \ static_cast(OFFSETOF_MEMBER(QuickEntryPoints, x))) namespace art { namespace mirror { -class AbstractMethod; -class Class; -class Object; + class AbstractMethod; + class Class; + class Object; } // namespace mirror class DvmDex; class MethodHelper; @@ -123,8 
+123,6 @@ struct PACKED(4) QuickEntryPoints { void* (*pMemcpy)(void*, const void*, size_t); // Invocation - const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, - mirror::AbstractMethod**, Thread*); const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, mirror::AbstractMethod**, Thread*); void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*); @@ -167,9 +165,6 @@ extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, jobject locked, Thread* self) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; -// Initialize an entry point data structure, architecture specific. -void InitEntryPoints(QuickEntryPoints* points); - } // namespace art #endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc index c20326c63e..a4e9dc9b27 100644 --- a/runtime/entrypoints/quick/quick_field_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc @@ -16,10 +16,10 @@ #include "callee_save_frame.h" #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/abstract_method-inl.h" #include "mirror/class-inl.h" #include "mirror/field-inl.h" -#include "runtime_support.h" #include diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc index a0b06fb521..b81ad12b7b 100644 --- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc @@ -52,7 +52,7 @@ extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array, ThrowLocation throw_location = self->GetCurrentLocationForThrow(); self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;", "failed FILL_ARRAY_DATA; length=%d, index=%d", - array->GetLength(), payload->element_count); + array->GetLength(), 
payload->element_count - 1); return -1; // Error } uint32_t size_in_bytes = payload->element_count * payload->element_width; diff --git a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc index 6a95f3c8ff..53b3628e2f 100644 --- a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc @@ -16,12 +16,12 @@ #include "callee_save_frame.h" #include "dex_instruction-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" #include "mirror/abstract_method-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "runtime_support.h" namespace art { diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc index 2d31160a4b..23a28f9cce 100644 --- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -15,13 +15,13 @@ */ #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/class-inl.h" #include "mirror/abstract_method-inl.h" #include "mirror/object.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "object_utils.h" -#include "runtime_support.h" #include "scoped_thread_state_change.h" #include "thread.h" diff --git a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc index e4ef45fdde..4e3d749e27 100644 --- a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc @@ -16,12 +16,12 @@ #include "quick_argument_visitor.h" #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/abstract_method-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "object_utils.h" #include "reflection.h" -#include "runtime_support.h" #include 
"scoped_thread_state_change.h" #include "thread.h" #include "well_known_classes.h" @@ -30,50 +30,6 @@ namespace art { -// Visits arguments on the stack placing them into the args vector, Object* arguments are converted -// to jobjects. -class BuildPortableArgumentVisitor : public PortableArgumentVisitor { - public: - BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, - ScopedObjectAccessUnchecked& soa, std::vector& args) : - PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {} - - virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - jvalue val; - Primitive::Type type = GetParamPrimitiveType(); - switch (type) { - case Primitive::kPrimNot: { - mirror::Object* obj = *reinterpret_cast(GetParamAddress()); - val.l = soa_.AddLocalReference(obj); - break; - } - case Primitive::kPrimLong: // Fall-through. - case Primitive::kPrimDouble: - val.j = *reinterpret_cast(GetParamAddress()); - break; - case Primitive::kPrimBoolean: // Fall-through. - case Primitive::kPrimByte: // Fall-through. - case Primitive::kPrimChar: // Fall-through. - case Primitive::kPrimShort: // Fall-through. - case Primitive::kPrimInt: // Fall-through. - case Primitive::kPrimFloat: - val.i = *reinterpret_cast(GetParamAddress()); - break; - case Primitive::kPrimVoid: - LOG(FATAL) << "UNREACHABLE"; - val.j = 0; - break; - } - args_.push_back(val); - } - - private: - ScopedObjectAccessUnchecked& soa_; - std::vector& args_; - - DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor); -}; - // Visits arguments on the stack placing them into the args vector, Object* arguments are converted // to jobjects. class BuildQuickArgumentVisitor : public QuickArgumentVisitor { @@ -122,46 +78,6 @@ class BuildQuickArgumentVisitor : public QuickArgumentVisitor { DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); }; -// Handler for invocation on proxy methods. 
On entry a frame will exist for the proxy object method -// which is responsible for recording callee save registers. We explicitly place into jobjects the -// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a -// field within the proxy object, which will box the primitive arguments and deal with error cases. -extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method, - mirror::Object* receiver, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Ensure we don't get thread suspension until the object arguments are safely in jobjects. - const char* old_cause = - self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); - self->VerifyStack(); - // Start new JNI local reference state. - JNIEnvExt* env = self->GetJniEnv(); - ScopedObjectAccessUnchecked soa(env); - ScopedJniEnvLocalRefState env_state(env); - // Create local ref. copies of proxy method and the receiver. - jobject rcvr_jobj = soa.AddLocalReference(receiver); - - // Placing arguments into args vector and remove the receiver. - MethodHelper proxy_mh(proxy_method); - std::vector args; - BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args); - local_ref_visitor.VisitArguments(); - args.erase(args.begin()); - - // Convert proxy method into expected interface method. - mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); - DCHECK(interface_method != NULL); - DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); - jobject interface_method_jobj = soa.AddLocalReference(interface_method); - - // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code - // that performs allocations. 
- self->EndAssertNoThreadSuspension(old_cause); - JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(), - rcvr_jobj, interface_method_jobj, args); - return result.GetJ(); -} - // Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method // which is responsible for recording callee save registers. We explicitly place into jobjects the // incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a diff --git a/runtime/entrypoints/quick/quick_stub_entrypoints.cc b/runtime/entrypoints/quick/quick_stub_entrypoints.cc index f2af6d28dc..d78bbf3bc8 100644 --- a/runtime/entrypoints/quick/quick_stub_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_stub_entrypoints.cc @@ -30,127 +30,6 @@ extern "C" void art_quick_deliver_exception_from_code(void*); namespace art { -// Lazily resolve a method for portable. Called by stub code. -extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** called_addr, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - uint32_t dex_pc; - mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc); - - ClassLinker* linker = Runtime::Current()->GetClassLinker(); - InvokeType invoke_type; - bool is_range; - if (called->IsRuntimeMethod()) { - const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem(); - CHECK_LT(dex_pc, code->insns_size_in_code_units_); - const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); - Instruction::Code instr_code = instr->Opcode(); - switch (instr_code) { - case Instruction::INVOKE_DIRECT: - invoke_type = kDirect; - is_range = false; - break; - case Instruction::INVOKE_DIRECT_RANGE: - invoke_type = kDirect; - is_range = true; - break; - case Instruction::INVOKE_STATIC: - invoke_type = kStatic; - is_range = false; - break; - case Instruction::INVOKE_STATIC_RANGE: - invoke_type = kStatic; - is_range = true; 
- break; - case Instruction::INVOKE_SUPER: - invoke_type = kSuper; - is_range = false; - break; - case Instruction::INVOKE_SUPER_RANGE: - invoke_type = kSuper; - is_range = true; - break; - case Instruction::INVOKE_VIRTUAL: - invoke_type = kVirtual; - is_range = false; - break; - case Instruction::INVOKE_VIRTUAL_RANGE: - invoke_type = kVirtual; - is_range = true; - break; - case Instruction::INVOKE_INTERFACE: - invoke_type = kInterface; - is_range = false; - break; - case Instruction::INVOKE_INTERFACE_RANGE: - invoke_type = kInterface; - is_range = true; - break; - default: - LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); - // Avoid used uninitialized warnings. - invoke_type = kDirect; - is_range = true; - } - uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); - called = linker->ResolveMethod(dex_method_idx, caller, invoke_type); - // Refine called method based on receiver. - if (invoke_type == kVirtual) { - called = receiver->GetClass()->FindVirtualMethodForVirtual(called); - } else if (invoke_type == kInterface) { - called = receiver->GetClass()->FindVirtualMethodForInterface(called); - } - } else { - CHECK(called->IsStatic()) << PrettyMethod(called); - invoke_type = kStatic; - } - const void* code = NULL; - if (LIKELY(!thread->IsExceptionPending())) { - // Incompatible class change should have been handled in resolve method. - CHECK(!called->CheckIncompatibleClassChange(invoke_type)); - // Ensure that the called method's class is initialized. - mirror::Class* called_class = called->GetDeclaringClass(); - linker->EnsureInitialized(called_class, true, true); - if (LIKELY(called_class->IsInitialized())) { - code = called->GetEntryPointFromCompiledCode(); - // TODO: remove this after we solve the link issue. - { // for lazy link. 
- if (code == NULL) { - code = linker->GetOatCodeFor(called); - } - } - } else if (called_class->IsInitializing()) { - if (invoke_type == kStatic) { - // Class is still initializing, go to oat and grab code (trampoline must be left in place - // until class is initialized to stop races between threads). - code = linker->GetOatCodeFor(called); - } else { - // No trampoline for non-static methods. - code = called->GetEntryPointFromCompiledCode(); - // TODO: remove this after we solve the link issue. - { // for lazy link. - if (code == NULL) { - code = linker->GetOatCodeFor(called); - } - } - } - } else { - DCHECK(called_class->IsErroneous()); - } - } - if (LIKELY(code != NULL)) { - // Expect class to at least be initializing. - DCHECK(called->GetDeclaringClass()->IsInitializing()); - // Don't want infinite recursion. - DCHECK(code != GetResolutionTrampoline(linker)); - // Set up entry into main method - *called_addr = called; - } - return code; -} - // Lazily resolve a method for quick. Called by stub code. extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, mirror::Object* receiver, @@ -413,26 +292,4 @@ extern "C" void artThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* meth self->QuickDeliverException(); } -// Used by the JNI dlsym stub to find the native method to invoke if none is registered. -extern "C" void* artFindNativeMethod(Thread* self) { - Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native. - DCHECK(Thread::Current() == self); - ScopedObjectAccess soa(self); - - mirror::AbstractMethod* method = self->GetCurrentMethod(NULL); - DCHECK(method != NULL); - - // Lookup symbol address for method, on failure we'll return NULL with an - // exception set, otherwise we return the address of the method we found. 
- void* native_code = soa.Vm()->FindCodeForNativeMethod(method); - if (native_code == NULL) { - DCHECK(self->IsExceptionPending()); - return NULL; - } else { - // Register so that future calls don't come here - method->RegisterNative(self, native_code); - return native_code; - } -} - } // namespace art diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc index e7117147a9..b4d6c0ba8d 100644 --- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc @@ -15,7 +15,7 @@ */ #include "callee_save_frame.h" -#include "runtime_support.h" +#include "entrypoints/entrypoint_utils.h" #include "thread.h" #include "thread_list.h" diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc index 9588698bb2..3bfa2f2611 100644 --- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc @@ -15,9 +15,9 @@ */ #include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/object.h" #include "object_utils.h" -#include "runtime_support.h" #include "thread.h" #include "well_known_classes.h" diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 37c45fa6ec..ef4b95c037 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -24,6 +24,7 @@ #include "dex_file-inl.h" #include "dex_instruction-inl.h" #include "dex_instruction.h" +#include "entrypoints/entrypoint_utils.h" #include "gc/accounting/card_table-inl.h" #include "invoke_arg_array_builder.h" #include "nth_caller_visitor.h" @@ -35,7 +36,6 @@ #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "object_utils.h" -#include "runtime_support.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" #include "thread.h" diff --git 
a/runtime/mirror/abstract_method-inl.h b/runtime/mirror/abstract_method-inl.h index 2df1367637..d235e3eed8 100644 --- a/runtime/mirror/abstract_method-inl.h +++ b/runtime/mirror/abstract_method-inl.h @@ -20,9 +20,9 @@ #include "abstract_method.h" #include "dex_file.h" +#include "entrypoints/entrypoint_utils.h" #include "object_array.h" #include "runtime.h" -#include "runtime_support.h" namespace art { namespace mirror { diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc index 53a1df95a6..540ff9f68e 100644 --- a/runtime/mirror/object_test.cc +++ b/runtime/mirror/object_test.cc @@ -26,6 +26,7 @@ #include "class_linker-inl.h" #include "common_test.h" #include "dex_file.h" +#include "entrypoints/entrypoint_utils.h" #include "field-inl.h" #include "gc/accounting/card_table-inl.h" #include "gc/heap.h" @@ -33,7 +34,6 @@ #include "abstract_method-inl.h" #include "object-inl.h" #include "object_array-inl.h" -#include "runtime_support.h" #include "sirt_ref.h" #include "UniquePtr.h" diff --git a/runtime/runtime_support.cc b/runtime/runtime_support.cc deleted file mode 100644 index d28aad1e8f..0000000000 --- a/runtime/runtime_support.cc +++ /dev/null @@ -1,475 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "runtime_support.h" - -#include "class_linker-inl.h" -#include "dex_file-inl.h" -#include "gc/accounting/card_table-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/field-inl.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "mirror/proxy.h" -#include "reflection.h" -#include "scoped_thread_state_change.h" -#include "ScopedLocalRef.h" -#include "well_known_classes.h" - -double art_l2d(int64_t l) { - return static_cast(l); -} - -float art_l2f(int64_t l) { - return static_cast(l); -} - -/* - * Float/double conversion requires clamping to min and max of integer form. If - * target doesn't support this normally, use these. - */ -int64_t art_d2l(double d) { - static const double kMaxLong = static_cast(static_cast(0x7fffffffffffffffULL)); - static const double kMinLong = static_cast(static_cast(0x8000000000000000ULL)); - if (d >= kMaxLong) { - return static_cast(0x7fffffffffffffffULL); - } else if (d <= kMinLong) { - return static_cast(0x8000000000000000ULL); - } else if (d != d) { // NaN case - return 0; - } else { - return static_cast(d); - } -} - -int64_t art_f2l(float f) { - static const float kMaxLong = static_cast(static_cast(0x7fffffffffffffffULL)); - static const float kMinLong = static_cast(static_cast(0x8000000000000000ULL)); - if (f >= kMaxLong) { - return static_cast(0x7fffffffffffffffULL); - } else if (f <= kMinLong) { - return static_cast(0x8000000000000000ULL); - } else if (f != f) { // NaN case - return 0; - } else { - return static_cast(f); - } -} - -int32_t art_d2i(double d) { - static const double kMaxInt = static_cast(static_cast(0x7fffffffUL)); - static const double kMinInt = static_cast(static_cast(0x80000000UL)); - if (d >= kMaxInt) { - return static_cast(0x7fffffffUL); - } else if (d <= kMinInt) { - return static_cast(0x80000000UL); - } else if (d != d) { // NaN case - return 0; - } else { - return static_cast(d); - } -} - -int32_t art_f2i(float f) { 
- static const float kMaxInt = static_cast(static_cast(0x7fffffffUL)); - static const float kMinInt = static_cast(static_cast(0x80000000UL)); - if (f >= kMaxInt) { - return static_cast(0x7fffffffUL); - } else if (f <= kMinInt) { - return static_cast(0x80000000UL); - } else if (f != f) { // NaN case - return 0; - } else { - return static_cast(f); - } -} - -namespace art { - -// Helper function to allocate array for FILLED_NEW_ARRAY. -mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* referrer, - int32_t component_count, Thread* self, - bool access_check) { - if (UNLIKELY(component_count < 0)) { - ThrowNegativeArraySizeException(component_count); - return NULL; // Failure - } - mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->Get(type_idx); - if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve - klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer); - if (klass == NULL) { // Error - DCHECK(self->IsExceptionPending()); - return NULL; // Failure - } - } - if (UNLIKELY(klass->IsPrimitive() && !klass->IsPrimitiveInt())) { - if (klass->IsPrimitiveLong() || klass->IsPrimitiveDouble()) { - ThrowRuntimeException("Bad filled array request for type %s", - PrettyDescriptor(klass).c_str()); - } else { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - DCHECK(throw_location.GetMethod() == referrer); - self->ThrowNewExceptionF(throw_location, "Ljava/lang/InternalError;", - "Found type %s; filled-new-array not implemented for anything but \'int\'", - PrettyDescriptor(klass).c_str()); - } - return NULL; // Failure - } else { - if (access_check) { - mirror::Class* referrer_klass = referrer->GetDeclaringClass(); - if (UNLIKELY(!referrer_klass->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer_klass, klass); - return NULL; // Failure - } - } - DCHECK(klass->IsArrayClass()) << PrettyClass(klass); - return mirror::Array::Alloc(self, klass, component_count); - } -} - 
-mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, - Thread* self, FindFieldType type, size_t expected_size, - bool access_check) { - bool is_primitive; - bool is_set; - bool is_static; - switch (type) { - case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; - case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; - case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; - case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; - case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; - case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; - case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; - case StaticPrimitiveWrite: // Keep GCC happy by having a default handler, fall-through. - default: is_primitive = true; is_set = true; is_static = true; break; - } - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - mirror::Field* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static); - if (UNLIKELY(resolved_field == NULL)) { - DCHECK(self->IsExceptionPending()); // Throw exception and unwind. - return NULL; // Failure. - } - mirror::Class* fields_class = resolved_field->GetDeclaringClass(); - if (access_check) { - if (UNLIKELY(resolved_field->IsStatic() != is_static)) { - ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer); - return NULL; - } - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(fields_class) || - !referring_class->CanAccessMember(fields_class, - resolved_field->GetAccessFlags()))) { - // The referring class can't access the resolved field, this may occur as a result of a - // protected field being made public by a sub-class. 
Resort to the dex file to determine - // the correct class for the access check. - const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); - fields_class = class_linker->ResolveType(dex_file, - dex_file.GetFieldId(field_idx).class_idx_, - referring_class); - if (UNLIKELY(!referring_class->CanAccess(fields_class))) { - ThrowIllegalAccessErrorClass(referring_class, fields_class); - return NULL; // failure - } else if (UNLIKELY(!referring_class->CanAccessMember(fields_class, - resolved_field->GetAccessFlags()))) { - ThrowIllegalAccessErrorField(referring_class, resolved_field); - return NULL; // failure - } - } - if (UNLIKELY(is_set && resolved_field->IsFinal() && (fields_class != referring_class))) { - ThrowIllegalAccessErrorFinalField(referrer, resolved_field); - return NULL; // failure - } else { - FieldHelper fh(resolved_field); - if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || - fh.FieldSize() != expected_size)) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - DCHECK(throw_location.GetMethod() == referrer); - self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;", - "Attempted read of %zd-bit %s on field '%s'", - expected_size * (32 / sizeof(int32_t)), - is_primitive ? "primitive" : "non-primitive", - PrettyField(resolved_field, true).c_str()); - return NULL; // failure - } - } - } - if (!is_static) { - // instance fields must be being accessed on an initialized class - return resolved_field; - } else { - // If the class is initialized we're done. - if (fields_class->IsInitialized()) { - return resolved_field; - } else if (Runtime::Current()->GetClassLinker()->EnsureInitialized(fields_class, true, true)) { - // Otherwise let's ensure the class is initialized before resolving the field. 
- return resolved_field; - } else { - DCHECK(self->IsExceptionPending()); // Throw exception and unwind - return NULL; // failure - } - } -} - -// Slow path method resolution -mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, - mirror::AbstractMethod* referrer, - Thread* self, bool access_check, InvokeType type) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - bool is_direct = type == kStatic || type == kDirect; - mirror::AbstractMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type); - if (UNLIKELY(resolved_method == NULL)) { - DCHECK(self->IsExceptionPending()); // Throw exception and unwind. - return NULL; // Failure. - } else if (UNLIKELY(this_object == NULL && type != kStatic)) { - // Maintain interpreter-like semantics where NullPointerException is thrown - // after potential NoSuchMethodError from class linker. - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - DCHECK(referrer == throw_location.GetMethod()); - ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type); - return NULL; // Failure. - } else { - if (!access_check) { - if (is_direct) { - return resolved_method; - } else if (type == kInterface) { - mirror::AbstractMethod* interface_method = - this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); - if (UNLIKELY(interface_method == NULL)) { - ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, - referrer); - return NULL; // Failure. - } else { - return interface_method; - } - } else { - mirror::ObjectArray* vtable; - uint16_t vtable_index = resolved_method->GetMethodIndex(); - if (type == kSuper) { - vtable = referrer->GetDeclaringClass()->GetSuperClass()->GetVTable(); - } else { - vtable = this_object->GetClass()->GetVTable(); - } - // TODO: eliminate bounds check? 
- return vtable->Get(vtable_index); - } - } else { - // Incompatible class change should have been handled in resolve method. - if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) { - ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method, - referrer); - return NULL; // Failure. - } - mirror::Class* methods_class = resolved_method->GetDeclaringClass(); - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(methods_class) || - !referring_class->CanAccessMember(methods_class, - resolved_method->GetAccessFlags()))) { - // The referring class can't access the resolved method, this may occur as a result of a - // protected method being made public by implementing an interface that re-declares the - // method public. Resort to the dex file to determine the correct class for the access check - const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); - methods_class = class_linker->ResolveType(dex_file, - dex_file.GetMethodId(method_idx).class_idx_, - referring_class); - if (UNLIKELY(!referring_class->CanAccess(methods_class))) { - ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class, - referrer, resolved_method, type); - return NULL; // Failure. - } else if (UNLIKELY(!referring_class->CanAccessMember(methods_class, - resolved_method->GetAccessFlags()))) { - ThrowIllegalAccessErrorMethod(referring_class, resolved_method); - return NULL; // Failure. - } - } - if (is_direct) { - return resolved_method; - } else if (type == kInterface) { - mirror::AbstractMethod* interface_method = - this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); - if (UNLIKELY(interface_method == NULL)) { - ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, - referrer); - return NULL; // Failure. 
- } else { - return interface_method; - } - } else { - mirror::ObjectArray* vtable; - uint16_t vtable_index = resolved_method->GetMethodIndex(); - if (type == kSuper) { - mirror::Class* super_class = referring_class->GetSuperClass(); - if (LIKELY(super_class != NULL)) { - vtable = referring_class->GetSuperClass()->GetVTable(); - } else { - vtable = NULL; - } - } else { - vtable = this_object->GetClass()->GetVTable(); - } - if (LIKELY(vtable != NULL && - vtable_index < static_cast(vtable->GetLength()))) { - return vtable->GetWithoutChecks(vtable_index); - } else { - // Behavior to agree with that of the verifier. - MethodHelper mh(resolved_method); - ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), mh.GetName(), - mh.GetSignature()); - return NULL; // Failure. - } - } - } - } -} - -void ThrowStackOverflowError(Thread* self) { - CHECK(!self->IsHandlingStackOverflow()) << "Recursive stack overflow."; - - if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) { - // Remove extra entry pushed onto second stack during method tracing. - Runtime::Current()->GetInstrumentation()->PopMethodForUnwind(self, false); - } - - self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute. - JNIEnvExt* env = self->GetJniEnv(); - std::string msg("stack size "); - msg += PrettySize(self->GetStackSize()); - // Use low-level JNI routine and pre-baked error class to avoid class linking operations that - // would consume more stack. - int rc = ::art::ThrowNewException(env, WellKnownClasses::java_lang_StackOverflowError, - msg.c_str(), NULL); - if (rc != JNI_OK) { - // TODO: ThrowNewException failed presumably because of an OOME, we continue to throw the OOME - // or die in the CHECK below. We may want to throw a pre-baked StackOverflowError - // instead. 
- LOG(ERROR) << "Couldn't throw new StackOverflowError because JNI ThrowNew failed."; - CHECK(self->IsExceptionPending()); - } - self->ResetDefaultStackEnd(); // Return to default stack size. -} - -JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, - jobject rcvr_jobj, jobject interface_method_jobj, - std::vector& args) { - DCHECK(soa.Env()->IsInstanceOf(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy)); - - // Build argument array possibly triggering GC. - soa.Self()->AssertThreadSuspensionIsAllowable(); - jobjectArray args_jobj = NULL; - const JValue zero; - if (args.size() > 0) { - args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL); - if (args_jobj == NULL) { - CHECK(soa.Self()->IsExceptionPending()); - return zero; - } - for (size_t i = 0; i < args.size(); ++i) { - if (shorty[i + 1] == 'L') { - jobject val = args.at(i).l; - soa.Env()->SetObjectArrayElement(args_jobj, i, val); - } else { - JValue jv; - jv.SetJ(args.at(i).j); - mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv); - if (val == NULL) { - CHECK(soa.Self()->IsExceptionPending()); - return zero; - } - soa.Decode* >(args_jobj)->Set(i, val); - } - } - } - - // Call InvocationHandler.invoke(Object proxy, Method method, Object[] args). - jobject inv_hand = soa.Env()->GetObjectField(rcvr_jobj, - WellKnownClasses::java_lang_reflect_Proxy_h); - jvalue invocation_args[3]; - invocation_args[0].l = rcvr_jobj; - invocation_args[1].l = interface_method_jobj; - invocation_args[2].l = args_jobj; - jobject result = - soa.Env()->CallObjectMethodA(inv_hand, - WellKnownClasses::java_lang_reflect_InvocationHandler_invoke, - invocation_args); - - // Unbox result and handle error conditions. - if (LIKELY(!soa.Self()->IsExceptionPending())) { - if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) { - // Do nothing. 
- return zero; - } else { - mirror::Object* result_ref = soa.Decode(result); - mirror::Object* rcvr = soa.Decode(rcvr_jobj); - mirror::AbstractMethod* interface_method = - soa.Decode(interface_method_jobj); - mirror::Class* result_type = MethodHelper(interface_method).GetReturnType(); - mirror::AbstractMethod* proxy_method; - if (interface_method->GetDeclaringClass()->IsInterface()) { - proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); - } else { - // Proxy dispatch to a method defined in Object. - DCHECK(interface_method->GetDeclaringClass()->IsObjectClass()); - proxy_method = interface_method; - } - ThrowLocation throw_location(rcvr, proxy_method, -1); - JValue result_unboxed; - if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, result_unboxed)) { - DCHECK(soa.Self()->IsExceptionPending()); - return zero; - } - return result_unboxed; - } - } else { - // In the case of checked exceptions that aren't declared, the exception must be wrapped by - // a UndeclaredThrowableException. 
- mirror::Throwable* exception = soa.Self()->GetException(NULL); - if (exception->IsCheckedException()) { - mirror::Object* rcvr = soa.Decode(rcvr_jobj); - mirror::SynthesizedProxyClass* proxy_class = - down_cast(rcvr->GetClass()); - mirror::AbstractMethod* interface_method = - soa.Decode(interface_method_jobj); - mirror::AbstractMethod* proxy_method = - rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); - int throws_index = -1; - size_t num_virt_methods = proxy_class->NumVirtualMethods(); - for (size_t i = 0; i < num_virt_methods; i++) { - if (proxy_class->GetVirtualMethod(i) == proxy_method) { - throws_index = i; - break; - } - } - CHECK_NE(throws_index, -1); - mirror::ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); - mirror::Class* exception_class = exception->GetClass(); - bool declares_exception = false; - for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { - mirror::Class* declared_exception = declared_exceptions->Get(i); - declares_exception = declared_exception->IsAssignableFrom(exception_class); - } - if (!declares_exception) { - ThrowLocation throw_location(rcvr, proxy_method, -1); - soa.Self()->ThrowNewWrappedException(throw_location, - "Ljava/lang/reflect/UndeclaredThrowableException;", - NULL); - } - } - return zero; - } -} - -} // namespace art diff --git a/runtime/runtime_support.h b/runtime/runtime_support.h deleted file mode 100644 index 43c678428b..0000000000 --- a/runtime/runtime_support.h +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_RUNTIME_SUPPORT_H_ -#define ART_RUNTIME_RUNTIME_SUPPORT_H_ - -#include "class_linker.h" -#include "common_throws.h" -#include "dex_file.h" -#include "indirect_reference_table.h" -#include "invoke_type.h" -#include "jni_internal.h" -#include "mirror/abstract_method.h" -#include "mirror/array.h" -#include "mirror/class-inl.h" -#include "mirror/throwable.h" -#include "object_utils.h" -#include "thread.h" - -extern "C" void art_interpreter_invoke_handler(); -extern "C" void art_jni_dlsym_lookup_stub(); -extern "C" void art_portable_abstract_method_error_stub(); -extern "C" void art_portable_proxy_invoke_handler(); -extern "C" void art_quick_abstract_method_error_stub(); -extern "C" void art_quick_deoptimize(); -extern "C" void art_quick_instrumentation_entry_from_code(void*); -extern "C" void art_quick_instrumentation_exit_from_code(); -extern "C" void art_quick_interpreter_entry(void*); -extern "C" void art_quick_proxy_invoke_handler(); -extern "C" void art_work_around_app_jni_bugs(); - -extern "C" double art_l2d(int64_t l); -extern "C" float art_l2f(int64_t l); -extern "C" int64_t art_d2l(double d); -extern "C" int32_t art_d2i(double d); -extern "C" int64_t art_f2l(float f); -extern "C" int32_t art_f2i(float f); - -namespace art { -namespace mirror { -class Class; -class Field; -class Object; -} - -// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it -// cannot be resolved, throw an error. If it can, use it to create an instance. 
-// When verification/compiler hasn't been able to verify access, optionally perform an access -// check. -static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - Thread* self, - bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); - Runtime* runtime = Runtime::Current(); - if (UNLIKELY(klass == NULL)) { - klass = runtime->GetClassLinker()->ResolveType(type_idx, method); - if (klass == NULL) { - DCHECK(self->IsExceptionPending()); - return NULL; // Failure - } - } - if (access_check) { - if (UNLIKELY(!klass->IsInstantiable())) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;", - PrettyDescriptor(klass).c_str()); - return NULL; // Failure - } - mirror::Class* referrer = method->GetDeclaringClass(); - if (UNLIKELY(!referrer->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer, klass); - return NULL; // Failure - } - } - if (!klass->IsInitialized() && - !runtime->GetClassLinker()->EnsureInitialized(klass, true, true)) { - DCHECK(self->IsExceptionPending()); - return NULL; // Failure - } - return klass->AllocObject(self); -} - -// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If -// it cannot be resolved, throw an error. If it can, use it to create an array. -// When verification/compiler hasn't been able to verify access, optionally perform an access -// check. 
-static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - int32_t component_count, - Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (UNLIKELY(component_count < 0)) { - ThrowNegativeArraySizeException(component_count); - return NULL; // Failure - } - mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); - if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve - klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); - if (klass == NULL) { // Error - DCHECK(Thread::Current()->IsExceptionPending()); - return NULL; // Failure - } - CHECK(klass->IsArrayClass()) << PrettyClass(klass); - } - if (access_check) { - mirror::Class* referrer = method->GetDeclaringClass(); - if (UNLIKELY(!referrer->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer, klass); - return NULL; // Failure - } - } - return mirror::Array::Alloc(self, klass, component_count); -} - -extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - int32_t component_count, - Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -// Type of find field operation for fast and slow case. -enum FindFieldType { - InstanceObjectRead, - InstanceObjectWrite, - InstancePrimitiveRead, - InstancePrimitiveWrite, - StaticObjectRead, - StaticObjectWrite, - StaticPrimitiveRead, - StaticPrimitiveWrite, -}; - -// Slow field find that can initialize classes and may throw exceptions. -extern mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, - Thread* self, FindFieldType type, size_t expected_size, - bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -// Fast path field resolution that can't initialize classes or throw exceptions. 
-static inline mirror::Field* FindFieldFast(uint32_t field_idx, - const mirror::AbstractMethod* referrer, - FindFieldType type, size_t expected_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* resolved_field = - referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); - if (UNLIKELY(resolved_field == NULL)) { - return NULL; - } - mirror::Class* fields_class = resolved_field->GetDeclaringClass(); - // Check class is initiliazed or initializing. - if (UNLIKELY(!fields_class->IsInitializing())) { - return NULL; - } - // Check for incompatible class change. - bool is_primitive; - bool is_set; - bool is_static; - switch (type) { - case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; - case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; - case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; - case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; - case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; - case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; - case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; - case StaticPrimitiveWrite: is_primitive = true; is_set = true; is_static = true; break; - default: - LOG(FATAL) << "UNREACHABLE"; // Assignment below to avoid GCC warnings. - is_primitive = true; - is_set = true; - is_static = true; - break; - } - if (UNLIKELY(resolved_field->IsStatic() != is_static)) { - // Incompatible class change. - return NULL; - } - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(fields_class) || - !referring_class->CanAccessMember(fields_class, - resolved_field->GetAccessFlags()) || - (is_set && resolved_field->IsFinal() && (fields_class != referring_class)))) { - // Illegal access. 
- return NULL; - } - FieldHelper fh(resolved_field); - if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || - fh.FieldSize() != expected_size)) { - return NULL; - } - return resolved_field; -} - -// Fast path method resolution that can't throw exceptions. -static inline mirror::AbstractMethod* FindMethodFast(uint32_t method_idx, - mirror::Object* this_object, - const mirror::AbstractMethod* referrer, - bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - bool is_direct = type == kStatic || type == kDirect; - if (UNLIKELY(this_object == NULL && !is_direct)) { - return NULL; - } - mirror::AbstractMethod* resolved_method = - referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx); - if (UNLIKELY(resolved_method == NULL)) { - return NULL; - } - if (access_check) { - // Check for incompatible class change errors and access. - bool icce = resolved_method->CheckIncompatibleClassChange(type); - if (UNLIKELY(icce)) { - return NULL; - } - mirror::Class* methods_class = resolved_method->GetDeclaringClass(); - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(methods_class) || - !referring_class->CanAccessMember(methods_class, - resolved_method->GetAccessFlags()))) { - // Potential illegal access, may need to refine the method's class. - return NULL; - } - } - if (type == kInterface) { // Most common form of slow path dispatch. 
- return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); - } else if (is_direct) { - return resolved_method; - } else if (type == kSuper) { - return referrer->GetDeclaringClass()->GetSuperClass()->GetVTable()-> - Get(resolved_method->GetMethodIndex()); - } else { - DCHECK(type == kVirtual); - return this_object->GetClass()->GetVTable()->Get(resolved_method->GetMethodIndex()); - } -} - -extern mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, - mirror::AbstractMethod* referrer, - Thread* self, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, - const mirror::AbstractMethod* referrer, - Thread* self, bool can_run_clinit, - bool verify_access) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - mirror::Class* klass = class_linker->ResolveType(type_idx, referrer); - if (UNLIKELY(klass == NULL)) { - CHECK(self->IsExceptionPending()); - return NULL; // Failure - Indicate to caller to deliver exception - } - // Perform access check if necessary. - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (verify_access && UNLIKELY(!referring_class->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referring_class, klass); - return NULL; // Failure - Indicate to caller to deliver exception - } - // If we're just implementing const-class, we shouldn't call . - if (!can_run_clinit) { - return klass; - } - // If we are the of this class, just return our storage. - // - // Do not set the DexCache InitializedStaticStorage, since that implies has finished - // running. 
- if (klass == referring_class && MethodHelper(referrer).IsClassInitializer()) { - return klass; - } - if (!class_linker->EnsureInitialized(klass, true, true)) { - CHECK(self->IsExceptionPending()); - return NULL; // Failure - Indicate to caller to deliver exception - } - referrer->GetDexCacheInitializedStaticStorage()->Set(type_idx, klass); - return klass; -} - -extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -static inline mirror::String* ResolveStringFromCode(const mirror::AbstractMethod* referrer, - uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - return class_linker->ResolveString(string_idx, referrer); -} - -static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - UNLOCK_FUNCTION(monitor_lock_) { - // Save any pending exception over monitor exit call. - mirror::Throwable* saved_exception = NULL; - ThrowLocation saved_throw_location; - if (UNLIKELY(self->IsExceptionPending())) { - saved_exception = self->GetException(&saved_throw_location); - self->ClearException(); - } - // Decode locked object and unlock, before popping local references. - self->DecodeJObject(locked)->MonitorExit(self); - if (UNLIKELY(self->IsExceptionPending())) { - LOG(FATAL) << "Synchronized JNI code returning with an exception:\n" - << saved_exception->Dump() - << "\nEncountered second exception during implicit MonitorExit:\n" - << self->GetException(NULL)->Dump(); - } - // Restore pending exception. 
- if (saved_exception != NULL) { - self->SetException(saved_throw_location, saved_exception); - } -} - -static inline void CheckReferenceResult(mirror::Object* o, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (o == NULL) { - return; - } - mirror::AbstractMethod* m = self->GetCurrentMethod(NULL); - if (o == kInvalidIndirectRefObject) { - JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str()); - } - // Make sure that the result is an instance of the type this method was expected to return. - mirror::Class* return_type = MethodHelper(m).GetReturnType(); - - if (!o->InstanceOf(return_type)) { - JniAbortF(NULL, "attempt to return an instance of %s from %s", - PrettyTypeOf(o).c_str(), PrettyMethod(m).c_str()); - } -} - -static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - for (;;) { - if (thread->ReadFlag(kCheckpointRequest)) { - thread->RunCheckpointFunction(); - thread->AtomicClearFlag(kCheckpointRequest); - } else if (thread->ReadFlag(kSuspendRequest)) { - thread->FullSuspendCheck(); - } else { - break; - } - } -} - -JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, - jobject rcvr_jobj, jobject interface_method_jobj, - std::vector& args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -// Entry point for deoptimization. -static inline uintptr_t GetDeoptimizationEntryPoint() { - return reinterpret_cast(art_quick_deoptimize); -} - -// Return address of instrumentation stub. -static inline void* GetInstrumentationEntryPoint() { - return reinterpret_cast(art_quick_instrumentation_entry_from_code); -} - -// The return_pc of instrumentation exit stub. -static inline uintptr_t GetInstrumentationExitPc() { - return reinterpret_cast(art_quick_instrumentation_exit_from_code); -} - -// Return address of interpreter stub. 
-static inline void* GetInterpreterEntryPoint() { - return reinterpret_cast(art_quick_interpreter_entry); -} - -static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) { - return class_linker->GetPortableResolutionTrampoline(); -} - -static inline const void* GetQuickResolutionTrampoline(ClassLinker* class_linker) { - return class_linker->GetQuickResolutionTrampoline(); -} - -// Return address of resolution trampoline stub for defined compiler. -static inline const void* GetResolutionTrampoline(ClassLinker* class_linker) { -#if defined(ART_USE_PORTABLE_COMPILER) - return GetPortableResolutionTrampoline(class_linker); -#else - return GetQuickResolutionTrampoline(class_linker); -#endif -} - -static inline void* GetPortableAbstractMethodErrorStub() { - return reinterpret_cast(art_portable_abstract_method_error_stub); -} - -static inline void* GetQuickAbstractMethodErrorStub() { - return reinterpret_cast(art_quick_abstract_method_error_stub); -} - -// Return address of abstract method error stub for defined compiler. -static inline void* GetAbstractMethodErrorStub() { -#if defined(ART_USE_PORTABLE_COMPILER) - return GetPortableAbstractMethodErrorStub(); -#else - return GetQuickAbstractMethodErrorStub(); -#endif -} - -static inline void* GetJniDlsymLookupStub() { - return reinterpret_cast(art_jni_dlsym_lookup_stub); -} - -} // namespace art - -#endif // ART_RUNTIME_RUNTIME_SUPPORT_H_ diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc deleted file mode 100644 index 93396d6a96..0000000000 --- a/runtime/runtime_support_llvm.cc +++ /dev/null @@ -1,930 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "runtime_support_llvm.h" - -#include "ScopedLocalRef.h" -#include "asm_support.h" -#include "class_linker.h" -#include "class_linker-inl.h" -#include "dex_file-inl.h" -#include "dex_instruction.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/dex_cache-inl.h" -#include "mirror/field-inl.h" -#include "mirror/object.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "nth_caller_visitor.h" -#include "object_utils.h" -#include "reflection.h" -#include "runtime_support.h" -#include "scoped_thread_state_change.h" -#include "thread.h" -#include "thread_list.h" -#include "verifier/dex_gc_map.h" -#include "verifier/method_verifier.h" -#include "well_known_classes.h" - -#include -#include -#include -#include -#include - -namespace art { - -using ::art::mirror::AbstractMethod; - -class ShadowFrameCopyVisitor : public StackVisitor { - public: - explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL), - top_frame_(NULL) {} - - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (IsShadowFrame()) { - ShadowFrame* cur_frame = GetCurrentShadowFrame(); - size_t num_regs = cur_frame->NumberOfVRegs(); - AbstractMethod* method = cur_frame->GetMethod(); - uint32_t dex_pc = cur_frame->GetDexPC(); - ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc); - - const uint8_t* gc_map = method->GetNativeGcMap(); - uint32_t gc_map_length = static_cast((gc_map[0] << 24) | - (gc_map[1] << 16) | - (gc_map[2] << 8) 
| - (gc_map[3] << 0)); - verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length); - const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc); - for (size_t reg = 0; reg < num_regs; ++reg) { - if (TestBitmap(reg, reg_bitmap)) { - new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg)); - } else { - new_frame->SetVReg(reg, cur_frame->GetVReg(reg)); - } - } - - if (prev_frame_ != NULL) { - prev_frame_->SetLink(new_frame); - } else { - top_frame_ = new_frame; - } - prev_frame_ = new_frame; - } - return true; - } - - ShadowFrame* GetShadowFrameCopy() { - return top_frame_; - } - - private: - static bool TestBitmap(int reg, const uint8_t* reg_vector) { - return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0; - } - - ShadowFrame* prev_frame_; - ShadowFrame* top_frame_; -}; - -} // namespace art - -extern "C" { -using ::art::CatchHandlerIterator; -using ::art::DexFile; -using ::art::FindFieldFast; -using ::art::FindMethodFast; -using ::art::InstanceObjectRead; -using ::art::InstanceObjectWrite; -using ::art::InstancePrimitiveRead; -using ::art::InstancePrimitiveWrite; -using ::art::Instruction; -using ::art::InvokeType; -using ::art::JNIEnvExt; -using ::art::JValue; -using ::art::Locks; -using ::art::MethodHelper; -using ::art::PrettyClass; -using ::art::PrettyMethod; -using ::art::Primitive; -using ::art::ResolveStringFromCode; -using ::art::Runtime; -using ::art::ScopedJniEnvLocalRefState; -using ::art::ScopedObjectAccessUnchecked; -using ::art::ShadowFrame; -using ::art::ShadowFrameCopyVisitor; -using ::art::StaticObjectRead; -using ::art::StaticObjectWrite; -using ::art::StaticPrimitiveRead; -using ::art::StaticPrimitiveWrite; -using ::art::Thread; -using ::art::Thread; -using ::art::ThrowArithmeticExceptionDivideByZero; -using ::art::ThrowArrayIndexOutOfBoundsException; -using ::art::ThrowArrayStoreException; -using ::art::ThrowClassCastException; -using ::art::ThrowLocation; -using ::art::ThrowNoSuchMethodError; -using 
::art::ThrowNullPointerException; -using ::art::ThrowNullPointerExceptionFromDexPC; -using ::art::ThrowStackOverflowError; -using ::art::kDirect; -using ::art::kInterface; -using ::art::kNative; -using ::art::kStatic; -using ::art::kSuper; -using ::art::kVirtual; -using ::art::mirror::AbstractMethod; -using ::art::mirror::Array; -using ::art::mirror::Class; -using ::art::mirror::Field; -using ::art::mirror::Object; -using ::art::mirror::Throwable; - -//---------------------------------------------------------------------------- -// Thread -//---------------------------------------------------------------------------- - -Thread* art_portable_get_current_thread_from_code() { -#if defined(__arm__) || defined(__i386__) - LOG(FATAL) << "UNREACHABLE"; -#endif - return Thread::Current(); -} - -void* art_portable_set_current_thread_from_code(void* thread_object_addr) { - // Hijacked to set r9 on ARM. - LOG(FATAL) << "UNREACHABLE"; - return NULL; -} - -void art_portable_lock_object_from_code(Object* obj, Thread* thread) - EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { - DCHECK(obj != NULL); // Assumed to have been checked before entry - obj->MonitorEnter(thread); // May block - DCHECK(thread->HoldsLock(obj)); - // Only possible exception is NPE and is handled before entry - DCHECK(!thread->IsExceptionPending()); -} - -void art_portable_unlock_object_from_code(Object* obj, Thread* thread) - UNLOCK_FUNCTION(monitor_lock_) { - DCHECK(obj != NULL); // Assumed to have been checked before entry - // MonitorExit may throw exception - obj->MonitorExit(thread); -} - -void art_portable_test_suspend_from_code(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CheckSuspend(self); - if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) { - // Save out the shadow frame to the heap - ShadowFrameCopyVisitor visitor(self); - visitor.WalkStack(true); - self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy()); - 
self->SetDeoptimizationReturnValue(JValue()); - self->SetException(ThrowLocation(), reinterpret_cast(-1)); - } -} - -ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread, - ShadowFrame* new_shadow_frame, - AbstractMethod* method, - uint32_t num_vregs) { - ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame); - new_shadow_frame->SetMethod(method); - new_shadow_frame->SetNumberOfVRegs(num_vregs); - return old_frame; -} - -void art_portable_pop_shadow_frame_from_code(void*) { - LOG(FATAL) << "Implemented by IRBuilder."; -} - -void art_portable_mark_gc_card_from_code(void *, void*) { - LOG(FATAL) << "Implemented by IRBuilder."; -} - -//---------------------------------------------------------------------------- -// Exception -//---------------------------------------------------------------------------- - -bool art_portable_is_exception_pending_from_code() { - LOG(FATAL) << "Implemented by IRBuilder."; - return false; -} - -void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowArithmeticExceptionDivideByZero(); -} - -void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowArrayIndexOutOfBoundsException(index, length); -} - -void art_portable_throw_no_such_method_from_code(int32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowNoSuchMethodError(method_idx); -} - -void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // TODO: remove dex_pc argument from caller. 
- UNUSED(dex_pc); - Thread* self = Thread::Current(); - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - ThrowNullPointerExceptionFromDexPC(throw_location); -} - -void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowStackOverflowError(Thread::Current()); -} - -void art_portable_throw_exception_from_code(Throwable* exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Thread* self = Thread::Current(); - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - if (exception == NULL) { - ThrowNullPointerException(NULL, "throw with null exception"); - } else { - self->SetException(throw_location, exception); - } -} - -void* art_portable_get_and_clear_exception(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(self->IsExceptionPending()); - // TODO: make this inline. - Throwable* exception = self->GetException(NULL); - self->ClearException(); - return exception; -} - -int32_t art_portable_find_catch_block_from_code(AbstractMethod* current_method, - uint32_t ti_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Thread* self = Thread::Current(); // TODO: make an argument. - ThrowLocation throw_location; - Throwable* exception = self->GetException(&throw_location); - // Check for special deoptimization exception. 
- if (UNLIKELY(reinterpret_cast(exception) == -1)) { - return -1; - } - Class* exception_type = exception->GetClass(); - MethodHelper mh(current_method); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - DCHECK_LT(ti_offset, code_item->tries_size_); - const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset); - - int iter_index = 0; - int result = -1; - uint32_t catch_dex_pc = -1; - // Iterate over the catch handlers associated with dex_pc - for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) { - uint16_t iter_type_idx = it.GetHandlerTypeIndex(); - // Catch all case - if (iter_type_idx == DexFile::kDexNoIndex16) { - catch_dex_pc = it.GetHandlerAddress(); - result = iter_index; - break; - } - // Does this catch exception type apply? - Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); - if (UNLIKELY(iter_exception_type == NULL)) { - // TODO: check, the verifier (class linker?) should take care of resolving all exception - // classes early. - LOG(WARNING) << "Unresolved exception class when finding catch block: " - << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); - } else if (iter_exception_type->IsAssignableFrom(exception_type)) { - catch_dex_pc = it.GetHandlerAddress(); - result = iter_index; - break; - } - ++iter_index; - } - if (result != -1) { - // Handler found. - Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self, - throw_location, - current_method, - catch_dex_pc, - exception); - // If the catch block has no move-exception then clear the exception for it. 
- const Instruction* first_catch_instr = - Instruction::At(&mh.GetCodeItem()->insns_[catch_dex_pc]); - if (first_catch_instr->Opcode() != Instruction::MOVE_EXCEPTION) { - self->ClearException(); - } - } - return result; -} - - -//---------------------------------------------------------------------------- -// Object Space -//---------------------------------------------------------------------------- - -Object* art_portable_alloc_object_from_code(uint32_t type_idx, AbstractMethod* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocObjectFromCode(type_idx, referrer, thread, false); -} - -Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocObjectFromCode(type_idx, referrer, thread, true); -} - -Object* art_portable_alloc_array_from_code(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocArrayFromCode(type_idx, referrer, length, self, false); -} - -Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocArrayFromCode(type_idx, referrer, length, self, true); -} - -Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false); -} - -Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true); -} - -static AbstractMethod* FindMethodHelper(uint32_t 
method_idx, - Object* this_object, - AbstractMethod* caller_method, - bool access_check, - InvokeType type, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method = FindMethodFast(method_idx, - this_object, - caller_method, - access_check, - type); - if (UNLIKELY(method == NULL)) { - method = FindMethodFromCode(method_idx, this_object, caller_method, - thread, access_check, type); - if (UNLIKELY(method == NULL)) { - CHECK(thread->IsExceptionPending()); - return 0; // failure - } - } - DCHECK(!thread->IsExceptionPending()); - const void* code = method->GetEntryPointFromCompiledCode(); - - // When we return, the caller will branch to this address, so it had better not be 0! - if (UNLIKELY(code == NULL)) { - MethodHelper mh(method); - LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method) - << " location: " << mh.GetDexFile().GetLocation(); - } - return method; -} - -Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread); -} - -Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread); -} - -Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread); -} - -Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread); -} - -Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread); -} - -Object* art_portable_find_interface_method_from_code(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread); -} - -Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false); -} - -Object* art_portable_initialize_type_from_code(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false); -} - -Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Called when caller isn't guaranteed to have access to a type and the dex cache may be - // unpopulated - return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true); -} - -Object* art_portable_resolve_string_from_code(AbstractMethod* referrer, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ResolveStringFromCode(referrer, string_idx); -} - -int32_t art_portable_set32_static_from_code(uint32_t field_idx, - AbstractMethod* referrer, - int32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, - referrer, - 
StaticPrimitiveWrite, - sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - field->Set32(field->GetDeclaringClass(), new_value); - return 0; - } - field = FindFieldFromCode(field_idx, - referrer, - Thread::Current(), - StaticPrimitiveWrite, - sizeof(uint32_t), - true); - if (LIKELY(field != NULL)) { - field->Set32(field->GetDeclaringClass(), new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set64_static_from_code(uint32_t field_idx, - AbstractMethod* referrer, - int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - field->Set64(field->GetDeclaringClass(), new_value); - return 0; - } - field = FindFieldFromCode(field_idx, - referrer, - Thread::Current(), - StaticPrimitiveWrite, - sizeof(uint64_t), - true); - if (LIKELY(field != NULL)) { - field->Set64(field->GetDeclaringClass(), new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set_obj_static_from_code(uint32_t field_idx, - AbstractMethod* referrer, - Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*)); - if (LIKELY(field != NULL)) { - field->SetObj(field->GetDeclaringClass(), new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticObjectWrite, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - field->SetObj(field->GetDeclaringClass(), new_value); - return 0; - } - return -1; -} - -int32_t art_portable_get32_static_from_code(uint32_t field_idx, AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - return field->Get32(field->GetDeclaringClass()); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticPrimitiveRead, 
sizeof(uint32_t), true); - if (LIKELY(field != NULL)) { - return field->Get32(field->GetDeclaringClass()); - } - return 0; -} - -int64_t art_portable_get64_static_from_code(uint32_t field_idx, AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - return field->Get64(field->GetDeclaringClass()); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticPrimitiveRead, sizeof(uint64_t), true); - if (LIKELY(field != NULL)) { - return field->Get64(field->GetDeclaringClass()); - } - return 0; -} - -Object* art_portable_get_obj_static_from_code(uint32_t field_idx, AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*)); - if (LIKELY(field != NULL)) { - return field->GetObj(field->GetDeclaringClass()); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticObjectRead, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - return field->GetObj(field->GetDeclaringClass()); - } - return 0; -} - -int32_t art_portable_set32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, uint32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - field->Set32(obj, new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveWrite, sizeof(uint32_t), true); - if (LIKELY(field != NULL)) { - field->Set32(obj, new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, 
InstancePrimitiveWrite, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - field->Set64(obj, new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveWrite, sizeof(uint64_t), true); - if (LIKELY(field != NULL)) { - field->Set64(obj, new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*)); - if (LIKELY(field != NULL)) { - field->SetObj(obj, new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstanceObjectWrite, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - field->SetObj(obj, new_value); - return 0; - } - return -1; -} - -int32_t art_portable_get32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - return field->Get32(obj); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveRead, sizeof(uint32_t), true); - if (LIKELY(field != NULL)) { - return field->Get32(obj); - } - return 0; -} - -int64_t art_portable_get64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - return field->Get64(obj); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveRead, sizeof(uint64_t), true); - if (LIKELY(field != NULL)) { - return field->Get64(obj); - } - return 0; -} - -Object* art_portable_get_obj_instance_from_code(uint32_t field_idx, 
AbstractMethod* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*)); - if (LIKELY(field != NULL)) { - return field->GetObj(obj); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstanceObjectRead, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - return field->GetObj(obj); - } - return 0; -} - -void art_portable_fill_array_data_from_code(AbstractMethod* method, uint32_t dex_pc, - Array* array, uint32_t payload_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Test: Is array equal to null? (Guard NullPointerException) - if (UNLIKELY(array == NULL)) { - art_portable_throw_null_pointer_exception_from_code(dex_pc); - return; - } - - // Find the payload from the CodeItem - MethodHelper mh(method); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - - DCHECK_GT(code_item->insns_size_in_code_units_, payload_offset); - - const Instruction::ArrayDataPayload* payload = - reinterpret_cast( - code_item->insns_ + payload_offset); - - DCHECK_EQ(payload->ident, - static_cast(Instruction::kArrayDataSignature)); - - // Test: Is array big enough? 
- uint32_t array_len = static_cast(array->GetLength()); - if (UNLIKELY(array_len < payload->element_count)) { - int32_t last_index = payload->element_count - 1; - art_portable_throw_array_bounds_from_code(array_len, last_index); - return; - } - - // Copy the data - size_t size = payload->element_width * payload->element_count; - memcpy(array->GetRawData(payload->element_width), payload->data, size); -} - - - -//---------------------------------------------------------------------------- -// Type checking, in the nature of casting -//---------------------------------------------------------------------------- - -int32_t art_portable_is_assignable_from_code(const Class* dest_type, const Class* src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(dest_type != NULL); - DCHECK(src_type != NULL); - return dest_type->IsAssignableFrom(src_type) ? 1 : 0; -} - -void art_portable_check_cast_from_code(const Class* dest_type, const Class* src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); - DCHECK(src_type->IsClass()) << PrettyClass(src_type); - if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) { - ThrowClassCastException(dest_type, src_type); - } -} - -void art_portable_check_put_array_element_from_code(const Object* element, - const Object* array) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (element == NULL) { - return; - } - DCHECK(array != NULL); - Class* array_class = array->GetClass(); - DCHECK(array_class != NULL); - Class* component_type = array_class->GetComponentType(); - Class* element_class = element->GetClass(); - if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) { - ThrowArrayStoreException(element_class, array_class); - } - return; -} - -//---------------------------------------------------------------------------- -// JNI -//---------------------------------------------------------------------------- - -// Called on entry to JNI, transition out of Runnable 
and release share of mutator_lock_. -uint32_t art_portable_jni_method_start(Thread* self) - UNLOCK_FUNCTION(GlobalSynchronizatio::mutator_lock_) { - JNIEnvExt* env = self->GetJniEnv(); - uint32_t saved_local_ref_cookie = env->local_ref_cookie; - env->local_ref_cookie = env->locals.GetSegmentState(); - self->TransitionFromRunnableToSuspended(kNative); - return saved_local_ref_cookie; -} - -uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self) - UNLOCK_FUNCTION(Locks::mutator_lock_) { - self->DecodeJObject(to_lock)->MonitorEnter(self); - return art_portable_jni_method_start(self); -} - -static inline void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { - JNIEnvExt* env = self->GetJniEnv(); - env->locals.SetSegmentState(env->local_ref_cookie); - env->local_ref_cookie = saved_local_ref_cookie; -} - -void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - PopLocalReferences(saved_local_ref_cookie, self); -} - - -void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, - jobject locked, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. - PopLocalReferences(saved_local_ref_cookie, self); -} - -Object* art_portable_jni_method_end_with_reference(jobject result, - uint32_t saved_local_ref_cookie, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - Object* o = self->DecodeJObject(result); // Must decode before pop. - PopLocalReferences(saved_local_ref_cookie, self); - // Process result. 
- if (UNLIKELY(self->GetJniEnv()->check_jni)) { - if (self->IsExceptionPending()) { - return NULL; - } - CheckReferenceResult(o, self); - } - return o; -} - -Object* art_portable_jni_method_end_with_reference_synchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. - Object* o = self->DecodeJObject(result); - PopLocalReferences(saved_local_ref_cookie, self); - // Process result. - if (UNLIKELY(self->GetJniEnv()->check_jni)) { - if (self->IsExceptionPending()) { - return NULL; - } - CheckReferenceResult(o, self); - } - return o; -} - -// Handler for invocation on proxy methods. Create a boxed argument array and invoke the invocation -// handler which is a field within the proxy object receiver. The var args encode the arguments -// with the last argument being a pointer to a JValue to store the result in. -void art_portable_proxy_invoke_handler_from_code(AbstractMethod* proxy_method, ...) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - va_list ap; - va_start(ap, proxy_method); - - Object* receiver = va_arg(ap, Object*); - Thread* self = va_arg(ap, Thread*); - MethodHelper proxy_mh(proxy_method); - - // Ensure we don't get thread suspension until the object arguments are safely in jobjects. - const char* old_cause = - self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); - self->VerifyStack(); - - // Start new JNI local reference state. - JNIEnvExt* env = self->GetJniEnv(); - ScopedObjectAccessUnchecked soa(env); - ScopedJniEnvLocalRefState env_state(env); - - // Create local ref. copies of the receiver. - jobject rcvr_jobj = soa.AddLocalReference(receiver); - - // Convert proxy method into expected interface method. 
- AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); - DCHECK(interface_method != NULL); - DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); - jobject interface_method_jobj = soa.AddLocalReference(interface_method); - - // Record arguments and turn Object* arguments into jobject to survive GC. - std::vector args; - const size_t num_params = proxy_mh.NumArgs(); - for (size_t i = 1; i < num_params; ++i) { - jvalue val; - switch (proxy_mh.GetParamPrimitiveType(i)) { - case Primitive::kPrimNot: - val.l = soa.AddLocalReference(va_arg(ap, Object*)); - break; - case Primitive::kPrimBoolean: // Fall-through. - case Primitive::kPrimByte: // Fall-through. - case Primitive::kPrimChar: // Fall-through. - case Primitive::kPrimShort: // Fall-through. - case Primitive::kPrimInt: // Fall-through. - val.i = va_arg(ap, jint); - break; - case Primitive::kPrimFloat: - // TODO: should this be jdouble? Floats aren't passed to var arg routines. - val.i = va_arg(ap, jint); - break; - case Primitive::kPrimDouble: - val.d = (va_arg(ap, jdouble)); - break; - case Primitive::kPrimLong: - val.j = (va_arg(ap, jlong)); - break; - case Primitive::kPrimVoid: - LOG(FATAL) << "UNREACHABLE"; - val.j = 0; - break; - } - args.push_back(val); - } - self->EndAssertNoThreadSuspension(old_cause); - JValue* result_location = NULL; - const char* shorty = proxy_mh.GetShorty(); - if (shorty[0] != 'V') { - result_location = va_arg(ap, JValue*); - } - va_end(ap); - JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args); - if (result_location != NULL) { - *result_location = result; - } -} - -//---------------------------------------------------------------------------- -// Memory barrier -//---------------------------------------------------------------------------- - -void art_portable_constructor_barrier() { - LOG(FATAL) << "Implemented by IRBuilder."; -} -} // extern "C" diff --git a/runtime/runtime_support_llvm.h 
b/runtime/runtime_support_llvm.h deleted file mode 100644 index 43ea953a96..0000000000 --- a/runtime/runtime_support_llvm.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ -#define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ - -extern "C" { -//---------------------------------------------------------------------------- -// Runtime Support Function Lookup Callback -//---------------------------------------------------------------------------- -void* art_portable_find_runtime_support_func(void* context, const char* name); -} // extern "C" - -#endif // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ diff --git a/runtime/runtime_support_test.cc b/runtime/runtime_support_test.cc deleted file mode 100644 index b827813146..0000000000 --- a/runtime/runtime_support_test.cc +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "runtime_support.h" - -#include "common_test.h" -#include - -namespace art { - -class RuntimeSupportTest : public CommonTest {}; - -TEST_F(RuntimeSupportTest, DoubleToLong) { - EXPECT_EQ(std::numeric_limits::max(), art_d2l(1.85e19)); - EXPECT_EQ(std::numeric_limits::min(), art_d2l(-1.85e19)); - EXPECT_EQ(0LL, art_d2l(0)); - EXPECT_EQ(1LL, art_d2l(1.0)); - EXPECT_EQ(10LL, art_d2l(10.0)); - EXPECT_EQ(100LL, art_d2l(100.0)); - EXPECT_EQ(-1LL, art_d2l(-1.0)); - EXPECT_EQ(-10LL, art_d2l(-10.0)); - EXPECT_EQ(-100LL, art_d2l(-100.0)); -} - -TEST_F(RuntimeSupportTest, FloatToLong) { - EXPECT_EQ(std::numeric_limits::max(), art_f2l(1.85e19)); - EXPECT_EQ(std::numeric_limits::min(), art_f2l(-1.85e19)); - EXPECT_EQ(0LL, art_f2l(0)); - EXPECT_EQ(1LL, art_f2l(1.0)); - EXPECT_EQ(10LL, art_f2l(10.0)); - EXPECT_EQ(100LL, art_f2l(100.0)); - EXPECT_EQ(-1LL, art_f2l(-1.0)); - EXPECT_EQ(-10LL, art_f2l(-10.0)); - EXPECT_EQ(-100LL, art_f2l(-100.0)); -} - -TEST_F(RuntimeSupportTest, DoubleToInt) { - EXPECT_EQ(std::numeric_limits::max(), art_d2i(4.3e9)); - EXPECT_EQ(std::numeric_limits::min(), art_d2i(-4.3e9)); - EXPECT_EQ(0L, art_d2i(0)); - EXPECT_EQ(1L, art_d2i(1.0)); - EXPECT_EQ(10L, art_d2i(10.0)); - EXPECT_EQ(100L, art_d2i(100.0)); - EXPECT_EQ(-1L, art_d2i(-1.0)); - EXPECT_EQ(-10L, art_d2i(-10.0)); - EXPECT_EQ(-100L, art_d2i(-100.0)); -} - -TEST_F(RuntimeSupportTest, FloatToInt) { - EXPECT_EQ(std::numeric_limits::max(), art_f2i(4.3e9)); - EXPECT_EQ(std::numeric_limits::min(), art_f2i(-4.3e9)); - EXPECT_EQ(0L, art_f2i(0)); - EXPECT_EQ(1L, art_f2i(1.0)); - EXPECT_EQ(10L, art_f2i(10.0)); - EXPECT_EQ(100L, art_f2i(100.0)); - EXPECT_EQ(-1L, art_f2i(-1.0)); - EXPECT_EQ(-10L, art_f2i(-10.0)); - EXPECT_EQ(-100L, art_f2i(-100.0)); -} - -} // namespace art diff --git a/runtime/thread.cc b/runtime/thread.cc index d5fdd20400..97a1410892 100644 --- a/runtime/thread.cc +++ 
b/runtime/thread.cc @@ -38,6 +38,7 @@ #include "cutils/atomic-inline.h" #include "debugger.h" #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "gc_map.h" #include "gc/accounting/card_table-inl.h" #include "gc/heap.h" @@ -54,7 +55,6 @@ #include "object_utils.h" #include "reflection.h" #include "runtime.h" -#include "runtime_support.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" @@ -86,16 +86,23 @@ static void UnimplementedEntryPoint() { } #endif +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints); + void Thread::InitFunctionPointers() { #if !defined(__APPLE__) // The Mac GCC is too old to accept this code. // Insert a placeholder so we can easily tell if we call an unimplemented entry point. - uintptr_t* begin = reinterpret_cast(&entrypoints_); - uintptr_t* end = reinterpret_cast(reinterpret_cast(begin) + sizeof(entrypoints_)); + uintptr_t* begin = reinterpret_cast(&quick_entrypoints_); + uintptr_t* end = reinterpret_cast(reinterpret_cast(begin) + sizeof(quick_entrypoints_)); + for (uintptr_t* it = begin; it != end; ++it) { + *it = reinterpret_cast(UnimplementedEntryPoint); + } + begin = reinterpret_cast(&portable_entrypoints_); + end = reinterpret_cast(reinterpret_cast(begin) + sizeof(portable_entrypoints_)); for (uintptr_t* it = begin; it != end; ++it) { *it = reinterpret_cast(UnimplementedEntryPoint); } #endif - InitEntryPoints(&entrypoints_); + InitEntryPoints(&quick_entrypoints_, &portable_entrypoints_); } void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) { @@ -1582,86 +1589,87 @@ struct EntryPointInfo { uint32_t offset; const char* name; }; -#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x } +#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x), #x } +#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x), #x } static const EntryPointInfo gThreadEntryPointInfo[] = { - ENTRY_POINT_INFO(pAllocArrayFromCode), - 
ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck), - ENTRY_POINT_INFO(pAllocObjectFromCode), - ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck), - ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode), - ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck), - ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode), - ENTRY_POINT_INFO(pCanPutArrayElementFromCode), - ENTRY_POINT_INFO(pCheckCastFromCode), - ENTRY_POINT_INFO(pInitializeStaticStorage), - ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode), - ENTRY_POINT_INFO(pInitializeTypeFromCode), - ENTRY_POINT_INFO(pResolveStringFromCode), - ENTRY_POINT_INFO(pSet32Instance), - ENTRY_POINT_INFO(pSet32Static), - ENTRY_POINT_INFO(pSet64Instance), - ENTRY_POINT_INFO(pSet64Static), - ENTRY_POINT_INFO(pSetObjInstance), - ENTRY_POINT_INFO(pSetObjStatic), - ENTRY_POINT_INFO(pGet32Instance), - ENTRY_POINT_INFO(pGet32Static), - ENTRY_POINT_INFO(pGet64Instance), - ENTRY_POINT_INFO(pGet64Static), - ENTRY_POINT_INFO(pGetObjInstance), - ENTRY_POINT_INFO(pGetObjStatic), - ENTRY_POINT_INFO(pHandleFillArrayDataFromCode), - ENTRY_POINT_INFO(pJniMethodStart), - ENTRY_POINT_INFO(pJniMethodStartSynchronized), - ENTRY_POINT_INFO(pJniMethodEnd), - ENTRY_POINT_INFO(pJniMethodEndSynchronized), - ENTRY_POINT_INFO(pJniMethodEndWithReference), - ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized), - ENTRY_POINT_INFO(pLockObjectFromCode), - ENTRY_POINT_INFO(pUnlockObjectFromCode), - ENTRY_POINT_INFO(pCmpgDouble), - ENTRY_POINT_INFO(pCmpgFloat), - ENTRY_POINT_INFO(pCmplDouble), - ENTRY_POINT_INFO(pCmplFloat), - ENTRY_POINT_INFO(pFmod), - ENTRY_POINT_INFO(pSqrt), - ENTRY_POINT_INFO(pL2d), - ENTRY_POINT_INFO(pFmodf), - ENTRY_POINT_INFO(pL2f), - ENTRY_POINT_INFO(pD2iz), - ENTRY_POINT_INFO(pF2iz), - ENTRY_POINT_INFO(pIdivmod), - ENTRY_POINT_INFO(pD2l), - ENTRY_POINT_INFO(pF2l), - ENTRY_POINT_INFO(pLdiv), - ENTRY_POINT_INFO(pLdivmod), - ENTRY_POINT_INFO(pLmul), - ENTRY_POINT_INFO(pShlLong), - ENTRY_POINT_INFO(pShrLong), - 
ENTRY_POINT_INFO(pUshrLong), - ENTRY_POINT_INFO(pInterpreterToInterpreterEntry), - ENTRY_POINT_INFO(pInterpreterToQuickEntry), - ENTRY_POINT_INFO(pIndexOf), - ENTRY_POINT_INFO(pMemcmp16), - ENTRY_POINT_INFO(pStringCompareTo), - ENTRY_POINT_INFO(pMemcpy), - ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode), - ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode), - ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeInterfaceTrampoline), - ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pCheckSuspendFromCode), - ENTRY_POINT_INFO(pTestSuspendFromCode), - ENTRY_POINT_INFO(pDeliverException), - ENTRY_POINT_INFO(pThrowArrayBoundsFromCode), - ENTRY_POINT_INFO(pThrowDivZeroFromCode), - ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode), - ENTRY_POINT_INFO(pThrowNullPointerFromCode), - ENTRY_POINT_INFO(pThrowStackOverflowFromCode), + QUICK_ENTRY_POINT_INFO(pAllocArrayFromCode), + QUICK_ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pAllocObjectFromCode), + QUICK_ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode), + QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode), + QUICK_ENTRY_POINT_INFO(pCanPutArrayElementFromCode), + QUICK_ENTRY_POINT_INFO(pCheckCastFromCode), + QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage), + QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode), + QUICK_ENTRY_POINT_INFO(pInitializeTypeFromCode), + QUICK_ENTRY_POINT_INFO(pResolveStringFromCode), + QUICK_ENTRY_POINT_INFO(pSet32Instance), + QUICK_ENTRY_POINT_INFO(pSet32Static), + QUICK_ENTRY_POINT_INFO(pSet64Instance), + QUICK_ENTRY_POINT_INFO(pSet64Static), + 
QUICK_ENTRY_POINT_INFO(pSetObjInstance), + QUICK_ENTRY_POINT_INFO(pSetObjStatic), + QUICK_ENTRY_POINT_INFO(pGet32Instance), + QUICK_ENTRY_POINT_INFO(pGet32Static), + QUICK_ENTRY_POINT_INFO(pGet64Instance), + QUICK_ENTRY_POINT_INFO(pGet64Static), + QUICK_ENTRY_POINT_INFO(pGetObjInstance), + QUICK_ENTRY_POINT_INFO(pGetObjStatic), + QUICK_ENTRY_POINT_INFO(pHandleFillArrayDataFromCode), + QUICK_ENTRY_POINT_INFO(pJniMethodStart), + QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized), + QUICK_ENTRY_POINT_INFO(pJniMethodEnd), + QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized), + QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference), + QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized), + QUICK_ENTRY_POINT_INFO(pLockObjectFromCode), + QUICK_ENTRY_POINT_INFO(pUnlockObjectFromCode), + QUICK_ENTRY_POINT_INFO(pCmpgDouble), + QUICK_ENTRY_POINT_INFO(pCmpgFloat), + QUICK_ENTRY_POINT_INFO(pCmplDouble), + QUICK_ENTRY_POINT_INFO(pCmplFloat), + QUICK_ENTRY_POINT_INFO(pFmod), + QUICK_ENTRY_POINT_INFO(pSqrt), + QUICK_ENTRY_POINT_INFO(pL2d), + QUICK_ENTRY_POINT_INFO(pFmodf), + QUICK_ENTRY_POINT_INFO(pL2f), + QUICK_ENTRY_POINT_INFO(pD2iz), + QUICK_ENTRY_POINT_INFO(pF2iz), + QUICK_ENTRY_POINT_INFO(pIdivmod), + QUICK_ENTRY_POINT_INFO(pD2l), + QUICK_ENTRY_POINT_INFO(pF2l), + QUICK_ENTRY_POINT_INFO(pLdiv), + QUICK_ENTRY_POINT_INFO(pLdivmod), + QUICK_ENTRY_POINT_INFO(pLmul), + QUICK_ENTRY_POINT_INFO(pShlLong), + QUICK_ENTRY_POINT_INFO(pShrLong), + QUICK_ENTRY_POINT_INFO(pUshrLong), + QUICK_ENTRY_POINT_INFO(pInterpreterToInterpreterEntry), + QUICK_ENTRY_POINT_INFO(pInterpreterToQuickEntry), + QUICK_ENTRY_POINT_INFO(pIndexOf), + QUICK_ENTRY_POINT_INFO(pMemcmp16), + QUICK_ENTRY_POINT_INFO(pStringCompareTo), + QUICK_ENTRY_POINT_INFO(pMemcpy), + QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode), + QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline), + 
QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pCheckSuspendFromCode), + QUICK_ENTRY_POINT_INFO(pTestSuspendFromCode), + QUICK_ENTRY_POINT_INFO(pDeliverException), + QUICK_ENTRY_POINT_INFO(pThrowArrayBoundsFromCode), + QUICK_ENTRY_POINT_INFO(pThrowDivZeroFromCode), + QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode), + QUICK_ENTRY_POINT_INFO(pThrowNullPointerFromCode), + QUICK_ENTRY_POINT_INFO(pThrowStackOverflowFromCode), + PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode), }; -#undef ENTRY_POINT_INFO +#undef QUICK_ENTRY_POINT_INFO void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) { CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets. @@ -1686,8 +1694,9 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_ #undef DO_THREAD_OFFSET size_t entry_point_count = arraysize(gThreadEntryPointInfo); - CHECK_EQ(entry_point_count * size_of_pointers, sizeof(QuickEntryPoints)); - uint32_t expected_offset = OFFSETOF_MEMBER(Thread, entrypoints_); + CHECK_EQ(entry_point_count * size_of_pointers, + sizeof(QuickEntryPoints) + sizeof(PortableEntryPoints)); + uint32_t expected_offset = OFFSETOF_MEMBER(Thread, quick_entrypoints_); for (size_t i = 0; i < entry_point_count; ++i) { CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name; expected_offset += size_of_pointers; diff --git a/runtime/thread.h b/runtime/thread.h index d02ab361a9..ff0fe228c0 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -26,9 +26,10 @@ #include #include "base/macros.h" +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "globals.h" #include "jvalue.h" -#include 
"entrypoints/quick/quick_entrypoints.h" #include "locks.h" #include "offsets.h" #include "root_visitor.h" @@ -773,9 +774,10 @@ class PACKED(4) Thread { Closure* checkpoint_function_; public: - // Runtime support function pointers + // Entrypoint function pointers // TODO: move this near the top, since changing its offset requires all oats to be recompiled! - QuickEntryPoints entrypoints_; + QuickEntryPoints quick_entrypoints_; + PortableEntryPoints portable_entrypoints_; private: // How many times has our pthread key's destructor been called? diff --git a/runtime/thread_arm.cc b/runtime/thread_arm.cc deleted file mode 100644 index 0ef26bff5e..0000000000 --- a/runtime/thread_arm.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "thread.h" - -#include "asm_support.h" -#include "base/macros.h" - -namespace art { - -void Thread::InitCpu() { - CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); - CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); -} - -} // namespace art diff --git a/runtime/thread_mips.cc b/runtime/thread_mips.cc deleted file mode 100644 index 0ef26bff5e..0000000000 --- a/runtime/thread_mips.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "thread.h" - -#include "asm_support.h" -#include "base/macros.h" - -namespace art { - -void Thread::InitCpu() { - CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); - CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); -} - -} // namespace art diff --git a/runtime/thread_x86.cc b/runtime/thread_x86.cc deleted file mode 100644 index c398b2877a..0000000000 --- a/runtime/thread_x86.cc +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "thread.h" - -#include -#include - -#include "asm_support.h" -#include "base/macros.h" -#include "thread.h" -#include "thread_list.h" - -#if defined(__APPLE__) -#include -#include -struct descriptor_table_entry_t { - uint16_t limit0; - uint16_t base0; - unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; - unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; -} __attribute__((packed)); -#define MODIFY_LDT_CONTENTS_DATA 0 -#else -#include -#endif - -namespace art { - -void Thread::InitCpu() { - static Mutex modify_ldt_lock("modify_ldt lock"); - MutexLock mu(Thread::Current(), modify_ldt_lock); - - const uintptr_t base = reinterpret_cast(this); - const size_t limit = kPageSize; - - const int contents = MODIFY_LDT_CONTENTS_DATA; - const int seg_32bit = 1; - const int read_exec_only = 0; - const int limit_in_pages = 0; - const int seg_not_present = 0; - const int useable = 1; - - int entry_number = -1; - -#if defined(__APPLE__) - descriptor_table_entry_t entry; - memset(&entry, 0, sizeof(entry)); - entry.limit0 = (limit & 0x0ffff); - entry.limit = (limit & 0xf0000) >> 16; - entry.base0 = (base & 0x0000ffff); - entry.base1 = (base & 0x00ff0000) >> 16; - entry.base2 = (base & 0xff000000) >> 24; - entry.type = ((read_exec_only ^ 1) << 1) | (contents << 2); - entry.s = 1; - entry.dpl = 0x3; - entry.p = seg_not_present ^ 1; - entry.avl = useable; - entry.l = 0; - entry.d = seg_32bit; - entry.g = limit_in_pages; - - entry_number = i386_set_ldt(LDT_AUTO_ALLOC, reinterpret_cast(&entry), 1); - if (entry_number == -1) { - PLOG(FATAL) << 
"i386_set_ldt failed"; - } -#else - // Read current LDT entries. - CHECK_EQ((size_t)LDT_ENTRY_SIZE, sizeof(uint64_t)); - std::vector ldt(LDT_ENTRIES); - size_t ldt_size(sizeof(uint64_t) * ldt.size()); - memset(&ldt[0], 0, ldt_size); - // TODO: why doesn't this return LDT_ENTRY_SIZE * LDT_ENTRIES for the main thread? - syscall(__NR_modify_ldt, 0, &ldt[0], ldt_size); - - // Find the first empty slot. - for (entry_number = 0; entry_number < LDT_ENTRIES && ldt[entry_number] != 0; ++entry_number) { - } - if (entry_number >= LDT_ENTRIES) { - LOG(FATAL) << "Failed to find a free LDT slot"; - } - - // Update LDT entry. - user_desc ldt_entry; - memset(&ldt_entry, 0, sizeof(ldt_entry)); - ldt_entry.entry_number = entry_number; - ldt_entry.base_addr = base; - ldt_entry.limit = limit; - ldt_entry.seg_32bit = seg_32bit; - ldt_entry.contents = contents; - ldt_entry.read_exec_only = read_exec_only; - ldt_entry.limit_in_pages = limit_in_pages; - ldt_entry.seg_not_present = seg_not_present; - ldt_entry.useable = useable; - CHECK_EQ(0, syscall(__NR_modify_ldt, 1, &ldt_entry, sizeof(ldt_entry))); - entry_number = ldt_entry.entry_number; -#endif - - // Change %fs to be new LDT entry. - uint16_t table_indicator = 1 << 2; // LDT - uint16_t rpl = 3; // Requested privilege level - uint16_t selector = (entry_number << 3) | table_indicator | rpl; - // TODO: use our assembler to generate code - __asm__ __volatile__("movw %w0, %%fs" - : // output - : "q"(selector) // input - :); // clobber - - // Allow easy indirection back to Thread*. - self_ = this; - - // Sanity check that reads from %fs point to this Thread*. - Thread* self_check; - // TODO: use our assembler to generate code - CHECK_EQ(THREAD_SELF_OFFSET, OFFSETOF_MEMBER(Thread, self_)); - __asm__ __volatile__("movl %%fs:(%1), %0" - : "=r"(self_check) // output - : "r"(THREAD_SELF_OFFSET) // input - :); // clobber - CHECK_EQ(self_check, this); - - // Sanity check other offsets. 
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); -} - -} // namespace art -- cgit v1.2.3-59-g8ed1b From 468532ea115657709bc32ee498e701a4c71762d4 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 5 Aug 2013 10:56:33 -0700 Subject: Entry point clean up. Create set of entry points needed for image methods to avoid fix-up at load time: - interpreter - bridge to interpreter, bridge to compiled code - jni - dlsym lookup - quick - resolution and bridge to interpreter - portable - resolution and bridge to interpreter Fix JNI work around to use JNI work around argument rewriting code that'd been accidentally disabled. Remove abstact method error stub, use interpreter bridge instead. Consolidate trampoline (previously stub) generation in generic helper. Simplify trampolines to jump directly into assembly code, keeps stack crawlable. Dex: replace use of int with ThreadOffset for values that are thread offsets. Tidy entry point routines between interpreter, jni, quick and portable. 
Change-Id: I52a7c2bbb1b7e0ff8a3c3100b774212309d0828e (cherry picked from commit 848871b4d8481229c32e0d048a9856e5a9a17ef9) --- compiler/Android.mk | 3 +- compiler/dex/quick/arm/call_arm.cc | 6 +- compiler/dex/quick/arm/codegen_arm.h | 6 +- compiler/dex/quick/arm/int_arm.cc | 6 +- compiler/dex/quick/arm/target_arm.cc | 4 +- compiler/dex/quick/arm/utility_arm.cc | 2 +- compiler/dex/quick/gen_common.cc | 96 ++-- compiler/dex/quick/gen_invoke.cc | 67 +-- compiler/dex/quick/mips/call_mips.cc | 6 +- compiler/dex/quick/mips/codegen_mips.h | 6 +- compiler/dex/quick/mips/fp_mips.cc | 2 +- compiler/dex/quick/mips/int_mips.cc | 4 +- compiler/dex/quick/mips/target_mips.cc | 4 +- compiler/dex/quick/mips/utility_mips.cc | 2 +- compiler/dex/quick/mir_to_lir.h | 44 +- compiler/dex/quick/x86/call_x86.cc | 8 +- compiler/dex/quick/x86/codegen_x86.h | 8 +- compiler/dex/quick/x86/int_x86.cc | 12 +- compiler/dex/quick/x86/target_x86.cc | 2 +- compiler/dex/quick/x86/utility_x86.cc | 4 +- compiler/driver/compiler_driver.cc | 78 +-- compiler/driver/compiler_driver.h | 23 +- compiler/image_writer.cc | 107 ++-- compiler/image_writer.h | 11 +- compiler/jni/quick/jni_compiler.cc | 6 +- compiler/oat_writer.cc | 128 +++-- compiler/oat_writer.h | 16 +- compiler/stubs/portable/stubs.cc | 138 ----- compiler/stubs/quick/stubs.cc | 263 ---------- compiler/stubs/stubs.h | 59 --- compiler/utils/arm/assembler_arm.cc | 10 +- compiler/utils/mips/assembler_mips.cc | 33 +- runtime/Android.mk | 7 +- runtime/arch/arm/asm_support_arm.S | 7 + runtime/arch/arm/entrypoints_init_arm.cc | 193 +++---- runtime/arch/arm/jni_entrypoints_arm.S | 7 +- runtime/arch/arm/portable_entrypoints_arm.S | 3 + runtime/arch/arm/quick_entrypoints_arm.S | 183 +++---- runtime/arch/mips/asm_support_mips.S | 8 + runtime/arch/mips/entrypoints_init_mips.cc | 192 +++---- runtime/arch/mips/jni_entrypoints_mips.S | 4 +- runtime/arch/mips/portable_entrypoints_mips.S | 12 +- runtime/arch/mips/quick_entrypoints_mips.S | 186 +++---- 
runtime/arch/x86/asm_support_x86.S | 12 + runtime/arch/x86/entrypoints_init_x86.cc | 246 ++++----- runtime/arch/x86/portable_entrypoints_x86.S | 19 +- runtime/arch/x86/quick_entrypoints_x86.S | 209 ++++---- runtime/class_linker.cc | 99 ++-- runtime/class_linker.h | 20 +- runtime/common_test.h | 11 +- runtime/entrypoints/entrypoint_utils.h | 71 +-- .../interpreter/interpreter_entrypoints.cc | 43 ++ .../interpreter/interpreter_entrypoints.h | 47 ++ runtime/entrypoints/jni/jni_entrypoints.cc | 87 +++- runtime/entrypoints/jni/jni_entrypoints.h | 37 ++ .../entrypoints/portable/portable_entrypoints.h | 8 +- runtime/entrypoints/quick/quick_argument_visitor.h | 138 ----- runtime/entrypoints/quick/quick_entrypoints.h | 74 ++- .../quick/quick_instrumentation_entrypoints.cc | 2 +- .../quick/quick_interpreter_entrypoints.cc | 128 ----- runtime/entrypoints/quick/quick_jni_entrypoints.cc | 74 --- .../entrypoints/quick/quick_proxy_entrypoints.cc | 126 ----- .../entrypoints/quick/quick_stub_entrypoints.cc | 295 ----------- .../quick/quick_trampoline_entrypoints.cc | 558 +++++++++++++++++++++ runtime/instrumentation.cc | 26 +- runtime/interpreter/interpreter.cc | 16 +- runtime/interpreter/interpreter.h | 6 +- runtime/jni_internal.h | 4 + runtime/mirror/abstract_method-inl.h | 4 +- runtime/mirror/abstract_method.cc | 7 +- runtime/native/dalvik_system_VMRuntime.cc | 7 - runtime/oat.cc | 112 ++++- runtime/oat.h | 33 +- runtime/oat_test.cc | 2 +- runtime/object_utils.h | 4 + runtime/stack.cc | 2 +- runtime/thread.cc | 82 +-- runtime/thread.h | 30 +- 78 files changed, 2129 insertions(+), 2476 deletions(-) delete mode 100644 compiler/stubs/portable/stubs.cc delete mode 100644 compiler/stubs/quick/stubs.cc delete mode 100644 compiler/stubs/stubs.h create mode 100644 runtime/entrypoints/interpreter/interpreter_entrypoints.cc create mode 100644 runtime/entrypoints/interpreter/interpreter_entrypoints.h create mode 100644 runtime/entrypoints/jni/jni_entrypoints.h delete mode 100644 
runtime/entrypoints/quick/quick_argument_visitor.h delete mode 100644 runtime/entrypoints/quick/quick_interpreter_entrypoints.cc delete mode 100644 runtime/entrypoints/quick/quick_proxy_entrypoints.cc delete mode 100644 runtime/entrypoints/quick/quick_stub_entrypoints.cc create mode 100644 runtime/entrypoints/quick/quick_trampoline_entrypoints.cc (limited to 'compiler/utils/mips/assembler_mips.cc') diff --git a/compiler/Android.mk b/compiler/Android.mk index fec1e11c47..f81b460ee4 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -75,8 +75,7 @@ LIBART_COMPILER_SRC_FILES := \ llvm/runtime_support_builder_arm.cc \ llvm/runtime_support_builder_thumb2.cc \ llvm/runtime_support_builder_x86.cc \ - stubs/portable/stubs.cc \ - stubs/quick/stubs.cc \ + trampolines/trampoline_compiler.cc \ utils/arm/assembler_arm.cc \ utils/arm/managed_register_arm.cc \ utils/assembler.cc \ diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index 745e43dc38..2d8e24f58e 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -432,7 +432,7 @@ void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { // Making a call - use explicit registers FlushAllRegs(); /* Everything to home location */ LoadValueDirectFixed(rl_src, r0); - LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(), rARM_LR); // Materialize a pointer to the fill data image NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast(tab_rec)); @@ -488,7 +488,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { OpRegImm(kOpCmp, r1, 0); OpIT(kCondNe, "T"); // Go expensive route - artLockObjectFromCode(self, obj); - LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR); + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR); ClobberCalleeSave(); LIR* call_inst = 
OpReg(kOpBlx, rARM_LR); MarkSafepointPC(call_inst); @@ -519,7 +519,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { OpIT(kCondEq, "EE"); StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3); // Go expensive route - UnlockObjectFromCode(obj); - LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR); + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, rARM_LR); MarkSafepointPC(call_inst); diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h index 1599941ef6..f1ccfa015e 100644 --- a/compiler/dex/quick/arm/codegen_arm.h +++ b/compiler/dex/quick/arm/codegen_arm.h @@ -28,7 +28,7 @@ class ArmMir2Lir : public Mir2Lir { // Required for target - codegen helpers. bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src, RegLocation rl_dest, int lit); - int LoadHelper(int offset); + int LoadHelper(ThreadOffset offset); LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg); LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi, int s_reg); @@ -153,12 +153,12 @@ class ArmMir2Lir : public Mir2Lir { LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value); LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2); LIR* OpTestSuspend(LIR* target); - LIR* OpThreadMem(OpKind op, int thread_offset); + LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset); LIR* OpVldm(int rBase, int count); LIR* OpVstm(int rBase, int count); void OpLea(int rBase, int reg1, int reg2, int scale, int offset); void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi); - void OpTlsCmp(int offset, int val); + void OpTlsCmp(ThreadOffset offset, int val); RegLocation ArgLoc(RegLocation loc); LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size, diff --git 
a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index 9db1016efa..c258019daa 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -498,7 +498,7 @@ void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) { LOG(FATAL) << "Unexpected use of OpLea for Arm"; } -void ArmMir2Lir::OpTlsCmp(int offset, int val) { +void ArmMir2Lir::OpTlsCmp(ThreadOffset offset, int val) { LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm"; } @@ -665,7 +665,7 @@ void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, */ RegLocation rl_result; if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) { - int func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul); + ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul); FlushAllRegs(); CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false); rl_result = GetReturnWide(false); @@ -956,7 +956,7 @@ void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. 
LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc index 6f3779879c..47d3d974ef 100644 --- a/compiler/dex/quick/arm/target_arm.cc +++ b/compiler/dex/quick/arm/target_arm.cc @@ -714,8 +714,8 @@ void ArmMir2Lir::FreeCallTemps() { FreeTemp(r3); } -int ArmMir2Lir::LoadHelper(int offset) { - LoadWordDisp(rARM_SELF, offset, rARM_LR); +int ArmMir2Lir::LoadHelper(ThreadOffset offset) { + LoadWordDisp(rARM_SELF, offset.Int32Value(), rARM_LR); return rARM_LR; } diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index afc8a66d8a..c63de69284 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ b/compiler/dex/quick/arm/utility_arm.cc @@ -1029,7 +1029,7 @@ LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) { return res; } -LIR* ArmMir2Lir::OpThreadMem(OpKind op, int thread_offset) { +LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) { LOG(FATAL) << "Unexpected use of OpThreadMem for Arm"; return NULL; } diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index ebe10bb57e..298d3898c8 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -208,12 +208,12 @@ void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest, void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) { FlushAllRegs(); /* Everything to home location */ - int func_offset; + ThreadOffset func_offset(-1); if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray); } else { - func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck); + func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck); } CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true); RegLocation 
rl_result = GetReturn(false); @@ -230,12 +230,12 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { int elems = info->num_arg_words; int type_idx = info->index; FlushAllRegs(); /* Everything to home location */ - int func_offset; + ThreadOffset func_offset(-1); if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArray); } else { - func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck); + func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayWithAccessCheck); } CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true); FreeTemp(TargetReg(kArg2)); @@ -408,9 +408,10 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do FreeTemp(rBase); } else { FlushAllRegs(); // Everything to home locations - int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) : - (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic) - : QUICK_ENTRYPOINT_OFFSET(pSet32Static)); + ThreadOffset setter_offset = + is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) + : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic) + : QUICK_ENTRYPOINT_OFFSET(pSet32Static)); CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true); } } @@ -483,9 +484,10 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, } } else { FlushAllRegs(); // Everything to home locations - int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) : - (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic) - : QUICK_ENTRYPOINT_OFFSET(pGet32Static)); + ThreadOffset getterOffset = + is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) + :(is_object ? 
QUICK_ENTRYPOINT_OFFSET(pGetObjStatic) + : QUICK_ENTRYPOINT_OFFSET(pGet32Static)); CallRuntimeHelperImm(getterOffset, field_idx, true); if (is_long_or_double) { RegLocation rl_result = GetReturnWide(rl_dest.fp); @@ -499,7 +501,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, void Mir2Lir::HandleSuspendLaunchPads() { int num_elems = suspend_launchpads_.Size(); - int helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspendFromCode); + ThreadOffset helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspend); for (int i = 0; i < num_elems; i++) { ResetRegPool(); ResetDefTracking(); @@ -539,13 +541,13 @@ void Mir2Lir::HandleThrowLaunchPads() { LIR* lab = throw_launchpads_.Get(i); current_dalvik_offset_ = lab->operands[1]; AppendLIR(lab); - int func_offset = 0; + ThreadOffset func_offset(-1); int v1 = lab->operands[2]; int v2 = lab->operands[3]; bool target_x86 = (cu_->instruction_set == kX86); switch (lab->operands[0]) { case kThrowNullPointer: - func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointerFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointer); break; case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index // v1 holds the constant array index. Mips/Arm uses v2 for length, x86 reloads. @@ -557,7 +559,7 @@ void Mir2Lir::HandleThrowLaunchPads() { // Make sure the following LoadConstant doesn't mess with kArg1. 
LockTemp(TargetReg(kArg1)); LoadConstant(TargetReg(kArg0), v2); - func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds); break; case kThrowArrayBounds: // Move v1 (array index) to kArg0 and v2 (array length) to kArg1 @@ -590,18 +592,18 @@ void Mir2Lir::HandleThrowLaunchPads() { OpRegCopy(TargetReg(kArg0), v1); } } - func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds); break; case kThrowDivZero: - func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZeroFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero); break; case kThrowNoSuchMethod: OpRegCopy(TargetReg(kArg0), v1); func_offset = - QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode); + QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod); break; case kThrowStackOverflow: - func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow); // Restore stack alignment if (target_x86) { OpRegImm(kOpAdd, TargetReg(kSp), frame_size_); @@ -664,9 +666,10 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, StoreValue(rl_dest, rl_result); } } else { - int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) : - (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance) - : QUICK_ENTRYPOINT_OFFSET(pGet32Instance)); + ThreadOffset getterOffset = + is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) + : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance) + : QUICK_ENTRYPOINT_OFFSET(pGet32Instance)); CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true); if (is_long_or_double) { RegLocation rl_result = GetReturnWide(rl_dest.fp); @@ -719,9 +722,10 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size, } } } else { - int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) : - (is_object ? 
QUICK_ENTRYPOINT_OFFSET(pSetObjInstance) - : QUICK_ENTRYPOINT_OFFSET(pSet32Instance)); + ThreadOffset setter_offset = + is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) + : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance) + : QUICK_ENTRYPOINT_OFFSET(pSet32Instance)); CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true); } } @@ -735,7 +739,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { type_idx)) { // Call out to helper which resolves type and verifies access. // Resolved type returned in kRet0. - CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess), type_idx, rl_method.low_reg, true); RegLocation rl_result = GetReturn(false); StoreValue(rl_dest, rl_result); @@ -764,7 +768,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { // TUNING: move slow path to end & remove unconditional branch LIR* target1 = NewLIR0(kPseudoTargetLabel); // Call out to helper, which will return resolved type in kArg0 - CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, rl_method.low_reg, true); RegLocation rl_result = GetReturn(false); StoreValue(rl_dest, rl_result); @@ -797,7 +801,7 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { LoadWordDisp(TargetReg(kArg2), mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0)); // Might call out to helper, which will return resolved string in kRet0 - int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode)); + int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString)); LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0)); LoadConstant(TargetReg(kArg1), string_idx); if (cu_->instruction_set == kThumb2) { @@ -821,7 +825,7 @@ void 
Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { branch->target = target; } else { DCHECK_EQ(cu_->instruction_set, kX86); - CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), TargetReg(kArg2), TargetReg(kArg1), true); } GenBarrier(); @@ -845,12 +849,12 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { FlushAllRegs(); /* Everything to home location */ // alloc will always check for resolution, do we also need to verify // access because the verifier was unable to? - int func_offset; + ThreadOffset func_offset(-1); if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks( cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject); } else { - func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck); } CallRuntimeHelperImmMethod(func_offset, type_idx, true); RegLocation rl_result = GetReturn(false); @@ -929,7 +933,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know if (needs_access_check) { // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kArg0 - CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess), type_idx, true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref @@ -951,7 +955,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL); // Not resolved // Call out to helper, which will return resolved type in kRet0 - 
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true); + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true); OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* reload Ref */ // Rejoin code paths @@ -986,7 +990,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know } } else { if (cu_->instruction_set == kThumb2) { - int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial)); if (!type_known_abstract) { /* Uses conditional nullification */ OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same? @@ -1003,13 +1007,13 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL); } if (cu_->instruction_set != kX86) { - int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial)); OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) FreeTemp(r_tgt); } else { OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); - OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial)); } } } @@ -1069,7 +1073,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kRet0 // InitializeTypeAndVerifyAccess(idx, method) - CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess), type_idx, TargetReg(kArg1), true); OpRegCopy(class_reg, 
TargetReg(kRet0)); // Align usage with fast path } else if (use_declaring_class) { @@ -1089,7 +1093,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // Not resolved // Call out to helper, which will return resolved type in kArg0 // InitializeTypeFromCode(idx, method) - CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, TargetReg(kArg1), true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path // Rejoin code paths @@ -1109,7 +1113,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ if (!type_known_abstract) { branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL); } - CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCast), TargetReg(kArg1), TargetReg(kArg2), true); /* branch target here */ LIR* target = NewLIR0(kPseudoTargetLabel); @@ -1168,7 +1172,7 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_shift) { - int func_offset = -1; // Make gcc happy + ThreadOffset func_offset(-1); switch (opcode) { case Instruction::SHL_LONG: @@ -1303,7 +1307,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, } rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv); } else { - int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod); + ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod); FlushAllRegs(); /* Send everything to home location */ LoadValueDirectFixed(rl_src2, TargetReg(kArg1)); int r_tgt = CallHelperSetup(func_offset); @@ -1558,7 +1562,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re FlushAllRegs(); /* Everything to home location */ 
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); Clobber(TargetReg(kArg0)); - int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod); + ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod); CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false); if (is_div) rl_result = GetReturn(false); @@ -1589,7 +1593,7 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, OpKind second_op = kOpBkpt; bool call_out = false; bool check_zero = false; - int func_offset; + ThreadOffset func_offset(-1); int ret_reg = TargetReg(kRet0); switch (opcode) { @@ -1709,7 +1713,7 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, } } -void Mir2Lir::GenConversionCall(int func_offset, +void Mir2Lir::GenConversionCall(ThreadOffset func_offset, RegLocation rl_dest, RegLocation rl_src) { /* * Don't optimize the register usage since it calls out to support diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 1b34e99a72..20d683a947 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -37,12 +37,12 @@ namespace art { * has a memory call operation, part 1 is a NOP for x86. For other targets, * load arguments between the two parts. */ -int Mir2Lir::CallHelperSetup(int helper_offset) { +int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) { return (cu_->instruction_set == kX86) ? 
0 : LoadHelper(helper_offset); } /* NOTE: if r_tgt is a temp, it will be freed following use */ -LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) { +LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc) { LIR* call_inst; if (cu_->instruction_set == kX86) { call_inst = OpThreadMem(kOpBlx, helper_offset); @@ -56,21 +56,22 @@ LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) { return call_inst; } -void Mir2Lir::CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc) { +void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); LoadConstant(TargetReg(kArg0), arg0); ClobberCalleeSave(); CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc) { +void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); OpRegCopy(TargetReg(kArg0), arg0); ClobberCalleeSave(); CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0, bool safepoint_pc) { +void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0, + bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); if (arg0.wide == 0) { LoadValueDirectFixed(arg0, TargetReg(kArg0)); @@ -81,7 +82,7 @@ void Mir2Lir::CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0, CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1, +void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); LoadConstant(TargetReg(kArg0), arg0); @@ -90,7 +91,7 @@ void Mir2Lir::CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1, CallHelper(r_tgt, 
helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperImmRegLocation(int helper_offset, int arg0, +void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0, RegLocation arg1, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); if (arg1.wide == 0) { @@ -103,7 +104,7 @@ void Mir2Lir::CallRuntimeHelperImmRegLocation(int helper_offset, int arg0, CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0, int arg1, +void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0, int arg1, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); LoadValueDirectFixed(arg0, TargetReg(kArg0)); @@ -112,7 +113,7 @@ void Mir2Lir::CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1, +void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); OpRegCopy(TargetReg(kArg1), arg1); @@ -121,8 +122,8 @@ void Mir2Lir::CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1, CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1, - bool safepoint_pc) { +void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1, + bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); OpRegCopy(TargetReg(kArg0), arg0); LoadConstant(TargetReg(kArg1), arg1); @@ -130,7 +131,7 @@ void Mir2Lir::CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1, CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperImmMethod(int helper_offset, int arg0, bool safepoint_pc) { +void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) { int r_tgt = 
CallHelperSetup(helper_offset); LoadCurrMethodDirect(TargetReg(kArg1)); LoadConstant(TargetReg(kArg0), arg0); @@ -138,7 +139,7 @@ void Mir2Lir::CallRuntimeHelperImmMethod(int helper_offset, int arg0, bool safep CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(int helper_offset, RegLocation arg0, +void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0, RegLocation arg1, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); if (arg0.wide == 0) { @@ -168,7 +169,8 @@ void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(int helper_offset, RegLoca CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1, bool safepoint_pc) { +void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1, + bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1 OpRegCopy(TargetReg(kArg0), arg0); @@ -177,7 +179,7 @@ void Mir2Lir::CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1, boo CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1, +void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1, int arg2, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1 @@ -188,7 +190,7 @@ void Mir2Lir::CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1, CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(int helper_offset, +void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset, int arg0, RegLocation arg2, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); LoadValueDirectFixed(arg2, TargetReg(kArg2)); @@ 
-198,7 +200,7 @@ void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(int helper_offset, CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperImmMethodImm(int helper_offset, int arg0, +void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0, int arg2, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); LoadCurrMethodDirect(TargetReg(kArg1)); @@ -208,7 +210,7 @@ void Mir2Lir::CallRuntimeHelperImmMethodImm(int helper_offset, int arg0, CallHelper(r_tgt, helper_offset, safepoint_pc); } -void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(int helper_offset, +void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset, int arg0, RegLocation arg1, RegLocation arg2, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); @@ -470,14 +472,14 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, // Disable sharpening direct_method = 0; } - int trampoline = (cu->instruction_set == kX86) ? 0 - : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline); + ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline); if (direct_method != 0) { switch (state) { case 0: // Load the trampoline target [sets kInvokeTgt]. if (cu->instruction_set != kX86) { - cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt)); + cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), + cg->TargetReg(kInvokeTgt)); } // Get the interface Method* [sets kArg0] if (direct_method != static_cast(-1)) { @@ -506,7 +508,8 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, cg->LoadCurrMethodDirect(cg->TargetReg(kArg0)); // Load the trampoline target [sets kInvokeTgt]. 
if (cu->instruction_set != kX86) { - cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt)); + cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), + cg->TargetReg(kInvokeTgt)); } break; case 1: // Get method->dex_cache_resolved_methods_ [set/use kArg0] @@ -528,7 +531,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, return state + 1; } -static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline, +static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset trampoline, int state, const MethodReference& target_method, uint32_t method_idx) { Mir2Lir* cg = static_cast(cu->cg.get()); @@ -539,7 +542,7 @@ static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline, if (state == 0) { if (cu->instruction_set != kX86) { // Load trampoline target - cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt)); + cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt)); } // Load kArg0 with method index CHECK_EQ(cu->dex_file, target_method.dex_file); @@ -555,7 +558,7 @@ static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); + ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -563,7 +566,7 @@ static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); + ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, 
target_method, 0); } @@ -571,7 +574,7 @@ static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); + ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -579,7 +582,7 @@ static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); + ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -589,7 +592,7 @@ static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu, uint32_t unused, uintptr_t unused2, uintptr_t unused3, InvokeType unused4) { - int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); + ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -1108,9 +1111,9 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) { RegLocation rl_dest = InlineTarget(info); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); - int offset = Thread::PeerOffset().Int32Value(); + ThreadOffset offset = Thread::PeerOffset(); if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) { - LoadWordDisp(TargetReg(kSelf), offset, rl_result.low_reg); + LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg); } else { CHECK(cu_->instruction_set == kX86); 
reinterpret_cast(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset); @@ -1406,7 +1409,7 @@ void Mir2Lir::GenInvoke(CallInfo* info) { call_inst = OpMem(kOpBlx, TargetReg(kArg0), mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value()); } else { - int trampoline = 0; + ThreadOffset trampoline(-1); switch (info->type) { case kInterface: trampoline = fast_path ? QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline) diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index 846c055ac2..eaae0e1964 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -247,7 +247,7 @@ void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { GenBarrier(); NewLIR0(kMipsCurrPC); // Really a jal to .+8 // Now, fill the branch delay slot with the helper load - int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData)); GenBarrier(); // Scheduling barrier // Construct BaseLabel and set up table base register @@ -272,7 +272,7 @@ void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { LockCallTemps(); // Prepare for explicit register usage GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags); // Go expensive route - artLockObjectFromCode(self, obj); - int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObject)); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, r_tgt); MarkSafepointPC(call_inst); @@ -287,7 +287,7 @@ void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { LockCallTemps(); // Prepare for explicit register usage GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags); // Go expensive route - UnlockObjectFromCode(obj); - int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObject)); 
ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, r_tgt); MarkSafepointPC(call_inst); diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h index 802ff625c9..6100396e5f 100644 --- a/compiler/dex/quick/mips/codegen_mips.h +++ b/compiler/dex/quick/mips/codegen_mips.h @@ -29,7 +29,7 @@ class MipsMir2Lir : public Mir2Lir { // Required for target - codegen utilities. bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src, RegLocation rl_dest, int lit); - int LoadHelper(int offset); + int LoadHelper(ThreadOffset offset); LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg); LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi, int s_reg); @@ -154,12 +154,12 @@ class MipsMir2Lir : public Mir2Lir { LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value); LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2); LIR* OpTestSuspend(LIR* target); - LIR* OpThreadMem(OpKind op, int thread_offset); + LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset); LIR* OpVldm(int rBase, int count); LIR* OpVstm(int rBase, int count); void OpLea(int rBase, int reg1, int reg2, int scale, int offset); void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi); - void OpTlsCmp(int offset, int val); + void OpTlsCmp(ThreadOffset offset, int val); LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size, int s_reg); diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc index 320301726b..9e2fea94de 100644 --- a/compiler/dex/quick/mips/fp_mips.cc +++ b/compiler/dex/quick/mips/fp_mips.cc @@ -176,7 +176,7 @@ void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) { bool wide = true; - int offset = -1; // Make gcc happy. 
+ ThreadOffset offset(-1); switch (opcode) { case Instruction::CMPL_FLOAT: diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc index bd044c66bd..4a48c87ed9 100644 --- a/compiler/dex/quick/mips/int_mips.cc +++ b/compiler/dex/quick/mips/int_mips.cc @@ -254,7 +254,7 @@ void MipsMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) { LOG(FATAL) << "Unexpected use of OpLea for Arm"; } -void MipsMir2Lir::OpTlsCmp(int offset, int val) { +void MipsMir2Lir::OpTlsCmp(ThreadOffset offset, int val) { LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm"; } @@ -579,7 +579,7 @@ void MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc index 0a17fb1078..7a9e91a994 100644 --- a/compiler/dex/quick/mips/target_mips.cc +++ b/compiler/dex/quick/mips/target_mips.cc @@ -505,8 +505,8 @@ void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) { * ensure that all branch instructions can be restarted if * there is a trap in the shadow. Allocate a temp register. 
*/ -int MipsMir2Lir::LoadHelper(int offset) { - LoadWordDisp(rMIPS_SELF, offset, r_T9); +int MipsMir2Lir::LoadHelper(ThreadOffset offset) { + LoadWordDisp(rMIPS_SELF, offset.Int32Value(), r_T9); return r_T9; } diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc index 68b26f1936..5d9ae33921 100644 --- a/compiler/dex/quick/mips/utility_mips.cc +++ b/compiler/dex/quick/mips/utility_mips.cc @@ -632,7 +632,7 @@ LIR* MipsMir2Lir::StoreBaseDispWide(int rBase, int displacement, return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong); } -LIR* MipsMir2Lir::OpThreadMem(OpKind op, int thread_offset) { +LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) { LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS"; return NULL; } diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index a34e9295e9..2794bf5e5b 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -424,42 +424,42 @@ class Mir2Lir : public Backend { RegLocation rl_src, int lit); void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2); - void GenConversionCall(int func_offset, RegLocation rl_dest, + void GenConversionCall(ThreadOffset func_offset, RegLocation rl_dest, RegLocation rl_src); void GenSuspendTest(int opt_flags); void GenSuspendTestAndBranch(int opt_flags, LIR* target); // Shared by all targets - implemented in gen_invoke.cc. 
- int CallHelperSetup(int helper_offset); - LIR* CallHelper(int r_tgt, int helper_offset, bool safepoint_pc); - void CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc); - void CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc); - void CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0, - bool safepoint_pc); - void CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1, + int CallHelperSetup(ThreadOffset helper_offset); + LIR* CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc); + void CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc); + void CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc); + void CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0, + bool safepoint_pc); + void CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1, bool safepoint_pc); - void CallRuntimeHelperImmRegLocation(int helper_offset, int arg0, + void CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0, RegLocation arg1, bool safepoint_pc); - void CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0, + void CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0, int arg1, bool safepoint_pc); - void CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1, + void CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1, bool safepoint_pc); - void CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1, + void CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1, bool safepoint_pc); - void CallRuntimeHelperImmMethod(int helper_offset, int arg0, + void CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc); - void CallRuntimeHelperRegLocationRegLocation(int helper_offset, + void CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0, RegLocation arg1, bool safepoint_pc); - 
void CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1, + void CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1, bool safepoint_pc); - void CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1, + void CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1, int arg2, bool safepoint_pc); - void CallRuntimeHelperImmMethodRegLocation(int helper_offset, int arg0, + void CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset, int arg0, RegLocation arg2, bool safepoint_pc); - void CallRuntimeHelperImmMethodImm(int helper_offset, int arg0, int arg2, + void CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0, int arg2, bool safepoint_pc); - void CallRuntimeHelperImmRegLocationRegLocation(int helper_offset, + void CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset, int arg0, RegLocation arg1, RegLocation arg2, bool safepoint_pc); void GenInvoke(CallInfo* info); @@ -526,7 +526,7 @@ class Mir2Lir : public Backend { // Required for target - codegen helpers. 
virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src, RegLocation rl_dest, int lit) = 0; - virtual int LoadHelper(int offset) = 0; + virtual int LoadHelper(ThreadOffset offset) = 0; virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0; virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi, int s_reg) = 0; @@ -674,14 +674,14 @@ class Mir2Lir : public Backend { virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) = 0; virtual LIR* OpTestSuspend(LIR* target) = 0; - virtual LIR* OpThreadMem(OpKind op, int thread_offset) = 0; + virtual LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset) = 0; virtual LIR* OpVldm(int rBase, int count) = 0; virtual LIR* OpVstm(int rBase, int count) = 0; virtual void OpLea(int rBase, int reg1, int reg2, int scale, int offset) = 0; virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi) = 0; - virtual void OpTlsCmp(int offset, int val) = 0; + virtual void OpTlsCmp(ThreadOffset offset, int val) = 0; virtual bool InexpensiveConstantInt(int32_t value) = 0; virtual bool InexpensiveConstantFloat(int32_t value) = 0; virtual bool InexpensiveConstantLong(int64_t value) = 0; diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index 1c395def55..6e3e55fc4e 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -148,7 +148,7 @@ void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { NewLIR1(kX86StartOfMethod, rX86_ARG2); NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast(tab_rec)); NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2); - CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData), rX86_ARG0, rX86_ARG1, true); } @@ -165,7 +165,7 @@ void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { 
NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX); LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq); // If lock is held, go the expensive route - artLockObjectFromCode(self, obj); - CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true); + CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObject), rCX, true); branch->target = NewLIR0(kPseudoTargetLabel); } @@ -185,7 +185,7 @@ void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { LIR* branch2 = NewLIR1(kX86Jmp8, 0); branch->target = NewLIR0(kPseudoTargetLabel); // Otherwise, go the expensive route - UnlockObjectFromCode(obj); - CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true); + CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObject), rAX, true); branch2->target = NewLIR0(kPseudoTargetLabel); } @@ -243,7 +243,7 @@ void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) { if (!skip_overflow_check) { // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad LIR* tgt = RawLIR(0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0); - OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value()); + OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset()); OpCondBranch(kCondUlt, tgt); // Remember branch target - will process later throw_launchpads_.Insert(tgt); diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h index edb5ae57c2..21328d5440 100644 --- a/compiler/dex/quick/x86/codegen_x86.h +++ b/compiler/dex/quick/x86/codegen_x86.h @@ -29,7 +29,7 @@ class X86Mir2Lir : public Mir2Lir { // Required for target - codegen helpers. 
bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src, RegLocation rl_dest, int lit); - int LoadHelper(int offset); + int LoadHelper(ThreadOffset offset); LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg); LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi, int s_reg); @@ -154,14 +154,14 @@ class X86Mir2Lir : public Mir2Lir { LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value); LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2); LIR* OpTestSuspend(LIR* target); - LIR* OpThreadMem(OpKind op, int thread_offset); + LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset); LIR* OpVldm(int rBase, int count); LIR* OpVstm(int rBase, int count); void OpLea(int rBase, int reg1, int reg2, int scale, int offset); void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi); - void OpTlsCmp(int offset, int val); + void OpTlsCmp(ThreadOffset offset, int val); - void OpRegThreadMem(OpKind op, int r_dest, int thread_offset); + void OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset); void SpillCoreRegs(); void UnSpillCoreRegs(); static const X86EncodingMap EncodingMap[kX86Last]; diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index 0b4b4be04e..377d134c80 100644 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -240,8 +240,8 @@ void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) { NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset); } -void X86Mir2Lir::OpTlsCmp(int offset, int val) { - NewLIR2(kX86Cmp16TI8, offset, val); +void X86Mir2Lir::OpTlsCmp(ThreadOffset offset, int val) { + NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val); } bool X86Mir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) { @@ -285,7 +285,7 @@ void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) { // Test suspend flag, return target of taken suspend branch LIR* 
X86Mir2Lir::OpTestSuspend(LIR* target) { - OpTlsCmp(Thread::ThreadFlagsOffset().Int32Value(), 0); + OpTlsCmp(Thread::ThreadFlagsOffset(), 0); return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target); } @@ -403,7 +403,7 @@ void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) { StoreValueWide(rl_dest, rl_result); } -void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) { +void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset) { X86OpCode opcode = kX86Bkpt; switch (op) { case kOpCmp: opcode = kX86Cmp32RT; break; @@ -412,7 +412,7 @@ void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) { LOG(FATAL) << "Bad opcode: " << op; break; } - NewLIR2(opcode, r_dest, thread_offset); + NewLIR2(opcode, r_dest, thread_offset.Int32Value()); } /* @@ -532,7 +532,7 @@ void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. 
LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc index 2c9b3c837c..699f3ae5bb 100644 --- a/compiler/dex/quick/x86/target_x86.cc +++ b/compiler/dex/quick/x86/target_x86.cc @@ -524,7 +524,7 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph, } // Not used in x86 -int X86Mir2Lir::LoadHelper(int offset) { +int X86Mir2Lir::LoadHelper(ThreadOffset offset) { LOG(FATAL) << "Unexpected use of LoadHelper in x86"; return INVALID_REG; } diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc index e15995fef4..c519bfec44 100644 --- a/compiler/dex/quick/x86/utility_x86.cc +++ b/compiler/dex/quick/x86/utility_x86.cc @@ -292,7 +292,7 @@ LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src, return OpRegImm(op, r_dest, value); } -LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) { +LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) { X86OpCode opcode = kX86Bkpt; switch (op) { case kOpBlx: opcode = kX86CallT; break; @@ -300,7 +300,7 @@ LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) { LOG(FATAL) << "Bad opcode: " << op; break; } - return NewLIR1(opcode, thread_offset); + return NewLIR1(opcode, thread_offset.Int32Value()); } LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) { diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index e7ba402b21..56b629c576 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -41,9 +41,9 @@ #include "mirror/throwable.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" -#include "stubs/stubs.h" #include "thread.h" #include "thread_pool.h" +#include "trampolines/trampoline_compiler.h" #include "verifier/method_verifier.h" #if defined(ART_USE_PORTABLE_COMPILER) @@ -433,64 +433,38 @@ CompilerTls* CompilerDriver::GetTls() { return res; } +const std::vector* 
CompilerDriver::CreateInterpreterToInterpreterBridge() const { + return CreateTrampoline(instruction_set_, kInterpreterAbi, + INTERPRETER_ENTRYPOINT_OFFSET(pInterpreterToInterpreterBridge)); +} + +const std::vector* CompilerDriver::CreateInterpreterToCompiledCodeBridge() const { + return CreateTrampoline(instruction_set_, kInterpreterAbi, + INTERPRETER_ENTRYPOINT_OFFSET(pInterpreterToCompiledCodeBridge)); +} + +const std::vector* CompilerDriver::CreateJniDlsymLookup() const { + return CreateTrampoline(instruction_set_, kJniAbi, JNI_ENTRYPOINT_OFFSET(pDlsymLookup)); +} + const std::vector* CompilerDriver::CreatePortableResolutionTrampoline() const { - switch (instruction_set_) { - case kArm: - case kThumb2: - return arm::CreatePortableResolutionTrampoline(); - case kMips: - return mips::CreatePortableResolutionTrampoline(); - case kX86: - return x86::CreatePortableResolutionTrampoline(); - default: - LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_; - return NULL; - } + return CreateTrampoline(instruction_set_, kPortableAbi, + PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampoline)); } -const std::vector* CompilerDriver::CreateQuickResolutionTrampoline() const { - switch (instruction_set_) { - case kArm: - case kThumb2: - return arm::CreateQuickResolutionTrampoline(); - case kMips: - return mips::CreateQuickResolutionTrampoline(); - case kX86: - return x86::CreateQuickResolutionTrampoline(); - default: - LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_; - return NULL; - } +const std::vector* CompilerDriver::CreatePortableToInterpreterBridge() const { + return CreateTrampoline(instruction_set_, kPortableAbi, + PORTABLE_ENTRYPOINT_OFFSET(pPortableToInterpreterBridge)); } -const std::vector* CompilerDriver::CreateInterpreterToInterpreterEntry() const { - switch (instruction_set_) { - case kArm: - case kThumb2: - return arm::CreateInterpreterToInterpreterEntry(); - case kMips: - return mips::CreateInterpreterToInterpreterEntry(); - case kX86: - 
return x86::CreateInterpreterToInterpreterEntry(); - default: - LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_; - return NULL; - } +const std::vector* CompilerDriver::CreateQuickResolutionTrampoline() const { + return CreateTrampoline(instruction_set_, kQuickAbi, + QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampoline)); } -const std::vector* CompilerDriver::CreateInterpreterToQuickEntry() const { - switch (instruction_set_) { - case kArm: - case kThumb2: - return arm::CreateInterpreterToQuickEntry(); - case kMips: - return mips::CreateInterpreterToQuickEntry(); - case kX86: - return x86::CreateInterpreterToQuickEntry(); - default: - LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_; - return NULL; - } +const std::vector* CompilerDriver::CreateQuickToInterpreterBridge() const { + return CreateTrampoline(instruction_set_, kQuickAbi, + QUICK_ENTRYPOINT_OFFSET(pQuickToInterpreterBridge)); } void CompilerDriver::CompileAll(jobject class_loader, diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index 18f852dc6f..b5222c99b8 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -48,6 +48,17 @@ enum CompilerBackend { kNoBackend }; +enum EntryPointCallingConvention { + // ABI of invocations to a method's interpreter entry point. + kInterpreterAbi, + // ABI of calls to a method's native code, only used for native methods. + kJniAbi, + // ABI of calls to a method's portable code entry point. + kPortableAbi, + // ABI of calls to a method's quick code entry point. + kQuickAbi +}; + enum DexToDexCompilationLevel { kDontDexToDexCompile, // Only meaning wrt image time interpretation. kRequired, // Dex-to-dex compilation required for correctness. @@ -110,13 +121,19 @@ class CompilerDriver { CompilerTls* GetTls(); // Generate the trampolines that are invoked by unresolved direct methods. 
+ const std::vector* CreateInterpreterToInterpreterBridge() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const std::vector* CreateInterpreterToCompiledCodeBridge() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + const std::vector* CreateJniDlsymLookup() const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const std::vector* CreatePortableResolutionTrampoline() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const std::vector* CreateQuickResolutionTrampoline() const + const std::vector* CreatePortableToInterpreterBridge() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const std::vector* CreateInterpreterToInterpreterEntry() const + const std::vector* CreateQuickResolutionTrampoline() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - const std::vector* CreateInterpreterToQuickEntry() const + const std::vector* CreateQuickToInterpreterBridge() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); CompiledClass* GetCompiledClass(ClassReference ref) const diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index e73d021c0c..550d642753 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -90,11 +90,23 @@ bool ImageWriter::Write(const std::string& image_filename, return false; } class_linker->RegisterOatFile(*oat_file_); - interpreter_to_interpreter_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToInterpreterEntryOffset(); - interpreter_to_quick_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToQuickEntryOffset(); - portable_resolution_trampoline_offset_ = oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset(); - quick_resolution_trampoline_offset_ = oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset(); + interpreter_to_interpreter_bridge_offset_ = + oat_file_->GetOatHeader().GetInterpreterToInterpreterBridgeOffset(); + interpreter_to_compiled_code_bridge_offset_ = + oat_file_->GetOatHeader().GetInterpreterToCompiledCodeBridgeOffset(); + + jni_dlsym_lookup_offset_ = 
oat_file_->GetOatHeader().GetJniDlsymLookupOffset(); + + portable_resolution_trampoline_offset_ = + oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset(); + portable_to_interpreter_bridge_offset_ = + oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset(); + + quick_resolution_trampoline_offset_ = + oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset(); + quick_to_interpreter_bridge_offset_ = + oat_file_->GetOatHeader().GetQuickToInterpreterBridgeOffset(); { Thread::Current()->TransitionFromSuspendedToRunnable(); PruneNonImageClasses(); // Remove junk @@ -490,57 +502,62 @@ void ImageWriter::FixupClass(const Class* orig, Class* copy) { void ImageWriter::FixupMethod(const AbstractMethod* orig, AbstractMethod* copy) { FixupInstanceFields(orig, copy); - // OatWriter replaces the code_ with an offset value. - // Here we readjust to a pointer relative to oat_begin_ - if (orig->IsAbstract()) { - // Code for abstract methods is set to the abstract method error stub when we load the image. - copy->SetEntryPointFromCompiledCode(NULL); - copy->SetEntryPointFromInterpreter(reinterpret_cast - (GetOatAddress(interpreter_to_interpreter_entry_offset_))); - return; - } else { - copy->SetEntryPointFromInterpreter(reinterpret_cast - (GetOatAddress(interpreter_to_quick_entry_offset_))); - } + // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to + // oat_begin_ - if (orig == Runtime::Current()->GetResolutionMethod()) { + // The resolution method has a special trampoline to call. + if (UNLIKELY(orig == Runtime::Current()->GetResolutionMethod())) { #if defined(ART_USE_PORTABLE_COMPILER) copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_)); #else copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_)); #endif - return; - } - - // Use original code if it exists. Otherwise, set the code pointer to the resolution trampoline. 
- const byte* code = GetOatAddress(orig->GetOatCodeOffset()); - if (code != NULL) { - copy->SetEntryPointFromCompiledCode(code); } else { + // We assume all methods have code. If they don't currently then we set them to the use the + // resolution trampoline. Abstract methods never have code and so we need to make sure their + // use results in an AbstractMethodError. We use the interpreter to achieve this. + if (UNLIKELY(orig->IsAbstract())) { #if defined(ART_USE_PORTABLE_COMPILER) - copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_)); + copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_to_interpreter_bridge_offset_)); #else - copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_)); + copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_to_interpreter_bridge_offset_)); #endif - } - - if (orig->IsNative()) { - // The native method's pointer is set to a stub to lookup via dlsym when we load the image. - // Note this is not the code_ pointer, that is handled above. 
- copy->SetNativeMethod(NULL); - } else { - // normal (non-abstract non-native) methods have mapping tables to relocate - uint32_t mapping_table_off = orig->GetOatMappingTableOffset(); - const byte* mapping_table = GetOatAddress(mapping_table_off); - copy->SetMappingTable(reinterpret_cast(mapping_table)); - - uint32_t vmap_table_offset = orig->GetOatVmapTableOffset(); - const byte* vmap_table = GetOatAddress(vmap_table_offset); - copy->SetVmapTable(reinterpret_cast(vmap_table)); - - uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset(); - const byte* native_gc_map = GetOatAddress(native_gc_map_offset); - copy->SetNativeGcMap(reinterpret_cast(native_gc_map)); + copy->SetEntryPointFromInterpreter(reinterpret_cast + (GetOatAddress(interpreter_to_interpreter_bridge_offset_))); + } else { + copy->SetEntryPointFromInterpreter(reinterpret_cast + (GetOatAddress(interpreter_to_compiled_code_bridge_offset_))); + // Use original code if it exists. Otherwise, set the code pointer to the resolution + // trampoline. + const byte* code = GetOatAddress(orig->GetOatCodeOffset()); + if (code != NULL) { + copy->SetEntryPointFromCompiledCode(code); + } else { +#if defined(ART_USE_PORTABLE_COMPILER) + copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_)); +#else + copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_)); +#endif + } + if (orig->IsNative()) { + // The native method's pointer is set to a stub to lookup via dlsym. + // Note this is not the code_ pointer, that is handled above. + copy->SetNativeMethod(GetOatAddress(jni_dlsym_lookup_offset_)); + } else { + // Normal (non-abstract non-native) methods have various tables to relocate. 
+ uint32_t mapping_table_off = orig->GetOatMappingTableOffset(); + const byte* mapping_table = GetOatAddress(mapping_table_off); + copy->SetMappingTable(reinterpret_cast(mapping_table)); + + uint32_t vmap_table_offset = orig->GetOatVmapTableOffset(); + const byte* vmap_table = GetOatAddress(vmap_table_offset); + copy->SetVmapTable(reinterpret_cast(vmap_table)); + + uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset(); + const byte* native_gc_map = GetOatAddress(native_gc_map_offset); + copy->SetNativeGcMap(reinterpret_cast(native_gc_map)); + } + } } } diff --git a/compiler/image_writer.h b/compiler/image_writer.h index e43ec6338f..545534fff7 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -39,8 +39,8 @@ class ImageWriter { public: explicit ImageWriter(const CompilerDriver& compiler_driver) : compiler_driver_(compiler_driver), oat_file_(NULL), image_end_(0), image_begin_(NULL), - oat_data_begin_(NULL), interpreter_to_interpreter_entry_offset_(0), - interpreter_to_quick_entry_offset_(0), portable_resolution_trampoline_offset_(0), + oat_data_begin_(NULL), interpreter_to_interpreter_bridge_offset_(0), + interpreter_to_compiled_code_bridge_offset_(0), portable_resolution_trampoline_offset_(0), quick_resolution_trampoline_offset_(0) {} ~ImageWriter() {} @@ -195,10 +195,13 @@ class ImageWriter { const byte* oat_data_begin_; // Offset from oat_data_begin_ to the stubs. 
- uint32_t interpreter_to_interpreter_entry_offset_; - uint32_t interpreter_to_quick_entry_offset_; + uint32_t interpreter_to_interpreter_bridge_offset_; + uint32_t interpreter_to_compiled_code_bridge_offset_; + uint32_t jni_dlsym_lookup_offset_; uint32_t portable_resolution_trampoline_offset_; + uint32_t portable_to_interpreter_bridge_offset_; uint32_t quick_resolution_trampoline_offset_; + uint32_t quick_to_interpreter_bridge_offset_; // DexCaches seen while scanning for fixing up CodeAndDirectMethods typedef std::set Set; diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index b069fbd4a1..9713fe9da9 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -172,8 +172,8 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler, // can occur. The result is the saved JNI local state that is restored by the exit call. We // abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer // arguments. - uintptr_t jni_start = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized) - : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart); + ThreadOffset jni_start = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized) + : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); FrameOffset locked_object_sirt_offset(0); if (is_synchronized) { @@ -301,7 +301,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler, // 12. Call into JNI method end possibly passing a returned reference, the method and the current // thread. end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size)); - uintptr_t jni_end; + ThreadOffset jni_end(-1); if (reference_return) { // Pass result. jni_end = is_synchronized ? 
QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized) diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 5eb837b25c..21c5317b69 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -51,11 +51,14 @@ OatWriter::OatWriter(const std::vector& dex_files, size_oat_header_(0), size_oat_header_image_file_location_(0), size_dex_file_(0), - size_interpreter_to_interpreter_entry_(0), - size_interpreter_to_quick_entry_(0), + size_interpreter_to_interpreter_bridge_(0), + size_interpreter_to_compiled_code_bridge_(0), + size_jni_dlsym_lookup_(0), size_portable_resolution_trampoline_(0), + size_portable_to_interpreter_bridge_(0), size_quick_resolution_trampoline_(0), - size_stubs_alignment_(0), + size_quick_to_interpreter_bridge_(0), + size_trampoline_alignment_(0), size_code_size_(0), size_code_(0), size_code_alignment_(0), @@ -176,30 +179,30 @@ size_t OatWriter::InitOatCode(size_t offset) { size_executable_offset_alignment_ = offset - old_offset; if (compiler_driver_->IsImage()) { InstructionSet instruction_set = compiler_driver_->GetInstructionSet(); - oat_header_->SetInterpreterToInterpreterEntryOffset(offset); - interpreter_to_interpreter_entry_.reset( - compiler_driver_->CreateInterpreterToInterpreterEntry()); - offset += interpreter_to_interpreter_entry_->size(); - - offset = CompiledCode::AlignCode(offset, instruction_set); - oat_header_->SetInterpreterToQuickEntryOffset(offset); - interpreter_to_quick_entry_.reset(compiler_driver_->CreateInterpreterToQuickEntry()); - offset += interpreter_to_quick_entry_->size(); - - offset = CompiledCode::AlignCode(offset, instruction_set); - oat_header_->SetPortableResolutionTrampolineOffset(offset); - portable_resolution_trampoline_.reset(compiler_driver_->CreatePortableResolutionTrampoline()); - offset += portable_resolution_trampoline_->size(); - - offset = CompiledCode::AlignCode(offset, instruction_set); - oat_header_->SetQuickResolutionTrampolineOffset(offset); - 
quick_resolution_trampoline_.reset(compiler_driver_->CreateQuickResolutionTrampoline()); - offset += quick_resolution_trampoline_->size(); + + #define DO_TRAMPOLINE(field, fn_name) \ + offset = CompiledCode::AlignCode(offset, instruction_set); \ + oat_header_->Set ## fn_name ## Offset(offset); \ + field.reset(compiler_driver_->Create ## fn_name()); \ + offset += field->size(); + + DO_TRAMPOLINE(interpreter_to_interpreter_bridge_, InterpreterToInterpreterBridge); + DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_, InterpreterToCompiledCodeBridge); + DO_TRAMPOLINE(jni_dlsym_lookup_, JniDlsymLookup); + DO_TRAMPOLINE(portable_resolution_trampoline_, PortableResolutionTrampoline); + DO_TRAMPOLINE(portable_to_interpreter_bridge_, PortableToInterpreterBridge); + DO_TRAMPOLINE(quick_resolution_trampoline_, QuickResolutionTrampoline); + DO_TRAMPOLINE(quick_to_interpreter_bridge_, QuickToInterpreterBridge); + + #undef DO_TRAMPOLINE } else { - oat_header_->SetInterpreterToInterpreterEntryOffset(0); - oat_header_->SetInterpreterToQuickEntryOffset(0); + oat_header_->SetInterpreterToInterpreterBridgeOffset(0); + oat_header_->SetInterpreterToCompiledCodeBridgeOffset(0); + oat_header_->SetJniDlsymLookupOffset(0); oat_header_->SetPortableResolutionTrampolineOffset(0); + oat_header_->SetPortableToInterpreterBridgeOffset(0); oat_header_->SetQuickResolutionTrampolineOffset(0); + oat_header_->SetQuickToInterpreterBridgeOffset(0); } return offset; } @@ -469,11 +472,14 @@ bool OatWriter::Write(OutputStream& out) { DO_STAT(size_oat_header_); DO_STAT(size_oat_header_image_file_location_); DO_STAT(size_dex_file_); - DO_STAT(size_interpreter_to_interpreter_entry_); - DO_STAT(size_interpreter_to_quick_entry_); + DO_STAT(size_interpreter_to_interpreter_bridge_); + DO_STAT(size_interpreter_to_compiled_code_bridge_); + DO_STAT(size_jni_dlsym_lookup_); DO_STAT(size_portable_resolution_trampoline_); + DO_STAT(size_portable_to_interpreter_bridge_); DO_STAT(size_quick_resolution_trampoline_); - 
DO_STAT(size_stubs_alignment_); + DO_STAT(size_quick_to_interpreter_bridge_); + DO_STAT(size_trampoline_alignment_); DO_STAT(size_code_size_); DO_STAT(size_code_); DO_STAT(size_code_alignment_); @@ -545,52 +551,30 @@ size_t OatWriter::WriteCode(OutputStream& out, const size_t file_offset) { DCHECK_OFFSET(); if (compiler_driver_->IsImage()) { InstructionSet instruction_set = compiler_driver_->GetInstructionSet(); - if (!out.WriteFully(&(*interpreter_to_interpreter_entry_)[0], - interpreter_to_interpreter_entry_->size())) { - PLOG(ERROR) << "Failed to write interpreter to interpreter entry to " << out.GetLocation(); - return false; - } - size_interpreter_to_interpreter_entry_ += interpreter_to_interpreter_entry_->size(); - relative_offset += interpreter_to_interpreter_entry_->size(); - DCHECK_OFFSET(); - - uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); - uint32_t alignment_padding = aligned_offset - relative_offset; - out.Seek(alignment_padding, kSeekCurrent); - size_stubs_alignment_ += alignment_padding; - if (!out.WriteFully(&(*interpreter_to_quick_entry_)[0], interpreter_to_quick_entry_->size())) { - PLOG(ERROR) << "Failed to write interpreter to quick entry to " << out.GetLocation(); - return false; - } - size_interpreter_to_quick_entry_ += interpreter_to_quick_entry_->size(); - relative_offset += alignment_padding + interpreter_to_quick_entry_->size(); - DCHECK_OFFSET(); - aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); - alignment_padding = aligned_offset - relative_offset; - out.Seek(alignment_padding, kSeekCurrent); - size_stubs_alignment_ += alignment_padding; - if (!out.WriteFully(&(*portable_resolution_trampoline_)[0], - portable_resolution_trampoline_->size())) { - PLOG(ERROR) << "Failed to write portable resolution trampoline to " << out.GetLocation(); - return false; - } - size_portable_resolution_trampoline_ += portable_resolution_trampoline_->size(); - relative_offset += 
alignment_padding + portable_resolution_trampoline_->size(); - DCHECK_OFFSET(); - - aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); - alignment_padding = aligned_offset - relative_offset; - out.Seek(alignment_padding, kSeekCurrent); - size_stubs_alignment_ += alignment_padding; - if (!out.WriteFully(&(*quick_resolution_trampoline_)[0], - quick_resolution_trampoline_->size())) { - PLOG(ERROR) << "Failed to write quick resolution trampoline to " << out.GetLocation(); - return false; - } - size_quick_resolution_trampoline_ += quick_resolution_trampoline_->size(); - relative_offset += alignment_padding + quick_resolution_trampoline_->size(); - DCHECK_OFFSET(); + #define DO_TRAMPOLINE(field) \ + do { \ + uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); \ + uint32_t alignment_padding = aligned_offset - relative_offset; \ + out.Seek(alignment_padding, kSeekCurrent); \ + size_trampoline_alignment_ += alignment_padding; \ + if (!out.WriteFully(&(*field)[0], field->size())) { \ + PLOG(ERROR) << "Failed to write " # field " to " << out.GetLocation(); \ + return false; \ + } \ + size_ ## field += field->size(); \ + relative_offset += alignment_padding + field->size(); \ + DCHECK_OFFSET(); \ + } while (false) + + DO_TRAMPOLINE(interpreter_to_interpreter_bridge_); + DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_); + DO_TRAMPOLINE(jni_dlsym_lookup_); + DO_TRAMPOLINE(portable_resolution_trampoline_); + DO_TRAMPOLINE(portable_to_interpreter_bridge_); + DO_TRAMPOLINE(quick_resolution_trampoline_); + DO_TRAMPOLINE(quick_to_interpreter_bridge_); + #undef DO_TRAMPOLINE } return relative_offset; } diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h index f2c5626b4d..e6cc0bce80 100644 --- a/compiler/oat_writer.h +++ b/compiler/oat_writer.h @@ -181,10 +181,13 @@ class OatWriter { OatHeader* oat_header_; std::vector oat_dex_files_; std::vector oat_classes_; - UniquePtr > interpreter_to_interpreter_entry_; - 
UniquePtr > interpreter_to_quick_entry_; + UniquePtr > interpreter_to_interpreter_bridge_; + UniquePtr > interpreter_to_compiled_code_bridge_; + UniquePtr > jni_dlsym_lookup_; UniquePtr > portable_resolution_trampoline_; + UniquePtr > portable_to_interpreter_bridge_; UniquePtr > quick_resolution_trampoline_; + UniquePtr > quick_to_interpreter_bridge_; // output stats uint32_t size_dex_file_alignment_; @@ -192,11 +195,14 @@ class OatWriter { uint32_t size_oat_header_; uint32_t size_oat_header_image_file_location_; uint32_t size_dex_file_; - uint32_t size_interpreter_to_interpreter_entry_; - uint32_t size_interpreter_to_quick_entry_; + uint32_t size_interpreter_to_interpreter_bridge_; + uint32_t size_interpreter_to_compiled_code_bridge_; + uint32_t size_jni_dlsym_lookup_; uint32_t size_portable_resolution_trampoline_; + uint32_t size_portable_to_interpreter_bridge_; uint32_t size_quick_resolution_trampoline_; - uint32_t size_stubs_alignment_; + uint32_t size_quick_to_interpreter_bridge_; + uint32_t size_trampoline_alignment_; uint32_t size_code_size_; uint32_t size_code_; uint32_t size_code_alignment_; diff --git a/compiler/stubs/portable/stubs.cc b/compiler/stubs/portable/stubs.cc deleted file mode 100644 index def43e2bd2..0000000000 --- a/compiler/stubs/portable/stubs.cc +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "stubs/stubs.h" - -#include "entrypoints/quick/quick_entrypoints.h" -#include "jni_internal.h" -#include "utils/arm/assembler_arm.h" -#include "utils/mips/assembler_mips.h" -#include "utils/x86/assembler_x86.h" -#include "stack_indirect_reference_table.h" -#include "sirt_ref.h" - -#define __ assembler-> - -namespace art { - -namespace arm { -const std::vector* CreatePortableResolutionTrampoline() { - UniquePtr assembler(static_cast(Assembler::Create(kArm))); - RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR); - - __ PushList(save); - __ LoadFromOffset(kLoadWord, R12, TR, - PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); - __ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3 - __ mov(R2, ShifterOperand(SP)); // Pass sp for Method** callee_addr - __ IncreaseFrameSize(12); // 3 words of space for alignment - // Call to resolution trampoline (callee, receiver, callee_addr, Thread*) - __ blx(R12); - __ mov(R12, ShifterOperand(R0)); // Save code address returned into R12 - __ DecreaseFrameSize(12); - __ PopList(save); - __ cmp(R12, ShifterOperand(0)); - __ bx(R12, NE); // If R12 != 0 tail call method's code - __ bx(LR); // Return to caller to handle exception - - assembler->EmitSlowPaths(); - size_t cs = assembler->CodeSize(); - UniquePtr > resolution_trampoline(new std::vector(cs)); - MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size()); - assembler->FinalizeInstructions(code); - - return resolution_trampoline.release(); -} -} // namespace arm - -namespace mips { -const std::vector* CreatePortableResolutionTrampoline() { - UniquePtr assembler(static_cast(Assembler::Create(kMips))); - // Build frame and save argument registers and RA. 
- __ AddConstant(SP, SP, -32); - __ StoreToOffset(kStoreWord, RA, SP, 28); - __ StoreToOffset(kStoreWord, A3, SP, 12); - __ StoreToOffset(kStoreWord, A2, SP, 8); - __ StoreToOffset(kStoreWord, A1, SP, 4); - __ StoreToOffset(kStoreWord, A0, SP, 0); - - __ LoadFromOffset(kLoadWord, T9, S1, - PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); - __ Move(A3, S1); // Pass Thread::Current() in A3 - __ Move(A2, SP); // Pass SP for Method** callee_addr - __ Jalr(T9); // Call to resolution trampoline (callee, receiver, callee_addr, Thread*) - - // Restore frame, argument registers, and RA. - __ LoadFromOffset(kLoadWord, A0, SP, 0); - __ LoadFromOffset(kLoadWord, A1, SP, 4); - __ LoadFromOffset(kLoadWord, A2, SP, 8); - __ LoadFromOffset(kLoadWord, A3, SP, 12); - __ LoadFromOffset(kLoadWord, RA, SP, 28); - __ AddConstant(SP, SP, 32); - - Label resolve_fail; - __ EmitBranch(V0, ZERO, &resolve_fail, true); - __ Jr(V0); // If V0 != 0 tail call method's code - __ Bind(&resolve_fail, false); - __ Jr(RA); // Return to caller to handle exception - - assembler->EmitSlowPaths(); - size_t cs = assembler->CodeSize(); - UniquePtr > resolution_trampoline(new std::vector(cs)); - MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size()); - assembler->FinalizeInstructions(code); - - return resolution_trampoline.release(); -} -} // namespace mips - -namespace x86 { -const std::vector* CreatePortableResolutionTrampoline() { - UniquePtr assembler(static_cast(Assembler::Create(kX86))); - - __ pushl(EBP); - __ movl(EBP, ESP); // save ESP - __ subl(ESP, Immediate(8)); // Align stack - __ movl(EAX, Address(EBP, 8)); // Method* called - __ leal(EDX, Address(EBP, 8)); // Method** called_addr - __ fs()->pushl(Address::Absolute(Thread::SelfOffset())); // pass thread - __ pushl(EDX); // pass called_addr - __ pushl(ECX); // pass receiver - __ pushl(EAX); // pass called - // Call to resolve method. 
- __ Call(ThreadOffset(PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)), - X86ManagedRegister::FromCpuRegister(ECX)); - __ leave(); - - Label resolve_fail; // forward declaration - __ cmpl(EAX, Immediate(0)); - __ j(kEqual, &resolve_fail); - __ jmp(EAX); - // Tail call to intended method. - __ Bind(&resolve_fail); - __ ret(); - - assembler->EmitSlowPaths(); - size_t cs = assembler->CodeSize(); - UniquePtr > resolution_trampoline(new std::vector(cs)); - MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size()); - assembler->FinalizeInstructions(code); - - return resolution_trampoline.release(); -} -} // namespace x86 - -} // namespace art diff --git a/compiler/stubs/quick/stubs.cc b/compiler/stubs/quick/stubs.cc deleted file mode 100644 index 912f1c0746..0000000000 --- a/compiler/stubs/quick/stubs.cc +++ /dev/null @@ -1,263 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "stubs/stubs.h" - -#include "entrypoints/quick/quick_entrypoints.h" -#include "jni_internal.h" -#include "utils/arm/assembler_arm.h" -#include "utils/mips/assembler_mips.h" -#include "utils/x86/assembler_x86.h" -#include "sirt_ref.h" -#include "stack_indirect_reference_table.h" - -#define __ assembler-> - -namespace art { - -namespace arm { -const std::vector* CreateQuickResolutionTrampoline() { - UniquePtr assembler(static_cast(Assembler::Create(kArm))); - // | Out args | - // | Method* | <- SP on entry - // | LR | return address into caller - // | ... | callee saves - // | R3 | possible argument - // | R2 | possible argument - // | R1 | possible argument - // | R0 | junk on call to QuickResolutionTrampolineFromCode, holds result Method* - // | Method* | Callee save Method* set up by QuickResoltuionTrampolineFromCode - // Save callee saves and ready frame for exception delivery - RegList save = (1 << R1) | (1 << R2) | (1 << R3) | (1 << R5) | (1 << R6) | (1 << R7) | (1 << R8) | - (1 << R10) | (1 << R11) | (1 << LR); - // TODO: enable when GetCalleeSaveMethod is available at stub generation time - // DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask()); - __ PushList(save); - __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); - __ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3 - __ IncreaseFrameSize(8); // 2 words of space for alignment - __ mov(R2, ShifterOperand(SP)); // Pass SP - // Call to resolution trampoline (method_idx, receiver, sp, Thread*) - __ blx(R12); - __ mov(R12, ShifterOperand(R0)); // Save code address returned into R12 - // Restore registers which may have been modified by GC, "R0" will hold the Method* - __ DecreaseFrameSize(4); - __ PopList((1 << R0) | save); - __ bx(R12); // Leaf call to method's code - __ bkpt(0); - - assembler->EmitSlowPaths(); - size_t cs = assembler->CodeSize(); - UniquePtr > 
resolution_trampoline(new std::vector(cs)); - MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size()); - assembler->FinalizeInstructions(code); - - return resolution_trampoline.release(); -} - -const std::vector* CreateInterpreterToInterpreterEntry() { - UniquePtr assembler(static_cast(Assembler::Create(kArm))); - - __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); - __ bkpt(0); - - size_t cs = assembler->CodeSize(); - UniquePtr > entry_stub(new std::vector(cs)); - MemoryRegion code(&(*entry_stub)[0], entry_stub->size()); - assembler->FinalizeInstructions(code); - - return entry_stub.release(); -} - -const std::vector* CreateInterpreterToQuickEntry() { - UniquePtr assembler(static_cast(Assembler::Create(kArm))); - - __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)); - __ bkpt(0); - - size_t cs = assembler->CodeSize(); - UniquePtr > entry_stub(new std::vector(cs)); - MemoryRegion code(&(*entry_stub)[0], entry_stub->size()); - assembler->FinalizeInstructions(code); - - return entry_stub.release(); -} -} // namespace arm - -namespace mips { -const std::vector* CreateQuickResolutionTrampoline() { - UniquePtr assembler(static_cast(Assembler::Create(kMips))); - // | Out args | - // | Method* | <- SP on entry - // | RA | return address into caller - // | ... 
| callee saves - // | A3 | possible argument - // | A2 | possible argument - // | A1 | possible argument - // | A0/Method* | Callee save Method* set up by UnresolvedDirectMethodTrampolineFromCode - // Save callee saves and ready frame for exception delivery - __ AddConstant(SP, SP, -64); - __ StoreToOffset(kStoreWord, RA, SP, 60); - __ StoreToOffset(kStoreWord, FP, SP, 56); - __ StoreToOffset(kStoreWord, GP, SP, 52); - __ StoreToOffset(kStoreWord, S7, SP, 48); - __ StoreToOffset(kStoreWord, S6, SP, 44); - __ StoreToOffset(kStoreWord, S5, SP, 40); - __ StoreToOffset(kStoreWord, S4, SP, 36); - __ StoreToOffset(kStoreWord, S3, SP, 32); - __ StoreToOffset(kStoreWord, S2, SP, 28); - __ StoreToOffset(kStoreWord, A3, SP, 12); - __ StoreToOffset(kStoreWord, A2, SP, 8); - __ StoreToOffset(kStoreWord, A1, SP, 4); - - __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); - __ Move(A3, S1); // Pass Thread::Current() in A3 - __ Move(A2, SP); // Pass SP for Method** callee_addr - __ Jalr(T9); // Call to resolution trampoline (method_idx, receiver, sp, Thread*) - - // Restore registers which may have been modified by GC - __ LoadFromOffset(kLoadWord, A0, SP, 0); - __ LoadFromOffset(kLoadWord, A1, SP, 4); - __ LoadFromOffset(kLoadWord, A2, SP, 8); - __ LoadFromOffset(kLoadWord, A3, SP, 12); - __ LoadFromOffset(kLoadWord, S2, SP, 28); - __ LoadFromOffset(kLoadWord, S3, SP, 32); - __ LoadFromOffset(kLoadWord, S4, SP, 36); - __ LoadFromOffset(kLoadWord, S5, SP, 40); - __ LoadFromOffset(kLoadWord, S6, SP, 44); - __ LoadFromOffset(kLoadWord, S7, SP, 48); - __ LoadFromOffset(kLoadWord, GP, SP, 52); - __ LoadFromOffset(kLoadWord, FP, SP, 56); - __ LoadFromOffset(kLoadWord, RA, SP, 60); - __ AddConstant(SP, SP, 64); - - __ Move(T9, V0); // Put method's code in T9 - __ Jr(T9); // Leaf call to method's code - - __ Break(); - - assembler->EmitSlowPaths(); - size_t cs = assembler->CodeSize(); - UniquePtr > resolution_trampoline(new 
std::vector(cs)); - MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size()); - assembler->FinalizeInstructions(code); - - return resolution_trampoline.release(); -} - -const std::vector* CreateInterpreterToInterpreterEntry() { - UniquePtr assembler(static_cast(Assembler::Create(kMips))); - - __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); - __ Jr(T9); - __ Break(); - - size_t cs = assembler->CodeSize(); - UniquePtr > entry_stub(new std::vector(cs)); - MemoryRegion code(&(*entry_stub)[0], entry_stub->size()); - assembler->FinalizeInstructions(code); - - return entry_stub.release(); -} - -const std::vector* CreateInterpreterToQuickEntry() { - UniquePtr assembler(static_cast(Assembler::Create(kMips))); - - __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); - __ Jr(T9); - __ Break(); - - size_t cs = assembler->CodeSize(); - UniquePtr > entry_stub(new std::vector(cs)); - MemoryRegion code(&(*entry_stub)[0], entry_stub->size()); - assembler->FinalizeInstructions(code); - - return entry_stub.release(); -} -} // namespace mips - -namespace x86 { -const std::vector* CreateQuickResolutionTrampoline() { - UniquePtr assembler(static_cast(Assembler::Create(kX86))); - // Set up the callee save frame to conform with Runtime::CreateCalleeSaveMethod(kRefsAndArgs) - // return address - __ pushl(EDI); - __ pushl(ESI); - __ pushl(EBP); - __ pushl(EBX); - __ pushl(EDX); - __ pushl(ECX); - __ pushl(EAX); // <-- callee save Method* to go here - __ movl(EDX, ESP); // save ESP - __ fs()->pushl(Address::Absolute(Thread::SelfOffset())); // pass Thread* - __ pushl(EDX); // pass ESP for Method* - __ pushl(ECX); // pass receiver - __ pushl(EAX); // pass Method* - - // Call to resolve method. 
- __ Call(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)), - X86ManagedRegister::FromCpuRegister(ECX)); - - __ movl(EDI, EAX); // save code pointer in EDI - __ addl(ESP, Immediate(16)); // Pop arguments - __ popl(EAX); // Restore args. - __ popl(ECX); - __ popl(EDX); - __ popl(EBX); - __ popl(EBP); // Restore callee saves. - __ popl(ESI); - // Swap EDI callee save with code pointer - __ xchgl(EDI, Address(ESP, 0)); - // Tail call to intended method. - __ ret(); - - assembler->EmitSlowPaths(); - size_t cs = assembler->CodeSize(); - UniquePtr > resolution_trampoline(new std::vector(cs)); - MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size()); - assembler->FinalizeInstructions(code); - - return resolution_trampoline.release(); -} - -const std::vector* CreateInterpreterToInterpreterEntry() { - UniquePtr assembler(static_cast(Assembler::Create(kX86))); - - __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)))); - - size_t cs = assembler->CodeSize(); - UniquePtr > entry_stub(new std::vector(cs)); - MemoryRegion code(&(*entry_stub)[0], entry_stub->size()); - assembler->FinalizeInstructions(code); - - return entry_stub.release(); -} - -const std::vector* CreateInterpreterToQuickEntry() { - UniquePtr assembler(static_cast(Assembler::Create(kX86))); - - __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)))); - - size_t cs = assembler->CodeSize(); - UniquePtr > entry_stub(new std::vector(cs)); - MemoryRegion code(&(*entry_stub)[0], entry_stub->size()); - assembler->FinalizeInstructions(code); - - return entry_stub.release(); -} -} // namespace x86 - -} // namespace art diff --git a/compiler/stubs/stubs.h b/compiler/stubs/stubs.h deleted file mode 100644 index d85eae8e1e..0000000000 --- a/compiler/stubs/stubs.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache 
License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_COMPILER_STUBS_STUBS_H_ -#define ART_COMPILER_STUBS_STUBS_H_ - -#include "runtime.h" - -namespace art { - -namespace arm { -const std::vector* CreatePortableResolutionTrampoline() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -const std::vector* CreateQuickResolutionTrampoline() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -const std::vector* CreateInterpreterToInterpreterEntry() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -const std::vector* CreateInterpreterToQuickEntry() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -} - -namespace mips { -const std::vector* CreatePortableResolutionTrampoline() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -const std::vector* CreateQuickResolutionTrampoline() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -const std::vector* CreateInterpreterToInterpreterEntry() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -const std::vector* CreateInterpreterToQuickEntry() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -} - -namespace x86 { -const std::vector* CreatePortableResolutionTrampoline() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -const std::vector* CreateQuickResolutionTrampoline() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -const std::vector* CreateInterpreterToInterpreterEntry() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -const std::vector* CreateInterpreterToQuickEntry() - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -} - -} // namespace art - -#endif // 
ART_COMPILER_STUBS_STUBS_H_ diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index fa202c3017..f0d11d8f90 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -1246,10 +1246,10 @@ bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) { // Implementation note: this method must emit at most one instruction when // Address::CanHoldLoadOffset. void ArmAssembler::LoadFromOffset(LoadOperandType type, - Register reg, - Register base, - int32_t offset, - Condition cond) { + Register reg, + Register base, + int32_t offset, + Condition cond) { if (!Address::CanHoldLoadOffset(type, offset)) { CHECK(base != IP); LoadImmediate(IP, offset, cond); @@ -1884,7 +1884,7 @@ void ArmExceptionSlowPath::Emit(Assembler* sasm) { // Don't care about preserving R0 as this call won't return __ mov(R0, ShifterOperand(scratch_.AsCoreRegister())); // Set up call to Thread::Current()->pDeliverException - __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException)); + __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException).Int32Value()); __ blx(R12); // Call never returns __ bkpt(0); diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc index 931d7ab0f7..2be3d56cfa 100644 --- a/compiler/utils/mips/assembler_mips.cc +++ b/compiler/utils/mips/assembler_mips.cc @@ -813,14 +813,7 @@ void MipsAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOff void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/, ManagedRegister /*mscratch*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL) << "no arm implementation"; -#if 0 - Register scratch = mscratch.AsMips().AsCoreRegister(); - CHECK_EQ(size, 4u); - movl(scratch, Address(ESP, src_base)); - movl(scratch, Address(scratch, src_offset)); - movl(Address(ESP, dest), scratch); -#endif + UNIMPLEMENTED(FATAL) << "no mips implementation"; } void 
MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset, @@ -834,24 +827,11 @@ void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset, void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/, ManagedRegister /*mscratch*/, size_t /*size*/) { - UNIMPLEMENTED(FATAL) << "no arm implementation"; -#if 0 - Register scratch = mscratch.AsMips().AsCoreRegister(); - CHECK_EQ(size, 4u); - CHECK_EQ(dest.Int32Value(), src.Int32Value()); - movl(scratch, Address(ESP, src)); - pushl(Address(scratch, src_offset)); - popl(Address(scratch, dest_offset)); -#endif + UNIMPLEMENTED(FATAL) << "no mips implementation"; } void MipsAssembler::MemoryBarrier(ManagedRegister) { - UNIMPLEMENTED(FATAL) << "NEEDS TO BE IMPLEMENTED"; -#if 0 -#if ANDROID_SMP != 0 - mfence(); -#endif -#endif + UNIMPLEMENTED(FATAL) << "no mips implementation"; } void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg, @@ -953,10 +933,7 @@ void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscrat } void MipsAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*mscratch*/) { - UNIMPLEMENTED(FATAL) << "no arm implementation"; -#if 0 - fs()->call(Address::Absolute(offset)); -#endif + UNIMPLEMENTED(FATAL) << "no mips implementation"; } void MipsAssembler::GetCurrentThread(ManagedRegister tr) { @@ -988,7 +965,7 @@ void MipsExceptionSlowPath::Emit(Assembler* sasm) { // Don't care about preserving A0 as this call won't return __ Move(A0, scratch_.AsCoreRegister()); // Set up call to Thread::Current()->pDeliverException - __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException)); + __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException).Int32Value()); __ Jr(T9); // Call never returns __ Break(); diff --git a/runtime/Android.mk b/runtime/Android.mk index 51bb3eb2d3..4f25c00546 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -142,6 +142,7 @@ LIBART_COMMON_SRC_FILES 
+= \ arch/x86/registers_x86.cc \ arch/mips/registers_mips.cc \ entrypoints/entrypoint_utils.cc \ + entrypoints/interpreter/interpreter_entrypoints.cc \ entrypoints/jni/jni_entrypoints.cc \ entrypoints/math_entrypoints.cc \ entrypoints/portable/portable_alloc_entrypoints.cc \ @@ -163,15 +164,13 @@ LIBART_COMMON_SRC_FILES += \ entrypoints/quick/quick_field_entrypoints.cc \ entrypoints/quick/quick_fillarray_entrypoints.cc \ entrypoints/quick/quick_instrumentation_entrypoints.cc \ - entrypoints/quick/quick_interpreter_entrypoints.cc \ entrypoints/quick/quick_invoke_entrypoints.cc \ entrypoints/quick/quick_jni_entrypoints.cc \ entrypoints/quick/quick_lock_entrypoints.cc \ entrypoints/quick/quick_math_entrypoints.cc \ - entrypoints/quick/quick_proxy_entrypoints.cc \ - entrypoints/quick/quick_stub_entrypoints.cc \ entrypoints/quick/quick_thread_entrypoints.cc \ - entrypoints/quick/quick_throw_entrypoints.cc + entrypoints/quick/quick_throw_entrypoints.cc \ + entrypoints/quick/quick_trampoline_entrypoints.cc LIBART_TARGET_SRC_FILES := \ $(LIBART_COMMON_SRC_FILES) \ diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S index ed655e95b1..559788f1ba 100644 --- a/runtime/arch/arm/asm_support_arm.S +++ b/runtime/arch/arm/asm_support_arm.S @@ -35,4 +35,11 @@ .size \name, .-\name .endm +.macro UNIMPLEMENTED name + ENTRY \name + bkpt + bkpt + END \name +.endm + #endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc index b71a158289..848bacca52 100644 --- a/runtime/arch/arm/entrypoints_init_arm.cc +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include "entrypoints/interpreter/interpreter_entrypoints.h" #include "entrypoints/portable/portable_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/entrypoint_utils.h" @@ -21,49 +22,61 @@ namespace art { +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Portable entrypoints. +extern "C" void art_portable_resolution_trampoline(mirror::AbstractMethod*); +extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*); + // Alloc entrypoints. -extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t); // Cast entrypoints. 
extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); +extern "C" void art_quick_can_put_array_element(void*, void*); +extern "C" void art_quick_check_cast(void*, void*); // DexCache entrypoints. -extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); +extern "C" void* art_quick_initialize_static_storage(uint32_t, void*); +extern "C" void* art_quick_initialize_type(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*); +extern "C" void* art_quick_resolve_string(void*, uint32_t); // Exception entrypoints. extern "C" void* GetAndClearException(Thread*); // Field entrypoints. 
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); +extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static(uint32_t, int32_t); +extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance(uint32_t, void*); +extern "C" int32_t art_quick_get32_static(uint32_t); +extern "C" int64_t art_quick_get64_instance(uint32_t, void*); +extern "C" int64_t art_quick_get64_static(uint32_t); +extern "C" void* art_quick_get_obj_instance(uint32_t, void*); +extern "C" void* art_quick_get_obj_static(uint32_t); // FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); +extern "C" void art_quick_handle_fill_data(void*, void*); // Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); +extern "C" void art_quick_lock_object(void*); +extern "C" void art_quick_unlock_object(void*); // Math entrypoints. 
extern int32_t CmpgDouble(double a, double b); @@ -93,26 +106,14 @@ extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); -// Interpreter entrypoints. -extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - // Intrinsic entrypoints. extern "C" int32_t __memcmp16(void*, void*, int32_t); extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); extern "C" int32_t art_quick_string_compareto(void*, void*); // Invoke entrypoints. -extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_resolution_trampoline(mirror::AbstractMethod*); +extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*); extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); @@ -125,49 +126,61 @@ extern void CheckSuspendFromCode(Thread* thread); extern "C" void art_quick_test_suspend(); // Throw entrypoints. 
-extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { +extern "C" void art_quick_deliver_exception(void*); +extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero(); +extern "C" void art_quick_throw_no_such_method(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception(); +extern "C" void art_quick_throw_stack_overflow(void*); + +void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, + PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) { + // Interpreter + ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge; + ipoints->pInterpreterToCompiledCodeBridge = artInterperterToCompiledCodeBridge; + + // JNI + jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; + + // Portable + ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline; + ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge; + // Alloc - qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; - qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; - qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; + qpoints->pAllocArray = 
art_quick_alloc_array; + qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check; + qpoints->pAllocObject = art_quick_alloc_object; + qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check; + qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array; + qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check; // Cast - qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; - qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; + qpoints->pCanPutArrayElement = art_quick_can_put_array_element; + qpoints->pCheckCast = art_quick_check_cast; // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; + qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; + qpoints->pInitializeType = art_quick_initialize_type; + qpoints->pResolveString = art_quick_resolve_string; // Field - qpoints->pSet32Instance = art_quick_set32_instance_from_code; - qpoints->pSet32Static = art_quick_set32_static_from_code; - qpoints->pSet64Instance = art_quick_set64_instance_from_code; - qpoints->pSet64Static = art_quick_set64_static_from_code; - qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; - qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; - qpoints->pGet32Instance = art_quick_get32_instance_from_code; - qpoints->pGet64Instance = art_quick_get64_instance_from_code; - qpoints->pGetObjInstance = 
art_quick_get_obj_instance_from_code; - qpoints->pGet32Static = art_quick_get32_static_from_code; - qpoints->pGet64Static = art_quick_get64_static_from_code; - qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + qpoints->pSet32Instance = art_quick_set32_instance; + qpoints->pSet32Static = art_quick_set32_static; + qpoints->pSet64Instance = art_quick_set64_instance; + qpoints->pSet64Static = art_quick_set64_static; + qpoints->pSetObjInstance = art_quick_set_obj_instance; + qpoints->pSetObjStatic = art_quick_set_obj_static; + qpoints->pGet32Instance = art_quick_get32_instance; + qpoints->pGet64Instance = art_quick_get64_instance; + qpoints->pGetObjInstance = art_quick_get_obj_instance; + qpoints->pGet32Static = art_quick_get32_static; + qpoints->pGet64Static = art_quick_get64_static; + qpoints->pGetObjStatic = art_quick_get_obj_static; // FillArray - qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + qpoints->pHandleFillArrayData = art_quick_handle_fill_data; // JNI qpoints->pJniMethodStart = JniMethodStart; @@ -178,8 +191,8 @@ void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; // Locks - qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; - qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + qpoints->pLockObject = art_quick_lock_object; + qpoints->pUnlockObject = art_quick_unlock_object; // Math qpoints->pCmpgDouble = CmpgDouble; @@ -203,10 +216,6 @@ void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { qpoints->pShrLong = art_quick_shr_long; qpoints->pUshrLong = art_quick_ushr_long; - // Interpreter - qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - // Intrinsics qpoints->pIndexOf = art_quick_indexof; qpoints->pMemcmp16 = __memcmp16; @@ -214,7 +223,8 @@ void 
InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { qpoints->pMemcpy = memcpy; // Invocation - qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; + qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; @@ -223,19 +233,16 @@ void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; // Thread - qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; - qpoints->pTestSuspendFromCode = art_quick_test_suspend; + qpoints->pCheckSuspend = CheckSuspendFromCode; + qpoints->pTestSuspend = art_quick_test_suspend; // Throws - qpoints->pDeliverException = art_quick_deliver_exception_from_code; - qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; - qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; - - // Portable - ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; + qpoints->pDeliverException = art_quick_deliver_exception; + qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; + qpoints->pThrowDivZero = art_quick_throw_div_zero; + qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; + qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; + qpoints->pThrowStackOverflow = 
art_quick_throw_stack_overflow; }; } // namespace art diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S index 0a0d06a22a..322c40ba6d 100644 --- a/runtime/arch/arm/jni_entrypoints_arm.S +++ b/runtime/arch/arm/jni_entrypoints_arm.S @@ -28,8 +28,7 @@ ENTRY art_jni_dlsym_lookup_stub sub sp, #12 @ pad stack pointer to align frame .pad #12 .cfi_adjust_cfa_offset 12 - mov r0, r9 @ pass Thread::Current - blx artFindNativeMethod @ (Thread*) + blx artFindNativeMethod mov r12, r0 @ save result in r12 add sp, #12 @ restore stack pointer .cfi_adjust_cfa_offset -12 @@ -44,7 +43,7 @@ END art_jni_dlsym_lookup_stub * Entry point of native methods when JNI bug compatibility is enabled. */ .extern artWorkAroundAppJniBugs -ENTRY art_quick_work_around_app_jni_bugs +ENTRY art_work_around_app_jni_bugs @ save registers that may contain arguments and LR that will be crushed by a call push {r0-r3, lr} .save {r0-r3, lr} @@ -62,4 +61,4 @@ ENTRY art_quick_work_around_app_jni_bugs pop {r0-r3, lr} @ restore possibly modified argument registers .cfi_adjust_cfa_offset -16 bx r12 @ tail call into JNI routine -END art_quick_work_around_app_jni_bugs +END art_work_around_app_jni_bugs diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S index 4cc6654ebb..9b1014a140 100644 --- a/runtime/arch/arm/portable_entrypoints_arm.S +++ b/runtime/arch/arm/portable_entrypoints_arm.S @@ -94,3 +94,6 @@ ENTRY art_portable_proxy_invoke_handler .cfi_adjust_cfa_offset -48 bx lr @ return END art_portable_proxy_invoke_handler + +UNIMPLEMENTED art_portable_resolution_trampoline +UNIMPLEMENTED art_portable_to_interpreter_bridge diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index 9b8d238ab8..e23f5a8da2 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -157,33 +157,33 @@ END \c_name * Called by managed code, saves callee saves and 
then calls artThrowException * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception. */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode +ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode /* * Called by managed code to create and deliver a NullPointerException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode /* * Called by managed code to create and deliver an ArithmeticException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode /* * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds * index, arg2 holds limit. */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode +TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode /* * Called by managed code to create and deliver a StackOverflowError. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode /* * Called by managed code to create and deliver a NoSuchMethodError. */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode +ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode /* * All generated callsites for interface invokes and invocation slow paths will load arguments @@ -294,7 +294,7 @@ END art_quick_do_long_jump * failure. 
*/ .extern artHandleFillArrayDataFromCode -ENTRY art_quick_handle_fill_data_from_code +ENTRY art_quick_handle_fill_data SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC mov r2, r9 @ pass Thread::Current mov r3, sp @ pass SP @@ -303,25 +303,25 @@ ENTRY art_quick_handle_fill_data_from_code cmp r0, #0 @ success? bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_handle_fill_data_from_code +END art_quick_handle_fill_data /* * Entry from managed code that calls artLockObjectFromCode, may block for GC. */ .extern artLockObjectFromCode -ENTRY art_quick_lock_object_from_code +ENTRY art_quick_lock_object SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case we block mov r1, r9 @ pass Thread::Current mov r2, sp @ pass SP bl artLockObjectFromCode @ (Object* obj, Thread*, SP) RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN -END art_quick_lock_object_from_code +END art_quick_lock_object /* * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. */ .extern artUnlockObjectFromCode -ENTRY art_quick_unlock_object_from_code +ENTRY art_quick_unlock_object SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC mov r1, r9 @ pass Thread::Current mov r2, sp @ pass SP @@ -330,13 +330,13 @@ ENTRY art_quick_unlock_object_from_code cmp r0, #0 @ success? bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_unlock_object_from_code +END art_quick_unlock_object /* * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. */ .extern artCheckCastFromCode -ENTRY art_quick_check_cast_from_code +ENTRY art_quick_check_cast SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC mov r2, r9 @ pass Thread::Current mov r3, sp @ pass SP @@ -345,14 +345,14 @@ ENTRY art_quick_check_cast_from_code cmp r0, #0 @ success? 
bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_check_cast_from_code +END art_quick_check_cast /* * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on * failure. */ .extern artCanPutArrayElementFromCode -ENTRY art_quick_can_put_array_element_from_code +ENTRY art_quick_can_put_array_element SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case exception allocation triggers GC mov r2, r9 @ pass Thread::Current mov r3, sp @ pass SP @@ -361,7 +361,7 @@ ENTRY art_quick_can_put_array_element_from_code cmp r0, #0 @ success? bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_can_put_array_element_from_code +END art_quick_can_put_array_element /* * Entry from managed code when uninitialized static storage, this stub will run the class @@ -369,7 +369,7 @@ END art_quick_can_put_array_element_from_code * returned. */ .extern artInitializeStaticStorageFromCode -ENTRY art_quick_initialize_static_storage_from_code +ENTRY art_quick_initialize_static_storage SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r2, r9 @ pass Thread::Current mov r3, sp @ pass SP @@ -379,13 +379,13 @@ ENTRY art_quick_initialize_static_storage_from_code cmp r0, #0 @ success if result is non-null bxne lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_initialize_static_storage_from_code +END art_quick_initialize_static_storage /* * Entry from managed code when dex cache misses for a type_idx */ .extern artInitializeTypeFromCode -ENTRY art_quick_initialize_type_from_code +ENTRY art_quick_initialize_type SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r2, r9 @ pass Thread::Current mov r3, sp @ pass SP @@ -395,14 +395,14 @@ ENTRY art_quick_initialize_type_from_code cmp r0, #0 @ success if result is non-null bxne lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_initialize_type_from_code +END art_quick_initialize_type /* * Entry from managed code when 
type_idx needs to be checked for access and dex cache may also * miss. */ .extern artInitializeTypeAndVerifyAccessFromCode -ENTRY art_quick_initialize_type_and_verify_access_from_code +ENTRY art_quick_initialize_type_and_verify_access SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r2, r9 @ pass Thread::Current mov r3, sp @ pass SP @@ -412,13 +412,13 @@ ENTRY art_quick_initialize_type_and_verify_access_from_code cmp r0, #0 @ success if result is non-null bxne lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_initialize_type_and_verify_access_from_code +END art_quick_initialize_type_and_verify_access /* * Called by managed code to resolve a static field and load a 32-bit primitive value. */ .extern artGet32StaticFromCode -ENTRY art_quick_get32_static_from_code +ENTRY art_quick_get32_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC ldr r1, [sp, #32] @ pass referrer mov r2, r9 @ pass Thread::Current @@ -429,13 +429,13 @@ ENTRY art_quick_get32_static_from_code cmp r12, #0 @ success if no exception is pending bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_get32_static_from_code +END art_quick_get32_static /* * Called by managed code to resolve a static field and load a 64-bit primitive value. */ .extern artGet64StaticFromCode -ENTRY art_quick_get64_static_from_code +ENTRY art_quick_get64_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC ldr r1, [sp, #32] @ pass referrer mov r2, r9 @ pass Thread::Current @@ -446,13 +446,13 @@ ENTRY art_quick_get64_static_from_code cmp r12, #0 @ success if no exception is pending bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_get64_static_from_code +END art_quick_get64_static /* * Called by managed code to resolve a static field and load an object reference. 
*/ .extern artGetObjStaticFromCode -ENTRY art_quick_get_obj_static_from_code +ENTRY art_quick_get_obj_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC ldr r1, [sp, #32] @ pass referrer mov r2, r9 @ pass Thread::Current @@ -463,13 +463,13 @@ ENTRY art_quick_get_obj_static_from_code cmp r12, #0 @ success if no exception is pending bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_get_obj_static_from_code +END art_quick_get_obj_static /* * Called by managed code to resolve an instance field and load a 32-bit primitive value. */ .extern artGet32InstanceFromCode -ENTRY art_quick_get32_instance_from_code +ENTRY art_quick_get32_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC ldr r2, [sp, #32] @ pass referrer mov r3, r9 @ pass Thread::Current @@ -482,13 +482,13 @@ ENTRY art_quick_get32_instance_from_code cmp r12, #0 @ success if no exception is pending bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_get32_instance_from_code +END art_quick_get32_instance /* * Called by managed code to resolve an instance field and load a 64-bit primitive value. */ .extern artGet64InstanceFromCode -ENTRY art_quick_get64_instance_from_code +ENTRY art_quick_get64_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC ldr r2, [sp, #32] @ pass referrer mov r3, r9 @ pass Thread::Current @@ -504,13 +504,13 @@ ENTRY art_quick_get64_instance_from_code cmp r12, #0 @ success if no exception is pending bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_get64_instance_from_code +END art_quick_get64_instance /* * Called by managed code to resolve an instance field and load an object reference. 
*/ .extern artGetObjInstanceFromCode -ENTRY art_quick_get_obj_instance_from_code +ENTRY art_quick_get_obj_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC ldr r2, [sp, #32] @ pass referrer mov r3, r9 @ pass Thread::Current @@ -526,13 +526,13 @@ ENTRY art_quick_get_obj_instance_from_code cmp r12, #0 @ success if no exception is pending bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_get_obj_instance_from_code +END art_quick_get_obj_instance /* * Called by managed code to resolve a static field and store a 32-bit primitive value. */ .extern artSet32StaticFromCode -ENTRY art_quick_set32_static_from_code +ENTRY art_quick_set32_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC ldr r2, [sp, #32] @ pass referrer mov r3, r9 @ pass Thread::Current @@ -547,14 +547,14 @@ ENTRY art_quick_set32_static_from_code cmp r0, #0 @ success if result is 0 bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_set32_static_from_code +END art_quick_set32_static /* * Called by managed code to resolve a static field and store a 64-bit primitive value. * On entry r0 holds field index, r1:r2 hold new_val */ .extern artSet64StaticFromCode -ENTRY art_quick_set64_static_from_code +ENTRY art_quick_set64_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r3, r2 @ pass one half of wide argument mov r2, r1 @ pass other half of wide argument @@ -573,13 +573,13 @@ ENTRY art_quick_set64_static_from_code cmp r0, #0 @ success if result is 0 bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_set64_static_from_code +END art_quick_set64_static /* * Called by managed code to resolve a static field and store an object reference. 
*/ .extern artSetObjStaticFromCode -ENTRY art_quick_set_obj_static_from_code +ENTRY art_quick_set_obj_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC ldr r2, [sp, #32] @ pass referrer mov r3, r9 @ pass Thread::Current @@ -594,13 +594,13 @@ ENTRY art_quick_set_obj_static_from_code cmp r0, #0 @ success if result is 0 bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_set_obj_static_from_code +END art_quick_set_obj_static /* * Called by managed code to resolve an instance field and store a 32-bit primitive value. */ .extern artSet32InstanceFromCode -ENTRY art_quick_set32_instance_from_code +ENTRY art_quick_set32_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC ldr r3, [sp, #32] @ pass referrer mov r12, sp @ save SP @@ -619,13 +619,13 @@ ENTRY art_quick_set32_instance_from_code cmp r0, #0 @ success if result is 0 bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_set32_instance_from_code +END art_quick_set32_instance /* * Called by managed code to resolve an instance field and store a 64-bit primitive value. */ .extern artSet32InstanceFromCode -ENTRY art_quick_set64_instance_from_code +ENTRY art_quick_set64_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r12, sp @ save SP sub sp, #8 @ grow frame for alignment with stack args @@ -642,13 +642,13 @@ ENTRY art_quick_set64_instance_from_code cmp r0, #0 @ success if result is 0 bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_set64_instance_from_code +END art_quick_set64_instance /* * Called by managed code to resolve an instance field and store an object reference. 
*/ .extern artSetObjInstanceFromCode -ENTRY art_quick_set_obj_instance_from_code +ENTRY art_quick_set_obj_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC ldr r3, [sp, #32] @ pass referrer mov r12, sp @ save SP @@ -666,7 +666,7 @@ ENTRY art_quick_set_obj_instance_from_code cmp r0, #0 @ success if result is 0 bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_set_obj_instance_from_code +END art_quick_set_obj_instance /* * Entry from managed code to resolve a string, this stub will allocate a String and deliver an @@ -675,7 +675,7 @@ END art_quick_set_obj_instance_from_code * performed. */ .extern artResolveStringFromCode -ENTRY art_quick_resolve_string_from_code +ENTRY art_quick_resolve_string SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r2, r9 @ pass Thread::Current mov r3, sp @ pass SP @@ -685,13 +685,13 @@ ENTRY art_quick_resolve_string_from_code cmp r0, #0 @ success if result is non-null bxne lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_resolve_string_from_code +END art_quick_resolve_string /* * Called by managed code to allocate an object */ .extern artAllocObjectFromCode -ENTRY art_quick_alloc_object_from_code +ENTRY art_quick_alloc_object SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r2, r9 @ pass Thread::Current mov r3, sp @ pass SP @@ -700,14 +700,14 @@ ENTRY art_quick_alloc_object_from_code cmp r0, #0 @ success if result is non-null bxne lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_alloc_object_from_code +END art_quick_alloc_object /* * Called by managed code to allocate an object when the caller doesn't know whether it has * access to the created type. 
*/ .extern artAllocObjectFromCodeWithAccessCheck -ENTRY art_quick_alloc_object_from_code_with_access_check +ENTRY art_quick_alloc_object_with_access_check SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r2, r9 @ pass Thread::Current mov r3, sp @ pass SP @@ -716,13 +716,13 @@ ENTRY art_quick_alloc_object_from_code_with_access_check cmp r0, #0 @ success if result is non-null bxne lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_alloc_object_from_code_with_access_check +END art_quick_alloc_object_with_access_check /* * Called by managed code to allocate an array. */ .extern artAllocArrayFromCode -ENTRY art_quick_alloc_array_from_code +ENTRY art_quick_alloc_array SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r3, r9 @ pass Thread::Current mov r12, sp @@ -737,14 +737,14 @@ ENTRY art_quick_alloc_array_from_code cmp r0, #0 @ success if result is non-null bxne lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_alloc_array_from_code +END art_quick_alloc_array /* * Called by managed code to allocate an array when the caller doesn't know whether it has * access to the created type. */ .extern artAllocArrayFromCodeWithAccessCheck -ENTRY art_quick_alloc_array_from_code_with_access_check +ENTRY art_quick_alloc_array_with_access_check SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r3, r9 @ pass Thread::Current mov r12, sp @@ -759,13 +759,13 @@ ENTRY art_quick_alloc_array_from_code_with_access_check cmp r0, #0 @ success if result is non-null bxne lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_alloc_array_from_code_with_access_check +END art_quick_alloc_array_with_access_check /* * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. 
*/ .extern artCheckAndAllocArrayFromCode -ENTRY art_quick_check_and_alloc_array_from_code +ENTRY art_quick_check_and_alloc_array SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r3, r9 @ pass Thread::Current mov r12, sp @@ -780,13 +780,13 @@ ENTRY art_quick_check_and_alloc_array_from_code cmp r0, #0 @ success if result is non-null bxne lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_check_and_alloc_array_from_code +END art_quick_check_and_alloc_array /* * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. */ .extern artCheckAndAllocArrayFromCodeWithAccessCheck -ENTRY art_quick_check_and_alloc_array_from_code_with_access_check +ENTRY art_quick_check_and_alloc_array_with_access_check SETUP_REF_ONLY_CALLEE_SAVE_FRAME @ save callee saves in case of GC mov r3, r9 @ pass Thread::Current mov r12, sp @@ -801,7 +801,7 @@ ENTRY art_quick_check_and_alloc_array_from_code_with_access_check cmp r0, #0 @ success if result is non-null bxne lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_check_and_alloc_array_from_code_with_access_check +END art_quick_check_and_alloc_array_with_access_check /* * Called by managed code when the value in rSUSPEND has been decremented to 0. @@ -840,13 +840,33 @@ ENTRY art_quick_proxy_invoke_handler DELIVER_PENDING_EXCEPTION END art_quick_proxy_invoke_handler - .extern artInterpreterEntry -ENTRY art_quick_interpreter_entry + .extern artQuickResolutionTrampoline +ENTRY art_quick_resolution_trampoline + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + blx artQuickResolutionTrampoline @ (Method* called, receiver, Thread*, SP) + cmp r0, #0 @ is code pointer null? 
+ beq 1f @ goto exception + mov r12, r0 + ldr r0, [sp, #0] @ load resolved method in r0 + ldr r1, [sp, #8] @ restore non-callee save r1 + ldrd r2, [sp, #12] @ restore non-callee saves r2-r3 + ldr lr, [sp, #44] @ restore lr + add sp, #48 @ rewind sp + .cfi_adjust_cfa_offset -48 + bx r12 @ tail-call into actual code +1: + RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME + DELIVER_PENDING_EXCEPTION +END art_quick_resolution_trampoline + + .extern artQuickToInterpreterBridge +ENTRY art_quick_to_interpreter_bridge SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - str r0, [sp, #0] @ place proxy method at bottom of frame mov r1, r9 @ pass Thread::Current mov r2, sp @ pass SP - blx artInterpreterEntry @ (Method* method, Thread*, SP) + blx artQuickToInterpreterBridge @ (Method* method, Thread*, SP) ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ ldr lr, [sp, #44] @ restore lr add sp, #48 @ pop frame @@ -854,14 +874,14 @@ ENTRY art_quick_interpreter_entry cmp r12, #0 @ success if no exception is pending bxeq lr @ return on success DELIVER_PENDING_EXCEPTION -END art_quick_interpreter_entry +END art_quick_to_interpreter_bridge /* * Routine that intercepts method calls and returns. 
*/ .extern artInstrumentationMethodEntryFromCode .extern artInstrumentationMethodExitFromCode -ENTRY art_quick_instrumentation_entry_from_code +ENTRY art_quick_instrumentation_entry SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME str r0, [sp, #4] @ preserve r0 mov r12, sp @ remember sp @@ -877,11 +897,11 @@ ENTRY art_quick_instrumentation_entry_from_code mov r12, r0 @ r12 holds reference to code ldr r0, [sp, #4] @ restore r0 RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME - blx r12 @ call method with lr set to art_quick_instrumentation_exit_from_code -END art_quick_instrumentation_entry_from_code - .type art_quick_instrumentation_exit_from_code, #function - .global art_quick_instrumentation_exit_from_code -art_quick_instrumentation_exit_from_code: + blx r12 @ call method with lr set to art_quick_instrumentation_exit +END art_quick_instrumentation_entry + .type art_quick_instrumentation_exit, #function + .global art_quick_instrumentation_exit +art_quick_instrumentation_exit: .cfi_startproc .fnstart mov lr, #0 @ link register is to here, so clobber with 0 for later checks @@ -910,7 +930,7 @@ art_quick_instrumentation_exit_from_code: add sp, #32 @ remove callee save frame .cfi_adjust_cfa_offset -32 bx r2 @ return -END art_quick_instrumentation_exit_from_code +END art_quick_instrumentation_exit /* * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization @@ -924,25 +944,6 @@ ENTRY art_quick_deoptimize blx artDeoptimize @ artDeoptimize(Thread*, SP) END art_quick_deoptimize - /* - * Portable abstract method error stub. r0 contains method* on entry. SP unused in portable. - */ - .extern artThrowAbstractMethodErrorFromCode -ENTRY art_portable_abstract_method_error_stub - mov r1, r9 @ pass Thread::Current - b artThrowAbstractMethodErrorFromCode @ (Method*, Thread*, SP) -END art_portable_abstract_method_error_stub - - /* - * Quick abstract method error stub. r0 contains method* on entry. 
- */ -ENTRY art_quick_abstract_method_error_stub - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - mov r1, r9 @ pass Thread::Current - mov r2, sp @ pass SP - b artThrowAbstractMethodErrorFromCode @ (Method*, Thread*, SP) -END art_quick_abstract_method_error_stub - /* * Signed 64-bit integer multiply. * diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S index 8a34b9dbd0..fe932d20c2 100644 --- a/runtime/arch/mips/asm_support_mips.S +++ b/runtime/arch/mips/asm_support_mips.S @@ -38,4 +38,12 @@ .cpload $t9 .endm +.macro UNIMPLEMENTED name + ENTRY \name + break + break + END \name +.endm + + #endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc index 0a62a4096d..a18079b628 100644 --- a/runtime/arch/mips/entrypoints_init_mips.cc +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -21,49 +21,61 @@ namespace art { +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Portable entrypoints. +extern "C" void art_portable_resolution_trampoline(mirror::AbstractMethod*); +extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*); + // Alloc entrypoints. 
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t); // Cast entrypoints. extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); +extern "C" void art_quick_can_put_array_element(void*, void*); +extern "C" void art_quick_check_cast(void*, void*); // DexCache entrypoints. 
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); +extern "C" void* art_quick_initialize_static_storage(uint32_t, void*); +extern "C" void* art_quick_initialize_type(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*); +extern "C" void* art_quick_resolve_string(void*, uint32_t); // Exception entrypoints. extern "C" void* GetAndClearException(Thread*); // Field entrypoints. -extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); +extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static(uint32_t, int32_t); +extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance(uint32_t, void*); +extern "C" 
int32_t art_quick_get32_static(uint32_t); +extern "C" int64_t art_quick_get64_instance(uint32_t, void*); +extern "C" int64_t art_quick_get64_static(uint32_t); +extern "C" void* art_quick_get_obj_instance(uint32_t, void*); +extern "C" void* art_quick_get_obj_static(uint32_t); // FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); +extern "C" void art_quick_handle_fill_data(void*, void*); // Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); +extern "C" void art_quick_lock_object(void*); +extern "C" void art_quick_unlock_object(void*); // Math entrypoints. extern int32_t CmpgDouble(double a, double b); @@ -95,26 +107,14 @@ extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); -// Interpreter entrypoints. -extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - // Intrinsic entrypoints. extern "C" int32_t __memcmp16(void*, void*, int32_t); extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); extern "C" int32_t art_quick_string_compareto(void*, void*); // Invoke entrypoints. 
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_resolution_trampoline(mirror::AbstractMethod*); +extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*); extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); @@ -127,49 +127,61 @@ extern void CheckSuspendFromCode(Thread* thread); extern "C" void art_quick_test_suspend(); // Throw entrypoints. -extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { +extern "C" void art_quick_deliver_exception(void*); +extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero(); +extern "C" void art_quick_throw_no_such_method(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception(); +extern "C" void art_quick_throw_stack_overflow(void*); + +void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, + PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) { + // Interpreter + ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge; + 
ipoints->pInterpreterToCompiledCodeBridge = artInterperterToCompiledCodeBridge; + + // JNI + jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; + + // Portable + ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline; + ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge; + // Alloc - qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; - qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; - qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; + qpoints->pAllocArray = art_quick_alloc_array; + qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check; + qpoints->pAllocObject = art_quick_alloc_object; + qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check; + qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array; + qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check; // Cast - qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; - qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; + qpoints->pCanPutArrayElement = art_quick_can_put_array_element; + qpoints->pCheckCast = art_quick_check_cast; // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - qpoints->pResolveStringFromCode = 
art_quick_resolve_string_from_code; + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; + qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; + qpoints->pInitializeType = art_quick_initialize_type; + qpoints->pResolveString = art_quick_resolve_string; // Field - qpoints->pSet32Instance = art_quick_set32_instance_from_code; - qpoints->pSet32Static = art_quick_set32_static_from_code; - qpoints->pSet64Instance = art_quick_set64_instance_from_code; - qpoints->pSet64Static = art_quick_set64_static_from_code; - qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; - qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; - qpoints->pGet32Instance = art_quick_get32_instance_from_code; - qpoints->pGet64Instance = art_quick_get64_instance_from_code; - qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; - qpoints->pGet32Static = art_quick_get32_static_from_code; - qpoints->pGet64Static = art_quick_get64_static_from_code; - qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + qpoints->pSet32Instance = art_quick_set32_instance; + qpoints->pSet32Static = art_quick_set32_static; + qpoints->pSet64Instance = art_quick_set64_instance; + qpoints->pSet64Static = art_quick_set64_static; + qpoints->pSetObjInstance = art_quick_set_obj_instance; + qpoints->pSetObjStatic = art_quick_set_obj_static; + qpoints->pGet32Instance = art_quick_get32_instance; + qpoints->pGet64Instance = art_quick_get64_instance; + qpoints->pGetObjInstance = art_quick_get_obj_instance; + qpoints->pGet32Static = art_quick_get32_static; + qpoints->pGet64Static = art_quick_get64_static; + qpoints->pGetObjStatic = art_quick_get_obj_static; // FillArray - qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + qpoints->pHandleFillArrayData = art_quick_handle_fill_data; // JNI qpoints->pJniMethodStart = JniMethodStart; @@ -180,8 +192,8 @@ void InitEntryPoints(QuickEntryPoints* qpoints, 
PortableEntryPoints* ppoints) { qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; // Locks - qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; - qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + qpoints->pLockObject = art_quick_lock_object; + qpoints->pUnlockObject = art_quick_unlock_object; // Math qpoints->pCmpgDouble = CmpgDouble; @@ -204,10 +216,6 @@ void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { qpoints->pShrLong = art_quick_shr_long; qpoints->pUshrLong = art_quick_ushr_long; - // Interpreter - qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - // Intrinsics qpoints->pIndexOf = art_quick_indexof; qpoints->pMemcmp16 = __memcmp16; @@ -215,7 +223,8 @@ void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { qpoints->pMemcpy = memcpy; // Invocation - qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; + qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; @@ -224,19 +233,16 @@ void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; // Thread - qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; - qpoints->pTestSuspendFromCode = art_quick_test_suspend; + qpoints->pCheckSuspend = CheckSuspendFromCode; + qpoints->pTestSuspend = art_quick_test_suspend; // Throws - qpoints->pDeliverException = 
art_quick_deliver_exception_from_code; - qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; - qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; - - // Portable - ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; + qpoints->pDeliverException = art_quick_deliver_exception; + qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; + qpoints->pThrowDivZero = art_quick_throw_div_zero; + qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; + qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; + qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; }; } // namespace art diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S index fca6d777ab..ad7c021762 100644 --- a/runtime/arch/mips/jni_entrypoints_mips.S +++ b/runtime/arch/mips/jni_entrypoints_mips.S @@ -59,7 +59,7 @@ END art_jni_dlsym_lookup_stub * Entry point of native methods when JNI bug compatibility is enabled. 
*/ .extern artWorkAroundAppJniBugs -ENTRY art_quick_work_around_app_jni_bugs +ENTRY art_work_around_app_jni_bugs GENERATE_GLOBAL_POINTER # save registers that may contain arguments and LR that will be crushed by a call addiu $sp, $sp, -32 @@ -86,4 +86,4 @@ ENTRY art_quick_work_around_app_jni_bugs jr $t9 # tail call into JNI routine addiu $sp, $sp, 32 .cfi_adjust_cfa_offset -32 -END art_quick_work_around_app_jni_bugs +END art_work_around_app_jni_bugs diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S index e7a9b0fb60..9208a8a4f5 100644 --- a/runtime/arch/mips/portable_entrypoints_mips.S +++ b/runtime/arch/mips/portable_entrypoints_mips.S @@ -61,13 +61,5 @@ ENTRY art_portable_proxy_invoke_handler .cfi_adjust_cfa_offset -64 END art_portable_proxy_invoke_handler - /* - * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable. - */ - .extern artThrowAbstractMethodErrorFromCode -ENTRY art_portable_abstract_method_error_stub - GENERATE_GLOBAL_POINTER - la $t9, artThrowAbstractMethodErrorFromCode - jr $t9 # (Method*, Thread*, SP) - move $a1, $s1 # pass Thread::Current -END art_portable_abstract_method_error_stub +UNIMPLEMENTED art_portable_resolution_trampoline +UNIMPLEMENTED art_portable_to_interpreter_bridge diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index d32a2b4a15..004fda60f1 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -143,7 +143,7 @@ lw $a1, 4($sp) # restore non-callee save $a1 lw $a2, 8($sp) # restore non-callee save $a2 lw $a3, 12($sp) # restore non-callee save $a3 - addiu $sp, $sp, 64 # strip frame + addiu $sp, $sp, 64 # pop frame .cfi_adjust_cfa_offset -64 .endm @@ -268,79 +268,79 @@ END art_quick_do_long_jump * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at * the bottom of the thread. 
On entry r0 holds Throwable* */ -ENTRY art_quick_deliver_exception_from_code +ENTRY art_quick_deliver_exception GENERATE_GLOBAL_POINTER SETUP_SAVE_ALL_CALLEE_SAVE_FRAME move $a1, rSELF # pass Thread::Current la $t9, artDeliverExceptionFromCode jr $t9 # artDeliverExceptionFromCode(Throwable*, Thread*, $sp) move $a2, $sp # pass $sp -END art_quick_deliver_exception_from_code +END art_quick_deliver_exception /* * Called by managed code to create and deliver a NullPointerException */ .extern artThrowNullPointerExceptionFromCode -ENTRY art_quick_throw_null_pointer_exception_from_code +ENTRY art_quick_throw_null_pointer_exception GENERATE_GLOBAL_POINTER SETUP_SAVE_ALL_CALLEE_SAVE_FRAME move $a0, rSELF # pass Thread::Current la $t9, artThrowNullPointerExceptionFromCode jr $t9 # artThrowNullPointerExceptionFromCode(Thread*, $sp) move $a1, $sp # pass $sp -END art_quick_throw_null_pointer_exception_from_code +END art_quick_throw_null_pointer_exception /* * Called by managed code to create and deliver an ArithmeticException */ .extern artThrowDivZeroFromCode -ENTRY art_quick_throw_div_zero_from_code +ENTRY art_quick_throw_div_zero GENERATE_GLOBAL_POINTER SETUP_SAVE_ALL_CALLEE_SAVE_FRAME move $a0, rSELF # pass Thread::Current la $t9, artThrowDivZeroFromCode jr $t9 # artThrowDivZeroFromCode(Thread*, $sp) move $a1, $sp # pass $sp -END art_quick_throw_div_zero_from_code +END art_quick_throw_div_zero /* * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException */ .extern artThrowArrayBoundsFromCode -ENTRY art_quick_throw_array_bounds_from_code +ENTRY art_quick_throw_array_bounds GENERATE_GLOBAL_POINTER SETUP_SAVE_ALL_CALLEE_SAVE_FRAME move $a2, rSELF # pass Thread::Current la $t9, artThrowArrayBoundsFromCode jr $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*, $sp) move $a3, $sp # pass $sp -END art_quick_throw_array_bounds_from_code +END art_quick_throw_array_bounds /* * Called by managed code to create and deliver a StackOverflowError. 
*/ .extern artThrowStackOverflowFromCode -ENTRY art_quick_throw_stack_overflow_from_code +ENTRY art_quick_throw_stack_overflow GENERATE_GLOBAL_POINTER SETUP_SAVE_ALL_CALLEE_SAVE_FRAME move $a0, rSELF # pass Thread::Current la $t9, artThrowStackOverflowFromCode jr $t9 # artThrowStackOverflowFromCode(Thread*, $sp) move $a1, $sp # pass $sp -END art_quick_throw_stack_overflow_from_code +END art_quick_throw_stack_overflow /* * Called by managed code to create and deliver a NoSuchMethodError. */ .extern artThrowNoSuchMethodFromCode -ENTRY art_quick_throw_no_such_method_from_code +ENTRY art_quick_throw_no_such_method GENERATE_GLOBAL_POINTER SETUP_SAVE_ALL_CALLEE_SAVE_FRAME move $a1, rSELF # pass Thread::Current la $t9, artThrowNoSuchMethodFromCode jr $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*, $sp) move $a2, $sp # pass $sp -END art_quick_throw_no_such_method_from_code +END art_quick_throw_no_such_method /* * All generated callsites for interface invokes and invocation slow paths will load arguments @@ -466,67 +466,67 @@ END art_quick_invoke_stub * failure. */ .extern artHandleFillArrayDataFromCode -ENTRY art_quick_handle_fill_data_from_code +ENTRY art_quick_handle_fill_data GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC move $a2, rSELF # pass Thread::Current jal artHandleFillArrayDataFromCode # (Array*, const DexFile::Payload*, Thread*, $sp) move $a3, $sp # pass $sp RETURN_IF_ZERO -END art_quick_handle_fill_data_from_code +END art_quick_handle_fill_data /* * Entry from managed code that calls artLockObjectFromCode, may block for GC. 
*/ .extern artLockObjectFromCode -ENTRY art_quick_lock_object_from_code +ENTRY art_quick_lock_object GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block move $a1, rSELF # pass Thread::Current jal artLockObjectFromCode # (Object* obj, Thread*, $sp) move $a2, $sp # pass $sp RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN -END art_quick_lock_object_from_code +END art_quick_lock_object /* * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. */ .extern artUnlockObjectFromCode -ENTRY art_quick_unlock_object_from_code +ENTRY art_quick_unlock_object GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC move $a1, rSELF # pass Thread::Current jal artUnlockObjectFromCode # (Object* obj, Thread*, $sp) move $a2, $sp # pass $sp RETURN_IF_ZERO -END art_quick_unlock_object_from_code +END art_quick_unlock_object /* * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. */ .extern artCheckCastFromCode -ENTRY art_quick_check_cast_from_code +ENTRY art_quick_check_cast GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC move $a2, rSELF # pass Thread::Current jal artCheckCastFromCode # (Class* a, Class* b, Thread*, $sp) move $a3, $sp # pass $sp RETURN_IF_ZERO -END art_quick_check_cast_from_code +END art_quick_check_cast /* * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on * failure. 
*/ .extern artCanPutArrayElementFromCode -ENTRY art_quick_can_put_array_element_from_code +ENTRY art_quick_can_put_array_element GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC move $a2, rSELF # pass Thread::Current jal artCanPutArrayElementFromCode # (Object* element, Class* array_class, Thread*, $sp) move $a3, $sp # pass $sp RETURN_IF_ZERO -END art_quick_can_put_array_element_from_code +END art_quick_can_put_array_element /* * Entry from managed code when uninitialized static storage, this stub will run the class @@ -534,7 +534,7 @@ END art_quick_can_put_array_element_from_code * returned. */ .extern artInitializeStaticStorageFromCode -ENTRY art_quick_initialize_static_storage_from_code +ENTRY art_quick_initialize_static_storage GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a2, rSELF # pass Thread::Current @@ -542,13 +542,13 @@ ENTRY art_quick_initialize_static_storage_from_code jal artInitializeStaticStorageFromCode move $a3, $sp # pass $sp RETURN_IF_NONZERO -END art_quick_initialize_static_storage_from_code +END art_quick_initialize_static_storage /* * Entry from managed code when dex cache misses for a type_idx. */ .extern artInitializeTypeFromCode -ENTRY art_quick_initialize_type_from_code +ENTRY art_quick_initialize_type GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a2, rSELF # pass Thread::Current @@ -556,14 +556,14 @@ ENTRY art_quick_initialize_type_from_code jal artInitializeTypeFromCode move $a3, $sp # pass $sp RETURN_IF_NONZERO -END art_quick_initialize_type_from_code +END art_quick_initialize_type /* * Entry from managed code when type_idx needs to be checked for access and dex cache may also * miss. 
*/ .extern artInitializeTypeAndVerifyAccessFromCode -ENTRY art_quick_initialize_type_and_verify_access_from_code +ENTRY art_quick_initialize_type_and_verify_access GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a2, rSELF # pass Thread::Current @@ -571,13 +571,13 @@ ENTRY art_quick_initialize_type_and_verify_access_from_code jal artInitializeTypeAndVerifyAccessFromCode move $a3, $sp # pass $sp RETURN_IF_NONZERO -END art_quick_initialize_type_and_verify_access_from_code +END art_quick_initialize_type_and_verify_access /* * Called by managed code to resolve a static field and load a 32-bit primitive value. */ .extern artGet32StaticFromCode -ENTRY art_quick_get32_static_from_code +ENTRY art_quick_get32_static GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a1, 64($sp) # pass referrer's Method* @@ -585,13 +585,13 @@ ENTRY art_quick_get32_static_from_code jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) move $a3, $sp # pass $sp RETURN_IF_NO_EXCEPTION -END art_quick_get32_static_from_code +END art_quick_get32_static /* * Called by managed code to resolve a static field and load a 64-bit primitive value. */ .extern artGet64StaticFromCode -ENTRY art_quick_get64_static_from_code +ENTRY art_quick_get64_static GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a1, 64($sp) # pass referrer's Method* @@ -599,13 +599,13 @@ ENTRY art_quick_get64_static_from_code jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) move $a3, $sp # pass $sp RETURN_IF_NO_EXCEPTION -END art_quick_get64_static_from_code +END art_quick_get64_static /* * Called by managed code to resolve a static field and load an object reference. 
*/ .extern artGetObjStaticFromCode -ENTRY art_quick_get_obj_static_from_code +ENTRY art_quick_get_obj_static GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a1, 64($sp) # pass referrer's Method* @@ -613,13 +613,13 @@ ENTRY art_quick_get_obj_static_from_code jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*, $sp) move $a3, $sp # pass $sp RETURN_IF_NO_EXCEPTION -END art_quick_get_obj_static_from_code +END art_quick_get_obj_static /* * Called by managed code to resolve an instance field and load a 32-bit primitive value. */ .extern artGet32InstanceFromCode -ENTRY art_quick_get32_instance_from_code +ENTRY art_quick_get32_instance GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a2, 64($sp) # pass referrer's Method* @@ -627,13 +627,13 @@ ENTRY art_quick_get32_instance_from_code jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) sw $sp, 16($sp) # pass $sp RETURN_IF_NO_EXCEPTION -END art_quick_get32_instance_from_code +END art_quick_get32_instance /* * Called by managed code to resolve an instance field and load a 64-bit primitive value. */ .extern artGet64InstanceFromCode -ENTRY art_quick_get64_instance_from_code +ENTRY art_quick_get64_instance GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a2, 64($sp) # pass referrer's Method* @@ -641,13 +641,13 @@ ENTRY art_quick_get64_instance_from_code jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) sw $sp, 16($sp) # pass $sp RETURN_IF_NO_EXCEPTION -END art_quick_get64_instance_from_code +END art_quick_get64_instance /* * Called by managed code to resolve an instance field and load an object reference. 
*/ .extern artGetObjInstanceFromCode -ENTRY art_quick_get_obj_instance_from_code +ENTRY art_quick_get_obj_instance GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a2, 64($sp) # pass referrer's Method* @@ -655,13 +655,13 @@ ENTRY art_quick_get_obj_instance_from_code jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp) sw $sp, 16($sp) # pass $sp RETURN_IF_NO_EXCEPTION -END art_quick_get_obj_instance_from_code +END art_quick_get_obj_instance /* * Called by managed code to resolve a static field and store a 32-bit primitive value. */ .extern artSet32StaticFromCode -ENTRY art_quick_set32_static_from_code +ENTRY art_quick_set32_static GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a2, 64($sp) # pass referrer's Method* @@ -669,13 +669,13 @@ ENTRY art_quick_set32_static_from_code jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*, $sp) sw $sp, 16($sp) # pass $sp RETURN_IF_ZERO -END art_quick_set32_static_from_code +END art_quick_set32_static /* * Called by managed code to resolve a static field and store a 64-bit primitive value. */ .extern artSet32StaticFromCode -ENTRY art_quick_set64_static_from_code +ENTRY art_quick_set64_static GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a1, 64($sp) # pass referrer's Method* @@ -683,13 +683,13 @@ ENTRY art_quick_set64_static_from_code jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*, $sp) sw $sp, 20($sp) # pass $sp RETURN_IF_ZERO -END art_quick_set64_static_from_code +END art_quick_set64_static /* * Called by managed code to resolve a static field and store an object reference. 
*/ .extern artSetObjStaticFromCode -ENTRY art_quick_set_obj_static_from_code +ENTRY art_quick_set_obj_static GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a2, 64($sp) # pass referrer's Method* @@ -697,13 +697,13 @@ ENTRY art_quick_set_obj_static_from_code jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*, $sp) sw $sp, 16($sp) # pass $sp RETURN_IF_ZERO -END art_quick_set_obj_static_from_code +END art_quick_set_obj_static /* * Called by managed code to resolve an instance field and store a 32-bit primitive value. */ .extern artSet32InstanceFromCode -ENTRY art_quick_set32_instance_from_code +ENTRY art_quick_set32_instance GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a3, 64($sp) # pass referrer's Method* @@ -711,26 +711,26 @@ ENTRY art_quick_set32_instance_from_code jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp) sw $sp, 20($sp) # pass $sp RETURN_IF_ZERO -END art_quick_set32_instance_from_code +END art_quick_set32_instance /* * Called by managed code to resolve an instance field and store a 64-bit primitive value. */ .extern artSet32InstanceFromCode -ENTRY art_quick_set64_instance_from_code +ENTRY art_quick_set64_instance GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC sw rSELF, 16($sp) # pass Thread::Current jal artSet64InstanceFromCode # (field_idx, Object*, new_val, Thread*, $sp) sw $sp, 20($sp) # pass $sp RETURN_IF_ZERO -END art_quick_set64_instance_from_code +END art_quick_set64_instance /* * Called by managed code to resolve an instance field and store an object reference. 
*/ .extern artSetObjInstanceFromCode -ENTRY art_quick_set_obj_instance_from_code +ENTRY art_quick_set_obj_instance GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC lw $a3, 64($sp) # pass referrer's Method* @@ -738,7 +738,7 @@ ENTRY art_quick_set_obj_instance_from_code jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*, $sp) sw $sp, 20($sp) # pass $sp RETURN_IF_ZERO -END art_quick_set_obj_instance_from_code +END art_quick_set_obj_instance /* * Entry from managed code to resolve a string, this stub will allocate a String and deliver an @@ -747,7 +747,7 @@ END art_quick_set_obj_instance_from_code * performed. */ .extern artResolveStringFromCode -ENTRY art_quick_resolve_string_from_code +ENTRY art_quick_resolve_string GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a2, rSELF # pass Thread::Current @@ -755,40 +755,40 @@ ENTRY art_quick_resolve_string_from_code jal artResolveStringFromCode move $a3, $sp # pass $sp RETURN_IF_NONZERO -END art_quick_resolve_string_from_code +END art_quick_resolve_string /* * Called by managed code to allocate an object. */ .extern artAllocObjectFromCode -ENTRY art_quick_alloc_object_from_code +ENTRY art_quick_alloc_object GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a2, rSELF # pass Thread::Current jal artAllocObjectFromCode # (uint32_t type_idx, Method* method, Thread*, $sp) move $a3, $sp # pass $sp RETURN_IF_NONZERO -END art_quick_alloc_object_from_code +END art_quick_alloc_object /* * Called by managed code to allocate an object when the caller doesn't know whether it has * access to the created type. 
*/ .extern artAllocObjectFromCodeWithAccessCheck -ENTRY art_quick_alloc_object_from_code_with_access_check +ENTRY art_quick_alloc_object_with_access_check GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a2, rSELF # pass Thread::Current jal artAllocObjectFromCodeWithAccessCheck # (uint32_t type_idx, Method* method, Thread*, $sp) move $a3, $sp # pass $sp RETURN_IF_NONZERO -END art_quick_alloc_object_from_code_with_access_check +END art_quick_alloc_object_with_access_check /* * Called by managed code to allocate an array. */ .extern artAllocArrayFromCode -ENTRY art_quick_alloc_array_from_code +ENTRY art_quick_alloc_array GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a3, rSELF # pass Thread::Current @@ -796,14 +796,14 @@ ENTRY art_quick_alloc_array_from_code jal artAllocArrayFromCode sw $sp, 16($sp) # pass $sp RETURN_IF_NONZERO -END art_quick_alloc_array_from_code +END art_quick_alloc_array /* * Called by managed code to allocate an array when the caller doesn't know whether it has * access to the created type. */ .extern artAllocArrayFromCodeWithAccessCheck -ENTRY art_quick_alloc_array_from_code_with_access_check +ENTRY art_quick_alloc_array_with_access_check GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a3, rSELF # pass Thread::Current @@ -811,13 +811,13 @@ ENTRY art_quick_alloc_array_from_code_with_access_check jal artAllocArrayFromCodeWithAccessCheck sw $sp, 16($sp) # pass $sp RETURN_IF_NONZERO -END art_quick_alloc_array_from_code_with_access_check +END art_quick_alloc_array_with_access_check /* * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. 
*/ .extern artCheckAndAllocArrayFromCode -ENTRY art_quick_check_and_alloc_array_from_code +ENTRY art_quick_check_and_alloc_array GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a3, rSELF # pass Thread::Current @@ -825,13 +825,13 @@ ENTRY art_quick_check_and_alloc_array_from_code jal artCheckAndAllocArrayFromCode sw $sp, 16($sp) # pass $sp RETURN_IF_NONZERO -END art_quick_check_and_alloc_array_from_code +END art_quick_check_and_alloc_array /* * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY. */ .extern artCheckAndAllocArrayFromCodeWithAccessCheck -ENTRY art_quick_check_and_alloc_array_from_code_with_access_check +ENTRY art_quick_check_and_alloc_array_with_access_check GENERATE_GLOBAL_POINTER SETUP_REF_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC move $a3, rSELF # pass Thread::Current @@ -839,7 +839,7 @@ ENTRY art_quick_check_and_alloc_array_from_code_with_access_check jal artCheckAndAllocArrayFromCodeWithAccessCheck sw $sp, 16($sp) # pass $sp RETURN_IF_NONZERO -END art_quick_check_and_alloc_array_from_code_with_access_check +END art_quick_check_and_alloc_array_with_access_check /* * Called by managed code when the value in rSUSPEND has been decremented to 0. 
@@ -884,13 +884,33 @@ ENTRY art_quick_proxy_invoke_handler DELIVER_PENDING_EXCEPTION END art_quick_proxy_invoke_handler - .extern artInterpreterEntry -ENTRY art_quick_interpreter_entry + .extern artQuickResolutionTrampoline +ENTRY art_quick_resolution_trampoline + GENERATE_GLOBAL_POINTER + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + move $a2, rSELF # pass Thread::Current + jal artQuickProxyInvokeHandler # (Method* called, receiver, Thread*, SP) + move $a3, $sp # pass $sp + lw $gp, 52($sp) # restore $gp + lw $ra, 60($sp) # restore $ra + beqz $v0, 1f + lw $a0, 0($sp) # load resolved method to $a0 + lw $a1, 4($sp) # restore non-callee save $a1 + lw $a2, 8($sp) # restore non-callee save $a2 + lw $a3, 12($sp) # restore non-callee save $a3 + jr $v0 # tail call to method +1: + addiu $sp, $sp, 64 # pop frame + .cfi_adjust_cfa_offset -64 + DELIVER_PENDING_EXCEPTION +END art_quick_resolution_trampoline + + .extern artQuickToInterpreterBridge +ENTRY art_quick_to_interpreter_bridge GENERATE_GLOBAL_POINTER SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - sw $a0, 0($sp) # place proxy method at bottom of frame move $a1, rSELF # pass Thread::Current - jal artInterpreterEntry # (Method* method, Thread*, SP) + jal artQuickToInterpreterBridge # (Method* method, Thread*, SP) move $a2, $sp # pass $sp lw $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_ lw $gp, 52($sp) # restore $gp @@ -902,14 +922,14 @@ ENTRY art_quick_interpreter_entry nop 1: DELIVER_PENDING_EXCEPTION -END art_quick_interpreter_entry +END art_quick_to_interpreter_bridge /* * Routine that intercepts method calls and returns. 
*/ .extern artInstrumentationMethodEntryFromCode .extern artInstrumentationMethodExitFromCode -ENTRY art_quick_instrumentation_entry_from_code +ENTRY art_quick_instrumentation_entry GENERATE_GLOBAL_POINTER SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME move $t0, $sp # remember bottom of caller's frame @@ -927,10 +947,10 @@ ENTRY art_quick_instrumentation_entry_from_code RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME jalr $t9 # call method nop -END art_quick_instrumentation_entry_from_code +END art_quick_instrumentation_entry /* intentional fallthrough */ - .global art_quick_instrumentation_exit_from_code -art_quick_instrumentation_exit_from_code: + .global art_quick_instrumentation_exit +art_quick_instrumentation_exit: .cfi_startproc addiu $t9, $ra, 4 # put current address into $t9 to rebuild $gp GENERATE_GLOBAL_POINTER @@ -960,7 +980,7 @@ art_quick_instrumentation_exit_from_code: jr $t0 # return addiu $sp, $sp, 112 # 48 bytes of args + 64 bytes of callee save frame .cfi_adjust_cfa_offset -112 -END art_quick_instrumentation_exit_from_code +END art_quick_instrumentation_exit /* * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization @@ -977,18 +997,6 @@ ENTRY art_quick_deoptimize move $a1, $sp # pass $sp END art_quick_deoptimize - /* - * Quick abstract method error stub. $a0 contains method* on entry. - */ -ENTRY art_quick_abstract_method_error_stub - GENERATE_GLOBAL_POINTER - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - move $a1, $s1 # pass Thread::Current - la $t9, artThrowAbstractMethodErrorFromCode - jr $t9 # (Method*, Thread*, SP) - move $a2, $sp # pass SP -END art_quick_abstract_method_error_stub - /* * Long integer shift. 
This is different from the generic 32/64-bit * binary operations because vAA/vBB are 64-bit but vCC (the shift diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S index 7e6dce9c6a..7a3fdfad30 100644 --- a/runtime/arch/x86/asm_support_x86.S +++ b/runtime/arch/x86/asm_support_x86.S @@ -88,4 +88,16 @@ MACRO1(POP, reg) .cfi_restore REG_VAR(reg,0) END_MACRO +MACRO1(UNIMPLEMENTED,name) + .type VAR(name, 0), @function + .globl VAR(name, 0) + ALIGN_FUNCTION_ENTRY +VAR(name, 0): + .cfi_startproc + int3 + int3 + .cfi_endproc + .size \name, .-\name +END_MACRO + #endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc index d47dfef047..91526741cf 100644 --- a/runtime/arch/x86/entrypoints_init_x86.cc +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -20,69 +20,73 @@ namespace art { +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Portable entrypoints. +extern "C" void art_portable_resolution_trampoline(mirror::AbstractMethod*); +extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*); + // Alloc entrypoints. 
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t); // Cast entrypoints. -extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass, +extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass, const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); +extern "C" void art_quick_can_put_array_element(void*, void*); +extern "C" void art_quick_check_cast(void*, void*); // DexCache entrypoints. 
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); +extern "C" void* art_quick_initialize_static_storage(uint32_t, void*); +extern "C" void* art_quick_initialize_type(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*); +extern "C" void* art_quick_resolve_string(void*, uint32_t); // Field entrypoints. -extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); +extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static(uint32_t, int32_t); +extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance(uint32_t, void*); +extern "C" int32_t art_quick_get32_static(uint32_t); +extern "C" int64_t 
art_quick_get64_instance(uint32_t, void*); +extern "C" int64_t art_quick_get64_static(uint32_t); +extern "C" void* art_quick_get_obj_instance(uint32_t, void*); +extern "C" void* art_quick_get_obj_static(uint32_t); // FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); +extern "C" void art_quick_handle_fill_data(void*, void*); // Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); +extern "C" void art_quick_lock_object(void*); +extern "C" void art_quick_unlock_object(void*); // Math entrypoints. -extern "C" double art_quick_fmod_from_code(double, double); -extern "C" float art_quick_fmodf_from_code(float, float); -extern "C" double art_quick_l2d_from_code(int64_t); -extern "C" float art_quick_l2f_from_code(int64_t); -extern "C" int64_t art_quick_d2l_from_code(double); -extern "C" int64_t art_quick_f2l_from_code(float); -extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t); -extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t); -extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t); -extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t); -extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t); -extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t); -extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t); - -// Interpreter entrypoints. 
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); +extern "C" double art_quick_fmod(double, double); +extern "C" float art_quick_fmodf(float, float); +extern "C" double art_quick_l2d(int64_t); +extern "C" float art_quick_l2f(int64_t); +extern "C" int64_t art_quick_d2l(double); +extern "C" int64_t art_quick_f2l(float); +extern "C" int32_t art_quick_idivmod(int32_t, int32_t); +extern "C" int64_t art_quick_ldiv(int64_t, int64_t); +extern "C" int64_t art_quick_ldivmod(int64_t, int64_t); +extern "C" int64_t art_quick_lmul(int64_t, int64_t); +extern "C" uint64_t art_quick_lshl(uint64_t, uint32_t); +extern "C" uint64_t art_quick_lshr(uint64_t, uint32_t); +extern "C" uint64_t art_quick_lushr(uint64_t, uint32_t); // Intrinsic entrypoints. extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t); @@ -91,12 +95,8 @@ extern "C" int32_t art_quick_string_compareto(void*, void*); extern "C" void* art_quick_memcpy(void*, const void*, size_t); // Invoke entrypoints. 
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_resolution_trampoline(mirror::AbstractMethod*); +extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*); extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); @@ -109,49 +109,61 @@ extern void CheckSuspendFromCode(Thread* thread); extern "C" void art_quick_test_suspend(); // Throw entrypoints. -extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { +extern "C" void art_quick_deliver_exception(void*); +extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero(); +extern "C" void art_quick_throw_no_such_method(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception(); +extern "C" void art_quick_throw_stack_overflow(void*); + +void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, + PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) { + // Interpreter + ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge; + 
ipoints->pInterpreterToCompiledCodeBridge = artInterperterToCompiledCodeBridge; + + // JNI + jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; + + // Portable + ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline; + ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge; + // Alloc - qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; - qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; - qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; + qpoints->pAllocArray = art_quick_alloc_array; + qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check; + qpoints->pAllocObject = art_quick_alloc_object; + qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check; + qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array; + qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check; // Cast - qpoints->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code; - qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + qpoints->pInstanceofNonTrivial = art_quick_is_assignable; + qpoints->pCanPutArrayElement = art_quick_can_put_array_element; + qpoints->pCheckCast = art_quick_check_cast; // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - 
qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; + qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; + qpoints->pInitializeType = art_quick_initialize_type; + qpoints->pResolveString = art_quick_resolve_string; // Field - qpoints->pSet32Instance = art_quick_set32_instance_from_code; - qpoints->pSet32Static = art_quick_set32_static_from_code; - qpoints->pSet64Instance = art_quick_set64_instance_from_code; - qpoints->pSet64Static = art_quick_set64_static_from_code; - qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; - qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; - qpoints->pGet32Instance = art_quick_get32_instance_from_code; - qpoints->pGet64Instance = art_quick_get64_instance_from_code; - qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; - qpoints->pGet32Static = art_quick_get32_static_from_code; - qpoints->pGet64Static = art_quick_get64_static_from_code; - qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + qpoints->pSet32Instance = art_quick_set32_instance; + qpoints->pSet32Static = art_quick_set32_static; + qpoints->pSet64Instance = art_quick_set64_instance; + qpoints->pSet64Static = art_quick_set64_static; + qpoints->pSetObjInstance = art_quick_set_obj_instance; + qpoints->pSetObjStatic = art_quick_set_obj_static; + qpoints->pGet32Instance = art_quick_get32_instance; + qpoints->pGet64Instance = art_quick_get64_instance; + qpoints->pGetObjInstance = art_quick_get_obj_instance; + qpoints->pGet32Static = art_quick_get32_static; + qpoints->pGet64Static = art_quick_get64_static; + qpoints->pGetObjStatic = art_quick_get_obj_static; // FillArray - qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + qpoints->pHandleFillArrayData = art_quick_handle_fill_data; // JNI qpoints->pJniMethodStart = JniMethodStart; @@ -162,33 +174,29 @@ void 
InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; // Locks - qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; - qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + qpoints->pLockObject = art_quick_lock_object; + qpoints->pUnlockObject = art_quick_unlock_object; // Math // points->pCmpgDouble = NULL; // Not needed on x86. // points->pCmpgFloat = NULL; // Not needed on x86. // points->pCmplDouble = NULL; // Not needed on x86. // points->pCmplFloat = NULL; // Not needed on x86. - qpoints->pFmod = art_quick_fmod_from_code; - qpoints->pL2d = art_quick_l2d_from_code; - qpoints->pFmodf = art_quick_fmodf_from_code; - qpoints->pL2f = art_quick_l2f_from_code; + qpoints->pFmod = art_quick_fmod; + qpoints->pL2d = art_quick_l2d; + qpoints->pFmodf = art_quick_fmodf; + qpoints->pL2f = art_quick_l2f; // points->pD2iz = NULL; // Not needed on x86. // points->pF2iz = NULL; // Not needed on x86. 
- qpoints->pIdivmod = art_quick_idivmod_from_code; - qpoints->pD2l = art_quick_d2l_from_code; - qpoints->pF2l = art_quick_f2l_from_code; - qpoints->pLdiv = art_quick_ldiv_from_code; - qpoints->pLdivmod = art_quick_ldivmod_from_code; - qpoints->pLmul = art_quick_lmul_from_code; - qpoints->pShlLong = art_quick_lshl_from_code; - qpoints->pShrLong = art_quick_lshr_from_code; - qpoints->pUshrLong = art_quick_lushr_from_code; - - // Interpreter - qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; + qpoints->pIdivmod = art_quick_idivmod; + qpoints->pD2l = art_quick_d2l; + qpoints->pF2l = art_quick_f2l; + qpoints->pLdiv = art_quick_ldiv; + qpoints->pLdivmod = art_quick_ldivmod; + qpoints->pLmul = art_quick_lmul; + qpoints->pShlLong = art_quick_lshl; + qpoints->pShrLong = art_quick_lshr; + qpoints->pUshrLong = art_quick_lushr; // Intrinsics qpoints->pIndexOf = art_quick_indexof; @@ -197,7 +205,8 @@ void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { qpoints->pMemcpy = art_quick_memcpy; // Invocation - qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; + qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; @@ -206,19 +215,16 @@ void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; // Thread - qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; - qpoints->pTestSuspendFromCode = art_quick_test_suspend; + qpoints->pCheckSuspend = 
CheckSuspendFromCode; + qpoints->pTestSuspend = art_quick_test_suspend; // Throws - qpoints->pDeliverException = art_quick_deliver_exception_from_code; - qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; - qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; - - // Portable - ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; + qpoints->pDeliverException = art_quick_deliver_exception; + qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; + qpoints->pThrowDivZero = art_quick_throw_div_zero; + qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; + qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; + qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; }; } // namespace art diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S index a0fca6cee3..0313d4b882 100644 --- a/runtime/arch/x86/portable_entrypoints_x86.S +++ b/runtime/arch/x86/portable_entrypoints_x86.S @@ -90,20 +90,5 @@ DEFINE_FUNCTION art_portable_proxy_invoke_handler ret END_FUNCTION art_portable_proxy_invoke_handler - /* - * Portable abstract method error stub. method* is at %esp + 4 on entry. - */ -DEFINE_FUNCTION art_portable_abstract_method_error_stub - PUSH ebp - movl %esp, %ebp // Remember SP. - .cfi_def_cfa_register ebp - subl LITERAL(12), %esp // Align stack. - PUSH esp // Pass sp (not used). - pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). - pushl 8(%ebp) // Pass Method*. - call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP) - leave // Restore the stack and %ebp. 
- .cfi_def_cfa esp, 4 - .cfi_restore ebp - ret // Return to caller to handle pending exception. -END_FUNCTION art_portable_abstract_method_error_stub +UNIMPLEMENTED art_portable_resolution_trampoline +UNIMPLEMENTED art_portable_to_interpreter_bridge diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 89ea71a902..dbf552faaf 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -135,34 +135,34 @@ END_MACRO /* * Called by managed code to create and deliver a NullPointerException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode /* * Called by managed code to create and deliver an ArithmeticException. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode /* * Called by managed code to create and deliver a StackOverflowError. */ -NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode +NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode /* * Called by managed code, saves callee saves and then calls artThrowException * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception. */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode +ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode /* * Called by managed code to create and deliver a NoSuchMethodError. */ -ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode +ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode /* * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. 
Arg1 holds * index, arg2 holds limit. */ -TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode +TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode /* * All generated callsites for interface invokes and invocation slow paths will load arguments @@ -382,24 +382,24 @@ MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION) DELIVER_PENDING_EXCEPTION END_MACRO -TWO_ARG_DOWNCALL art_quick_alloc_object_from_code, artAllocObjectFromCode, RETURN_IF_EAX_NOT_ZERO -TWO_ARG_DOWNCALL art_quick_alloc_object_from_code_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO -THREE_ARG_DOWNCALL art_quick_alloc_array_from_code, artAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO -THREE_ARG_DOWNCALL art_quick_alloc_array_from_code_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO -THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO -THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO +TWO_ARG_DOWNCALL art_quick_alloc_object, artAllocObjectFromCode, RETURN_IF_EAX_NOT_ZERO +TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO +THREE_ARG_DOWNCALL art_quick_alloc_array, artAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO +THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO +THREE_ARG_DOWNCALL art_quick_check_and_alloc_array, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO +THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO -TWO_ARG_DOWNCALL art_quick_resolve_string_from_code, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO -TWO_ARG_DOWNCALL art_quick_initialize_static_storage_from_code, 
artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO -TWO_ARG_DOWNCALL art_quick_initialize_type_from_code, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO -TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access_from_code, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO +TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO +TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO +TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO +TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO -ONE_ARG_DOWNCALL art_quick_lock_object_from_code, artLockObjectFromCode, ret -ONE_ARG_DOWNCALL art_quick_unlock_object_from_code, artUnlockObjectFromCode, RETURN_IF_EAX_ZERO +ONE_ARG_DOWNCALL art_quick_lock_object, artLockObjectFromCode, ret +ONE_ARG_DOWNCALL art_quick_unlock_object, artUnlockObjectFromCode, RETURN_IF_EAX_ZERO -TWO_ARG_DOWNCALL art_quick_handle_fill_data_from_code, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO +TWO_ARG_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO -DEFINE_FUNCTION art_quick_is_assignable_from_code +DEFINE_FUNCTION art_quick_is_assignable PUSH eax // alignment padding PUSH ecx // pass arg2 PUSH eax // pass arg1 @@ -407,7 +407,7 @@ DEFINE_FUNCTION art_quick_is_assignable_from_code addl LITERAL(12), %esp // pop arguments .cfi_adjust_cfa_offset -12 ret -END_FUNCTION art_quick_is_assignable_from_code +END_FUNCTION art_quick_is_assignable DEFINE_FUNCTION art_quick_memcpy PUSH edx // pass arg3 @@ -419,12 +419,12 @@ DEFINE_FUNCTION art_quick_memcpy ret END_FUNCTION art_quick_memcpy -TWO_ARG_DOWNCALL art_quick_check_cast_from_code, artCheckCastFromCode, RETURN_IF_EAX_ZERO -TWO_ARG_DOWNCALL art_quick_can_put_array_element_from_code, artCanPutArrayElementFromCode, 
RETURN_IF_EAX_ZERO +TWO_ARG_DOWNCALL art_quick_check_cast, artCheckCastFromCode, RETURN_IF_EAX_ZERO +TWO_ARG_DOWNCALL art_quick_can_put_array_element, artCanPutArrayElementFromCode, RETURN_IF_EAX_ZERO NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret -DEFINE_FUNCTION art_quick_fmod_from_code +DEFINE_FUNCTION art_quick_fmod subl LITERAL(12), %esp // alignment padding .cfi_adjust_cfa_offset 12 PUSH ebx // pass arg4 b.hi @@ -437,9 +437,9 @@ DEFINE_FUNCTION art_quick_fmod_from_code addl LITERAL(28), %esp // pop arguments .cfi_adjust_cfa_offset -28 ret -END_FUNCTION art_quick_fmod_from_code +END_FUNCTION art_quick_fmod -DEFINE_FUNCTION art_quick_fmodf_from_code +DEFINE_FUNCTION art_quick_fmodf PUSH eax // alignment padding PUSH ecx // pass arg2 b PUSH eax // pass arg1 a @@ -449,9 +449,9 @@ DEFINE_FUNCTION art_quick_fmodf_from_code addl LITERAL(12), %esp // pop arguments .cfi_adjust_cfa_offset -12 ret -END_FUNCTION art_quick_fmodf_from_code +END_FUNCTION art_quick_fmodf -DEFINE_FUNCTION art_quick_l2d_from_code +DEFINE_FUNCTION art_quick_l2d PUSH ecx // push arg2 a.hi PUSH eax // push arg1 a.lo fildll (%esp) // load as integer and push into st0 @@ -460,9 +460,9 @@ DEFINE_FUNCTION art_quick_l2d_from_code addl LITERAL(8), %esp // pop arguments .cfi_adjust_cfa_offset -8 ret -END_FUNCTION art_quick_l2d_from_code +END_FUNCTION art_quick_l2d -DEFINE_FUNCTION art_quick_l2f_from_code +DEFINE_FUNCTION art_quick_l2f PUSH ecx // push arg2 a.hi PUSH eax // push arg1 a.lo fildll (%esp) // load as integer and push into st0 @@ -471,9 +471,9 @@ DEFINE_FUNCTION art_quick_l2f_from_code addl LITERAL(8), %esp // pop argument .cfi_adjust_cfa_offset -8 ret -END_FUNCTION art_quick_l2f_from_code +END_FUNCTION art_quick_l2f -DEFINE_FUNCTION art_quick_d2l_from_code +DEFINE_FUNCTION art_quick_d2l PUSH eax // alignment padding PUSH ecx // pass arg2 a.hi PUSH eax // pass arg1 a.lo @@ -481,9 +481,9 @@ DEFINE_FUNCTION art_quick_d2l_from_code addl LITERAL(12), %esp // pop arguments 
.cfi_adjust_cfa_offset -12 ret -END_FUNCTION art_quick_d2l_from_code +END_FUNCTION art_quick_d2l -DEFINE_FUNCTION art_quick_f2l_from_code +DEFINE_FUNCTION art_quick_f2l subl LITERAL(8), %esp // alignment padding .cfi_adjust_cfa_offset 8 PUSH eax // pass arg1 a @@ -491,9 +491,9 @@ DEFINE_FUNCTION art_quick_f2l_from_code addl LITERAL(12), %esp // pop arguments .cfi_adjust_cfa_offset -12 ret -END_FUNCTION art_quick_f2l_from_code +END_FUNCTION art_quick_f2l -DEFINE_FUNCTION art_quick_idivmod_from_code +DEFINE_FUNCTION art_quick_idivmod cmpl LITERAL(0x80000000), %eax je check_arg2 // special case args_ok: @@ -505,9 +505,9 @@ check_arg2: jne args_ok xorl %edx, %edx ret // eax already holds min int -END_FUNCTION art_quick_idivmod_from_code +END_FUNCTION art_quick_idivmod -DEFINE_FUNCTION art_quick_ldiv_from_code +DEFINE_FUNCTION art_quick_ldiv subl LITERAL(12), %esp // alignment padding .cfi_adjust_cfa_offset 12 PUSH ebx // pass arg4 b.hi @@ -518,9 +518,9 @@ DEFINE_FUNCTION art_quick_ldiv_from_code addl LITERAL(28), %esp // pop arguments .cfi_adjust_cfa_offset -28 ret -END_FUNCTION art_quick_ldiv_from_code +END_FUNCTION art_quick_ldiv -DEFINE_FUNCTION art_quick_ldivmod_from_code +DEFINE_FUNCTION art_quick_ldivmod subl LITERAL(12), %esp // alignment padding .cfi_adjust_cfa_offset 12 PUSH ebx // pass arg4 b.hi @@ -531,18 +531,18 @@ DEFINE_FUNCTION art_quick_ldivmod_from_code addl LITERAL(28), %esp // pop arguments .cfi_adjust_cfa_offset -28 ret -END_FUNCTION art_quick_ldivmod_from_code +END_FUNCTION art_quick_ldivmod -DEFINE_FUNCTION art_quick_lmul_from_code +DEFINE_FUNCTION art_quick_lmul imul %eax, %ebx // ebx = a.lo(eax) * b.hi(ebx) imul %edx, %ecx // ecx = b.lo(edx) * a.hi(ecx) mul %edx // edx:eax = a.lo(eax) * b.lo(edx) add %ebx, %ecx add %ecx, %edx // edx += (a.lo * b.hi) + (b.lo * a.hi) ret -END_FUNCTION art_quick_lmul_from_code +END_FUNCTION art_quick_lmul -DEFINE_FUNCTION art_quick_lshl_from_code +DEFINE_FUNCTION art_quick_lshl // ecx:eax << edx xchg %edx, %ecx 
shld %cl,%eax,%edx @@ -553,9 +553,9 @@ DEFINE_FUNCTION art_quick_lshl_from_code xor %eax, %eax 1: ret -END_FUNCTION art_quick_lshl_from_code +END_FUNCTION art_quick_lshl -DEFINE_FUNCTION art_quick_lshr_from_code +DEFINE_FUNCTION art_quick_lshr // ecx:eax >> edx xchg %edx, %ecx shrd %cl,%edx,%eax @@ -566,9 +566,9 @@ DEFINE_FUNCTION art_quick_lshr_from_code sar LITERAL(31), %edx 1: ret -END_FUNCTION art_quick_lshr_from_code +END_FUNCTION art_quick_lshr -DEFINE_FUNCTION art_quick_lushr_from_code +DEFINE_FUNCTION art_quick_lushr // ecx:eax >>> edx xchg %edx, %ecx shrd %cl,%edx,%eax @@ -579,9 +579,9 @@ DEFINE_FUNCTION art_quick_lushr_from_code xor %edx, %edx 1: ret -END_FUNCTION art_quick_lushr_from_code +END_FUNCTION art_quick_lushr -DEFINE_FUNCTION art_quick_set32_instance_from_code +DEFINE_FUNCTION art_quick_set32_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP subl LITERAL(8), %esp // alignment padding @@ -599,9 +599,9 @@ DEFINE_FUNCTION art_quick_set32_instance_from_code .cfi_adjust_cfa_offset -32 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set32_instance_from_code +END_FUNCTION art_quick_set32_instance -DEFINE_FUNCTION art_quick_set64_instance_from_code +DEFINE_FUNCTION art_quick_set64_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC subl LITERAL(8), %esp // alignment padding .cfi_adjust_cfa_offset 8 @@ -618,9 +618,9 @@ DEFINE_FUNCTION art_quick_set64_instance_from_code .cfi_adjust_cfa_offset -32 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set64_instance_from_code +END_FUNCTION art_quick_set64_instance -DEFINE_FUNCTION art_quick_set_obj_instance_from_code +DEFINE_FUNCTION art_quick_set_obj_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for 
GC mov %esp, %ebx // remember SP subl LITERAL(8), %esp // alignment padding @@ -638,9 +638,9 @@ DEFINE_FUNCTION art_quick_set_obj_instance_from_code .cfi_adjust_cfa_offset -32 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set_obj_instance_from_code +END_FUNCTION art_quick_set_obj_instance -DEFINE_FUNCTION art_quick_get32_instance_from_code +DEFINE_FUNCTION art_quick_get32_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP mov 32(%esp), %edx // get referrer @@ -657,9 +657,9 @@ DEFINE_FUNCTION art_quick_get32_instance_from_code .cfi_adjust_cfa_offset -32 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get32_instance_from_code +END_FUNCTION art_quick_get32_instance -DEFINE_FUNCTION art_quick_get64_instance_from_code +DEFINE_FUNCTION art_quick_get64_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP mov 32(%esp), %edx // get referrer @@ -676,9 +676,9 @@ DEFINE_FUNCTION art_quick_get64_instance_from_code .cfi_adjust_cfa_offset -32 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get64_instance_from_code +END_FUNCTION art_quick_get64_instance -DEFINE_FUNCTION art_quick_get_obj_instance_from_code +DEFINE_FUNCTION art_quick_get_obj_instance SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP mov 32(%esp), %edx // get referrer @@ -695,9 +695,9 @@ DEFINE_FUNCTION art_quick_get_obj_instance_from_code .cfi_adjust_cfa_offset -32 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION 
art_quick_get_obj_instance_from_code +END_FUNCTION art_quick_get_obj_instance -DEFINE_FUNCTION art_quick_set32_static_from_code +DEFINE_FUNCTION art_quick_set32_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP mov 32(%esp), %edx // get referrer @@ -714,9 +714,9 @@ DEFINE_FUNCTION art_quick_set32_static_from_code .cfi_adjust_cfa_offset -32 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set32_static_from_code +END_FUNCTION art_quick_set32_static -DEFINE_FUNCTION art_quick_set64_static_from_code +DEFINE_FUNCTION art_quick_set64_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP subl LITERAL(8), %esp // alignment padding @@ -734,9 +734,9 @@ DEFINE_FUNCTION art_quick_set64_static_from_code .cfi_adjust_cfa_offset -32 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set64_static_from_code +END_FUNCTION art_quick_set64_static -DEFINE_FUNCTION art_quick_set_obj_static_from_code +DEFINE_FUNCTION art_quick_set_obj_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %ebx // remember SP mov 32(%esp), %edx // get referrer @@ -752,9 +752,9 @@ DEFINE_FUNCTION art_quick_set_obj_static_from_code addl LITERAL(32), %esp // pop arguments RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_IF_EAX_ZERO // return or deliver exception -END_FUNCTION art_quick_set_obj_static_from_code +END_FUNCTION art_quick_set_obj_static -DEFINE_FUNCTION art_quick_get32_static_from_code +DEFINE_FUNCTION art_quick_get32_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %edx // remember SP mov 32(%esp), %ecx // get referrer @@ -768,9 +768,9 @@ DEFINE_FUNCTION art_quick_get32_static_from_code 
.cfi_adjust_cfa_offset -16 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get32_static_from_code +END_FUNCTION art_quick_get32_static -DEFINE_FUNCTION art_quick_get64_static_from_code +DEFINE_FUNCTION art_quick_get64_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %edx // remember SP mov 32(%esp), %ecx // get referrer @@ -784,9 +784,9 @@ DEFINE_FUNCTION art_quick_get64_static_from_code .cfi_adjust_cfa_offset -16 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get64_static_from_code +END_FUNCTION art_quick_get64_static -DEFINE_FUNCTION art_quick_get_obj_static_from_code +DEFINE_FUNCTION art_quick_get_obj_static SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save ref containing registers for GC mov %esp, %edx // remember SP mov 32(%esp), %ecx // get referrer @@ -800,7 +800,7 @@ DEFINE_FUNCTION art_quick_get_obj_static_from_code .cfi_adjust_cfa_offset -16 RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_get_obj_static_from_code +END_FUNCTION art_quick_get_obj_static DEFINE_FUNCTION art_quick_proxy_invoke_handler SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method* @@ -818,7 +818,32 @@ DEFINE_FUNCTION art_quick_proxy_invoke_handler RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception END_FUNCTION art_quick_proxy_invoke_handler -DEFINE_FUNCTION art_quick_interpreter_entry +DEFINE_FUNCTION art_quick_resolution_trampoline + SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME + PUSH esp // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass receiver + PUSH eax // pass method + call SYMBOL(artQuickResolutionTrampoline) // (Method* 
called, receiver, Thread*, SP) + movl %eax, %edi // remember code pointer in EDI + addl LITERAL(16), %esp // pop arguments + test %eax, %eax // if code pointer is NULL goto deliver pending exception + jz 1f + POP eax // called method + POP ecx // restore args + POP edx + POP ebx + POP ebp // restore callee saves except EDI + POP esi + xchgl 0(%esp),%edi // restore EDI and place code pointer as only value on stack + ret // tail call into method +1: + RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME + DELIVER_PENDING_EXCEPTION +END_FUNCTION art_quick_resolution_trampoline + +DEFINE_FUNCTION art_quick_to_interpreter_bridge SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame mov %esp, %edx // remember SP PUSH eax // alignment padding @@ -826,19 +851,19 @@ DEFINE_FUNCTION art_quick_interpreter_entry pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() .cfi_adjust_cfa_offset 4 PUSH eax // pass method - call SYMBOL(artInterpreterEntry) // (method, Thread*, SP) + call SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP) movd %eax, %xmm0 // place return value also into floating point return value movd %edx, %xmm1 punpckldq %xmm1, %xmm0 addl LITERAL(44), %esp // pop arguments .cfi_adjust_cfa_offset -44 RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception -END_FUNCTION art_quick_interpreter_entry +END_FUNCTION art_quick_to_interpreter_bridge /* * Routine that intercepts method calls and returns. */ -DEFINE_FUNCTION art_quick_instrumentation_entry_from_code +DEFINE_FUNCTION art_quick_instrumentation_entry SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME movl %esp, %edx // Save SP. PUSH eax // Save eax which will be clobbered by the callee-save method. @@ -855,7 +880,7 @@ DEFINE_FUNCTION art_quick_instrumentation_entry_from_code addl LITERAL(28), %esp // Pop arguments upto saved Method*. movl 28(%esp), %edi // Restore edi. movl %eax, 28(%esp) // Place code* over edi, just under return pc. 
- movl LITERAL(SYMBOL(art_quick_instrumentation_exit_from_code)), 32(%esp) + movl LITERAL(SYMBOL(art_quick_instrumentation_exit)), 32(%esp) // Place instrumentation exit as return pc. movl (%esp), %eax // Restore eax. movl 8(%esp), %ecx // Restore ecx. @@ -865,9 +890,9 @@ DEFINE_FUNCTION art_quick_instrumentation_entry_from_code movl 24(%esp), %esi // Restore esi. addl LITERAL(28), %esp // Wind stack back upto code*. ret // Call method (and pop). -END_FUNCTION art_quick_instrumentation_entry_from_code +END_FUNCTION art_quick_instrumentation_entry -DEFINE_FUNCTION art_quick_instrumentation_exit_from_code +DEFINE_FUNCTION art_quick_instrumentation_exit pushl LITERAL(0) // Push a fake return PC as there will be none on the stack. SETUP_REF_ONLY_CALLEE_SAVE_FRAME mov %esp, %ecx // Remember SP @@ -900,7 +925,7 @@ DEFINE_FUNCTION art_quick_instrumentation_exit_from_code RESTORE_REF_ONLY_CALLEE_SAVE_FRAME addl LITERAL(4), %esp // Remove fake return pc. jmp *%ecx // Return. -END_FUNCTION art_quick_instrumentation_exit_from_code +END_FUNCTION art_quick_instrumentation_exit /* * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization @@ -919,21 +944,6 @@ DEFINE_FUNCTION art_quick_deoptimize int3 // Unreachable. END_FUNCTION art_quick_deoptimize - /* - * Quick abstract method error stub. %eax contains method* on entry. - */ -DEFINE_FUNCTION art_quick_abstract_method_error_stub - SETUP_SAVE_ALL_CALLEE_SAVE_FRAME - movl %esp, %ecx // Remember SP. - PUSH eax // Align frame. - PUSH ecx // Pass SP for Method*. - pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). - .cfi_adjust_cfa_offset 4 - PUSH eax // Pass Method*. - call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP) - int3 // Unreachable. -END_FUNCTION art_quick_abstract_method_error_stub - /* * String's indexOf. 
* @@ -1030,12 +1040,5 @@ not_equal: ret END_FUNCTION art_quick_string_compareto -MACRO1(UNIMPLEMENTED,name) - .globl VAR(name, 0) - ALIGN_FUNCTION_ENTRY -VAR(name, 0): - int3 -END_MACRO - // TODO: implement these! UNIMPLEMENTED art_quick_memcmp16 diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 6052993c6b..71959c66ab 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -71,7 +71,7 @@ namespace art { -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, +extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item, ShadowFrame* shadow_frame, JValue* result); @@ -944,6 +944,43 @@ const OatFile* ClassLinker::FindOatFileFromOatLocationLocked(const std::string& return oat_file; } +static void InitFromImageCallbackCommon(mirror::Object* obj, ClassLinker* class_linker, + bool interpret_only_mode) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(obj != NULL); + DCHECK(class_linker != NULL); + + if (obj->GetClass()->IsStringClass()) { + class_linker->GetInternTable()->RegisterStrong(obj->AsString()); + } else if (obj->IsClass()) { + // Restore class to ClassLinker::classes_ table. 
+ mirror::Class* klass = obj->AsClass(); + ClassHelper kh(klass, class_linker); + mirror::Class* existing = class_linker->InsertClass(kh.GetDescriptor(), klass, true); + DCHECK(existing == NULL) << kh.GetDescriptor(); + } else if (interpret_only_mode && obj->IsMethod()) { + mirror::AbstractMethod* method = obj->AsMethod(); + if (!method->IsNative()) { + method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge); + if (method != Runtime::Current()->GetResolutionMethod()) { + method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge()); + } + } + } +} + +static void InitFromImageCallback(mirror::Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ClassLinker* class_linker = reinterpret_cast(arg); + InitFromImageCallbackCommon(obj, class_linker, false); +} + +static void InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ClassLinker* class_linker = reinterpret_cast(arg); + InitFromImageCallbackCommon(obj, class_linker, true); +} + void ClassLinker::InitFromImage() { VLOG(startup) << "ClassLinker::InitFromImage entering"; CHECK(!init_done_); @@ -997,7 +1034,11 @@ void ClassLinker::InitFromImage() { { ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); heap->FlushAllocStack(); - heap->GetLiveBitmap()->Walk(InitFromImageCallback, this); + if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) { + heap->GetLiveBitmap()->Walk(InitFromImageInterpretOnlyCallback, this); + } else { + heap->GetLiveBitmap()->Walk(InitFromImageCallback, this); + } } // reinit class_roots_ @@ -1025,40 +1066,6 @@ void ClassLinker::InitFromImage() { VLOG(startup) << "ClassLinker::InitFromImage exiting"; } -void ClassLinker::InitFromImageCallback(mirror::Object* obj, void* arg) { - DCHECK(obj != NULL); - DCHECK(arg != NULL); - ClassLinker* class_linker = reinterpret_cast(arg); - - if (obj->GetClass()->IsStringClass()) { - 
class_linker->intern_table_->RegisterStrong(obj->AsString()); - return; - } - if (obj->IsClass()) { - // restore class to ClassLinker::classes_ table - mirror::Class* klass = obj->AsClass(); - ClassHelper kh(klass, class_linker); - mirror::Class* existing = class_linker->InsertClass(kh.GetDescriptor(), klass, true); - DCHECK(existing == NULL) << kh.GetDescriptor(); - return; - } - - if (obj->IsMethod()) { - mirror::AbstractMethod* method = obj->AsMethod(); - // Set entry points to interpreter for methods in interpreter only mode. - if (Runtime::Current()->GetInstrumentation()->InterpretOnly() && !method->IsNative()) { - method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry); - if (method != Runtime::Current()->GetResolutionMethod()) { - method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint()); - } - } - // Populate native method pointer with jni lookup stub. - if (method->IsNative()) { - method->UnregisterNative(Thread::Current()); - } - } -} - // Keep in sync with InitCallback. Anything we visit, we need to // reinit references to when reinitializing a ClassLinker from a // mapped image. @@ -1558,7 +1565,7 @@ const void* ClassLinker::GetOatCodeFor(const mirror::AbstractMethod* method) { const void* result = GetOatMethodFor(method).GetCode(); if (result == NULL) { // No code? You must mean to go into the interpreter. - result = GetInterpreterEntryPoint(); + result = GetCompiledCodeToInterpreterBridge(); } return result; } @@ -1619,7 +1626,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) { const bool enter_interpreter = NeedsInterpreter(method, code); if (enter_interpreter) { // Use interpreter entry point. 
- code = GetInterpreterEntryPoint(); + code = GetCompiledCodeToInterpreterBridge(); } runtime->GetInstrumentation()->UpdateMethodsCode(method, code); } @@ -1640,13 +1647,13 @@ static void LinkCode(SirtRef& method, const OatFile::Oat Runtime* runtime = Runtime::Current(); bool enter_interpreter = NeedsInterpreter(method.get(), method->GetEntryPointFromCompiledCode()); if (enter_interpreter) { - method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry); + method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge); } else { - method->SetEntryPointFromInterpreter(artInterpreterToQuickEntry); + method->SetEntryPointFromInterpreter(artInterperterToCompiledCodeBridge); } if (method->IsAbstract()) { - method->SetEntryPointFromCompiledCode(GetAbstractMethodErrorStub()); + method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge()); return; } @@ -1657,7 +1664,7 @@ static void LinkCode(SirtRef& method, const OatFile::Oat method->SetEntryPointFromCompiledCode(GetResolutionTrampoline(runtime->GetClassLinker())); } else if (enter_interpreter) { // Set entry point from compiled code if there's no code or in interpreter only mode. 
- method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint()); + method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge()); } if (method->IsNative()) { @@ -2625,12 +2632,8 @@ mirror::AbstractMethod* ClassLinker::CreateProxyMethod(Thread* self, SirtRefSetCoreSpillMask(refs_and_args->GetCoreSpillMask()); method->SetFpSpillMask(refs_and_args->GetFpSpillMask()); method->SetFrameSizeInBytes(refs_and_args->GetFrameSizeInBytes()); -#if !defined(ART_USE_PORTABLE_COMPILER) - method->SetEntryPointFromCompiledCode(reinterpret_cast(art_quick_proxy_invoke_handler)); -#else - method->SetEntryPointFromCompiledCode(reinterpret_cast(art_portable_proxy_invoke_handler)); -#endif - method->SetEntryPointFromInterpreter(artInterpreterToQuickEntry); + method->SetEntryPointFromCompiledCode(GetProxyInvokeHandler()); + method->SetEntryPointFromInterpreter(artInterperterToCompiledCodeBridge); return method; } diff --git a/runtime/class_linker.h b/runtime/class_linker.h index fdf75c2301..060c26c3a7 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -347,6 +347,17 @@ class ClassLinker { return quick_resolution_trampoline_; } + InternTable* GetInternTable() const { + return intern_table_; + } + + // Attempts to insert a class into a class table. Returns NULL if + // the class was inserted, otherwise returns an existing class with + // the same descriptor and ClassLoader. 
+ mirror::Class* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class) + LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + private: explicit ClassLinker(InternTable*); @@ -362,8 +373,6 @@ class ClassLinker { OatFile& GetImageOatFile(gc::space::ImageSpace* space) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static void InitFromImageCallback(mirror::Object* obj, void* arg) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FinishInit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -423,13 +432,6 @@ class ClassLinker { const OatFile::OatClass* GetOatClass(const DexFile& dex_file, const char* descriptor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - // Attempts to insert a class into a class table. Returns NULL if - // the class was inserted, otherwise returns an existing class with - // the same descriptor and ClassLoader. - mirror::Class* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class) - LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void RegisterDexFileLocked(const DexFile& dex_file, SirtRef& dex_cache) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); diff --git a/runtime/common_test.h b/runtime/common_test.h index 7ee6fe20b2..a54361706a 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -192,10 +192,7 @@ class CommonTest : public testing::Test { compiled_method = compiler_driver_->GetCompiledMethod(MethodReference(&dex_file, method->GetDexMethodIndex())); - -#ifndef ART_LIGHT_MODE CHECK(compiled_method != NULL) << PrettyMethod(method); -#endif } if (compiled_method != NULL) { const std::vector& code = compiled_method->GetCode(); @@ -213,12 +210,8 @@ class CommonTest : public testing::Test { oat_method.LinkMethod(method); } else { const void* method_code; - if (method->IsAbstract()) { - method_code = 
GetAbstractMethodErrorStub(); - } else { - // No code? You must mean to go into the interpreter. - method_code = GetInterpreterEntryPoint(); - } + // No code? You must mean to go into the interpreter. + method_code = GetCompiledCodeToInterpreterBridge(); LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code; OatFile::OatMethod oat_method = CreateOatMethod(method_code, kStackAlignment, diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index 3f28b5e41f..b6781c02b9 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -30,24 +30,13 @@ #include "object_utils.h" #include "thread.h" -extern "C" void art_interpreter_invoke_handler(); -extern "C" void art_jni_dlsym_lookup_stub(); -extern "C" void art_portable_abstract_method_error_stub(); -extern "C" void art_portable_proxy_invoke_handler(); -extern "C" void art_quick_abstract_method_error_stub(); -extern "C" void art_quick_deoptimize(); -extern "C" void art_quick_instrumentation_entry_from_code(void*); -extern "C" void art_quick_instrumentation_exit_from_code(); -extern "C" void art_quick_interpreter_entry(void*); -extern "C" void art_quick_proxy_invoke_handler(); -extern "C" void art_work_around_app_jni_bugs(); - namespace art { + namespace mirror { -class Class; -class Field; -class Object; -} + class Class; + class Field; + class Object; +} // namespace mirror // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it // cannot be resolved, throw an error. If it can, use it to create an instance. @@ -350,25 +339,43 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Entry point for deoptimization. 
-static inline uintptr_t GetDeoptimizationEntryPoint() { +extern "C" void art_quick_deoptimize(); +static inline uintptr_t GetQuickDeoptimizationEntryPoint() { return reinterpret_cast(art_quick_deoptimize); } // Return address of instrumentation stub. -static inline void* GetInstrumentationEntryPoint() { - return reinterpret_cast(art_quick_instrumentation_entry_from_code); +extern "C" void art_quick_instrumentation_entry(void*); +static inline void* GetQuickInstrumentationEntryPoint() { + return reinterpret_cast(art_quick_instrumentation_entry); } // The return_pc of instrumentation exit stub. -static inline uintptr_t GetInstrumentationExitPc() { - return reinterpret_cast(art_quick_instrumentation_exit_from_code); +extern "C" void art_quick_instrumentation_exit(); +static inline uintptr_t GetQuickInstrumentationExitPc() { + return reinterpret_cast(art_quick_instrumentation_exit); +} + +extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*); +static inline const void* GetPortableToInterpreterBridge() { + return reinterpret_cast(art_portable_to_interpreter_bridge); +} + +extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*); +static inline const void* GetQuickToInterpreterBridge() { + return reinterpret_cast(art_quick_to_interpreter_bridge); } // Return address of interpreter stub. 
-static inline void* GetInterpreterEntryPoint() { - return reinterpret_cast(art_quick_interpreter_entry); +static inline const void* GetCompiledCodeToInterpreterBridge() { +#if defined(ART_USE_PORTABLE_COMPILER) + return GetPortableToInterpreterBridge(); +#else + return GetQuickToInterpreterBridge(); +#endif } + static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) { return class_linker->GetPortableResolutionTrampoline(); } @@ -386,23 +393,25 @@ static inline const void* GetResolutionTrampoline(ClassLinker* class_linker) { #endif } -static inline void* GetPortableAbstractMethodErrorStub() { - return reinterpret_cast(art_portable_abstract_method_error_stub); +extern "C" void art_portable_proxy_invoke_handler(); +static inline const void* GetPortableProxyInvokeHandler() { + return reinterpret_cast(art_portable_proxy_invoke_handler); } -static inline void* GetQuickAbstractMethodErrorStub() { - return reinterpret_cast(art_quick_abstract_method_error_stub); +extern "C" void art_quick_proxy_invoke_handler(); +static inline const void* GetQuickProxyInvokeHandler() { + return reinterpret_cast(art_quick_proxy_invoke_handler); } -// Return address of abstract method error stub for defined compiler. 
-static inline void* GetAbstractMethodErrorStub() { +static inline const void* GetProxyInvokeHandler() { #if defined(ART_USE_PORTABLE_COMPILER) - return GetPortableAbstractMethodErrorStub(); + return GetPortableProxyInvokeHandler(); #else - return GetQuickAbstractMethodErrorStub(); + return GetQuickProxyInvokeHandler(); #endif } +extern "C" void* art_jni_dlsym_lookup_stub(JNIEnv*, jobject); static inline void* GetJniDlsymLookupStub() { return reinterpret_cast(art_jni_dlsym_lookup_stub); } diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc new file mode 100644 index 0000000000..d99c43e052 --- /dev/null +++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "class_linker.h" +#include "interpreter/interpreter.h" +#include "invoke_arg_array_builder.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "object_utils.h" +#include "runtime.h" +#include "stack.h" + +namespace art { + +extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* method = shadow_frame->GetMethod(); + // Ensure static methods are initialized. 
+ if (method->IsStatic()) { + Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), true, true); + } + uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_; + ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength()); + arg_array.BuildArgArray(shadow_frame, arg_offset); + method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]); +} + +} // namespace art diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.h b/runtime/entrypoints/interpreter/interpreter_entrypoints.h new file mode 100644 index 0000000000..c7df4e6b0a --- /dev/null +++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_ +#define ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_ + +#include "base/macros.h" +#include "dex_file.h" +#include "offsets.h" + +#define INTERPRETER_ENTRYPOINT_OFFSET(x) \ + ThreadOffset(static_cast(OFFSETOF_MEMBER(Thread, interpreter_entrypoints_)) + \ + static_cast(OFFSETOF_MEMBER(InterpreterEntryPoints, x))) + +namespace art { + +union JValue; +class MethodHelper; +class ShadowFrame; +class Thread; + +// Pointers to functions that are called by interpreter trampolines via thread-local storage. 
+struct PACKED(4) InterpreterEntryPoints { + void (*pInterpreterToInterpreterBridge)(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + void (*pInterpreterToCompiledCodeBridge)(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc index 98f7b1283c..88b4936255 100644 --- a/runtime/entrypoints/jni/jni_entrypoints.cc +++ b/runtime/entrypoints/jni/jni_entrypoints.cc @@ -15,23 +15,26 @@ */ #include "base/logging.h" -#include "mirror/abstract_method.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "object_utils.h" #include "scoped_thread_state_change.h" #include "thread.h" namespace art { // Used by the JNI dlsym stub to find the native method to invoke if none is registered. -extern "C" void* artFindNativeMethod(Thread* self) { +extern "C" void* artFindNativeMethod() { + Thread* self = Thread::Current(); Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native. - DCHECK(Thread::Current() == self); ScopedObjectAccess soa(self); mirror::AbstractMethod* method = self->GetCurrentMethod(NULL); DCHECK(method != NULL); - // Lookup symbol address for method, on failure we'll return NULL with an - // exception set, otherwise we return the address of the method we found. + // Lookup symbol address for method, on failure we'll return NULL with an exception set, + // otherwise we return the address of the method we found. 
void* native_code = soa.Vm()->FindCodeForNativeMethod(method); if (native_code == NULL) { DCHECK(self->IsExceptionPending()); @@ -43,4 +46,78 @@ extern "C" void* artFindNativeMethod(Thread* self) { } } +static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) { + intptr_t value = *arg_ptr; + mirror::Object** value_as_jni_rep = reinterpret_cast(value); + mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL; + CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep)) + << value_as_work_around_rep; + *arg_ptr = reinterpret_cast(value_as_work_around_rep); +} + +extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(Thread::Current() == self); + // TODO: this code is specific to ARM + // On entry the stack pointed by sp is: + // | arg3 | <- Calling JNI method's frame (and extra bit for out args) + // | LR | + // | R3 | arg2 + // | R2 | arg1 + // | R1 | jclass/jobject + // | R0 | JNIEnv + // | unused | + // | unused | + // | unused | <- sp + mirror::AbstractMethod* jni_method = self->GetCurrentMethod(NULL); + DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method); + intptr_t* arg_ptr = sp + 4; // pointer to r1 on stack + // Fix up this/jclass argument + WorkAroundJniBugsForJobject(arg_ptr); + arg_ptr++; + // Fix up jobject arguments + MethodHelper mh(jni_method); + int reg_num = 2; // Current register being processed, -1 for stack arguments. 
+ for (uint32_t i = 1; i < mh.GetShortyLength(); i++) { + char shorty_char = mh.GetShorty()[i]; + if (shorty_char == 'L') { + WorkAroundJniBugsForJobject(arg_ptr); + } + if (shorty_char == 'J' || shorty_char == 'D') { + if (reg_num == 2) { + arg_ptr = sp + 8; // skip to out arguments + reg_num = -1; + } else if (reg_num == 3) { + arg_ptr = sp + 10; // skip to out arguments plus 2 slots as long must be aligned + reg_num = -1; + } else { + DCHECK_EQ(reg_num, -1); + if ((reinterpret_cast(arg_ptr) & 7) == 4) { + arg_ptr += 3; // unaligned, pad and move through stack arguments + } else { + arg_ptr += 2; // aligned, move through stack arguments + } + } + } else { + if (reg_num == 2) { + arg_ptr++; // move through register arguments + reg_num++; + } else if (reg_num == 3) { + arg_ptr = sp + 8; // skip to outgoing stack arguments + reg_num = -1; + } else { + DCHECK_EQ(reg_num, -1); + arg_ptr++; // move through stack arguments + } + } + } + // Load expected destination, see Method::RegisterNative + const void* code = reinterpret_cast(jni_method->GetNativeGcMap()); + if (UNLIKELY(code == NULL)) { + code = GetJniDlsymLookupStub(); + jni_method->RegisterNative(self, code); + } + return code; +} + } // namespace art diff --git a/runtime/entrypoints/jni/jni_entrypoints.h b/runtime/entrypoints/jni/jni_entrypoints.h new file mode 100644 index 0000000000..0a53447cb4 --- /dev/null +++ b/runtime/entrypoints/jni/jni_entrypoints.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_ +#define ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_ + +#include "base/macros.h" +#include "offsets.h" + +#define JNI_ENTRYPOINT_OFFSET(x) \ + ThreadOffset(static_cast(OFFSETOF_MEMBER(Thread, jni_entrypoints_)) + \ + static_cast(OFFSETOF_MEMBER(JniEntryPoints, x))) + +namespace art { + +// Pointers to functions that are called by JNI trampolines via thread-local storage. +struct PACKED(4) JniEntryPoints { + // Called when the JNI method isn't registered. + void* (*pDlsymLookup)(JNIEnv* env, jobject); +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/portable/portable_entrypoints.h b/runtime/entrypoints/portable/portable_entrypoints.h index a229c76dbd..ec9e4f8a7d 100644 --- a/runtime/entrypoints/portable/portable_entrypoints.h +++ b/runtime/entrypoints/portable/portable_entrypoints.h @@ -28,15 +28,15 @@ namespace mirror { class Thread; #define PORTABLE_ENTRYPOINT_OFFSET(x) \ - (static_cast(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \ - static_cast(OFFSETOF_MEMBER(PortableEntryPoints, x))) + ThreadOffset(static_cast(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \ + static_cast(OFFSETOF_MEMBER(PortableEntryPoints, x))) // Pointers to functions that are called by code generated by compiler's adhering to the portable // compiler ABI. 
struct PACKED(4) PortableEntryPoints { // Invocation - const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, - mirror::AbstractMethod**, Thread*); + void (*pPortableResolutionTrampoline)(mirror::AbstractMethod*); + void (*pPortableToInterpreterBridge)(mirror::AbstractMethod*); }; } // namespace art diff --git a/runtime/entrypoints/quick/quick_argument_visitor.h b/runtime/entrypoints/quick/quick_argument_visitor.h deleted file mode 100644 index 35fa97269c..0000000000 --- a/runtime/entrypoints/quick/quick_argument_visitor.h +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright (C) 2013 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_ -#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_ - -#include "object_utils.h" - -namespace art { - -// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. -class QuickArgumentVisitor { - public: -// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame. -// Size of Runtime::kRefAndArgs callee save frame. -// Size of Method* and register parameters in out stack arguments. 
-#if defined(__arm__) -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8 -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48 -#define QUICK_STACK_ARG_SKIP 16 -#elif defined(__mips__) -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64 -#define QUICK_STACK_ARG_SKIP 16 -#elif defined(__i386__) -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32 -#define QUICK_STACK_ARG_SKIP 16 -#else -#error "Unsupported architecture" -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0 -#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0 -#define QUICK_STACK_ARG_SKIP 0 -#endif - - QuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : - caller_mh_(caller_mh), - args_in_regs_(ComputeArgsInRegs(caller_mh)), - num_params_(caller_mh.NumArgs()), - reg_args_(reinterpret_cast(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET), - stack_args_(reinterpret_cast(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE - + QUICK_STACK_ARG_SKIP), - cur_args_(reg_args_), - cur_arg_index_(0), - param_index_(0), - is_split_long_or_double_(false) { - } - - virtual ~QuickArgumentVisitor() {} - - virtual void Visit() = 0; - - bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.IsParamAReference(param_index_); - } - - bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.IsParamALongOrDouble(param_index_); - } - - Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.GetParamPrimitiveType(param_index_); - } - - byte* GetParamAddress() const { - return cur_args_ + (cur_arg_index_ * kPointerSize); - } - - bool IsSplitLongOrDouble() const { - return is_split_long_or_double_; - } - - uint64_t ReadSplitLongParam() const 
{ - DCHECK(IsSplitLongOrDouble()); - uint64_t low_half = *reinterpret_cast(GetParamAddress()); - uint64_t high_half = *reinterpret_cast(stack_args_); - return (low_half & 0xffffffffULL) | (high_half << 32); - } - - void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) { - is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble(); - Visit(); - cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); - param_index_++; - } - cur_args_ = stack_args_; - cur_arg_index_ = is_split_long_or_double_ ? 1 : 0; - is_split_long_or_double_ = false; - while (param_index_ < num_params_) { - Visit(); - cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); - param_index_++; - } - } - - private: - static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - size_t args_in_regs = 0; - size_t num_params = mh.NumArgs(); - for (size_t i = 0; i < num_params; i++) { - args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1); - if (args_in_regs > 3) { - args_in_regs = 3; - break; - } - } - return args_in_regs; - } - MethodHelper& caller_mh_; - const size_t args_in_regs_; - const size_t num_params_; - byte* const reg_args_; - byte* const stack_args_; - byte* cur_args_; - size_t cur_arg_index_; - size_t param_index_; - // Does a 64bit parameter straddle the register and stack arguments? 
- bool is_split_long_or_double_; -}; - -} // namespace art - -#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_ diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h index 74b8cfd09b..e76679b91f 100644 --- a/runtime/entrypoints/quick/quick_entrypoints.h +++ b/runtime/entrypoints/quick/quick_entrypoints.h @@ -17,44 +17,45 @@ #ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_ #define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_ -#include "dex_file-inl.h" -#include "runtime.h" +#include + +#include "base/macros.h" +#include "offsets.h" #define QUICK_ENTRYPOINT_OFFSET(x) \ - (static_cast(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \ - static_cast(OFFSETOF_MEMBER(QuickEntryPoints, x))) + ThreadOffset(static_cast(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \ + static_cast(OFFSETOF_MEMBER(QuickEntryPoints, x))) namespace art { + namespace mirror { class AbstractMethod; class Class; class Object; } // namespace mirror -class DvmDex; -class MethodHelper; -class ShadowFrame; + class Thread; // Pointers to functions that are called by quick compiler generated code via thread-local storage. 
struct PACKED(4) QuickEntryPoints { // Alloc - void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t); - void* (*pAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t); - void* (*pAllocObjectFromCode)(uint32_t, void*); - void* (*pAllocObjectFromCodeWithAccessCheck)(uint32_t, void*); - void* (*pCheckAndAllocArrayFromCode)(uint32_t, void*, int32_t); - void* (*pCheckAndAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t); + void* (*pAllocArray)(uint32_t, void*, int32_t); + void* (*pAllocArrayWithAccessCheck)(uint32_t, void*, int32_t); + void* (*pAllocObject)(uint32_t, void*); + void* (*pAllocObjectWithAccessCheck)(uint32_t, void*); + void* (*pCheckAndAllocArray)(uint32_t, void*, int32_t); + void* (*pCheckAndAllocArrayWithAccessCheck)(uint32_t, void*, int32_t); // Cast - uint32_t (*pInstanceofNonTrivialFromCode)(const mirror::Class*, const mirror::Class*); - void (*pCanPutArrayElementFromCode)(void*, void*); - void (*pCheckCastFromCode)(void*, void*); + uint32_t (*pInstanceofNonTrivial)(const mirror::Class*, const mirror::Class*); + void (*pCanPutArrayElement)(void*, void*); + void (*pCheckCast)(void*, void*); // DexCache void* (*pInitializeStaticStorage)(uint32_t, void*); - void* (*pInitializeTypeAndVerifyAccessFromCode)(uint32_t, void*); - void* (*pInitializeTypeFromCode)(uint32_t, void*); - void* (*pResolveStringFromCode)(void*, uint32_t); + void* (*pInitializeTypeAndVerifyAccess)(uint32_t, void*); + void* (*pInitializeType)(uint32_t, void*); + void* (*pResolveString)(void*, uint32_t); // Field int (*pSet32Instance)(uint32_t, void*, int32_t); // field_idx, obj, src @@ -71,7 +72,7 @@ struct PACKED(4) QuickEntryPoints { void* (*pGetObjStatic)(uint32_t); // FillArray - void (*pHandleFillArrayDataFromCode)(void*, void*); + void (*pHandleFillArrayData)(void*, void*); // JNI uint32_t (*pJniMethodStart)(Thread*); @@ -83,8 +84,8 @@ struct PACKED(4) QuickEntryPoints { jobject locked, Thread* self); // Locks - void (*pLockObjectFromCode)(void*); - void 
(*pUnlockObjectFromCode)(void*); + void (*pLockObject)(void*); + void (*pUnlockObject)(void*); // Math int32_t (*pCmpgDouble)(double, double); @@ -108,14 +109,6 @@ struct PACKED(4) QuickEntryPoints { uint64_t (*pShrLong)(uint64_t, uint32_t); uint64_t (*pUshrLong)(uint64_t, uint32_t); - // Interpreter - void (*pInterpreterToInterpreterEntry)(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - void (*pInterpreterToQuickEntry)(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - // Intrinsics int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t); int32_t (*pMemcmp16)(void*, void*, int32_t); @@ -123,8 +116,8 @@ struct PACKED(4) QuickEntryPoints { void* (*pMemcpy)(void*, const void*, size_t); // Invocation - const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, - mirror::AbstractMethod**, Thread*); + void (*pQuickResolutionTrampoline)(mirror::AbstractMethod*); + void (*pQuickToInterpreterBridge)(mirror::AbstractMethod*); void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*); void (*pInvokeInterfaceTrampoline)(uint32_t, void*); void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*); @@ -133,22 +126,21 @@ struct PACKED(4) QuickEntryPoints { void (*pInvokeVirtualTrampolineWithAccessCheck)(uint32_t, void*); // Thread - void (*pCheckSuspendFromCode)(Thread*); // Stub that is called when the suspend count is non-zero - void (*pTestSuspendFromCode)(); // Stub that is periodically called to test the suspend count + void (*pCheckSuspend)(Thread*); // Stub that is called when the suspend count is non-zero + void (*pTestSuspend)(); // Stub that is periodically called to test the suspend count // Throws void (*pDeliverException)(void*); - void (*pThrowArrayBoundsFromCode)(int32_t, int32_t); - void (*pThrowDivZeroFromCode)(); - void (*pThrowNoSuchMethodFromCode)(int32_t); - void 
(*pThrowNullPointerFromCode)(); - void (*pThrowStackOverflowFromCode)(void*); + void (*pThrowArrayBounds)(int32_t, int32_t); + void (*pThrowDivZero)(); + void (*pThrowNoSuchMethod)(int32_t); + void (*pThrowNullPointer)(); + void (*pThrowStackOverflow)(void*); }; // JNI entrypoints. -extern uint32_t JniMethodStart(Thread* self) - UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; +extern uint32_t JniMethodStart(Thread* self) UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self) UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc index 7ecd296742..0e61942209 100644 --- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc @@ -32,7 +32,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(mirror::AbstractMet FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); const void* result = instrumentation->GetQuickCodeFor(method); - bool interpreter_entry = (result == GetInterpreterEntryPoint()); + bool interpreter_entry = (result == GetQuickToInterpreterBridge()); instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? 
NULL : this_object, method, lr, interpreter_entry); CHECK(result != NULL) << PrettyMethod(method); diff --git a/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc b/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc deleted file mode 100644 index 656df8de5b..0000000000 --- a/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "quick_argument_visitor.h" -#include "callee_save_frame.h" -#include "dex_file-inl.h" -#include "interpreter/interpreter.h" -#include "invoke_arg_array_builder.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "object_utils.h" - -namespace art { - -// Visits arguments on the stack placing them into the shadow frame. -class BuildShadowFrameVisitor : public QuickArgumentVisitor { - public: - BuildShadowFrameVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, - ShadowFrame& sf, size_t first_arg_reg) : - QuickArgumentVisitor(caller_mh, sp), sf_(sf), cur_reg_(first_arg_reg) {} - - virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Primitive::Type type = GetParamPrimitiveType(); - switch (type) { - case Primitive::kPrimLong: // Fall-through. 
- case Primitive::kPrimDouble: - if (IsSplitLongOrDouble()) { - sf_.SetVRegLong(cur_reg_, ReadSplitLongParam()); - } else { - sf_.SetVRegLong(cur_reg_, *reinterpret_cast(GetParamAddress())); - } - ++cur_reg_; - break; - case Primitive::kPrimNot: - sf_.SetVRegReference(cur_reg_, *reinterpret_cast(GetParamAddress())); - break; - case Primitive::kPrimBoolean: // Fall-through. - case Primitive::kPrimByte: // Fall-through. - case Primitive::kPrimChar: // Fall-through. - case Primitive::kPrimShort: // Fall-through. - case Primitive::kPrimInt: // Fall-through. - case Primitive::kPrimFloat: - sf_.SetVReg(cur_reg_, *reinterpret_cast(GetParamAddress())); - break; - case Primitive::kPrimVoid: - LOG(FATAL) << "UNREACHABLE"; - break; - } - ++cur_reg_; - } - - private: - ShadowFrame& sf_; - size_t cur_reg_; - - DISALLOW_COPY_AND_ASSIGN(BuildShadowFrameVisitor); -}; - -extern "C" uint64_t artInterpreterEntry(mirror::AbstractMethod* method, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Ensure we don't get thread suspension until the object arguments are safely in the shadow - // frame. - const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame"); - FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); - - MethodHelper mh(method); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - uint16_t num_regs = code_item->registers_size_; - void* memory = alloca(ShadowFrame::ComputeSize(num_regs)); - ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL, // No last shadow coming from quick. - method, 0, memory)); - size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_; - BuildShadowFrameVisitor shadow_frame_builder(mh, sp, *shadow_frame, first_arg_reg); - shadow_frame_builder.VisitArguments(); - // Push a transition back into managed code onto the linked list in thread. 
- ManagedStack fragment; - self->PushManagedStackFragment(&fragment); - self->PushShadowFrame(shadow_frame); - self->EndAssertNoThreadSuspension(old_cause); - - if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) { - // Ensure static method's class is initialized. - if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), - true, true)) { - DCHECK(Thread::Current()->IsExceptionPending()); - self->PopManagedStackFragment(fragment); - return 0; - } - } - - JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame); - // Pop transition. - self->PopManagedStackFragment(fragment); - return result.GetJ(); -} - -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::AbstractMethod* method = shadow_frame->GetMethod(); - // Ensure static methods are initialized. - if (method->IsStatic()) { - Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), true, true); - } - uint16_t arg_offset = (code_item == NULL) ? 
0 : code_item->registers_size_ - code_item->ins_size_; - ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength()); - arg_array.BuildArgArray(shadow_frame, arg_offset); - method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]); -} - -} // namespace art diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc index 23a28f9cce..9907c043ee 100644 --- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -94,78 +94,4 @@ extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, return o; } -static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) { - intptr_t value = *arg_ptr; - mirror::Object** value_as_jni_rep = reinterpret_cast(value); - mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL; - CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep)) - << value_as_work_around_rep; - *arg_ptr = reinterpret_cast(value_as_work_around_rep); -} - -extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(Thread::Current() == self); - // TODO: this code is specific to ARM - // On entry the stack pointed by sp is: - // | arg3 | <- Calling JNI method's frame (and extra bit for out args) - // | LR | - // | R3 | arg2 - // | R2 | arg1 - // | R1 | jclass/jobject - // | R0 | JNIEnv - // | unused | - // | unused | - // | unused | <- sp - mirror::AbstractMethod* jni_method = self->GetCurrentMethod(NULL); - DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method); - intptr_t* arg_ptr = sp + 4; // pointer to r1 on stack - // Fix up this/jclass argument - WorkAroundJniBugsForJobject(arg_ptr); - arg_ptr++; - // Fix up jobject arguments - MethodHelper mh(jni_method); - int reg_num = 2; // Current register being processed, -1 for stack arguments. 
- for (uint32_t i = 1; i < mh.GetShortyLength(); i++) { - char shorty_char = mh.GetShorty()[i]; - if (shorty_char == 'L') { - WorkAroundJniBugsForJobject(arg_ptr); - } - if (shorty_char == 'J' || shorty_char == 'D') { - if (reg_num == 2) { - arg_ptr = sp + 8; // skip to out arguments - reg_num = -1; - } else if (reg_num == 3) { - arg_ptr = sp + 10; // skip to out arguments plus 2 slots as long must be aligned - reg_num = -1; - } else { - DCHECK_EQ(reg_num, -1); - if ((reinterpret_cast(arg_ptr) & 7) == 4) { - arg_ptr += 3; // unaligned, pad and move through stack arguments - } else { - arg_ptr += 2; // aligned, move through stack arguments - } - } - } else { - if (reg_num == 2) { - arg_ptr++; // move through register arguments - reg_num++; - } else if (reg_num == 3) { - arg_ptr = sp + 8; // skip to outgoing stack arguments - reg_num = -1; - } else { - DCHECK_EQ(reg_num, -1); - arg_ptr++; // move through stack arguments - } - } - } - // Load expected destination, see Method::RegisterNative - const void* code = reinterpret_cast(jni_method->GetNativeGcMap()); - if (UNLIKELY(code == NULL)) { - code = GetJniDlsymLookupStub(); - jni_method->RegisterNative(self, code); - } - return code; -} - } // namespace art diff --git a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc deleted file mode 100644 index 4e3d749e27..0000000000 --- a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "quick_argument_visitor.h" -#include "dex_file-inl.h" -#include "entrypoints/entrypoint_utils.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/object_array-inl.h" -#include "mirror/object-inl.h" -#include "object_utils.h" -#include "reflection.h" -#include "scoped_thread_state_change.h" -#include "thread.h" -#include "well_known_classes.h" - -#include "ScopedLocalRef.h" - -namespace art { - -// Visits arguments on the stack placing them into the args vector, Object* arguments are converted -// to jobjects. -class BuildQuickArgumentVisitor : public QuickArgumentVisitor { - public: - BuildQuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, - ScopedObjectAccessUnchecked& soa, std::vector& args) : - QuickArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {} - - virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - jvalue val; - Primitive::Type type = GetParamPrimitiveType(); - switch (type) { - case Primitive::kPrimNot: { - mirror::Object* obj = *reinterpret_cast(GetParamAddress()); - val.l = soa_.AddLocalReference(obj); - break; - } - case Primitive::kPrimLong: // Fall-through. - case Primitive::kPrimDouble: - if (IsSplitLongOrDouble()) { - val.j = ReadSplitLongParam(); - } else { - val.j = *reinterpret_cast(GetParamAddress()); - } - break; - case Primitive::kPrimBoolean: // Fall-through. - case Primitive::kPrimByte: // Fall-through. - case Primitive::kPrimChar: // Fall-through. - case Primitive::kPrimShort: // Fall-through. - case Primitive::kPrimInt: // Fall-through. 
- case Primitive::kPrimFloat: - val.i = *reinterpret_cast(GetParamAddress()); - break; - case Primitive::kPrimVoid: - LOG(FATAL) << "UNREACHABLE"; - val.j = 0; - break; - } - args_.push_back(val); - } - - private: - ScopedObjectAccessUnchecked& soa_; - std::vector& args_; - - DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); -}; - -// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method -// which is responsible for recording callee save registers. We explicitly place into jobjects the -// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a -// field within the proxy object, which will box the primitive arguments and deal with error cases. -extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method, - mirror::Object* receiver, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Ensure we don't get thread suspension until the object arguments are safely in jobjects. - const char* old_cause = - self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); - // Register the top of the managed stack, making stack crawlable. - DCHECK_EQ(*sp, proxy_method); - self->SetTopOfStack(sp, 0); - DCHECK_EQ(proxy_method->GetFrameSizeInBytes(), - Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - self->VerifyStack(); - // Start new JNI local reference state. - JNIEnvExt* env = self->GetJniEnv(); - ScopedObjectAccessUnchecked soa(env); - ScopedJniEnvLocalRefState env_state(env); - // Create local ref. copies of proxy method and the receiver. - jobject rcvr_jobj = soa.AddLocalReference(receiver); - - // Placing arguments into args vector and remove the receiver. 
- MethodHelper proxy_mh(proxy_method); - std::vector args; - BuildQuickArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args); - local_ref_visitor.VisitArguments(); - args.erase(args.begin()); - - // Convert proxy method into expected interface method. - mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); - DCHECK(interface_method != NULL); - DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); - jobject interface_method_jobj = soa.AddLocalReference(interface_method); - - // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code - // that performs allocations. - self->EndAssertNoThreadSuspension(old_cause); - JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(), - rcvr_jobj, interface_method_jobj, args); - return result.GetJ(); -} - -} // namespace art diff --git a/runtime/entrypoints/quick/quick_stub_entrypoints.cc b/runtime/entrypoints/quick/quick_stub_entrypoints.cc deleted file mode 100644 index d78bbf3bc8..0000000000 --- a/runtime/entrypoints/quick/quick_stub_entrypoints.cc +++ /dev/null @@ -1,295 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "callee_save_frame.h" -#include "class_linker-inl.h" -#include "dex_file-inl.h" -#include "dex_instruction-inl.h" -#include "mirror/class-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/object_array-inl.h" -#include "mirror/object-inl.h" -#include "object_utils.h" -#include "scoped_thread_state_change.h" - -// Architecture specific assembler helper to deliver exception. -extern "C" void art_quick_deliver_exception_from_code(void*); - -namespace art { - -// Lazily resolve a method for quick. Called by stub code. -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { -#if defined(__arm__) - // On entry the stack pointed by sp is: - // | argN | | - // | ... | | - // | arg4 | | - // | arg3 spill | | Caller's frame - // | arg2 spill | | - // | arg1 spill | | - // | Method* | --- - // | LR | - // | ... | callee saves - // | R3 | arg3 - // | R2 | arg2 - // | R1 | arg1 - // | R0 | - // | Method* | <- sp - DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 48); - uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp) + kPointerSize); - uint32_t pc_offset = 10; - uintptr_t caller_pc = regs[pc_offset]; -#elif defined(__i386__) - // On entry the stack pointed by sp is: - // | argN | | - // | ... 
| | - // | arg4 | | - // | arg3 spill | | Caller's frame - // | arg2 spill | | - // | arg1 spill | | - // | Method* | --- - // | Return | - // | EBP,ESI,EDI | callee saves - // | EBX | arg3 - // | EDX | arg2 - // | ECX | arg1 - // | EAX/Method* | <- sp - DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 32); - uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); - uintptr_t caller_pc = regs[7]; -#elif defined(__mips__) - // On entry the stack pointed by sp is: - // | argN | | - // | ... | | - // | arg4 | | - // | arg3 spill | | Caller's frame - // | arg2 spill | | - // | arg1 spill | | - // | Method* | --- - // | RA | - // | ... | callee saves - // | A3 | arg3 - // | A2 | arg2 - // | A1 | arg1 - // | A0/Method* | <- sp - DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); - mirror::AbstractMethod** caller_sp = reinterpret_cast(reinterpret_cast(sp) + 64); - uintptr_t* regs = reinterpret_cast(reinterpret_cast(sp)); - uint32_t pc_offset = 15; - uintptr_t caller_pc = regs[pc_offset]; -#else - UNIMPLEMENTED(FATAL); - mirror::AbstractMethod** caller_sp = NULL; - uintptr_t* regs = NULL; - uintptr_t caller_pc = 0; -#endif - FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs); - // Start new JNI local reference state - JNIEnvExt* env = thread->GetJniEnv(); - ScopedObjectAccessUnchecked soa(env); - ScopedJniEnvLocalRefState env_state(env); - - // Compute details about the called method (avoid GCs) - ClassLinker* linker = Runtime::Current()->GetClassLinker(); - mirror::AbstractMethod* caller = *caller_sp; - InvokeType invoke_type; - uint32_t dex_method_idx; -#if !defined(__i386__) - const char* shorty; - uint32_t shorty_len; -#endif - if (called->IsRuntimeMethod()) { - uint32_t dex_pc = caller->ToDexPc(caller_pc); - const DexFile::CodeItem* code = 
MethodHelper(caller).GetCodeItem(); - CHECK_LT(dex_pc, code->insns_size_in_code_units_); - const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); - Instruction::Code instr_code = instr->Opcode(); - bool is_range; - switch (instr_code) { - case Instruction::INVOKE_DIRECT: - invoke_type = kDirect; - is_range = false; - break; - case Instruction::INVOKE_DIRECT_RANGE: - invoke_type = kDirect; - is_range = true; - break; - case Instruction::INVOKE_STATIC: - invoke_type = kStatic; - is_range = false; - break; - case Instruction::INVOKE_STATIC_RANGE: - invoke_type = kStatic; - is_range = true; - break; - case Instruction::INVOKE_SUPER: - invoke_type = kSuper; - is_range = false; - break; - case Instruction::INVOKE_SUPER_RANGE: - invoke_type = kSuper; - is_range = true; - break; - case Instruction::INVOKE_VIRTUAL: - invoke_type = kVirtual; - is_range = false; - break; - case Instruction::INVOKE_VIRTUAL_RANGE: - invoke_type = kVirtual; - is_range = true; - break; - case Instruction::INVOKE_INTERFACE: - invoke_type = kInterface; - is_range = false; - break; - case Instruction::INVOKE_INTERFACE_RANGE: - invoke_type = kInterface; - is_range = true; - break; - default: - LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); - // Avoid used uninitialized warnings. - invoke_type = kDirect; - is_range = false; - } - dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); -#if !defined(__i386__) - shorty = linker->MethodShorty(dex_method_idx, caller, &shorty_len); -#endif - } else { - invoke_type = kStatic; - dex_method_idx = called->GetDexMethodIndex(); -#if !defined(__i386__) - MethodHelper mh(called); - shorty = mh.GetShorty(); - shorty_len = mh.GetShortyLength(); -#endif - } -#if !defined(__i386__) - // Discover shorty (avoid GCs) - size_t args_in_regs = 0; - for (size_t i = 1; i < shorty_len; i++) { - char c = shorty[i]; - args_in_regs = args_in_regs + (c == 'J' || c == 'D' ? 
2 : 1); - if (args_in_regs > 3) { - args_in_regs = 3; - break; - } - } - // Place into local references incoming arguments from the caller's register arguments - size_t cur_arg = 1; // skip method_idx in R0, first arg is in R1 - if (invoke_type != kStatic) { - mirror::Object* obj = reinterpret_cast(regs[cur_arg]); - cur_arg++; - if (args_in_regs < 3) { - // If we thought we had fewer than 3 arguments in registers, account for the receiver - args_in_regs++; - } - soa.AddLocalReference(obj); - } - size_t shorty_index = 1; // skip return value - // Iterate while arguments and arguments in registers (less 1 from cur_arg which is offset to skip - // R0) - while ((cur_arg - 1) < args_in_regs && shorty_index < shorty_len) { - char c = shorty[shorty_index]; - shorty_index++; - if (c == 'L') { - mirror::Object* obj = reinterpret_cast(regs[cur_arg]); - soa.AddLocalReference(obj); - } - cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); - } - // Place into local references incoming arguments from the caller's stack arguments - cur_arg += pc_offset + 1; // skip LR/RA, Method* and spills for R1-R3/A1-A3 and callee saves - while (shorty_index < shorty_len) { - char c = shorty[shorty_index]; - shorty_index++; - if (c == 'L') { - mirror::Object* obj = reinterpret_cast(regs[cur_arg]); - soa.AddLocalReference(obj); - } - cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1); - } -#endif - // Resolve method filling in dex cache - if (called->IsRuntimeMethod()) { - called = linker->ResolveMethod(dex_method_idx, caller, invoke_type); - } - const void* code = NULL; - if (LIKELY(!thread->IsExceptionPending())) { - // Incompatible class change should have been handled in resolve method. - CHECK(!called->CheckIncompatibleClassChange(invoke_type)); - // Refine called method based on receiver. 
- if (invoke_type == kVirtual) { - called = receiver->GetClass()->FindVirtualMethodForVirtual(called); - } else if (invoke_type == kInterface) { - called = receiver->GetClass()->FindVirtualMethodForInterface(called); - } - // Ensure that the called method's class is initialized. - mirror::Class* called_class = called->GetDeclaringClass(); - linker->EnsureInitialized(called_class, true, true); - if (LIKELY(called_class->IsInitialized())) { - code = called->GetEntryPointFromCompiledCode(); - } else if (called_class->IsInitializing()) { - if (invoke_type == kStatic) { - // Class is still initializing, go to oat and grab code (trampoline must be left in place - // until class is initialized to stop races between threads). - code = linker->GetOatCodeFor(called); - } else { - // No trampoline for non-static methods. - code = called->GetEntryPointFromCompiledCode(); - } - } else { - DCHECK(called_class->IsErroneous()); - } - } - if (UNLIKELY(code == NULL)) { - // Something went wrong in ResolveMethod or EnsureInitialized, - // go into deliver exception with the pending exception in r0 - CHECK(thread->IsExceptionPending()); - code = reinterpret_cast(art_quick_deliver_exception_from_code); - regs[0] = reinterpret_cast(thread->GetException(NULL)); - thread->ClearException(); - } else { - // Expect class to at least be initializing. - DCHECK(called->GetDeclaringClass()->IsInitializing()); - // Don't want infinite recursion. - DCHECK(code != GetResolutionTrampoline(linker)); - // Set up entry into main method - regs[0] = reinterpret_cast(called); - } - return code; -} - -// Called by the abstract method error stub. 
-extern "C" void artThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* self, - mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { -#if !defined(ART_USE_PORTABLE_COMPILER) - FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll); -#else - UNUSED(sp); -#endif - ThrowAbstractMethodError(method); - self->QuickDeliverException(); -} - -} // namespace art diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc new file mode 100644 index 0000000000..9bf02e8c8e --- /dev/null +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -0,0 +1,558 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "callee_save_frame.h" +#include "dex_file-inl.h" +#include "dex_instruction-inl.h" +#include "interpreter/interpreter.h" +#include "invoke_arg_array_builder.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "object_utils.h" +#include "runtime.h" + +namespace art { + +// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. +class QuickArgumentVisitor { + public: +// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame. +// Size of Runtime::kRefAndArgs callee save frame. 
+// Size of Method* and register parameters in out stack arguments. +#if defined(__arm__) + // The callee save frame is pointed to by SP. + // | argN | | + // | ... | | + // | arg4 | | + // | arg3 spill | | Caller's frame + // | arg2 spill | | + // | arg1 spill | | + // | Method* | --- + // | LR | + // | ... | callee saves + // | R3 | arg3 + // | R2 | arg2 + // | R1 | arg1 + // | R0 | + // | Method* | <- sp +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 44 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48 +#define QUICK_STACK_ARG_SKIP 16 +#elif defined(__mips__) + // The callee save frame is pointed to by SP. + // | argN | | + // | ... | | + // | arg4 | | + // | arg3 spill | | Caller's frame + // | arg2 spill | | + // | arg1 spill | | + // | Method* | --- + // | RA | + // | ... | callee saves + // | A3 | arg3 + // | A2 | arg2 + // | A1 | arg1 + // | A0/Method* | <- sp +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 60 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64 +#define QUICK_STACK_ARG_SKIP 16 +#elif defined(__i386__) + // The callee save frame is pointed to by SP. + // | argN | | + // | ... 
| | + // | arg4 | | + // | arg3 spill | | Caller's frame + // | arg2 spill | | + // | arg1 spill | | + // | Method* | --- + // | Return | + // | EBP,ESI,EDI | callee saves + // | EBX | arg3 + // | EDX | arg2 + // | ECX | arg1 + // | EAX/Method* | <- sp +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 28 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32 +#define QUICK_STACK_ARG_SKIP 16 +#else +#error "Unsupported architecture" +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 0 +#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0 +#define QUICK_STACK_ARG_SKIP 0 +#endif + + static mirror::AbstractMethod* GetCallingMethod(mirror::AbstractMethod** sp) { + byte* previous_sp = reinterpret_cast(sp) + + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE; + return *reinterpret_cast(previous_sp); + } + + static uintptr_t GetCallingPc(mirror::AbstractMethod** sp) { + byte* lr = reinterpret_cast(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET; + return *reinterpret_cast(lr); + } + + QuickArgumentVisitor(mirror::AbstractMethod** sp, bool is_static, + const char* shorty, uint32_t shorty_len) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len), + args_in_regs_(ComputeArgsInRegs(is_static, shorty, shorty_len)), + num_params_((is_static ? 
0 : 1) + shorty_len - 1), // +1 for this, -1 for return type + reg_args_(reinterpret_cast(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET), + stack_args_(reinterpret_cast(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE + + QUICK_STACK_ARG_SKIP), + cur_args_(reg_args_), + cur_arg_index_(0), + param_index_(0), + is_split_long_or_double_(false) { + DCHECK_EQ(static_cast(QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE), + Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); + } + + virtual ~QuickArgumentVisitor() {} + + virtual void Visit() = 0; + + Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t index = param_index_; + if (is_static_) { + index++; // 0th argument must skip return value at start of the shorty + } else if (index == 0) { + return Primitive::kPrimNot; + } + CHECK_LT(index, shorty_len_); + return Primitive::GetType(shorty_[index]); + } + + byte* GetParamAddress() const { + return cur_args_ + (cur_arg_index_ * kPointerSize); + } + + bool IsSplitLongOrDouble() const { + return is_split_long_or_double_; + } + + bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetParamPrimitiveType() == Primitive::kPrimNot; + } + + bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Primitive::Type type = GetParamPrimitiveType(); + return type == Primitive::kPrimLong || type == Primitive::kPrimDouble; + } + + uint64_t ReadSplitLongParam() const { + DCHECK(IsSplitLongOrDouble()); + uint64_t low_half = *reinterpret_cast(GetParamAddress()); + uint64_t high_half = *reinterpret_cast(stack_args_); + return (low_half & 0xffffffffULL) | (high_half << 32); + } + + void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) { + is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble(); + Visit(); + 
cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); + param_index_++; + } + cur_args_ = stack_args_; + cur_arg_index_ = is_split_long_or_double_ ? 1 : 0; + is_split_long_or_double_ = false; + while (param_index_ < num_params_) { + Visit(); + cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); + param_index_++; + } + } + + private: + static size_t ComputeArgsInRegs(bool is_static, const char* shorty, uint32_t shorty_len) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + size_t args_in_regs = (is_static ? 0 : 1); + for (size_t i = 0; i < shorty_len; i++) { + char s = shorty[i]; + if (s == 'J' || s == 'D') { + args_in_regs += 2; + } else { + args_in_regs++; + } + if (args_in_regs > 3) { + args_in_regs = 3; + break; + } + } + return args_in_regs; + } + + const bool is_static_; + const char* const shorty_; + const uint32_t shorty_len_; + const size_t args_in_regs_; + const size_t num_params_; + byte* const reg_args_; + byte* const stack_args_; + byte* cur_args_; + size_t cur_arg_index_; + size_t param_index_; + // Does a 64bit parameter straddle the register and stack arguments? + bool is_split_long_or_double_; +}; + +// Visits arguments on the stack placing them into the shadow frame. +class BuildShadowFrameVisitor : public QuickArgumentVisitor { + public: + BuildShadowFrameVisitor(mirror::AbstractMethod** sp, bool is_static, const char* shorty, + uint32_t shorty_len, ShadowFrame& sf, size_t first_arg_reg) : + QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {} + + virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Primitive::Type type = GetParamPrimitiveType(); + switch (type) { + case Primitive::kPrimLong: // Fall-through. 
+ case Primitive::kPrimDouble: + if (IsSplitLongOrDouble()) { + sf_.SetVRegLong(cur_reg_, ReadSplitLongParam()); + } else { + sf_.SetVRegLong(cur_reg_, *reinterpret_cast(GetParamAddress())); + } + ++cur_reg_; + break; + case Primitive::kPrimNot: + sf_.SetVRegReference(cur_reg_, *reinterpret_cast(GetParamAddress())); + break; + case Primitive::kPrimBoolean: // Fall-through. + case Primitive::kPrimByte: // Fall-through. + case Primitive::kPrimChar: // Fall-through. + case Primitive::kPrimShort: // Fall-through. + case Primitive::kPrimInt: // Fall-through. + case Primitive::kPrimFloat: + sf_.SetVReg(cur_reg_, *reinterpret_cast(GetParamAddress())); + break; + case Primitive::kPrimVoid: + LOG(FATAL) << "UNREACHABLE"; + break; + } + ++cur_reg_; + } + + private: + ShadowFrame& sf_; + size_t cur_reg_; + + DISALLOW_COPY_AND_ASSIGN(BuildShadowFrameVisitor); +}; + +extern "C" uint64_t artQuickToInterpreterBridge(mirror::AbstractMethod* method, Thread* self, + mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Ensure we don't get thread suspension until the object arguments are safely in the shadow + // frame. + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs); + + if (method->IsAbstract()) { + ThrowAbstractMethodError(method); + return 0; + } else { + const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame"); + MethodHelper mh(method); + const DexFile::CodeItem* code_item = mh.GetCodeItem(); + uint16_t num_regs = code_item->registers_size_; + void* memory = alloca(ShadowFrame::ComputeSize(num_regs)); + ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL, // No last shadow coming from quick. 
+ method, 0, memory)); + size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_; + BuildShadowFrameVisitor shadow_frame_builder(sp, mh.IsStatic(), mh.GetShorty(), + mh.GetShortyLength(), + *shadow_frame, first_arg_reg); + shadow_frame_builder.VisitArguments(); + // Push a transition back into managed code onto the linked list in thread. + ManagedStack fragment; + self->PushManagedStackFragment(&fragment); + self->PushShadowFrame(shadow_frame); + self->EndAssertNoThreadSuspension(old_cause); + + if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) { + // Ensure static method's class is initialized. + if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), + true, true)) { + DCHECK(Thread::Current()->IsExceptionPending()); + self->PopManagedStackFragment(fragment); + return 0; + } + } + + JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame); + // Pop transition. + self->PopManagedStackFragment(fragment); + return result.GetJ(); + } +} + +// Visits arguments on the stack placing them into the args vector, Object* arguments are converted +// to jobjects. +class BuildQuickArgumentVisitor : public QuickArgumentVisitor { + public: + BuildQuickArgumentVisitor(mirror::AbstractMethod** sp, bool is_static, const char* shorty, + uint32_t shorty_len, ScopedObjectAccessUnchecked* soa, + std::vector* args) : + QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {} + + virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jvalue val; + Primitive::Type type = GetParamPrimitiveType(); + switch (type) { + case Primitive::kPrimNot: { + mirror::Object* obj = *reinterpret_cast(GetParamAddress()); + val.l = soa_->AddLocalReference(obj); + break; + } + case Primitive::kPrimLong: // Fall-through. 
+ case Primitive::kPrimDouble: + if (IsSplitLongOrDouble()) { + val.j = ReadSplitLongParam(); + } else { + val.j = *reinterpret_cast(GetParamAddress()); + } + break; + case Primitive::kPrimBoolean: // Fall-through. + case Primitive::kPrimByte: // Fall-through. + case Primitive::kPrimChar: // Fall-through. + case Primitive::kPrimShort: // Fall-through. + case Primitive::kPrimInt: // Fall-through. + case Primitive::kPrimFloat: + val.i = *reinterpret_cast(GetParamAddress()); + break; + case Primitive::kPrimVoid: + LOG(FATAL) << "UNREACHABLE"; + val.j = 0; + break; + } + args_->push_back(val); + } + + private: + ScopedObjectAccessUnchecked* soa_; + std::vector* args_; + + DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); +}; + +// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method +// which is responsible for recording callee save registers. We explicitly place into jobjects the +// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a +// field within the proxy object, which will box the primitive arguments and deal with error cases. +extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method, + mirror::Object* receiver, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Ensure we don't get thread suspension until the object arguments are safely in jobjects. + const char* old_cause = + self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); + // Register the top of the managed stack, making stack crawlable. + DCHECK_EQ(*sp, proxy_method); + self->SetTopOfStack(sp, 0); + DCHECK_EQ(proxy_method->GetFrameSizeInBytes(), + Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes()); + self->VerifyStack(); + // Start new JNI local reference state. 
+ JNIEnvExt* env = self->GetJniEnv(); + ScopedObjectAccessUnchecked soa(env); + ScopedJniEnvLocalRefState env_state(env); + // Create local ref. copies of proxy method and the receiver. + jobject rcvr_jobj = soa.AddLocalReference(receiver); + + // Placing arguments into args vector and remove the receiver. + MethodHelper proxy_mh(proxy_method); + std::vector args; + BuildQuickArgumentVisitor local_ref_visitor(sp, proxy_mh.IsStatic(), proxy_mh.GetShorty(), + proxy_mh.GetShortyLength(), &soa, &args); + local_ref_visitor.VisitArguments(); + args.erase(args.begin()); + + // Convert proxy method into expected interface method. + mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); + DCHECK(interface_method != NULL); + DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); + jobject interface_method_jobj = soa.AddLocalReference(interface_method); + + // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code + // that performs allocations. + self->EndAssertNoThreadSuspension(old_cause); + JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(), + rcvr_jobj, interface_method_jobj, args); + return result.GetJ(); +} + +// Read object references held in arguments from quick frames and place in a JNI local references, +// so they don't get garbage collected. +class RememberFoGcArgumentVisitor : public QuickArgumentVisitor { + public: + RememberFoGcArgumentVisitor(mirror::AbstractMethod** sp, bool is_static, const char* shorty, + uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) : + QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {} + + virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (IsParamAReference()) { + soa_->AddLocalReference(*reinterpret_cast(GetParamAddress())); + } + } + + private: + ScopedObjectAccessUnchecked* soa_; + + DISALLOW_COPY_AND_ASSIGN(RememberFoGcArgumentVisitor); +}; + +// Lazily resolve a method for quick. 
Called by stub code. +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + Thread* thread, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs); + // Start new JNI local reference state + JNIEnvExt* env = thread->GetJniEnv(); + ScopedObjectAccessUnchecked soa(env); + ScopedJniEnvLocalRefState env_state(env); + const char* old_cause = thread->StartAssertNoThreadSuspension("Quick method resolution set up"); + + // Compute details about the called method (avoid GCs) + ClassLinker* linker = Runtime::Current()->GetClassLinker(); + mirror::AbstractMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp); + InvokeType invoke_type; + const DexFile* dex_file; + uint32_t dex_method_idx; + if (called->IsRuntimeMethod()) { + uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp)); + const DexFile::CodeItem* code; + { + MethodHelper mh(caller); + dex_file = &mh.GetDexFile(); + code = mh.GetCodeItem(); + } + CHECK_LT(dex_pc, code->insns_size_in_code_units_); + const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); + Instruction::Code instr_code = instr->Opcode(); + bool is_range; + switch (instr_code) { + case Instruction::INVOKE_DIRECT: + invoke_type = kDirect; + is_range = false; + break; + case Instruction::INVOKE_DIRECT_RANGE: + invoke_type = kDirect; + is_range = true; + break; + case Instruction::INVOKE_STATIC: + invoke_type = kStatic; + is_range = false; + break; + case Instruction::INVOKE_STATIC_RANGE: + invoke_type = kStatic; + is_range = true; + break; + case Instruction::INVOKE_SUPER: + invoke_type = kSuper; + is_range = false; + break; + case Instruction::INVOKE_SUPER_RANGE: + invoke_type = kSuper; + is_range = true; + break; + case Instruction::INVOKE_VIRTUAL: + invoke_type = kVirtual; + is_range = false; + break; + case Instruction::INVOKE_VIRTUAL_RANGE: + invoke_type = kVirtual; + 
is_range = true; + break; + case Instruction::INVOKE_INTERFACE: + invoke_type = kInterface; + is_range = false; + break; + case Instruction::INVOKE_INTERFACE_RANGE: + invoke_type = kInterface; + is_range = true; + break; + default: + LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); + // Avoid used uninitialized warnings. + invoke_type = kDirect; + is_range = false; + } + dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); + + } else { + invoke_type = kStatic; + dex_file = &MethodHelper(called).GetDexFile(); + dex_method_idx = called->GetDexMethodIndex(); + } + uint32_t shorty_len; + const char* shorty = + dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len); + RememberFoGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa); + visitor.VisitArguments(); + thread->EndAssertNoThreadSuspension(old_cause); + // Resolve method filling in dex cache. + if (called->IsRuntimeMethod()) { + called = linker->ResolveMethod(dex_method_idx, caller, invoke_type); + } + const void* code = NULL; + if (LIKELY(!thread->IsExceptionPending())) { + // Incompatible class change should have been handled in resolve method. + CHECK(!called->CheckIncompatibleClassChange(invoke_type)); + // Refine called method based on receiver. + if (invoke_type == kVirtual) { + called = receiver->GetClass()->FindVirtualMethodForVirtual(called); + } else if (invoke_type == kInterface) { + called = receiver->GetClass()->FindVirtualMethodForInterface(called); + } + // Ensure that the called method's class is initialized. 
+ mirror::Class* called_class = called->GetDeclaringClass(); + linker->EnsureInitialized(called_class, true, true); + if (LIKELY(called_class->IsInitialized())) { + code = called->GetEntryPointFromCompiledCode(); + } else if (called_class->IsInitializing()) { + if (invoke_type == kStatic) { + // Class is still initializing, go to oat and grab code (trampoline must be left in place + // until class is initialized to stop races between threads). + code = linker->GetOatCodeFor(called); + } else { + // No trampoline for non-static methods. + code = called->GetEntryPointFromCompiledCode(); + } + } else { + DCHECK(called_class->IsErroneous()); + } + } + CHECK_EQ(code == NULL, thread->IsExceptionPending()); +#ifdef MOVING_GARBAGE_COLLECTOR + // TODO: locally saved objects may have moved during a GC during resolution. Need to update the + // registers so that the stale objects aren't passed to the method we've resolved. + UNIMPLEMENTED(WARNING); +#endif + // Place called method in callee-save frame to be placed as first argument to quick method. 
+ *sp = called; + return code; +} + +} // namespace art diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index c0b85f41fd..c3b66b3f79 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -60,7 +60,7 @@ bool Instrumentation::InstallStubsForClass(mirror::Class* klass) { const void* new_code; if (uninstall) { if (forced_interpret_only_ && !method->IsNative() && !method->IsProxyMethod()) { - new_code = GetInterpreterEntryPoint(); + new_code = GetCompiledCodeToInterpreterBridge(); } else if (is_initialized || !method->IsStatic() || method->IsConstructor()) { new_code = class_linker->GetOatCodeFor(method); } else { @@ -68,9 +68,9 @@ bool Instrumentation::InstallStubsForClass(mirror::Class* klass) { } } else { // !uninstall if (!interpreter_stubs_installed_ || method->IsNative()) { - new_code = GetInstrumentationEntryPoint(); + new_code = GetQuickInstrumentationEntryPoint(); } else { - new_code = GetInterpreterEntryPoint(); + new_code = GetCompiledCodeToInterpreterBridge(); } } method->SetEntryPointFromCompiledCode(new_code); @@ -82,15 +82,15 @@ bool Instrumentation::InstallStubsForClass(mirror::Class* klass) { const void* new_code; if (uninstall) { if (forced_interpret_only_ && !method->IsNative() && !method->IsProxyMethod()) { - new_code = GetInterpreterEntryPoint(); + new_code = GetCompiledCodeToInterpreterBridge(); } else { new_code = class_linker->GetOatCodeFor(method); } } else { // !uninstall if (!interpreter_stubs_installed_ || method->IsNative()) { - new_code = GetInstrumentationEntryPoint(); + new_code = GetQuickInstrumentationEntryPoint(); } else { - new_code = GetInterpreterEntryPoint(); + new_code = GetCompiledCodeToInterpreterBridge(); } } method->SetEntryPointFromCompiledCode(new_code); @@ -159,7 +159,7 @@ static void InstrumentationInstallStack(Thread* thread, void* arg) LOG(INFO) << "Installing exit stubs in " << thread_name; } UniquePtr context(Context::Create()); - uintptr_t instrumentation_exit_pc = 
GetInstrumentationExitPc(); + uintptr_t instrumentation_exit_pc = GetQuickInstrumentationExitPc(); InstallStackVisitor visitor(thread, context.get(), instrumentation_exit_pc); visitor.WalkStack(true); @@ -251,7 +251,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg) std::deque* stack = thread->GetInstrumentationStack(); if (stack->size() > 0) { Instrumentation* instrumentation = reinterpret_cast(arg); - uintptr_t instrumentation_exit_pc = GetInstrumentationExitPc(); + uintptr_t instrumentation_exit_pc = GetQuickInstrumentationExitPc(); RestoreStackVisitor visitor(thread, instrumentation_exit_pc, instrumentation); visitor.WalkStack(true); CHECK_EQ(visitor.frames_removed_, stack->size()); @@ -384,9 +384,9 @@ void Instrumentation::UpdateMethodsCode(mirror::AbstractMethod* method, const vo method->SetEntryPointFromCompiledCode(code); } else { if (!interpreter_stubs_installed_ || method->IsNative()) { - method->SetEntryPointFromCompiledCode(GetInstrumentationEntryPoint()); + method->SetEntryPointFromCompiledCode(GetQuickInstrumentationEntryPoint()); } else { - method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint()); + method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge()); } } } @@ -396,8 +396,8 @@ const void* Instrumentation::GetQuickCodeFor(const mirror::AbstractMethod* metho if (LIKELY(!instrumentation_stubs_installed_)) { const void* code = method->GetEntryPointFromCompiledCode(); DCHECK(code != NULL); - if (LIKELY(code != GetResolutionTrampoline(runtime->GetClassLinker()) && - code != GetInterpreterEntryPoint())) { + if (LIKELY(code != GetQuickResolutionTrampoline(runtime->GetClassLinker()) && + code != GetQuickToInterpreterBridge())) { return code; } } @@ -548,7 +548,7 @@ uint64_t Instrumentation::PopInstrumentationStackFrame(Thread* self, uintptr_t* << " result is " << std::hex << return_value.GetJ(); } self->SetDeoptimizationReturnValue(return_value); - return static_cast(GetDeoptimizationEntryPoint()) | + 
return static_cast(GetQuickDeoptimizationEntryPoint()) | (static_cast(*return_pc) << 32); } else { if (kVerboseInstrumentation) { diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index ef4b95c037..6e35d937fb 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -148,7 +148,7 @@ static void UnstartedRuntimeInvoke(Thread* self, MethodHelper& mh, } } else { // Not special, continue with regular interpreter execution. - artInterpreterToInterpreterEntry(self, mh, code_item, shadow_frame, result); + artInterpreterToInterpreterBridge(self, mh, code_item, shadow_frame, result); } } @@ -3039,6 +3039,10 @@ static JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* c static inline JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item, ShadowFrame& shadow_frame, JValue result_register) { + DCHECK(shadow_frame.GetMethod() == mh.GetMethod() || + shadow_frame.GetMethod()->GetDeclaringClass()->IsProxyClass()); + DCHECK(!shadow_frame.GetMethod()->IsAbstract()); + DCHECK(!shadow_frame.GetMethod()->IsNative()); if (shadow_frame.GetMethod()->IsPreverified()) { // Enter the "without access check" interpreter. 
return ExecuteImpl(self, mh, code_item, shadow_frame, result_register); @@ -3150,8 +3154,7 @@ void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JVa } JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item, - ShadowFrame& shadow_frame) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ShadowFrame& shadow_frame) { DCHECK_EQ(self, Thread::Current()); if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) { ThrowStackOverflowError(self); @@ -3161,10 +3164,9 @@ JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, const DexFile::C return Execute(self, mh, code_item, shadow_frame, JValue()); } -void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result) { if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) { ThrowStackOverflowError(self); return; diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h index 17884b9a63..af4a1472ee 100644 --- a/runtime/interpreter/interpreter.h +++ b/runtime/interpreter/interpreter.h @@ -47,9 +47,9 @@ extern JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, ShadowFrame& shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); -extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result) +extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); } // namespace interpreter diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h index 
ad66ada329..fcac4811c6 100644 --- a/runtime/jni_internal.h +++ b/runtime/jni_internal.h @@ -144,6 +144,10 @@ struct JNIEnvExt : public JNIEnv { return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie)); } + static Offset SelfOffset() { + return Offset(OFFSETOF_MEMBER(JNIEnvExt, self)); + } + Thread* const self; JavaVMExt* vm; diff --git a/runtime/mirror/abstract_method-inl.h b/runtime/mirror/abstract_method-inl.h index d235e3eed8..8fde99be3b 100644 --- a/runtime/mirror/abstract_method-inl.h +++ b/runtime/mirror/abstract_method-inl.h @@ -114,11 +114,11 @@ inline void AbstractMethod::AssertPcIsWithinCode(uintptr_t pc) const { if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) { return; } - if (pc == GetInstrumentationExitPc()) { + if (pc == GetQuickInstrumentationExitPc()) { return; } const void* code = GetEntryPointFromCompiledCode(); - if (code == GetInterpreterEntryPoint() || code == GetInstrumentationEntryPoint()) { + if (code == GetCompiledCodeToInterpreterBridge() || code == GetQuickInstrumentationEntryPoint()) { return; } ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc index 4d7f99e076..93065e7f4e 100644 --- a/runtime/mirror/abstract_method.cc +++ b/runtime/mirror/abstract_method.cc @@ -321,6 +321,7 @@ bool AbstractMethod::IsRegistered() const { return native_method != jni_stub; } +extern "C" void art_work_around_app_jni_bugs(JNIEnv*, jobject); void AbstractMethod::RegisterNative(Thread* self, const void* native_method) { DCHECK(Thread::Current() == self); CHECK(IsNative()) << PrettyMethod(this); @@ -332,10 +333,10 @@ void AbstractMethod::RegisterNative(Thread* self, const void* native_method) { // around JNI bugs, that include not giving Object** SIRT references to native methods. Direct // the native method to runtime support and store the target somewhere runtime support will // find it. 
-#if defined(__arm__) && !defined(ART_USE_PORTABLE_COMPILER) - SetNativeMethod(native_method); -#else +#if defined(__i386__) UNIMPLEMENTED(FATAL); +#else + SetNativeMethod(reinterpret_cast(art_work_around_app_jni_bugs)); #endif SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, gc_map_), reinterpret_cast(native_method), false); diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index 4ef76076ad..47fe025412 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -144,8 +144,6 @@ static void VMRuntime_setTargetSdkVersion(JNIEnv* env, jobject, jint targetSdkVe if (targetSdkVersion > 0 && targetSdkVersion <= 13 /* honeycomb-mr2 */) { Runtime* runtime = Runtime::Current(); JavaVMExt* vm = runtime->GetJavaVM(); - -#if !defined(ART_USE_PORTABLE_COMPILER) if (vm->check_jni) { LOG(WARNING) << "Turning off CheckJNI so we can turn on JNI app bug workarounds..."; Thread* self = static_cast(env)->self; @@ -158,11 +156,6 @@ static void VMRuntime_setTargetSdkVersion(JNIEnv* env, jobject, jint targetSdkVe << targetSdkVersion << "..."; vm->work_around_app_jni_bugs = true; -#else - UNUSED(env); - LOG(WARNING) << "LLVM does not work-around app jni bugs."; - vm->work_around_app_jni_bugs = false; -#endif } } diff --git a/runtime/oat.cc b/runtime/oat.cc index e606953ed5..c01f77c364 100644 --- a/runtime/oat.cc +++ b/runtime/oat.cc @@ -22,7 +22,7 @@ namespace art { const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' }; -const uint8_t OatHeader::kOatVersion[] = { '0', '0', '6', '\0' }; +const uint8_t OatHeader::kOatVersion[] = { '0', '0', '7', '\0' }; OatHeader::OatHeader() { memset(this, 0, sizeof(*this)); @@ -57,10 +57,13 @@ OatHeader::OatHeader(InstructionSet instruction_set, UpdateChecksum(image_file_location.data(), image_file_location_size_); executable_offset_ = 0; - interpreter_to_interpreter_entry_offset_ = 0; - interpreter_to_quick_entry_offset_ = 0; + 
interpreter_to_interpreter_bridge_offset_ = 0; + interpreter_to_compiled_code_bridge_offset_ = 0; + jni_dlsym_lookup_offset_ = 0; portable_resolution_trampoline_offset_ = 0; + portable_to_interpreter_bridge_offset_ = 0; quick_resolution_trampoline_offset_ = 0; + quick_to_interpreter_bridge_offset_ = 0; } bool OatHeader::IsValid() const { @@ -111,42 +114,61 @@ void OatHeader::SetExecutableOffset(uint32_t executable_offset) { UpdateChecksum(&executable_offset_, sizeof(executable_offset)); } -const void* OatHeader::GetInterpreterToInterpreterEntry() const { - return reinterpret_cast(this) + GetInterpreterToInterpreterEntryOffset(); +const void* OatHeader::GetInterpreterToInterpreterBridge() const { + return reinterpret_cast(this) + GetInterpreterToInterpreterBridgeOffset(); } -uint32_t OatHeader::GetInterpreterToInterpreterEntryOffset() const { +uint32_t OatHeader::GetInterpreterToInterpreterBridgeOffset() const { DCHECK(IsValid()); - CHECK_GE(interpreter_to_interpreter_entry_offset_, executable_offset_); - return interpreter_to_interpreter_entry_offset_; + CHECK_GE(interpreter_to_interpreter_bridge_offset_, executable_offset_); + return interpreter_to_interpreter_bridge_offset_; } -void OatHeader::SetInterpreterToInterpreterEntryOffset(uint32_t offset) { +void OatHeader::SetInterpreterToInterpreterBridgeOffset(uint32_t offset) { CHECK(offset == 0 || offset >= executable_offset_); DCHECK(IsValid()); - DCHECK_EQ(interpreter_to_interpreter_entry_offset_, 0U) << offset; + DCHECK_EQ(interpreter_to_interpreter_bridge_offset_, 0U) << offset; - interpreter_to_interpreter_entry_offset_ = offset; - UpdateChecksum(&interpreter_to_interpreter_entry_offset_, sizeof(offset)); + interpreter_to_interpreter_bridge_offset_ = offset; + UpdateChecksum(&interpreter_to_interpreter_bridge_offset_, sizeof(offset)); } -const void* OatHeader::GetInterpreterToQuickEntry() const { - return reinterpret_cast(this) + GetInterpreterToQuickEntryOffset(); +const void* 
OatHeader::GetInterpreterToCompiledCodeBridge() const { + return reinterpret_cast(this) + GetInterpreterToCompiledCodeBridgeOffset(); } -uint32_t OatHeader::GetInterpreterToQuickEntryOffset() const { +uint32_t OatHeader::GetInterpreterToCompiledCodeBridgeOffset() const { DCHECK(IsValid()); - CHECK_GE(interpreter_to_quick_entry_offset_, interpreter_to_interpreter_entry_offset_); - return interpreter_to_quick_entry_offset_; + CHECK_GE(interpreter_to_compiled_code_bridge_offset_, interpreter_to_interpreter_bridge_offset_); + return interpreter_to_compiled_code_bridge_offset_; } -void OatHeader::SetInterpreterToQuickEntryOffset(uint32_t offset) { - CHECK(offset == 0 || offset >= interpreter_to_interpreter_entry_offset_); +void OatHeader::SetInterpreterToCompiledCodeBridgeOffset(uint32_t offset) { + CHECK(offset == 0 || offset >= interpreter_to_interpreter_bridge_offset_); DCHECK(IsValid()); - DCHECK_EQ(interpreter_to_quick_entry_offset_, 0U) << offset; + DCHECK_EQ(interpreter_to_compiled_code_bridge_offset_, 0U) << offset; - interpreter_to_quick_entry_offset_ = offset; - UpdateChecksum(&interpreter_to_quick_entry_offset_, sizeof(offset)); + interpreter_to_compiled_code_bridge_offset_ = offset; + UpdateChecksum(&interpreter_to_compiled_code_bridge_offset_, sizeof(offset)); +} + +const void* OatHeader::GetJniDlsymLookup() const { + return reinterpret_cast(this) + GetJniDlsymLookupOffset(); +} + +uint32_t OatHeader::GetJniDlsymLookupOffset() const { + DCHECK(IsValid()); + CHECK_GE(jni_dlsym_lookup_offset_, interpreter_to_compiled_code_bridge_offset_); + return jni_dlsym_lookup_offset_; +} + +void OatHeader::SetJniDlsymLookupOffset(uint32_t offset) { + CHECK(offset == 0 || offset >= interpreter_to_compiled_code_bridge_offset_); + DCHECK(IsValid()); + DCHECK_EQ(jni_dlsym_lookup_offset_, 0U) << offset; + + jni_dlsym_lookup_offset_ = offset; + UpdateChecksum(&jni_dlsym_lookup_offset_, sizeof(offset)); } const void* OatHeader::GetPortableResolutionTrampoline() const { @@ 
-155,12 +177,12 @@ const void* OatHeader::GetPortableResolutionTrampoline() const { uint32_t OatHeader::GetPortableResolutionTrampolineOffset() const { DCHECK(IsValid()); - CHECK_GE(portable_resolution_trampoline_offset_, interpreter_to_quick_entry_offset_); + CHECK_GE(portable_resolution_trampoline_offset_, jni_dlsym_lookup_offset_); return portable_resolution_trampoline_offset_; } void OatHeader::SetPortableResolutionTrampolineOffset(uint32_t offset) { - CHECK(offset == 0 || offset >= interpreter_to_quick_entry_offset_); + CHECK(offset == 0 || offset >= jni_dlsym_lookup_offset_); DCHECK(IsValid()); DCHECK_EQ(portable_resolution_trampoline_offset_, 0U) << offset; @@ -168,18 +190,37 @@ void OatHeader::SetPortableResolutionTrampolineOffset(uint32_t offset) { UpdateChecksum(&portable_resolution_trampoline_offset_, sizeof(offset)); } +const void* OatHeader::GetPortableToInterpreterBridge() const { + return reinterpret_cast(this) + GetPortableToInterpreterBridgeOffset(); +} + +uint32_t OatHeader::GetPortableToInterpreterBridgeOffset() const { + DCHECK(IsValid()); + CHECK_GE(portable_to_interpreter_bridge_offset_, portable_resolution_trampoline_offset_); + return portable_to_interpreter_bridge_offset_; +} + +void OatHeader::SetPortableToInterpreterBridgeOffset(uint32_t offset) { + CHECK(offset == 0 || offset >= portable_resolution_trampoline_offset_); + DCHECK(IsValid()); + DCHECK_EQ(portable_to_interpreter_bridge_offset_, 0U) << offset; + + portable_to_interpreter_bridge_offset_ = offset; + UpdateChecksum(&portable_to_interpreter_bridge_offset_, sizeof(offset)); +} + const void* OatHeader::GetQuickResolutionTrampoline() const { return reinterpret_cast(this) + GetQuickResolutionTrampolineOffset(); } uint32_t OatHeader::GetQuickResolutionTrampolineOffset() const { DCHECK(IsValid()); - CHECK_GE(quick_resolution_trampoline_offset_, portable_resolution_trampoline_offset_); + CHECK_GE(quick_resolution_trampoline_offset_, portable_to_interpreter_bridge_offset_); return 
quick_resolution_trampoline_offset_; } void OatHeader::SetQuickResolutionTrampolineOffset(uint32_t offset) { - CHECK(offset == 0 || offset >= portable_resolution_trampoline_offset_); + CHECK(offset == 0 || offset >= portable_to_interpreter_bridge_offset_); DCHECK(IsValid()); DCHECK_EQ(quick_resolution_trampoline_offset_, 0U) << offset; @@ -187,6 +228,25 @@ void OatHeader::SetQuickResolutionTrampolineOffset(uint32_t offset) { UpdateChecksum(&quick_resolution_trampoline_offset_, sizeof(offset)); } +const void* OatHeader::GetQuickToInterpreterBridge() const { + return reinterpret_cast(this) + GetQuickToInterpreterBridgeOffset(); +} + +uint32_t OatHeader::GetQuickToInterpreterBridgeOffset() const { + DCHECK(IsValid()); + CHECK_GE(quick_to_interpreter_bridge_offset_, quick_resolution_trampoline_offset_); + return quick_to_interpreter_bridge_offset_; +} + +void OatHeader::SetQuickToInterpreterBridgeOffset(uint32_t offset) { + CHECK(offset == 0 || offset >= quick_resolution_trampoline_offset_); + DCHECK(IsValid()); + DCHECK_EQ(quick_to_interpreter_bridge_offset_, 0U) << offset; + + quick_to_interpreter_bridge_offset_ = offset; + UpdateChecksum(&quick_to_interpreter_bridge_offset_, sizeof(offset)); +} + uint32_t OatHeader::GetImageFileLocationOatChecksum() const { CHECK(IsValid()); return image_file_location_oat_checksum_; diff --git a/runtime/oat.h b/runtime/oat.h index 4bd1871a71..a5c6bed5fc 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -44,18 +44,32 @@ class PACKED(4) OatHeader { } uint32_t GetExecutableOffset() const; void SetExecutableOffset(uint32_t executable_offset); - const void* GetInterpreterToInterpreterEntry() const; - uint32_t GetInterpreterToInterpreterEntryOffset() const; - void SetInterpreterToInterpreterEntryOffset(uint32_t offset); - const void* GetInterpreterToQuickEntry() const; - uint32_t GetInterpreterToQuickEntryOffset() const; - void SetInterpreterToQuickEntryOffset(uint32_t offset); + + const void* GetInterpreterToInterpreterBridge() const; + 
uint32_t GetInterpreterToInterpreterBridgeOffset() const; + void SetInterpreterToInterpreterBridgeOffset(uint32_t offset); + const void* GetInterpreterToCompiledCodeBridge() const; + uint32_t GetInterpreterToCompiledCodeBridgeOffset() const; + void SetInterpreterToCompiledCodeBridgeOffset(uint32_t offset); + + const void* GetJniDlsymLookup() const; + uint32_t GetJniDlsymLookupOffset() const; + void SetJniDlsymLookupOffset(uint32_t offset); + const void* GetPortableResolutionTrampoline() const; uint32_t GetPortableResolutionTrampolineOffset() const; void SetPortableResolutionTrampolineOffset(uint32_t offset); + const void* GetPortableToInterpreterBridge() const; + uint32_t GetPortableToInterpreterBridgeOffset() const; + void SetPortableToInterpreterBridgeOffset(uint32_t offset); + const void* GetQuickResolutionTrampoline() const; uint32_t GetQuickResolutionTrampolineOffset() const; void SetQuickResolutionTrampolineOffset(uint32_t offset); + const void* GetQuickToInterpreterBridge() const; + uint32_t GetQuickToInterpreterBridgeOffset() const; + void SetQuickToInterpreterBridgeOffset(uint32_t offset); + InstructionSet GetInstructionSet() const; uint32_t GetImageFileLocationOatChecksum() const; uint32_t GetImageFileLocationOatDataBegin() const; @@ -74,10 +88,13 @@ class PACKED(4) OatHeader { InstructionSet instruction_set_; uint32_t dex_file_count_; uint32_t executable_offset_; - uint32_t interpreter_to_interpreter_entry_offset_; - uint32_t interpreter_to_quick_entry_offset_; + uint32_t interpreter_to_interpreter_bridge_offset_; + uint32_t interpreter_to_compiled_code_bridge_offset_; + uint32_t jni_dlsym_lookup_offset_; uint32_t portable_resolution_trampoline_offset_; + uint32_t portable_to_interpreter_bridge_offset_; uint32_t quick_resolution_trampoline_offset_; + uint32_t quick_to_interpreter_bridge_offset_; uint32_t image_file_location_oat_checksum_; uint32_t image_file_location_oat_data_begin_; diff --git a/runtime/oat_test.cc b/runtime/oat_test.cc index 
5d0dca9e4c..68595c896d 100644 --- a/runtime/oat_test.cc +++ b/runtime/oat_test.cc @@ -141,7 +141,7 @@ TEST_F(OatTest, WriteRead) { TEST_F(OatTest, OatHeaderSizeCheck) { // If this test is failing and you have to update these constants, // it is time to update OatHeader::kOatVersion - EXPECT_EQ(52U, sizeof(OatHeader)); + EXPECT_EQ(64U, sizeof(OatHeader)); EXPECT_EQ(28U, sizeof(OatMethodOffsets)); } diff --git a/runtime/object_utils.h b/runtime/object_utils.h index fa7763e11f..3639a80e77 100644 --- a/runtime/object_utils.h +++ b/runtime/object_utils.h @@ -411,6 +411,10 @@ class MethodHelper { shorty_ = NULL; } + const mirror::AbstractMethod* GetMethod() const { + return method_; + } + const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { const DexFile& dex_file = GetDexFile(); uint32_t dex_method_idx = method_->GetDexMethodIndex(); diff --git a/runtime/stack.cc b/runtime/stack.cc index aeb15f09bd..d49d6bae95 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -304,7 +304,7 @@ void StackVisitor::WalkStack(bool include_transitions) { if (UNLIKELY(exit_stubs_installed)) { // While profiling, the return pc is restored from the side stack, except when walking // the stack for an exception where the side stack will be unwound in VisitFrame. 
- if (GetInstrumentationExitPc() == return_pc) { + if (GetQuickInstrumentationExitPc() == return_pc) { instrumentation::InstrumentationStackFrame instrumentation_frame = GetInstrumentationStackFrame(instrumentation_stack_depth); instrumentation_stack_depth++; diff --git a/runtime/thread.cc b/runtime/thread.cc index 97a1410892..c79caa21f8 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -86,23 +86,25 @@ static void UnimplementedEntryPoint() { } #endif -void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints); +void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, + PortableEntryPoints* ppoints, QuickEntryPoints* qpoints); -void Thread::InitFunctionPointers() { +void Thread::InitTlsEntryPoints() { #if !defined(__APPLE__) // The Mac GCC is too old to accept this code. // Insert a placeholder so we can easily tell if we call an unimplemented entry point. - uintptr_t* begin = reinterpret_cast(&quick_entrypoints_); + uintptr_t* begin = reinterpret_cast(&interpreter_entrypoints_); uintptr_t* end = reinterpret_cast(reinterpret_cast(begin) + sizeof(quick_entrypoints_)); for (uintptr_t* it = begin; it != end; ++it) { *it = reinterpret_cast(UnimplementedEntryPoint); } - begin = reinterpret_cast(&portable_entrypoints_); + begin = reinterpret_cast(&interpreter_entrypoints_); end = reinterpret_cast(reinterpret_cast(begin) + sizeof(portable_entrypoints_)); for (uintptr_t* it = begin; it != end; ++it) { *it = reinterpret_cast(UnimplementedEntryPoint); } #endif - InitEntryPoints(&quick_entrypoints_, &portable_entrypoints_); + InitEntryPoints(&interpreter_entrypoints_, &jni_entrypoints_, &portable_entrypoints_, + &quick_entrypoints_); } void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) { @@ -292,7 +294,7 @@ void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) { CHECK(Thread::Current() == NULL); SetUpAlternateSignalStack(); InitCpu(); - InitFunctionPointers(); + InitTlsEntryPoints(); InitCardTable(); InitTid(); 
// Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this @@ -1589,22 +1591,29 @@ struct EntryPointInfo { uint32_t offset; const char* name; }; -#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x), #x } -#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x), #x } +#define INTERPRETER_ENTRY_POINT_INFO(x) { INTERPRETER_ENTRYPOINT_OFFSET(x).Uint32Value(), #x } +#define JNI_ENTRY_POINT_INFO(x) { JNI_ENTRYPOINT_OFFSET(x).Uint32Value(), #x } +#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x).Uint32Value(), #x } +#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x).Uint32Value(), #x } static const EntryPointInfo gThreadEntryPointInfo[] = { - QUICK_ENTRY_POINT_INFO(pAllocArrayFromCode), - QUICK_ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck), - QUICK_ENTRY_POINT_INFO(pAllocObjectFromCode), - QUICK_ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck), - QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode), - QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck), - QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode), - QUICK_ENTRY_POINT_INFO(pCanPutArrayElementFromCode), - QUICK_ENTRY_POINT_INFO(pCheckCastFromCode), + INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge), + INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge), + JNI_ENTRY_POINT_INFO(pDlsymLookup), + PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline), + PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge), + QUICK_ENTRY_POINT_INFO(pAllocArray), + QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pAllocObject), + QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray), + QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial), + QUICK_ENTRY_POINT_INFO(pCanPutArrayElement), + QUICK_ENTRY_POINT_INFO(pCheckCast), 
QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage), - QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode), - QUICK_ENTRY_POINT_INFO(pInitializeTypeFromCode), - QUICK_ENTRY_POINT_INFO(pResolveStringFromCode), + QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess), + QUICK_ENTRY_POINT_INFO(pInitializeType), + QUICK_ENTRY_POINT_INFO(pResolveString), QUICK_ENTRY_POINT_INFO(pSet32Instance), QUICK_ENTRY_POINT_INFO(pSet32Static), QUICK_ENTRY_POINT_INFO(pSet64Instance), @@ -1617,15 +1626,15 @@ static const EntryPointInfo gThreadEntryPointInfo[] = { QUICK_ENTRY_POINT_INFO(pGet64Static), QUICK_ENTRY_POINT_INFO(pGetObjInstance), QUICK_ENTRY_POINT_INFO(pGetObjStatic), - QUICK_ENTRY_POINT_INFO(pHandleFillArrayDataFromCode), + QUICK_ENTRY_POINT_INFO(pHandleFillArrayData), QUICK_ENTRY_POINT_INFO(pJniMethodStart), QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized), QUICK_ENTRY_POINT_INFO(pJniMethodEnd), QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized), QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference), QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized), - QUICK_ENTRY_POINT_INFO(pLockObjectFromCode), - QUICK_ENTRY_POINT_INFO(pUnlockObjectFromCode), + QUICK_ENTRY_POINT_INFO(pLockObject), + QUICK_ENTRY_POINT_INFO(pUnlockObject), QUICK_ENTRY_POINT_INFO(pCmpgDouble), QUICK_ENTRY_POINT_INFO(pCmpgFloat), QUICK_ENTRY_POINT_INFO(pCmplDouble), @@ -1646,28 +1655,26 @@ static const EntryPointInfo gThreadEntryPointInfo[] = { QUICK_ENTRY_POINT_INFO(pShlLong), QUICK_ENTRY_POINT_INFO(pShrLong), QUICK_ENTRY_POINT_INFO(pUshrLong), - QUICK_ENTRY_POINT_INFO(pInterpreterToInterpreterEntry), - QUICK_ENTRY_POINT_INFO(pInterpreterToQuickEntry), QUICK_ENTRY_POINT_INFO(pIndexOf), QUICK_ENTRY_POINT_INFO(pMemcmp16), QUICK_ENTRY_POINT_INFO(pStringCompareTo), QUICK_ENTRY_POINT_INFO(pMemcpy), - QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode), + QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline), + QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge), 
QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck), QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline), QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck), QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck), QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck), QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck), - QUICK_ENTRY_POINT_INFO(pCheckSuspendFromCode), - QUICK_ENTRY_POINT_INFO(pTestSuspendFromCode), + QUICK_ENTRY_POINT_INFO(pCheckSuspend), + QUICK_ENTRY_POINT_INFO(pTestSuspend), QUICK_ENTRY_POINT_INFO(pDeliverException), - QUICK_ENTRY_POINT_INFO(pThrowArrayBoundsFromCode), - QUICK_ENTRY_POINT_INFO(pThrowDivZeroFromCode), - QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode), - QUICK_ENTRY_POINT_INFO(pThrowNullPointerFromCode), - QUICK_ENTRY_POINT_INFO(pThrowStackOverflowFromCode), - PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode), + QUICK_ENTRY_POINT_INFO(pThrowArrayBounds), + QUICK_ENTRY_POINT_INFO(pThrowDivZero), + QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod), + QUICK_ENTRY_POINT_INFO(pThrowNullPointer), + QUICK_ENTRY_POINT_INFO(pThrowStackOverflow), }; #undef QUICK_ENTRY_POINT_INFO @@ -1695,8 +1702,9 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_ size_t entry_point_count = arraysize(gThreadEntryPointInfo); CHECK_EQ(entry_point_count * size_of_pointers, - sizeof(QuickEntryPoints) + sizeof(PortableEntryPoints)); - uint32_t expected_offset = OFFSETOF_MEMBER(Thread, quick_entrypoints_); + sizeof(InterpreterEntryPoints) + sizeof(JniEntryPoints) + sizeof(PortableEntryPoints) + + sizeof(QuickEntryPoints)); + uint32_t expected_offset = OFFSETOF_MEMBER(Thread, interpreter_entrypoints_); for (size_t i = 0; i < entry_point_count; ++i) { CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name; expected_offset += size_of_pointers; @@ -1739,7 +1747,7 @@ class CatchBlockStackVisitor : public StackVisitor { return false; // 
End stack walk. } else { if (UNLIKELY(method_tracing_active_ && - GetInstrumentationExitPc() == GetReturnPc())) { + GetQuickInstrumentationExitPc() == GetReturnPc())) { // Keep count of the number of unwinds during instrumentation. instrumentation_frames_to_pop_++; } diff --git a/runtime/thread.h b/runtime/thread.h index ff0fe228c0..8b6771e60c 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -26,6 +26,8 @@ #include #include "base/macros.h" +#include "entrypoints/interpreter/interpreter_entrypoints.h" +#include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/portable/portable_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "globals.h" @@ -43,17 +45,17 @@ namespace art { namespace mirror { -class AbstractMethod; -class Array; -class Class; -class ClassLoader; -class Object; -template class ObjectArray; -template class PrimitiveArray; -typedef PrimitiveArray IntArray; -class StackTraceElement; -class StaticStorageBase; -class Throwable; + class AbstractMethod; + class Array; + class Class; + class ClassLoader; + class Object; + template class ObjectArray; + template class PrimitiveArray; + typedef PrimitiveArray IntArray; + class StackTraceElement; + class StaticStorageBase; + class Throwable; } // namespace mirror class BaseMutex; class ClassLinker; @@ -614,7 +616,7 @@ class PACKED(4) Thread { void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_); void InitCardTable(); void InitCpu(); - void InitFunctionPointers(); + void InitTlsEntryPoints(); void InitTid(); void InitPthreadKeySelf(); void InitStackHwm(); @@ -776,8 +778,10 @@ class PACKED(4) Thread { public: // Entrypoint function pointers // TODO: move this near the top, since changing its offset requires all oats to be recompiled! 
- QuickEntryPoints quick_entrypoints_; + InterpreterEntryPoints interpreter_entrypoints_; + JniEntryPoints jni_entrypoints_; PortableEntryPoints portable_entrypoints_; + QuickEntryPoints quick_entrypoints_; private: // How many times has our pthread key's destructor been called? -- cgit v1.2.3-59-g8ed1b