From 166db04e259ca51838c311891598664deeed85ad Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Fri, 26 Jul 2013 12:05:57 -0700 Subject: Move assembler out of runtime into compiler/utils. Other directory layout bits of clean up. There is still work to separate quick and portable in some files (e.g. argument visitor, proxy..). Change-Id: If8fecffda8ba5c4c47a035f0c622c538c6b58351 --- compiler/utils/mips/assembler_mips.cc | 999 ++++++++++++++++++++++++++++++++++ 1 file changed, 999 insertions(+) create mode 100644 compiler/utils/mips/assembler_mips.cc (limited to 'compiler/utils/mips/assembler_mips.cc') diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc new file mode 100644 index 0000000000..58815da1b8 --- /dev/null +++ b/compiler/utils/mips/assembler_mips.cc @@ -0,0 +1,999 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "assembler_mips.h" + +#include "base/casts.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "memory_region.h" +#include "thread.h" + +namespace art { +namespace mips { +#if 0 +class DirectCallRelocation : public AssemblerFixup { + public: + void Process(const MemoryRegion& region, int position) { + // Direct calls are relative to the following instruction on mips. 
+ int32_t pointer = region.Load(position); + int32_t start = reinterpret_cast(region.start()); + int32_t delta = start + position + sizeof(int32_t); + region.Store(position, pointer - delta); + } +}; +#endif + +std::ostream& operator<<(std::ostream& os, const DRegister& rhs) { + if (rhs >= D0 && rhs < kNumberOfDRegisters) { + os << "d" << static_cast(rhs); + } else { + os << "DRegister[" << static_cast(rhs) << "]"; + } + return os; +} + +void MipsAssembler::Emit(int32_t value) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + buffer_.Emit(value); +} + +void MipsAssembler::EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct) { + CHECK_NE(rs, kNoRegister); + CHECK_NE(rt, kNoRegister); + CHECK_NE(rd, kNoRegister); + int32_t encoding = opcode << kOpcodeShift | + static_cast(rs) << kRsShift | + static_cast(rt) << kRtShift | + static_cast(rd) << kRdShift | + shamt << kShamtShift | + funct; + Emit(encoding); +} + +void MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) { + CHECK_NE(rs, kNoRegister); + CHECK_NE(rt, kNoRegister); + int32_t encoding = opcode << kOpcodeShift | + static_cast(rs) << kRsShift | + static_cast(rt) << kRtShift | + imm; + Emit(encoding); +} + +void MipsAssembler::EmitJ(int opcode, int address) { + int32_t encoding = opcode << kOpcodeShift | + address; + Emit(encoding); +} + +void MipsAssembler::EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct) { + CHECK_NE(ft, kNoFRegister); + CHECK_NE(fs, kNoFRegister); + CHECK_NE(fd, kNoFRegister); + int32_t encoding = opcode << kOpcodeShift | + fmt << kFmtShift | + static_cast(ft) << kFtShift | + static_cast(fs) << kFsShift | + static_cast(fd) << kFdShift | + funct; + Emit(encoding); +} + +void MipsAssembler::EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm) { + CHECK_NE(rt, kNoFRegister); + int32_t encoding = opcode << kOpcodeShift | + fmt << kFmtShift | + static_cast(rt) << kRtShift | + imm; + Emit(encoding); +} + +void 
MipsAssembler::EmitBranch(Register rt, Register rs, Label* label, bool equal) { + int offset; + if (label->IsBound()) { + offset = label->Position() - buffer_.Size(); + } else { + // Use the offset field of the branch instruction for linking the sites. + offset = label->position_; + label->LinkTo(buffer_.Size()); + } + if (equal) { + Beq(rt, rs, (offset >> 2) & kBranchOffsetMask); + } else { + Bne(rt, rs, (offset >> 2) & kBranchOffsetMask); + } +} + +void MipsAssembler::EmitJump(Label* label, bool link) { + int offset; + if (label->IsBound()) { + offset = label->Position() - buffer_.Size(); + } else { + // Use the offset field of the jump instruction for linking the sites. + offset = label->position_; + label->LinkTo(buffer_.Size()); + } + if (link) { + Jal((offset >> 2) & kJumpOffsetMask); + } else { + J((offset >> 2) & kJumpOffsetMask); + } +} + +int32_t MipsAssembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) { + CHECK_ALIGNED(offset, 4); + CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset)) << offset; + + // Properly preserve only the bits supported in the instruction. + offset >>= 2; + if (is_jump) { + offset &= kJumpOffsetMask; + return (inst & ~kJumpOffsetMask) | offset; + } else { + offset &= kBranchOffsetMask; + return (inst & ~kBranchOffsetMask) | offset; + } +} + +int MipsAssembler::DecodeBranchOffset(int32_t inst, bool is_jump) { + // Sign-extend, then left-shift by 2. + if (is_jump) { + return (((inst & kJumpOffsetMask) << 6) >> 4); + } else { + return (((inst & kBranchOffsetMask) << 16) >> 14); + } +} + +void MipsAssembler::Bind(Label* label, bool is_jump) { + CHECK(!label->IsBound()); + int bound_pc = buffer_.Size(); + while (label->IsLinked()) { + int32_t position = label->Position(); + int32_t next = buffer_.Load(position); + int32_t offset = is_jump ? 
bound_pc - position : bound_pc - position - 4; + int32_t encoded = MipsAssembler::EncodeBranchOffset(offset, next, is_jump); + buffer_.Store(position, encoded); + label->position_ = MipsAssembler::DecodeBranchOffset(next, is_jump); + } + label->BindTo(bound_pc); +} + +void MipsAssembler::Add(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x20); +} + +void MipsAssembler::Addu(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x21); +} + +void MipsAssembler::Addi(Register rt, Register rs, uint16_t imm16) { + EmitI(0x8, rs, rt, imm16); +} + +void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) { + EmitI(0x9, rs, rt, imm16); +} + +void MipsAssembler::Sub(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x22); +} + +void MipsAssembler::Subu(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x23); +} + +void MipsAssembler::Mult(Register rs, Register rt) { + EmitR(0, rs, rt, static_cast(0), 0, 0x18); +} + +void MipsAssembler::Multu(Register rs, Register rt) { + EmitR(0, rs, rt, static_cast(0), 0, 0x19); +} + +void MipsAssembler::Div(Register rs, Register rt) { + EmitR(0, rs, rt, static_cast(0), 0, 0x1a); +} + +void MipsAssembler::Divu(Register rs, Register rt) { + EmitR(0, rs, rt, static_cast(0), 0, 0x1b); +} + +void MipsAssembler::And(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x24); +} + +void MipsAssembler::Andi(Register rt, Register rs, uint16_t imm16) { + EmitI(0xc, rs, rt, imm16); +} + +void MipsAssembler::Or(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x25); +} + +void MipsAssembler::Ori(Register rt, Register rs, uint16_t imm16) { + EmitI(0xd, rs, rt, imm16); +} + +void MipsAssembler::Xor(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x26); +} + +void MipsAssembler::Xori(Register rt, Register rs, uint16_t imm16) { + EmitI(0xe, rs, rt, imm16); +} + +void MipsAssembler::Nor(Register rd, Register rs, Register rt) { 
+ EmitR(0, rs, rt, rd, 0, 0x27); +} + +void MipsAssembler::Sll(Register rd, Register rs, int shamt) { + EmitR(0, rs, static_cast(0), rd, shamt, 0x00); +} + +void MipsAssembler::Srl(Register rd, Register rs, int shamt) { + EmitR(0, rs, static_cast(0), rd, shamt, 0x02); +} + +void MipsAssembler::Sra(Register rd, Register rs, int shamt) { + EmitR(0, rs, static_cast(0), rd, shamt, 0x03); +} + +void MipsAssembler::Sllv(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x04); +} + +void MipsAssembler::Srlv(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x06); +} + +void MipsAssembler::Srav(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x07); +} + +void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) { + EmitI(0x20, rs, rt, imm16); +} + +void MipsAssembler::Lh(Register rt, Register rs, uint16_t imm16) { + EmitI(0x21, rs, rt, imm16); +} + +void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) { + EmitI(0x23, rs, rt, imm16); +} + +void MipsAssembler::Lbu(Register rt, Register rs, uint16_t imm16) { + EmitI(0x24, rs, rt, imm16); +} + +void MipsAssembler::Lhu(Register rt, Register rs, uint16_t imm16) { + EmitI(0x25, rs, rt, imm16); +} + +void MipsAssembler::Lui(Register rt, uint16_t imm16) { + EmitI(0xf, static_cast(0), rt, imm16); +} + +void MipsAssembler::Mfhi(Register rd) { + EmitR(0, static_cast(0), static_cast(0), rd, 0, 0x10); +} + +void MipsAssembler::Mflo(Register rd) { + EmitR(0, static_cast(0), static_cast(0), rd, 0, 0x12); +} + +void MipsAssembler::Sb(Register rt, Register rs, uint16_t imm16) { + EmitI(0x28, rs, rt, imm16); +} + +void MipsAssembler::Sh(Register rt, Register rs, uint16_t imm16) { + EmitI(0x29, rs, rt, imm16); +} + +void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) { + EmitI(0x2b, rs, rt, imm16); +} + +void MipsAssembler::Slt(Register rd, Register rs, Register rt) { + EmitR(0, rs, rt, rd, 0, 0x2a); +} + +void MipsAssembler::Sltu(Register rd, Register rs, 
Register rt) { + EmitR(0, rs, rt, rd, 0, 0x2b); +} + +void MipsAssembler::Slti(Register rt, Register rs, uint16_t imm16) { + EmitI(0xa, rs, rt, imm16); +} + +void MipsAssembler::Sltiu(Register rt, Register rs, uint16_t imm16) { + EmitI(0xb, rs, rt, imm16); +} + +void MipsAssembler::Beq(Register rt, Register rs, uint16_t imm16) { + EmitI(0x4, rs, rt, imm16); + Nop(); +} + +void MipsAssembler::Bne(Register rt, Register rs, uint16_t imm16) { + EmitI(0x5, rs, rt, imm16); + Nop(); +} + +void MipsAssembler::J(uint32_t address) { + EmitJ(0x2, address); + Nop(); +} + +void MipsAssembler::Jal(uint32_t address) { + EmitJ(0x2, address); + Nop(); +} + +void MipsAssembler::Jr(Register rs) { + EmitR(0, rs, static_cast(0), static_cast(0), 0, 0x08); + Nop(); +} + +void MipsAssembler::Jalr(Register rs) { + EmitR(0, rs, static_cast(0), RA, 0, 0x09); + Nop(); +} + +void MipsAssembler::AddS(FRegister fd, FRegister fs, FRegister ft) { + EmitFR(0x11, 0x10, ft, fs, fd, 0x0); +} + +void MipsAssembler::SubS(FRegister fd, FRegister fs, FRegister ft) { + EmitFR(0x11, 0x10, ft, fs, fd, 0x1); +} + +void MipsAssembler::MulS(FRegister fd, FRegister fs, FRegister ft) { + EmitFR(0x11, 0x10, ft, fs, fd, 0x2); +} + +void MipsAssembler::DivS(FRegister fd, FRegister fs, FRegister ft) { + EmitFR(0x11, 0x10, ft, fs, fd, 0x3); +} + +void MipsAssembler::AddD(DRegister fd, DRegister fs, DRegister ft) { + EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), + static_cast(fd), 0x0); +} + +void MipsAssembler::SubD(DRegister fd, DRegister fs, DRegister ft) { + EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), + static_cast(fd), 0x1); +} + +void MipsAssembler::MulD(DRegister fd, DRegister fs, DRegister ft) { + EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), + static_cast(fd), 0x2); +} + +void MipsAssembler::DivD(DRegister fd, DRegister fs, DRegister ft) { + EmitFR(0x11, 0x11, static_cast(ft), static_cast(fs), + static_cast(fd), 0x3); +} + +void MipsAssembler::MovS(FRegister fd, FRegister fs) { + 
EmitFR(0x11, 0x10, static_cast(0), fs, fd, 0x6); +} + +void MipsAssembler::MovD(DRegister fd, DRegister fs) { + EmitFR(0x11, 0x11, static_cast(0), static_cast(fs), + static_cast(fd), 0x6); +} + +void MipsAssembler::Mfc1(Register rt, FRegister fs) { + EmitFR(0x11, 0x00, static_cast(rt), fs, static_cast(0), 0x0); +} + +void MipsAssembler::Mtc1(FRegister ft, Register rs) { + EmitFR(0x11, 0x04, ft, static_cast(rs), static_cast(0), 0x0); +} + +void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) { + EmitI(0x31, rs, static_cast(ft), imm16); +} + +void MipsAssembler::Ldc1(DRegister ft, Register rs, uint16_t imm16) { + EmitI(0x35, rs, static_cast(ft), imm16); +} + +void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) { + EmitI(0x39, rs, static_cast(ft), imm16); +} + +void MipsAssembler::Sdc1(DRegister ft, Register rs, uint16_t imm16) { + EmitI(0x3d, rs, static_cast(ft), imm16); +} + +void MipsAssembler::Break() { + EmitR(0, static_cast(0), static_cast(0), + static_cast(0), 0, 0xD); +} + +void MipsAssembler::Nop() { + EmitR(0x0, static_cast(0), static_cast(0), static_cast(0), 0, 0x0); +} + +void MipsAssembler::Move(Register rt, Register rs) { + EmitI(0x8, rs, rt, 0); +} + +void MipsAssembler::Clear(Register rt) { + EmitR(0, static_cast(0), static_cast(0), rt, 0, 0x20); +} + +void MipsAssembler::Not(Register rt, Register rs) { + EmitR(0, static_cast(0), rs, rt, 0, 0x27); +} + +void MipsAssembler::Mul(Register rd, Register rs, Register rt) { + Mult(rs, rt); + Mflo(rd); +} + +void MipsAssembler::Div(Register rd, Register rs, Register rt) { + Div(rs, rt); + Mflo(rd); +} + +void MipsAssembler::Rem(Register rd, Register rs, Register rt) { + Div(rs, rt); + Mfhi(rd); +} + +void MipsAssembler::AddConstant(Register rt, Register rs, int32_t value) { + Addi(rt, rs, value); +} + +void MipsAssembler::LoadImmediate(Register rt, int32_t value) { + Addi(rt, ZERO, value); +} + +void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t 
src_offset, + size_t size) { + MipsManagedRegister dst = m_dst.AsMips(); + if (dst.IsNoRegister()) { + CHECK_EQ(0u, size) << dst; + } else if (dst.IsCoreRegister()) { + CHECK_EQ(4u, size) << dst; + LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset); + } else if (dst.IsRegisterPair()) { + CHECK_EQ(8u, size) << dst; + LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset); + LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4); + } else if (dst.IsFRegister()) { + LoadSFromOffset(dst.AsFRegister(), src_register, src_offset); + } else { + CHECK(dst.IsDRegister()) << dst; + LoadDFromOffset(dst.AsDRegister(), src_register, src_offset); + } +} + +void MipsAssembler::LoadFromOffset(LoadOperandType type, Register reg, Register base, + int32_t offset) { + switch (type) { + case kLoadSignedByte: + Lb(reg, base, offset); + break; + case kLoadUnsignedByte: + Lbu(reg, base, offset); + break; + case kLoadSignedHalfword: + Lh(reg, base, offset); + break; + case kLoadUnsignedHalfword: + Lhu(reg, base, offset); + break; + case kLoadWord: + Lw(reg, base, offset); + break; + case kLoadWordPair: + LOG(FATAL) << "UNREACHABLE"; + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } +} + +void MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) { + Lwc1(reg, base, offset); +} + +void MipsAssembler::LoadDFromOffset(DRegister reg, Register base, int32_t offset) { + Ldc1(reg, base, offset); +} + +void MipsAssembler::StoreToOffset(StoreOperandType type, Register reg, Register base, + int32_t offset) { + switch (type) { + case kStoreByte: + Sb(reg, base, offset); + break; + case kStoreHalfword: + Sh(reg, base, offset); + break; + case kStoreWord: + Sw(reg, base, offset); + break; + case kStoreWordPair: + LOG(FATAL) << "UNREACHABLE"; + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } +} + +void MipsAssembler::StoreFToOffset(FRegister reg, Register base, int32_t offset) { + Swc1(reg, base, 
offset); +} + +void MipsAssembler::StoreDToOffset(DRegister reg, Register base, int32_t offset) { + Sdc1(reg, base, offset); +} + +void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg, + const std::vector& callee_save_regs, + const std::vector& entry_spills) { + CHECK_ALIGNED(frame_size, kStackAlignment); + + // Increase frame to required size. + IncreaseFrameSize(frame_size); + + // Push callee saves and return address + int stack_offset = frame_size - kPointerSize; + StoreToOffset(kStoreWord, RA, SP, stack_offset); + for (int i = callee_save_regs.size() - 1; i >= 0; --i) { + stack_offset -= kPointerSize; + Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister(); + StoreToOffset(kStoreWord, reg, SP, stack_offset); + } + + // Write out Method*. + StoreToOffset(kStoreWord, method_reg.AsMips().AsCoreRegister(), SP, 0); + + // Write out entry spills. + for (size_t i = 0; i < entry_spills.size(); ++i) { + Register reg = entry_spills.at(i).AsMips().AsCoreRegister(); + StoreToOffset(kStoreWord, reg, SP, frame_size + kPointerSize + (i * kPointerSize)); + } +} + +void MipsAssembler::RemoveFrame(size_t frame_size, + const std::vector& callee_save_regs) { + CHECK_ALIGNED(frame_size, kStackAlignment); + + // Pop callee saves and return address + int stack_offset = frame_size - (callee_save_regs.size() * kPointerSize) - kPointerSize; + for (size_t i = 0; i < callee_save_regs.size(); ++i) { + Register reg = callee_save_regs.at(i).AsMips().AsCoreRegister(); + LoadFromOffset(kLoadWord, reg, SP, stack_offset); + stack_offset += kPointerSize; + } + LoadFromOffset(kLoadWord, RA, SP, stack_offset); + + // Decrease frame to required size. + DecreaseFrameSize(frame_size); + + // Then jump to the return address. 
+ Jr(RA); +} + +void MipsAssembler::IncreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + AddConstant(SP, SP, -adjust); +} + +void MipsAssembler::DecreaseFrameSize(size_t adjust) { + CHECK_ALIGNED(adjust, kStackAlignment); + AddConstant(SP, SP, adjust); +} + +void MipsAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) { + MipsManagedRegister src = msrc.AsMips(); + if (src.IsNoRegister()) { + CHECK_EQ(0u, size); + } else if (src.IsCoreRegister()) { + CHECK_EQ(4u, size); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); + } else if (src.IsRegisterPair()) { + CHECK_EQ(8u, size); + StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value()); + StoreToOffset(kStoreWord, src.AsRegisterPairHigh(), + SP, dest.Int32Value() + 4); + } else if (src.IsFRegister()) { + StoreFToOffset(src.AsFRegister(), SP, dest.Int32Value()); + } else { + CHECK(src.IsDRegister()); + StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value()); + } +} + +void MipsAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) { + MipsManagedRegister src = msrc.AsMips(); + CHECK(src.IsCoreRegister()); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) { + MipsManagedRegister src = msrc.AsMips(); + CHECK(src.IsCoreRegister()); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadImmediate(scratch.AsCoreRegister(), imm); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << 
scratch; + LoadImmediate(scratch.AsCoreRegister(), imm); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), S1, dest.Int32Value()); +} + +void MipsAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), + S1, thr_offs.Int32Value()); +} + +void MipsAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) { + StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value()); +} + +void MipsAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc, + FrameOffset in_off, ManagedRegister mscratch) { + MipsManagedRegister src = msrc.AsMips(); + MipsManagedRegister scratch = mscratch.AsMips(); + StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value()); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4); +} + +void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) { + return EmitLoad(mdest, SP, src.Int32Value(), size); +} + +void MipsAssembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) { + return EmitLoad(mdest, S1, src.Int32Value(), size); +} + +void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister()); + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value()); +} + +void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, + MemberOffset offs) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()); + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), + base.AsMips().AsCoreRegister(), offs.Int32Value()); +} + +void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister 
base, + Offset offs) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister() && dest.IsCoreRegister()) << dest; + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), + base.AsMips().AsCoreRegister(), offs.Int32Value()); +} + +void MipsAssembler::LoadRawPtrFromThread(ManagedRegister mdest, + ThreadOffset offs) { + MipsManagedRegister dest = mdest.AsMips(); + CHECK(dest.IsCoreRegister()); + LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value()); +} + +void MipsAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips"; +} + +void MipsAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips"; +} + +void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*size*/) { + MipsManagedRegister dest = mdest.AsMips(); + MipsManagedRegister src = msrc.AsMips(); + if (!dest.Equals(src)) { + if (dest.IsCoreRegister()) { + CHECK(src.IsCoreRegister()) << src; + Move(dest.AsCoreRegister(), src.AsCoreRegister()); + } else if (dest.IsFRegister()) { + CHECK(src.IsFRegister()) << src; + MovS(dest.AsFRegister(), src.AsFRegister()); + } else if (dest.IsDRegister()) { + CHECK(src.IsDRegister()) << src; + MovD(dest.AsDRegister(), src.AsDRegister()); + } else { + CHECK(dest.IsRegisterPair()) << dest; + CHECK(src.IsRegisterPair()) << src; + // Ensure that the first move doesn't clobber the input of the second + if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) { + Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow()); + Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh()); + } else { + Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh()); + Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow()); + } + } + } +} + +void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + 
CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); +} + +void MipsAssembler::CopyRawPtrFromThread(FrameOffset fr_offs, + ThreadOffset thr_offs, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + S1, thr_offs.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), + SP, fr_offs.Int32Value()); +} + +void MipsAssembler::CopyRawPtrToThread(ThreadOffset thr_offs, + FrameOffset fr_offs, + ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + SP, fr_offs.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), + S1, thr_offs.Int32Value()); +} + +void MipsAssembler::Copy(FrameOffset dest, FrameOffset src, + ManagedRegister mscratch, size_t size) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + CHECK(size == 4 || size == 8) << size; + if (size == 4) { + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); + } else if (size == 8) { + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value()); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4); + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4); + } +} + +void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, + ManagedRegister mscratch, size_t size) { + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + LoadFromOffset(kLoadWord, scratch, 
src_base.AsMips().AsCoreRegister(), src_offset.Int32Value()); + StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value()); +} + +void MipsAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, + ManagedRegister mscratch, size_t size) { + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value()); + StoreToOffset(kStoreWord, scratch, dest_base.AsMips().AsCoreRegister(), dest_offset.Int32Value()); +} + +void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/, + ManagedRegister /*mscratch*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no arm implementation"; +#if 0 + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + movl(scratch, Address(ESP, src_base)); + movl(scratch, Address(scratch, src_offset)); + movl(Address(ESP, dest), scratch); +#endif +} + +void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset, + ManagedRegister src, Offset src_offset, + ManagedRegister mscratch, size_t size) { + CHECK_EQ(size, 4u); + Register scratch = mscratch.AsMips().AsCoreRegister(); + LoadFromOffset(kLoadWord, scratch, src.AsMips().AsCoreRegister(), src_offset.Int32Value()); + StoreToOffset(kStoreWord, scratch, dest.AsMips().AsCoreRegister(), dest_offset.Int32Value()); +} + +void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/, + ManagedRegister /*mscratch*/, size_t /*size*/) { + UNIMPLEMENTED(FATAL) << "no arm implementation"; +#if 0 + Register scratch = mscratch.AsMips().AsCoreRegister(); + CHECK_EQ(size, 4u); + CHECK_EQ(dest.Int32Value(), src.Int32Value()); + movl(scratch, Address(ESP, src)); + pushl(Address(scratch, src_offset)); + popl(Address(scratch, dest_offset)); +#endif +} + +void MipsAssembler::MemoryBarrier(ManagedRegister) { + UNIMPLEMENTED(FATAL) << "NEEDS TO BE IMPLEMENTED"; +#if 0 +#if ANDROID_SMP != 0 + mfence(); +#endif +#endif 
+} + +void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg, + FrameOffset sirt_offset, + ManagedRegister min_reg, bool null_allowed) { + MipsManagedRegister out_reg = mout_reg.AsMips(); + MipsManagedRegister in_reg = min_reg.AsMips(); + CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg; + CHECK(out_reg.IsCoreRegister()) << out_reg; + if (null_allowed) { + Label null_arg; + // Null values get a SIRT entry value of 0. Otherwise, the SIRT entry is + // the address in the SIRT holding the reference. + // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset) + if (in_reg.IsNoRegister()) { + LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), + SP, sirt_offset.Int32Value()); + in_reg = out_reg; + } + if (!out_reg.Equals(in_reg)) { + LoadImmediate(out_reg.AsCoreRegister(), 0); + } + EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true); + AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value()); + Bind(&null_arg, false); + } else { + AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value()); + } +} + +void MipsAssembler::CreateSirtEntry(FrameOffset out_off, + FrameOffset sirt_offset, + ManagedRegister mscratch, + bool null_allowed) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + if (null_allowed) { + Label null_arg; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, + sirt_offset.Int32Value()); + // Null values get a SIRT entry value of 0. Otherwise, the sirt entry is + // the address in the SIRT holding the reference. + // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset) + EmitBranch(scratch.AsCoreRegister(), ZERO, &null_arg, true); + AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value()); + Bind(&null_arg, false); + } else { + AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value()); + } + StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value()); +} + +// Given a SIRT entry, load the associated reference. 
+void MipsAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg, + ManagedRegister min_reg) { + MipsManagedRegister out_reg = mout_reg.AsMips(); + MipsManagedRegister in_reg = min_reg.AsMips(); + CHECK(out_reg.IsCoreRegister()) << out_reg; + CHECK(in_reg.IsCoreRegister()) << in_reg; + Label null_arg; + if (!out_reg.Equals(in_reg)) { + LoadImmediate(out_reg.AsCoreRegister(), 0); + } + EmitBranch(in_reg.AsCoreRegister(), ZERO, &null_arg, true); + LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), + in_reg.AsCoreRegister(), 0); + Bind(&null_arg, false); +} + +void MipsAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) { + // TODO: not validating references +} + +void MipsAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) { + // TODO: not validating references +} + +void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) { + MipsManagedRegister base = mbase.AsMips(); + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(base.IsCoreRegister()) << base; + CHECK(scratch.IsCoreRegister()) << scratch; + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + base.AsCoreRegister(), offset.Int32Value()); + Jalr(scratch.AsCoreRegister()); + // TODO: place reference map on call +} + +void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) { + MipsManagedRegister scratch = mscratch.AsMips(); + CHECK(scratch.IsCoreRegister()) << scratch; + // Call *(*(SP + base) + offset) + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + SP, base.Int32Value()); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + scratch.AsCoreRegister(), offset.Int32Value()); + Jalr(scratch.AsCoreRegister()); + // TODO: place reference map on call +} + +void MipsAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*mscratch*/) { + UNIMPLEMENTED(FATAL) << "no arm implementation"; +#if 0 + fs()->call(Address::Absolute(offset)); +#endif +} + +void MipsAssembler::GetCurrentThread(ManagedRegister 
tr) { + Move(tr.AsMips().AsCoreRegister(), S1); +} + +void MipsAssembler::GetCurrentThread(FrameOffset offset, + ManagedRegister /*mscratch*/) { + StoreToOffset(kStoreWord, S1, SP, offset.Int32Value()); +} + +void MipsAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) { + MipsManagedRegister scratch = mscratch.AsMips(); + MipsExceptionSlowPath* slow = new MipsExceptionSlowPath(scratch, stack_adjust); + buffer_.EnqueueSlowPath(slow); + LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), + S1, Thread::ExceptionOffset().Int32Value()); + EmitBranch(scratch.AsCoreRegister(), ZERO, slow->Entry(), false); +} + +void MipsExceptionSlowPath::Emit(Assembler* sasm) { + MipsAssembler* sp_asm = down_cast(sasm); +#define __ sp_asm-> + __ Bind(&entry_, false); + if (stack_adjust_ != 0) { // Fix up the frame. + __ DecreaseFrameSize(stack_adjust_); + } + // Pass exception object as argument + // Don't care about preserving A0 as this call won't return + __ Move(A0, scratch_.AsCoreRegister()); + // Set up call to Thread::Current()->pDeliverException + __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pDeliverException)); + __ Jr(T9); + // Call never returns + __ Break(); +#undef __ +} + +} // namespace mips +} // namespace art -- cgit v1.2.3-59-g8ed1b From 7655f29fabc0a12765de828914a18314382e5a35 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 29 Jul 2013 11:07:13 -0700 Subject: Portable refactorings. Separate quick from portable entrypoints. Move architectural dependencies into arch. 
Change-Id: I9adbc0a9782e2959fdc3308215f01e3107632b7c --- build/Android.gtest.mk | 2 +- compiler/dex/quick/arm/call_arm.cc | 6 +- compiler/dex/quick/arm/fp_arm.cc | 16 +- compiler/dex/quick/arm/int_arm.cc | 4 +- compiler/dex/quick/gen_common.cc | 101 +-- compiler/dex/quick/gen_invoke.cc | 36 +- compiler/dex/quick/mips/call_mips.cc | 6 +- compiler/dex/quick/mips/fp_mips.cc | 26 +- compiler/dex/quick/mips/int_mips.cc | 2 +- compiler/dex/quick/x86/call_x86.cc | 6 +- compiler/dex/quick/x86/fp_x86.cc | 14 +- compiler/dex/quick/x86/int_x86.cc | 2 +- compiler/jni/quick/jni_compiler.cc | 12 +- compiler/stubs/portable/stubs.cc | 7 +- compiler/stubs/quick/stubs.cc | 18 +- compiler/utils/arm/assembler_arm.cc | 2 +- compiler/utils/mips/assembler_mips.cc | 2 +- compiler/utils/x86/assembler_x86.cc | 2 +- runtime/Android.mk | 62 +- runtime/arch/arm/asm_support_arm.S | 38 + runtime/arch/arm/asm_support_arm.h | 31 + runtime/arch/arm/entrypoints_init_arm.cc | 241 ++++++ runtime/arch/arm/jni_entrypoints_arm.S | 65 ++ runtime/arch/arm/portable_entrypoints_arm.S | 96 +++ runtime/arch/arm/quick_entrypoints_arm.S | 127 +-- runtime/arch/arm/quick_entrypoints_init_arm.cc | 237 ------ runtime/arch/arm/thread_arm.cc | 29 + runtime/arch/mips/asm_support_mips.S | 41 + runtime/arch/mips/asm_support_mips.h | 31 + runtime/arch/mips/entrypoints_init_mips.cc | 242 ++++++ runtime/arch/mips/jni_entrypoints_mips.S | 89 ++ runtime/arch/mips/portable_entrypoints_mips.S | 73 ++ runtime/arch/mips/quick_entrypoints_init_mips.cc | 238 ------ runtime/arch/mips/quick_entrypoints_mips.S | 115 +-- runtime/arch/mips/thread_mips.cc | 29 + runtime/arch/x86/asm_support_x86.S | 91 ++ runtime/arch/x86/asm_support_x86.h | 27 + runtime/arch/x86/entrypoints_init_x86.cc | 224 +++++ runtime/arch/x86/jni_entrypoints_x86.S | 35 + runtime/arch/x86/portable_entrypoints_x86.S | 109 +++ runtime/arch/x86/quick_entrypoints_init_x86.cc | 221 ----- runtime/arch/x86/quick_entrypoints_x86.S | 172 +--- runtime/arch/x86/thread_x86.cc | 
139 +++ runtime/asm_support.h | 25 - runtime/class_linker.cc | 5 +- runtime/class_linker_test.cc | 2 +- runtime/common_test.h | 2 +- runtime/entrypoints/entrypoint_utils.cc | 407 +++++++++ runtime/entrypoints/entrypoint_utils.h | 412 +++++++++ runtime/entrypoints/jni/jni_entrypoints.cc | 46 + runtime/entrypoints/math_entrypoints.cc | 89 ++ runtime/entrypoints/math_entrypoints.h | 29 + runtime/entrypoints/math_entrypoints_test.cc | 74 ++ .../portable/portable_alloc_entrypoints.cc | 69 ++ .../portable/portable_argument_visitor.h | 136 +++ .../portable/portable_cast_entrypoints.cc | 57 ++ .../portable/portable_dexcache_entrypoints.cc | 53 ++ .../entrypoints/portable/portable_entrypoints.h | 44 + .../portable/portable_field_entrypoints.cc | 241 ++++++ .../portable/portable_fillarray_entrypoints.cc | 50 ++ .../portable/portable_invoke_entrypoints.cc | 104 +++ .../portable/portable_jni_entrypoints.cc | 98 +++ .../portable/portable_lock_entrypoints.cc | 38 + .../portable/portable_proxy_entrypoints.cc | 109 +++ .../portable/portable_stub_entrypoints.cc | 145 ++++ .../portable/portable_thread_entrypoints.cc | 99 +++ .../portable/portable_throw_entrypoints.cc | 123 +++ .../entrypoints/quick/quick_alloc_entrypoints.cc | 2 +- runtime/entrypoints/quick/quick_argument_visitor.h | 110 --- .../entrypoints/quick/quick_cast_entrypoints.cc | 2 +- .../quick/quick_dexcache_entrypoints.cc | 4 +- runtime/entrypoints/quick/quick_entrypoints.h | 15 +- .../entrypoints/quick/quick_field_entrypoints.cc | 2 +- .../quick/quick_fillarray_entrypoints.cc | 2 +- .../entrypoints/quick/quick_invoke_entrypoints.cc | 2 +- runtime/entrypoints/quick/quick_jni_entrypoints.cc | 2 +- .../entrypoints/quick/quick_proxy_entrypoints.cc | 86 +- .../entrypoints/quick/quick_stub_entrypoints.cc | 143 ---- .../entrypoints/quick/quick_thread_entrypoints.cc | 2 +- .../entrypoints/quick/quick_throw_entrypoints.cc | 2 +- runtime/interpreter/interpreter.cc | 2 +- runtime/mirror/abstract_method-inl.h | 2 +- 
runtime/mirror/object_test.cc | 2 +- runtime/runtime_support.cc | 475 ----------- runtime/runtime_support.h | 419 ---------- runtime/runtime_support_llvm.cc | 930 --------------------- runtime/runtime_support_llvm.h | 27 - runtime/runtime_support_test.cc | 74 -- runtime/thread.cc | 177 ++-- runtime/thread.h | 8 +- runtime/thread_arm.cc | 29 - runtime/thread_mips.cc | 29 - runtime/thread_x86.cc | 139 --- 93 files changed, 4346 insertions(+), 3864 deletions(-) create mode 100644 runtime/arch/arm/asm_support_arm.S create mode 100644 runtime/arch/arm/asm_support_arm.h create mode 100644 runtime/arch/arm/entrypoints_init_arm.cc create mode 100644 runtime/arch/arm/jni_entrypoints_arm.S create mode 100644 runtime/arch/arm/portable_entrypoints_arm.S delete mode 100644 runtime/arch/arm/quick_entrypoints_init_arm.cc create mode 100644 runtime/arch/arm/thread_arm.cc create mode 100644 runtime/arch/mips/asm_support_mips.S create mode 100644 runtime/arch/mips/asm_support_mips.h create mode 100644 runtime/arch/mips/entrypoints_init_mips.cc create mode 100644 runtime/arch/mips/jni_entrypoints_mips.S create mode 100644 runtime/arch/mips/portable_entrypoints_mips.S delete mode 100644 runtime/arch/mips/quick_entrypoints_init_mips.cc create mode 100644 runtime/arch/mips/thread_mips.cc create mode 100644 runtime/arch/x86/asm_support_x86.S create mode 100644 runtime/arch/x86/asm_support_x86.h create mode 100644 runtime/arch/x86/entrypoints_init_x86.cc create mode 100644 runtime/arch/x86/jni_entrypoints_x86.S create mode 100644 runtime/arch/x86/portable_entrypoints_x86.S delete mode 100644 runtime/arch/x86/quick_entrypoints_init_x86.cc create mode 100644 runtime/arch/x86/thread_x86.cc create mode 100644 runtime/entrypoints/entrypoint_utils.cc create mode 100644 runtime/entrypoints/entrypoint_utils.h create mode 100644 runtime/entrypoints/jni/jni_entrypoints.cc create mode 100644 runtime/entrypoints/math_entrypoints.cc create mode 100644 runtime/entrypoints/math_entrypoints.h create mode 
100644 runtime/entrypoints/math_entrypoints_test.cc create mode 100644 runtime/entrypoints/portable/portable_alloc_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_argument_visitor.h create mode 100644 runtime/entrypoints/portable/portable_cast_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_dexcache_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_entrypoints.h create mode 100644 runtime/entrypoints/portable/portable_field_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_fillarray_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_invoke_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_jni_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_lock_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_proxy_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_stub_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_thread_entrypoints.cc create mode 100644 runtime/entrypoints/portable/portable_throw_entrypoints.cc delete mode 100644 runtime/runtime_support.cc delete mode 100644 runtime/runtime_support.h delete mode 100644 runtime/runtime_support_llvm.cc delete mode 100644 runtime/runtime_support_llvm.h delete mode 100644 runtime/runtime_support_test.cc delete mode 100644 runtime/thread_arm.cc delete mode 100644 runtime/thread_mips.cc delete mode 100644 runtime/thread_x86.cc (limited to 'compiler/utils/mips/assembler_mips.cc') diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index 4648d44899..b9ebd83555 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -34,6 +34,7 @@ TEST_COMMON_SRC_FILES := \ runtime/dex_file_test.cc \ runtime/dex_instruction_visitor_test.cc \ runtime/dex_method_iterator_test.cc \ + runtime/entrypoints/math_entrypoints_test.cc \ runtime/exception_test.cc \ runtime/gc/accounting/space_bitmap_test.cc \ 
runtime/gc/heap_test.cc \ @@ -50,7 +51,6 @@ TEST_COMMON_SRC_FILES := \ runtime/oat_test.cc \ runtime/output_stream_test.cc \ runtime/reference_table_test.cc \ - runtime/runtime_support_test.cc \ runtime/runtime_test.cc \ runtime/thread_pool_test.cc \ runtime/utils_test.cc \ diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc index 7c3ec14981..745e43dc38 100644 --- a/compiler/dex/quick/arm/call_arm.cc +++ b/compiler/dex/quick/arm/call_arm.cc @@ -432,7 +432,7 @@ void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { // Making a call - use explicit registers FlushAllRegs(); /* Everything to home location */ LoadValueDirectFixed(rl_src, r0); - LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rARM_LR); // Materialize a pointer to the fill data image NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast(tab_rec)); @@ -488,7 +488,7 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { OpRegImm(kOpCmp, r1, 0); OpIT(kCondNe, "T"); // Go expensive route - artLockObjectFromCode(self, obj); - LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR); + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, rARM_LR); MarkSafepointPC(call_inst); @@ -519,7 +519,7 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { OpIT(kCondEq, "EE"); StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3); // Go expensive route - UnlockObjectFromCode(obj); - LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR); + LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, rARM_LR); MarkSafepointPC(call_inst); diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc index 
1bb08c45e3..08d6778129 100644 --- a/compiler/dex/quick/arm/fp_arm.cc +++ b/compiler/dex/quick/arm/fp_arm.cc @@ -49,7 +49,8 @@ void ArmMir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest, case Instruction::REM_FLOAT_2ADDR: case Instruction::REM_FLOAT: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, + false); rl_result = GetReturn(true); StoreValue(rl_dest, rl_result); return; @@ -91,7 +92,8 @@ void ArmMir2Lir::GenArithOpDouble(Instruction::Code opcode, case Instruction::REM_DOUBLE_2ADDR: case Instruction::REM_DOUBLE: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, + false); rl_result = GetReturnWide(true); StoreValueWide(rl_dest, rl_result); return; @@ -140,16 +142,16 @@ void ArmMir2Lir::GenConversion(Instruction::Code opcode, op = kThumb2VcvtDI; break; case Instruction::LONG_TO_DOUBLE: - GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); return; case Instruction::FLOAT_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); return; case Instruction::LONG_TO_FLOAT: - GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); return; default: LOG(FATAL) << "Unexpected opcode: " << opcode; @@ -315,7 +317,7 @@ bool ArmMir2Lir::GenInlinedSqrt(CallInfo* info) { branch = 
NewLIR2(kThumbBCond, 0, kArmCondEq); ClobberCalleeSave(); LockCallTemps(); // Using fixed registers - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pSqrt)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt)); NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg)); NewLIR1(kThumbBlxR, r_tgt); NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1); diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc index 4bb507b9ea..9db1016efa 100644 --- a/compiler/dex/quick/arm/int_arm.cc +++ b/compiler/dex/quick/arm/int_arm.cc @@ -665,7 +665,7 @@ void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1, */ RegLocation rl_result; if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) { - int func_offset = ENTRYPOINT_OFFSET(pLmul); + int func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul); FlushAllRegs(); CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false); rl_result = GetReturnWide(false); @@ -956,7 +956,7 @@ void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. 
LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 8934340d48..ebe10bb57e 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -211,9 +211,9 @@ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, int func_offset; if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCode); } else { - func_offset= ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck); + func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck); } CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true); RegLocation rl_result = GetReturn(false); @@ -233,9 +233,9 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { int func_offset; if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode); } else { - func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck); + func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck); } CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true); FreeTemp(TargetReg(kArg2)); @@ -375,7 +375,7 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do // TUNING: fast path should fall through LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL); LoadConstant(TargetReg(kArg0), ssb_index); - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); if (cu_->instruction_set == kMips) { // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy OpRegCopy(rBase, TargetReg(kRet0)); @@ 
-408,9 +408,9 @@ void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_do FreeTemp(rBase); } else { FlushAllRegs(); // Everything to home locations - int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) : - (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic) - : ENTRYPOINT_OFFSET(pSet32Static)); + int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) : + (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic) + : QUICK_ENTRYPOINT_OFFSET(pSet32Static)); CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true); } } @@ -455,7 +455,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, // or NULL if not initialized. Check for NULL and call helper if NULL. // TUNING: fast path should fall through LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL); - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true); if (cu_->instruction_set == kMips) { // For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy OpRegCopy(rBase, TargetReg(kRet0)); @@ -483,9 +483,9 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, } } else { FlushAllRegs(); // Everything to home locations - int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) : - (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic) - : ENTRYPOINT_OFFSET(pGet32Static)); + int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) : + (is_object ? 
QUICK_ENTRYPOINT_OFFSET(pGetObjStatic) + : QUICK_ENTRYPOINT_OFFSET(pGet32Static)); CallRuntimeHelperImm(getterOffset, field_idx, true); if (is_long_or_double) { RegLocation rl_result = GetReturnWide(rl_dest.fp); @@ -499,7 +499,7 @@ void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest, void Mir2Lir::HandleSuspendLaunchPads() { int num_elems = suspend_launchpads_.Size(); - int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode); + int helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspendFromCode); for (int i = 0; i < num_elems; i++) { ResetRegPool(); ResetDefTracking(); @@ -545,7 +545,7 @@ void Mir2Lir::HandleThrowLaunchPads() { bool target_x86 = (cu_->instruction_set == kX86); switch (lab->operands[0]) { case kThrowNullPointer: - func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointerFromCode); break; case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index // v1 holds the constant array index. Mips/Arm uses v2 for length, x86 reloads. @@ -557,7 +557,7 @@ void Mir2Lir::HandleThrowLaunchPads() { // Make sure the following LoadConstant doesn't mess with kArg1. 
LockTemp(TargetReg(kArg1)); LoadConstant(TargetReg(kArg0), v2); - func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); break; case kThrowArrayBounds: // Move v1 (array index) to kArg0 and v2 (array length) to kArg1 @@ -590,18 +590,18 @@ void Mir2Lir::HandleThrowLaunchPads() { OpRegCopy(TargetReg(kArg0), v1); } } - func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode); break; case kThrowDivZero: - func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZeroFromCode); break; case kThrowNoSuchMethod: OpRegCopy(TargetReg(kArg0), v1); func_offset = - ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode); + QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode); break; case kThrowStackOverflow: - func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode); // Restore stack alignment if (target_x86) { OpRegImm(kOpAdd, TargetReg(kSp), frame_size_); @@ -664,9 +664,9 @@ void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size, StoreValue(rl_dest, rl_result); } } else { - int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) : - (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance) - : ENTRYPOINT_OFFSET(pGet32Instance)); + int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) : + (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance) + : QUICK_ENTRYPOINT_OFFSET(pGet32Instance)); CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true); if (is_long_or_double) { RegLocation rl_result = GetReturnWide(rl_dest.fp); @@ -719,9 +719,9 @@ void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size, } } } else { - int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) : - (is_object ? 
ENTRYPOINT_OFFSET(pSetObjInstance) - : ENTRYPOINT_OFFSET(pSet32Instance)); + int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) : + (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance) + : QUICK_ENTRYPOINT_OFFSET(pSet32Instance)); CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true); } } @@ -735,7 +735,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { type_idx)) { // Call out to helper which resolves type and verifies access. // Resolved type returned in kRet0. - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), type_idx, rl_method.low_reg, true); RegLocation rl_result = GetReturn(false); StoreValue(rl_dest, rl_result); @@ -764,7 +764,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { // TUNING: move slow path to end & remove unconditional branch LIR* target1 = NewLIR0(kPseudoTargetLabel); // Call out to helper, which will return resolved type in kArg0 - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, rl_method.low_reg, true); RegLocation rl_result = GetReturn(false); StoreValue(rl_dest, rl_result); @@ -797,7 +797,7 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { LoadWordDisp(TargetReg(kArg2), mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0)); // Might call out to helper, which will return resolved string in kRet0 - int r_tgt = CallHelperSetup(ENTRYPOINT_OFFSET(pResolveStringFromCode)); + int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode)); LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0)); LoadConstant(TargetReg(kArg1), string_idx); if (cu_->instruction_set == kThumb2) { @@ -821,7 +821,8 @@ void 
Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { branch->target = target; } else { DCHECK_EQ(cu_->instruction_set, kX86); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), TargetReg(kArg1), true); + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), + TargetReg(kArg1), true); } GenBarrier(); StoreValue(rl_dest, GetReturn(false)); @@ -847,9 +848,9 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { int func_offset; if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks( cu_->method_idx, *cu_->dex_file, type_idx)) { - func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCode); } else { - func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck); } CallRuntimeHelperImmMethod(func_offset, type_idx, true); RegLocation rl_result = GetReturn(false); @@ -858,7 +859,7 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) { void Mir2Lir::GenThrow(RegLocation rl_src) { FlushAllRegs(); - CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true); + CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true); } // For final classes there are no sub-classes to check and so we can answer the instance-of @@ -928,7 +929,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know if (needs_access_check) { // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kArg0 - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), type_idx, true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref @@ 
-950,7 +951,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL); // Not resolved // Call out to helper, which will return resolved type in kRet0 - CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true); + CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true); OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* reload Ref */ // Rejoin code paths @@ -985,7 +986,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know } } else { if (cu_->instruction_set == kThumb2) { - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); if (!type_known_abstract) { /* Uses conditional nullification */ OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same? 
@@ -1002,13 +1003,13 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL); } if (cu_->instruction_set != kX86) { - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class) FreeTemp(r_tgt); } else { OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); - OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); + OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode)); } } } @@ -1068,7 +1069,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // Check we have access to type_idx and if not throw IllegalAccessError, // returns Class* in kRet0 // InitializeTypeAndVerifyAccess(idx, method) - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode), type_idx, TargetReg(kArg1), true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path } else if (use_declaring_class) { @@ -1088,8 +1089,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // Not resolved // Call out to helper, which will return resolved type in kArg0 // InitializeTypeFromCode(idx, method) - CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1), - true); + CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, + TargetReg(kArg1), true); OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path // Rejoin code paths LIR* hop_target = NewLIR0(kPseudoTargetLabel); @@ -1108,8 +1109,8 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ if 
(!type_known_abstract) { branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL); } - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2), - true); + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), + TargetReg(kArg2), true); /* branch target here */ LIR* target = NewLIR0(kPseudoTargetLabel); branch1->target = target; @@ -1172,15 +1173,15 @@ void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest, switch (opcode) { case Instruction::SHL_LONG: case Instruction::SHL_LONG_2ADDR: - func_offset = ENTRYPOINT_OFFSET(pShlLong); + func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong); break; case Instruction::SHR_LONG: case Instruction::SHR_LONG_2ADDR: - func_offset = ENTRYPOINT_OFFSET(pShrLong); + func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong); break; case Instruction::USHR_LONG: case Instruction::USHR_LONG_2ADDR: - func_offset = ENTRYPOINT_OFFSET(pUshrLong); + func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong); break; default: LOG(FATAL) << "Unexpected case"; @@ -1302,7 +1303,7 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest, } rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv); } else { - int func_offset = ENTRYPOINT_OFFSET(pIdivmod); + int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod); FlushAllRegs(); /* Send everything to home location */ LoadValueDirectFixed(rl_src2, TargetReg(kArg1)); int r_tgt = CallHelperSetup(func_offset); @@ -1557,7 +1558,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re FlushAllRegs(); /* Everything to home location */ LoadValueDirectFixed(rl_src, TargetReg(kArg0)); Clobber(TargetReg(kArg0)); - int func_offset = ENTRYPOINT_OFFSET(pIdivmod); + int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod); CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false); if (is_div) rl_result = GetReturn(false); @@ -1634,7 +1635,7 @@ void 
Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, } else { call_out = true; ret_reg = TargetReg(kRet0); - func_offset = ENTRYPOINT_OFFSET(pLmul); + func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul); } break; case Instruction::DIV_LONG: @@ -1642,13 +1643,13 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, call_out = true; check_zero = true; ret_reg = TargetReg(kRet0); - func_offset = ENTRYPOINT_OFFSET(pLdiv); + func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv); break; case Instruction::REM_LONG: case Instruction::REM_LONG_2ADDR: call_out = true; check_zero = true; - func_offset = ENTRYPOINT_OFFSET(pLdivmod); + func_offset = QUICK_ENTRYPOINT_OFFSET(pLdivmod); /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */ ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0); break; diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index 91f250075a..1b34e99a72 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -471,7 +471,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state, direct_method = 0; } int trampoline = (cu->instruction_set == kX86) ? 
0 - : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline); + : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline); if (direct_method != 0) { switch (state) { @@ -555,7 +555,7 @@ static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -563,7 +563,7 @@ static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -571,7 +571,7 @@ static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -579,7 +579,7 @@ static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state, const MethodReference& target_method, uint32_t method_idx, uintptr_t unused, uintptr_t unused2, InvokeType unused3) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -589,7 +589,7 @@ static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* 
cu, uint32_t unused, uintptr_t unused2, uintptr_t unused3, InvokeType unused4) { - int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); + int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0); } @@ -773,14 +773,14 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, // Generate memcpy OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset); OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset); - CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), + CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), TargetReg(kArg1), (info->num_arg_words - 3) * 4, false); } else { if (info->num_arg_words >= 20) { // Generate memcpy OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset); OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset); - CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), + CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0), TargetReg(kArg1), (info->num_arg_words - 3) * 4, false); } else { // Use vldm/vstm pair using kArg3 as a temp @@ -1047,7 +1047,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { } else { LoadValueDirectFixed(rl_start, reg_start); } - int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0; + int r_tgt = (cu_->instruction_set != kX86) ? 
LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0; GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags); LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast(info)); intrinsic_launchpads_.Insert(launch_pad); @@ -1056,7 +1056,7 @@ bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) { if (cu_->instruction_set != kX86) { OpReg(kOpBlx, r_tgt); } else { - OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf)); + OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf)); } LIR* resume_tgt = NewLIR0(kPseudoTargetLabel); launch_pad->operands[2] = reinterpret_cast(resume_tgt); @@ -1084,7 +1084,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { LoadValueDirectFixed(rl_this, reg_this); LoadValueDirectFixed(rl_cmp, reg_cmp); int r_tgt = (cu_->instruction_set != kX86) ? - LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0; + LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0; GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags); // TUNING: check if rl_cmp.s_reg_low is already null checked LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast(info)); @@ -1094,7 +1094,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) { if (cu_->instruction_set != kX86) { OpReg(kOpBlx, r_tgt); } else { - OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo)); + OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)); } launch_pad->operands[2] = 0; // No return possible // Record that we've already inlined & null checked @@ -1409,20 +1409,20 @@ void Mir2Lir::GenInvoke(CallInfo* info) { int trampoline = 0; switch (info->type) { case kInterface: - trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline) - : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); + trampoline = fast_path ? 
QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline) + : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck); break; case kDirect: - trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck); break; case kStatic: - trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck); break; case kSuper: - trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck); break; case kVirtual: - trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); + trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck); break; default: LOG(FATAL) << "Unexpected invoke type"; diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc index b6c200ca98..846c055ac2 100644 --- a/compiler/dex/quick/mips/call_mips.cc +++ b/compiler/dex/quick/mips/call_mips.cc @@ -247,7 +247,7 @@ void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { GenBarrier(); NewLIR0(kMipsCurrPC); // Really a jal to .+8 // Now, fill the branch delay slot with the helper load - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode)); GenBarrier(); // Scheduling barrier // Construct BaseLabel and set up table base register @@ -272,7 +272,7 @@ void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { LockCallTemps(); // Prepare for explicit register usage GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags); // Go expensive route - artLockObjectFromCode(self, obj); - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pLockObjectFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode)); ClobberCalleeSave(); LIR* call_inst = 
OpReg(kOpBlx, r_tgt); MarkSafepointPC(call_inst); @@ -287,7 +287,7 @@ void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { LockCallTemps(); // Prepare for explicit register usage GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags); // Go expensive route - UnlockObjectFromCode(obj); - int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pUnlockObjectFromCode)); + int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode)); ClobberCalleeSave(); LIR* call_inst = OpReg(kOpBlx, r_tgt); MarkSafepointPC(call_inst); diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc index 620527e35b..320301726b 100644 --- a/compiler/dex/quick/mips/fp_mips.cc +++ b/compiler/dex/quick/mips/fp_mips.cc @@ -50,7 +50,8 @@ void MipsMir2Lir::GenArithOpFloat(Instruction::Code opcode, case Instruction::REM_FLOAT_2ADDR: case Instruction::REM_FLOAT: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, + false); rl_result = GetReturn(true); StoreValue(rl_dest, rl_result); return; @@ -92,7 +93,8 @@ void MipsMir2Lir::GenArithOpDouble(Instruction::Code opcode, case Instruction::REM_DOUBLE_2ADDR: case Instruction::REM_DOUBLE: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, + false); rl_result = GetReturnWide(true); StoreValueWide(rl_dest, rl_result); return; @@ -133,22 +135,22 @@ void MipsMir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, op = kMipsFcvtdw; break; case Instruction::FLOAT_TO_INT: - GenConversionCall(ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_INT: 
- GenConversionCall(ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src); return; case Instruction::LONG_TO_DOUBLE: - GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); return; case Instruction::FLOAT_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); return; case Instruction::LONG_TO_FLOAT: - GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); return; default: LOG(FATAL) << "Unexpected opcode: " << opcode; @@ -178,18 +180,18 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, switch (opcode) { case Instruction::CMPL_FLOAT: - offset = ENTRYPOINT_OFFSET(pCmplFloat); + offset = QUICK_ENTRYPOINT_OFFSET(pCmplFloat); wide = false; break; case Instruction::CMPG_FLOAT: - offset = ENTRYPOINT_OFFSET(pCmpgFloat); + offset = QUICK_ENTRYPOINT_OFFSET(pCmpgFloat); wide = false; break; case Instruction::CMPL_DOUBLE: - offset = ENTRYPOINT_OFFSET(pCmplDouble); + offset = QUICK_ENTRYPOINT_OFFSET(pCmplDouble); break; case Instruction::CMPG_DOUBLE: - offset = ENTRYPOINT_OFFSET(pCmpgDouble); + offset = QUICK_ENTRYPOINT_OFFSET(pCmpgDouble); break; default: LOG(FATAL) << "Unexpected opcode: " << opcode; diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc index 7c8214b927..bd044c66bd 100644 --- a/compiler/dex/quick/mips/int_mips.cc +++ b/compiler/dex/quick/mips/int_mips.cc @@ -579,7 +579,7 @@ void MipsMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. 
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc index d530a1c644..1c395def55 100644 --- a/compiler/dex/quick/x86/call_x86.cc +++ b/compiler/dex/quick/x86/call_x86.cc @@ -148,7 +148,7 @@ void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) { NewLIR1(kX86StartOfMethod, rX86_ARG2); NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast(tab_rec)); NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0, rX86_ARG1, true); } @@ -165,7 +165,7 @@ void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) { NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX); LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq); // If lock is held, go the expensive route - artLockObjectFromCode(self, obj); - CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true); + CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true); branch->target = NewLIR0(kPseudoTargetLabel); } @@ -185,7 +185,7 @@ void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) { LIR* branch2 = NewLIR1(kX86Jmp8, 0); branch->target = NewLIR0(kPseudoTargetLabel); // Otherwise, go the expensive route - UnlockObjectFromCode(obj); - CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true); + CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true); branch2->target = NewLIR0(kPseudoTargetLabel); } diff --git 
a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc index cc6f374488..f736b5e28f 100644 --- a/compiler/dex/quick/x86/fp_x86.cc +++ b/compiler/dex/quick/x86/fp_x86.cc @@ -49,7 +49,8 @@ void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode, case Instruction::REM_FLOAT_2ADDR: case Instruction::REM_FLOAT: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, + false); rl_result = GetReturn(true); StoreValue(rl_dest, rl_result); return; @@ -99,7 +100,8 @@ void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode, case Instruction::REM_DOUBLE_2ADDR: case Instruction::REM_DOUBLE: FlushAllRegs(); // Send everything to home location - CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false); + CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, + false); rl_result = GetReturnWide(true); StoreValueWide(rl_dest, rl_result); return; @@ -196,17 +198,17 @@ void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest, return; } case Instruction::LONG_TO_DOUBLE: - GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src); return; case Instruction::LONG_TO_FLOAT: // TODO: inline by using memory as a 64-bit source. Be careful about promoted registers. 
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src); return; case Instruction::FLOAT_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src); return; case Instruction::DOUBLE_TO_LONG: - GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); + GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src); return; default: LOG(INFO) << "Unexpected opcode: " << opcode; diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index 3be24df565..0b4b4be04e 100644 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -532,7 +532,7 @@ void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array, // Get the array's class. LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class); - CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, + CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value, r_array_class, true); // Redo LoadValues in case they didn't survive the call. LoadValueDirectFixed(rl_array, r_array); // Reload array diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index aeadb54a22..b069fbd4a1 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -172,8 +172,8 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler, // can occur. The result is the saved JNI local state that is restored by the exit call. We // abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer // arguments. - uintptr_t jni_start = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodStartSynchronized) - : ENTRYPOINT_OFFSET(pJniMethodStart); + uintptr_t jni_start = is_synchronized ? 
QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized) + : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart); main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size)); FrameOffset locked_object_sirt_offset(0); if (is_synchronized) { @@ -304,13 +304,13 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler, uintptr_t jni_end; if (reference_return) { // Pass result. - jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized) - : ENTRYPOINT_OFFSET(pJniMethodEndWithReference); + jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized) + : QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReference); SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister()); end_jni_conv->Next(); } else { - jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndSynchronized) - : ENTRYPOINT_OFFSET(pJniMethodEnd); + jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndSynchronized) + : QUICK_ENTRYPOINT_OFFSET(pJniMethodEnd); } // Pass saved local reference state. 
if (end_jni_conv->IsCurrentParamOnStack()) { diff --git a/compiler/stubs/portable/stubs.cc b/compiler/stubs/portable/stubs.cc index cee68478a4..def43e2bd2 100644 --- a/compiler/stubs/portable/stubs.cc +++ b/compiler/stubs/portable/stubs.cc @@ -34,7 +34,8 @@ const std::vector* CreatePortableResolutionTrampoline() { RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR); __ PushList(save); - __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); + __ LoadFromOffset(kLoadWord, R12, TR, + PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); __ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3 __ mov(R2, ShifterOperand(SP)); // Pass sp for Method** callee_addr __ IncreaseFrameSize(12); // 3 words of space for alignment @@ -69,7 +70,7 @@ const std::vector* CreatePortableResolutionTrampoline() { __ StoreToOffset(kStoreWord, A0, SP, 0); __ LoadFromOffset(kLoadWord, T9, S1, - ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); + PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)); __ Move(A3, S1); // Pass Thread::Current() in A3 __ Move(A2, SP); // Pass SP for Method** callee_addr __ Jalr(T9); // Call to resolution trampoline (callee, receiver, callee_addr, Thread*) @@ -112,7 +113,7 @@ const std::vector* CreatePortableResolutionTrampoline() { __ pushl(ECX); // pass receiver __ pushl(EAX); // pass called // Call to resolve method. 
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)), + __ Call(ThreadOffset(PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)), X86ManagedRegister::FromCpuRegister(ECX)); __ leave(); diff --git a/compiler/stubs/quick/stubs.cc b/compiler/stubs/quick/stubs.cc index 598481f3f7..912f1c0746 100644 --- a/compiler/stubs/quick/stubs.cc +++ b/compiler/stubs/quick/stubs.cc @@ -46,7 +46,7 @@ const std::vector* CreateQuickResolutionTrampoline() { // TODO: enable when GetCalleeSaveMethod is available at stub generation time // DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask()); __ PushList(save); - __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); + __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); __ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3 __ IncreaseFrameSize(8); // 2 words of space for alignment __ mov(R2, ShifterOperand(SP)); // Pass SP @@ -71,7 +71,7 @@ const std::vector* CreateQuickResolutionTrampoline() { const std::vector* CreateInterpreterToInterpreterEntry() { UniquePtr assembler(static_cast(Assembler::Create(kArm))); - __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); + __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); __ bkpt(0); size_t cs = assembler->CodeSize(); @@ -85,7 +85,7 @@ const std::vector* CreateInterpreterToInterpreterEntry() { const std::vector* CreateInterpreterToQuickEntry() { UniquePtr assembler(static_cast(Assembler::Create(kArm))); - __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)); + __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)); __ bkpt(0); size_t cs = assembler->CodeSize(); @@ -123,7 +123,7 @@ const std::vector* CreateQuickResolutionTrampoline() { __ StoreToOffset(kStoreWord, A2, SP, 
8); __ StoreToOffset(kStoreWord, A1, SP, 4); - __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); + __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)); __ Move(A3, S1); // Pass Thread::Current() in A3 __ Move(A2, SP); // Pass SP for Method** callee_addr __ Jalr(T9); // Call to resolution trampoline (method_idx, receiver, sp, Thread*) @@ -161,7 +161,7 @@ const std::vector* CreateQuickResolutionTrampoline() { const std::vector* CreateInterpreterToInterpreterEntry() { UniquePtr assembler(static_cast(Assembler::Create(kMips))); - __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); + __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); __ Jr(T9); __ Break(); @@ -176,7 +176,7 @@ const std::vector* CreateInterpreterToInterpreterEntry() { const std::vector* CreateInterpreterToQuickEntry() { UniquePtr assembler(static_cast(Assembler::Create(kMips))); - __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); + __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)); __ Jr(T9); __ Break(); @@ -208,7 +208,7 @@ const std::vector* CreateQuickResolutionTrampoline() { __ pushl(EAX); // pass Method* // Call to resolve method. 
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)), + __ Call(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)), X86ManagedRegister::FromCpuRegister(ECX)); __ movl(EDI, EAX); // save code pointer in EDI @@ -236,7 +236,7 @@ const std::vector* CreateQuickResolutionTrampoline() { const std::vector* CreateInterpreterToInterpreterEntry() { UniquePtr assembler(static_cast(Assembler::Create(kX86))); - __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)))); + __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry)))); size_t cs = assembler->CodeSize(); UniquePtr > entry_stub(new std::vector(cs)); @@ -249,7 +249,7 @@ const std::vector* CreateInterpreterToInterpreterEntry() { const std::vector* CreateInterpreterToQuickEntry() { UniquePtr assembler(static_cast(Assembler::Create(kX86))); - __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)))); + __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry)))); size_t cs = assembler->CodeSize(); UniquePtr > entry_stub(new std::vector(cs)); diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index 0778cd3bbc..fa202c3017 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -1884,7 +1884,7 @@ void ArmExceptionSlowPath::Emit(Assembler* sasm) { // Don't care about preserving R0 as this call won't return __ mov(R0, ShifterOperand(scratch_.AsCoreRegister())); // Set up call to Thread::Current()->pDeliverException - __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pDeliverException)); + __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException)); __ blx(R12); // Call never returns __ bkpt(0); diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc index 58815da1b8..931d7ab0f7 100644 --- 
a/compiler/utils/mips/assembler_mips.cc +++ b/compiler/utils/mips/assembler_mips.cc @@ -988,7 +988,7 @@ void MipsExceptionSlowPath::Emit(Assembler* sasm) { // Don't care about preserving A0 as this call won't return __ Move(A0, scratch_.AsCoreRegister()); // Set up call to Thread::Current()->pDeliverException - __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pDeliverException)); + __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException)); __ Jr(T9); // Call never returns __ Break(); diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index 89bfeb5917..9095180246 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -1837,7 +1837,7 @@ void X86ExceptionSlowPath::Emit(Assembler *sasm) { } // Pass exception as argument in EAX __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset())); - __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException))); + __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(pDeliverException))); // this call should never return __ int3(); #undef __ diff --git a/runtime/Android.mk b/runtime/Android.mk index bc6a2ed2f8..c686128418 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -118,8 +118,6 @@ LIBART_COMMON_SRC_FILES := \ reference_table.cc \ reflection.cc \ runtime.cc \ - runtime_support.cc \ - runtime_support_llvm.cc \ signal_catcher.cc \ stack.cc \ thread.cc \ @@ -143,6 +141,21 @@ LIBART_COMMON_SRC_FILES += \ arch/arm/registers_arm.cc \ arch/x86/registers_x86.cc \ arch/mips/registers_mips.cc \ + entrypoints/entrypoint_utils.cc \ + entrypoints/jni/jni_entrypoints.cc \ + entrypoints/math_entrypoints.cc \ + entrypoints/portable/portable_alloc_entrypoints.cc \ + entrypoints/portable/portable_cast_entrypoints.cc \ + entrypoints/portable/portable_dexcache_entrypoints.cc \ + entrypoints/portable/portable_field_entrypoints.cc \ + entrypoints/portable/portable_fillarray_entrypoints.cc \ + 
entrypoints/portable/portable_invoke_entrypoints.cc \ + entrypoints/portable/portable_jni_entrypoints.cc \ + entrypoints/portable/portable_lock_entrypoints.cc \ + entrypoints/portable/portable_proxy_entrypoints.cc \ + entrypoints/portable/portable_stub_entrypoints.cc \ + entrypoints/portable/portable_thread_entrypoints.cc \ + entrypoints/portable/portable_throw_entrypoints.cc \ entrypoints/quick/quick_alloc_entrypoints.cc \ entrypoints/quick/quick_cast_entrypoints.cc \ entrypoints/quick/quick_deoptimization_entrypoints.cc \ @@ -171,39 +184,35 @@ LIBART_TARGET_SRC_FILES := \ ifeq ($(TARGET_ARCH),arm) LIBART_TARGET_SRC_FILES += \ arch/arm/context_arm.cc.arm \ + arch/arm/entrypoints_init_arm.cc \ + arch/arm/jni_entrypoints_arm.S \ + arch/arm/portable_entrypoints_arm.S \ arch/arm/quick_entrypoints_arm.S \ - arch/arm/quick_entrypoints_init_arm.cc + arch/arm/thread_arm.cc else # TARGET_ARCH != arm ifeq ($(TARGET_ARCH),x86) LIBART_TARGET_SRC_FILES += \ arch/x86/context_x86.cc \ - arch/x86/quick_entrypoints_init_x86.cc \ - arch/x86/quick_entrypoints_x86.S + arch/x86/entrypoints_init_x86.cc \ + arch/x86/jni_entrypoints_x86.S \ + arch/x86/portable_entrypoints_x86.S \ + arch/x86/quick_entrypoints_x86.S \ + arch/x86/thread_x86.cc else # TARGET_ARCH != x86 ifeq ($(TARGET_ARCH),mips) LIBART_TARGET_SRC_FILES += \ arch/mips/context_mips.cc \ - arch/mips/quick_entrypoints_init_mips.cc \ - arch/mips/quick_entrypoints_mips.S + arch/mips/entrypoints_init_mips.cc \ + arch/mips/jni_entrypoints_mips.S \ + arch/mips/portable_entrypoints_mips.S \ + arch/mips/quick_entrypoints_mips.S \ + arch/mips/thread_mips.cc else # TARGET_ARCH != mips $(error unsupported TARGET_ARCH=$(TARGET_ARCH)) endif # TARGET_ARCH != mips endif # TARGET_ARCH != x86 endif # TARGET_ARCH != arm -ifeq ($(TARGET_ARCH),arm) -LIBART_TARGET_SRC_FILES += thread_arm.cc -else # TARGET_ARCH != arm -ifeq ($(TARGET_ARCH),x86) -LIBART_TARGET_SRC_FILES += thread_x86.cc -else # TARGET_ARCH != x86 -ifeq ($(TARGET_ARCH),mips) 
-LIBART_TARGET_SRC_FILES += thread_mips.cc -else # TARGET_ARCH != mips -$(error unsupported TARGET_ARCH=$(TARGET_ARCH)) -endif # TARGET_ARCH != mips -endif # TARGET_ARCH != x86 -endif # TARGET_ARCH != arm LIBART_HOST_SRC_FILES := \ $(LIBART_COMMON_SRC_FILES) \ @@ -215,14 +224,11 @@ LIBART_HOST_SRC_FILES := \ ifeq ($(HOST_ARCH),x86) LIBART_HOST_SRC_FILES += \ arch/x86/context_x86.cc \ - arch/x86/quick_entrypoints_init_x86.cc \ - arch/x86/quick_entrypoints_x86.S -else # HOST_ARCH != x86 -$(error unsupported HOST_ARCH=$(HOST_ARCH)) -endif # HOST_ARCH != x86 - -ifeq ($(HOST_ARCH),x86) -LIBART_HOST_SRC_FILES += thread_x86.cc + arch/x86/entrypoints_init_x86.cc \ + arch/x86/jni_entrypoints_x86.S \ + arch/x86/portable_entrypoints_x86.S \ + arch/x86/quick_entrypoints_x86.S \ + arch/x86/thread_x86.cc else # HOST_ARCH != x86 $(error unsupported HOST_ARCH=$(HOST_ARCH)) endif # HOST_ARCH != x86 diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S new file mode 100644 index 0000000000..ed655e95b1 --- /dev/null +++ b/runtime/arch/arm/asm_support_arm.S @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ +#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ + +#include "asm_support_arm.h" + +.macro ENTRY name + .type \name, #function + .global \name + /* Cache alignment for function entry */ + .balign 16 +\name: + .cfi_startproc + .fnstart +.endm + +.macro END name + .fnend + .cfi_endproc + .size \name, .-\name +.endm + +#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h new file mode 100644 index 0000000000..ed3d476b24 --- /dev/null +++ b/runtime/arch/arm/asm_support_arm.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_ +#define ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_ + +#include "asm_support.h" + +// Register holding suspend check count down. +#define rSUSPEND r4 +// Register holding Thread::Current(). 
+#define rSELF r9 +// Offset of field Thread::suspend_count_ verified in InitCpu +#define THREAD_FLAGS_OFFSET 0 +// Offset of field Thread::exception_ verified in InitCpu +#define THREAD_EXCEPTION_OFFSET 12 + +#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_ diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc new file mode 100644 index 0000000000..b71a158289 --- /dev/null +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -0,0 +1,241 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/entrypoint_utils.h" +#include "entrypoints/math_entrypoints.h" + +namespace art { + +// Alloc entrypoints. +extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); + +// Cast entrypoints. 
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, + const mirror::Class* ref_class); +extern "C" void art_quick_can_put_array_element_from_code(void*, void*); +extern "C" void art_quick_check_cast_from_code(void*, void*); + +// DexCache entrypoints. +extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); +extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); + +// Exception entrypoints. +extern "C" void* GetAndClearException(Thread*); + +// Field entrypoints. +extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); +extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_static_from_code(uint32_t); +extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); +extern "C" int64_t art_quick_get64_static_from_code(uint32_t); +extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); +extern "C" void* art_quick_get_obj_static_from_code(uint32_t); + +// FillArray entrypoint. +extern "C" void art_quick_handle_fill_data_from_code(void*, void*); + +// Lock entrypoints. +extern "C" void art_quick_lock_object_from_code(void*); +extern "C" void art_quick_unlock_object_from_code(void*); + +// Math entrypoints. 
+extern int32_t CmpgDouble(double a, double b); +extern int32_t CmplDouble(double a, double b); +extern int32_t CmpgFloat(float a, float b); +extern int32_t CmplFloat(float a, float b); + +// Math conversions. +extern "C" int32_t __aeabi_f2iz(float op1); // FLOAT_TO_INT +extern "C" int32_t __aeabi_d2iz(double op1); // DOUBLE_TO_INT +extern "C" float __aeabi_l2f(int64_t op1); // LONG_TO_FLOAT +extern "C" double __aeabi_l2d(int64_t op1); // LONG_TO_DOUBLE + +// Single-precision FP arithmetics. +extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] + +// Double-precision FP arithmetics. +extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] + +// Integer arithmetics. +extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8|_LIT16] + +// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] +extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t); +extern "C" int64_t art_quick_mul_long(int64_t, int64_t); +extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); + +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Intrinsic entrypoints. +extern "C" int32_t __memcmp16(void*, void*, int32_t); +extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); +extern "C" int32_t art_quick_string_compareto(void*, void*); + +// Invoke entrypoints. 
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); + +// Thread entrypoints. +extern void CheckSuspendFromCode(Thread* thread); +extern "C" void art_quick_test_suspend(); + +// Throw entrypoints. +extern "C" void art_quick_deliver_exception_from_code(void*); +extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero_from_code(); +extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception_from_code(); +extern "C" void art_quick_throw_stack_overflow_from_code(void*); + +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { + // Alloc + qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; + qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; + qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; + qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; + qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; + qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = 
art_quick_check_and_alloc_array_from_code_with_access_check; + + // Cast + qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; + qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; + qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; + qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; + qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; + qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + + // Field + qpoints->pSet32Instance = art_quick_set32_instance_from_code; + qpoints->pSet32Static = art_quick_set32_static_from_code; + qpoints->pSet64Instance = art_quick_set64_instance_from_code; + qpoints->pSet64Static = art_quick_set64_static_from_code; + qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; + qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; + qpoints->pGet32Instance = art_quick_get32_instance_from_code; + qpoints->pGet64Instance = art_quick_get64_instance_from_code; + qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; + qpoints->pGet32Static = art_quick_get32_static_from_code; + qpoints->pGet64Static = art_quick_get64_static_from_code; + qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + + // FillArray + qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + + // Locks + qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; + 
qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + + // Math + qpoints->pCmpgDouble = CmpgDouble; + qpoints->pCmpgFloat = CmpgFloat; + qpoints->pCmplDouble = CmplDouble; + qpoints->pCmplFloat = CmplFloat; + qpoints->pFmod = fmod; + qpoints->pSqrt = sqrt; + qpoints->pL2d = __aeabi_l2d; + qpoints->pFmodf = fmodf; + qpoints->pL2f = __aeabi_l2f; + qpoints->pD2iz = __aeabi_d2iz; + qpoints->pF2iz = __aeabi_f2iz; + qpoints->pIdivmod = __aeabi_idivmod; + qpoints->pD2l = art_d2l; + qpoints->pF2l = art_f2l; + qpoints->pLdiv = __aeabi_ldivmod; + qpoints->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3 + qpoints->pLmul = art_quick_mul_long; + qpoints->pShlLong = art_quick_shl_long; + qpoints->pShrLong = art_quick_shr_long; + qpoints->pUshrLong = art_quick_ushr_long; + + // Interpreter + qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; + qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; + + // Intrinsics + qpoints->pIndexOf = art_quick_indexof; + qpoints->pMemcmp16 = __memcmp16; + qpoints->pStringCompareTo = art_quick_string_compareto; + qpoints->pMemcpy = memcpy; + + // Invocation + qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; + qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; + qpoints->pTestSuspendFromCode = art_quick_test_suspend; + + // Throws + 
qpoints->pDeliverException = art_quick_deliver_exception_from_code; + qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; + qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; + qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; + qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; + qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; + + // Portable + ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; +}; + +} // namespace art diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S new file mode 100644 index 0000000000..0a0d06a22a --- /dev/null +++ b/runtime/arch/arm/jni_entrypoints_arm.S @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_arm.S" + + /* + * Jni dlsym lookup stub. 
+ */ + .extern artFindNativeMethod +ENTRY art_jni_dlsym_lookup_stub + push {r0, r1, r2, r3, lr} @ spill regs + .save {r0, r1, r2, r3, lr} + .pad #20 + .cfi_adjust_cfa_offset 20 + sub sp, #12 @ pad stack pointer to align frame + .pad #12 + .cfi_adjust_cfa_offset 12 + mov r0, r9 @ pass Thread::Current + blx artFindNativeMethod @ (Thread*) + mov r12, r0 @ save result in r12 + add sp, #12 @ restore stack pointer + .cfi_adjust_cfa_offset -12 + pop {r0, r1, r2, r3, lr} @ restore regs + .cfi_adjust_cfa_offset -20 + cmp r12, #0 @ is method code null? + bxne r12 @ if non-null, tail call to method's code + bx lr @ otherwise, return to caller to handle exception +END art_jni_dlsym_lookup_stub + + /* + * Entry point of native methods when JNI bug compatibility is enabled. + */ + .extern artWorkAroundAppJniBugs +ENTRY art_quick_work_around_app_jni_bugs + @ save registers that may contain arguments and LR that will be crushed by a call + push {r0-r3, lr} + .save {r0-r3, lr} + .cfi_adjust_cfa_offset 16 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r1, 4 + .cfi_rel_offset r2, 8 + .cfi_rel_offset r3, 12 + sub sp, #12 @ 3 words of space for alignment + mov r0, r9 @ pass Thread::Current + mov r1, sp @ pass SP + bl artWorkAroundAppJniBugs @ (Thread*, SP) + add sp, #12 @ rewind stack + mov r12, r0 @ save target address + pop {r0-r3, lr} @ restore possibly modified argument registers + .cfi_adjust_cfa_offset -16 + bx r12 @ tail call into JNI routine +END art_quick_work_around_app_jni_bugs diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S new file mode 100644 index 0000000000..4cc6654ebb --- /dev/null +++ b/runtime/arch/arm/portable_entrypoints_arm.S @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_arm.S" + + /* + * Portable invocation stub. + * On entry: + * r0 = method pointer + * r1 = argument array or NULL for no argument methods + * r2 = size of argument array in bytes + * r3 = (managed) thread pointer + * [sp] = JValue* result + * [sp + 4] = result type char + */ +ENTRY art_portable_invoke_stub + push {r0, r4, r5, r9, r11, lr} @ spill regs + .save {r0, r4, r5, r9, r11, lr} + .pad #24 + .cfi_adjust_cfa_offset 24 + .cfi_rel_offset r0, 0 + .cfi_rel_offset r4, 4 + .cfi_rel_offset r5, 8 + .cfi_rel_offset r9, 12 + .cfi_rel_offset r11, 16 + .cfi_rel_offset lr, 20 + mov r11, sp @ save the stack pointer + .cfi_def_cfa_register r11 + mov r9, r3 @ move managed thread pointer into r9 + mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval + add r5, r2, #16 @ create space for method pointer in frame + and r5, #0xFFFFFFF0 @ align frame size to 16 bytes + sub sp, r5 @ reserve stack space for argument array + add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy + bl memcpy @ memcpy (dest, src, bytes) + ldr r0, [r11] @ restore method* + ldr r1, [sp, #4] @ copy arg value for r1 + ldr r2, [sp, #8] @ copy arg value for r2 + ldr r3, [sp, #12] @ copy arg value for r3 + mov ip, #0 @ set ip to 0 + str ip, [sp] @ store NULL for method* at bottom of frame + add sp, #16 @ first 4 args are not passed on stack for portable + ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code + blx ip @ call the method + mov sp, r11 @ restore the stack pointer + ldr ip, [sp, #24] @ load the result pointer + strd 
r0, [ip] @ store r0/r1 into result pointer + pop {r0, r4, r5, r9, r11, lr} @ restore spill regs + .cfi_adjust_cfa_offset -24 + bx lr +END art_portable_invoke_stub + + .extern artPortableProxyInvokeHandler +ENTRY art_portable_proxy_invoke_handler + @ Fake callee save ref and args frame set up, note portable doesn't use callee save frames. + @ TODO: just save the registers that are needed in artPortableProxyInvokeHandler. + push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves + .save {r1-r3, r5-r8, r10-r11, lr} + .cfi_adjust_cfa_offset 40 + .cfi_rel_offset r1, 0 + .cfi_rel_offset r2, 4 + .cfi_rel_offset r3, 8 + .cfi_rel_offset r5, 12 + .cfi_rel_offset r6, 16 + .cfi_rel_offset r7, 20 + .cfi_rel_offset r8, 24 + .cfi_rel_offset r10, 28 + .cfi_rel_offset r11, 32 + .cfi_rel_offset lr, 36 + sub sp, #8 @ 2 words of space, bottom word will hold Method* + .pad #8 + .cfi_adjust_cfa_offset 8 + @ Begin argument set up. + str r0, [sp, #0] @ place proxy method at bottom of frame + mov r2, r9 @ pass Thread::Current + mov r3, sp @ pass SP + blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP) + ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ + ldr lr, [sp, #44] @ restore lr + add sp, #48 @ pop frame + .cfi_adjust_cfa_offset -48 + bx lr @ return +END art_portable_proxy_invoke_handler diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index f19e8bada0..9b8d238ab8 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -14,29 +14,13 @@ * limitations under the License. 
*/ -#include "asm_support.h" +#include "asm_support_arm.S" /* Deliver the given exception */ .extern artDeliverExceptionFromCode /* Deliver an exception pending on a thread */ .extern artDeliverPendingException -.macro ENTRY name - .type \name, #function - .global \name - /* Cache alignment for function entry */ - .balign 16 -\name: - .cfi_startproc - .fnstart -.endm - -.macro END name - .fnend - .cfi_endproc - .size \name, .-\name -.endm - /* * Macro that sets up the callee save frame to conform with * Runtime::CreateCalleeSaveMethod(kSaveAll) @@ -246,53 +230,6 @@ INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvok INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck - /* - * Portable invocation stub. - * On entry: - * r0 = method pointer - * r1 = argument array or NULL for no argument methods - * r2 = size of argument array in bytes - * r3 = (managed) thread pointer - * [sp] = JValue* result - * [sp + 4] = result type char - */ -ENTRY art_portable_invoke_stub - push {r0, r4, r5, r9, r11, lr} @ spill regs - .save {r0, r4, r5, r9, r11, lr} - .pad #24 - .cfi_adjust_cfa_offset 24 - .cfi_rel_offset r0, 0 - .cfi_rel_offset r4, 4 - .cfi_rel_offset r5, 8 - .cfi_rel_offset r9, 12 - .cfi_rel_offset r11, 16 - .cfi_rel_offset lr, 20 - mov r11, sp @ save the stack pointer - .cfi_def_cfa_register r11 - mov r9, r3 @ move managed thread pointer into r9 - mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval - add r5, r2, #16 @ create space for method pointer in frame - and r5, #0xFFFFFFF0 @ align frame size to 16 bytes - sub sp, r5 @ reserve stack space for argument array - add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy - bl memcpy @ memcpy (dest, src, bytes) - ldr r0, [r11] @ restore method* - ldr r1, [sp, #4] @ copy arg value for r1 - ldr r2, 
[sp, #8] @ copy arg value for r2 - ldr r3, [sp, #12] @ copy arg value for r3 - mov ip, #0 @ set ip to 0 - str ip, [sp] @ store NULL for method* at bottom of frame - add sp, #16 @ first 4 args are not passed on stack for portable - ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code - blx ip @ call the method - mov sp, r11 @ restore the stack pointer - ldr ip, [sp, #24] @ load the result pointer - strd r0, [ip] @ store r0/r1 into result pointer - pop {r0, r4, r5, r9, r11, lr} @ restore spill regs - .cfi_adjust_cfa_offset -24 - bx lr -END art_portable_invoke_stub - /* * Quick invocation stub. * On entry: @@ -352,30 +289,6 @@ ENTRY art_quick_do_long_jump bx r2 @ do long jump END art_quick_do_long_jump - /* - * Entry point of native methods when JNI bug compatibility is enabled. - */ - .extern artWorkAroundAppJniBugs -ENTRY art_quick_work_around_app_jni_bugs - @ save registers that may contain arguments and LR that will be crushed by a call - push {r0-r3, lr} - .save {r0-r3, lr} - .cfi_adjust_cfa_offset 16 - .cfi_rel_offset r0, 0 - .cfi_rel_offset r1, 4 - .cfi_rel_offset r2, 8 - .cfi_rel_offset r3, 12 - sub sp, #12 @ 3 words of space for alignment - mov r0, r9 @ pass Thread::Current - mov r1, sp @ pass SP - bl artWorkAroundAppJniBugs @ (Thread*, SP) - add sp, #12 @ rewind stack - mov r12, r0 @ save target address - pop {r0-r3, lr} @ restore possibly modified argument registers - .cfi_adjust_cfa_offset -16 - bx r12 @ tail call into JNI routine -END art_quick_work_around_app_jni_bugs - /* * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on * failure. 
@@ -906,20 +819,6 @@ ENTRY art_quick_test_suspend RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN END art_quick_test_suspend - .extern artPortableProxyInvokeHandler -ENTRY art_portable_proxy_invoke_handler - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - str r0, [sp, #0] @ place proxy method at bottom of frame - mov r2, r9 @ pass Thread::Current - mov r3, sp @ pass SP - blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP) - ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_ - ldr lr, [sp, #44] @ restore lr - add sp, #48 @ pop frame - .cfi_adjust_cfa_offset -48 - bx lr @ return -END art_portable_proxy_invoke_handler - /* * Called by managed code that is attempting to call a method on a proxy class. On entry * r0 holds the proxy method and r1 holds the receiver; r2 and r3 may contain arguments. The @@ -1044,30 +943,6 @@ ENTRY art_quick_abstract_method_error_stub b artThrowAbstractMethodErrorFromCode @ (Method*, Thread*, SP) END art_quick_abstract_method_error_stub - /* - * Jni dlsym lookup stub. - */ - .extern artFindNativeMethod -ENTRY art_jni_dlsym_lookup_stub - push {r0, r1, r2, r3, lr} @ spill regs - .save {r0, r1, r2, r3, lr} - .pad #20 - .cfi_adjust_cfa_offset 20 - sub sp, #12 @ pad stack pointer to align frame - .pad #12 - .cfi_adjust_cfa_offset 12 - mov r0, r9 @ pass Thread::Current - blx artFindNativeMethod @ (Thread*) - mov r12, r0 @ save result in r12 - add sp, #12 @ restore stack pointer - .cfi_adjust_cfa_offset -12 - pop {r0, r1, r2, r3, lr} @ restore regs - .cfi_adjust_cfa_offset -20 - cmp r12, #0 @ is method code null? - bxne r12 @ if non-null, tail call to method's code - bx lr @ otherwise, return to caller to handle exception -END art_jni_dlsym_lookup_stub - /* * Signed 64-bit integer multiply. 
* diff --git a/runtime/arch/arm/quick_entrypoints_init_arm.cc b/runtime/arch/arm/quick_entrypoints_init_arm.cc deleted file mode 100644 index 2f66b361ee..0000000000 --- a/runtime/arch/arm/quick_entrypoints_init_arm.cc +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "entrypoints/quick/quick_entrypoints.h" -#include "runtime_support.h" - -namespace art { - -// Alloc entrypoints. -extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); - -// Cast entrypoints. -extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); - -// DexCache entrypoints. 
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); - -// Exception entrypoints. -extern "C" void* GetAndClearException(Thread*); - -// Field entrypoints. -extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); - -// FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); - -// Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); - -// Math entrypoints. -extern int32_t CmpgDouble(double a, double b); -extern int32_t CmplDouble(double a, double b); -extern int32_t CmpgFloat(float a, float b); -extern int32_t CmplFloat(float a, float b); - -// Math conversions. 
-extern "C" int32_t __aeabi_f2iz(float op1); // FLOAT_TO_INT -extern "C" int32_t __aeabi_d2iz(double op1); // DOUBLE_TO_INT -extern "C" float __aeabi_l2f(int64_t op1); // LONG_TO_FLOAT -extern "C" double __aeabi_l2d(int64_t op1); // LONG_TO_DOUBLE - -// Single-precision FP arithmetics. -extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] - -// Double-precision FP arithmetics. -extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] - -// Integer arithmetics. -extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8|_LIT16] - -// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] -extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t); -extern "C" int64_t art_quick_mul_long(int64_t, int64_t); -extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); - -// Interpreter entrypoints. -extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - -// Intrinsic entrypoints. -extern "C" int32_t __memcmp16(void*, void*, int32_t); -extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); -extern "C" int32_t art_quick_string_compareto(void*, void*); - -// Invoke entrypoints. 
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); - -// Thread entrypoints. -extern void CheckSuspendFromCode(Thread* thread); -extern "C" void art_quick_test_suspend(); - -// Throw entrypoints. -extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(QuickEntryPoints* points) { - // Alloc - points->pAllocArrayFromCode = art_quick_alloc_array_from_code; - points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - points->pAllocObjectFromCode = art_quick_alloc_object_from_code; - points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; - 
- // Cast - points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; - points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - points->pCheckCastFromCode = art_quick_check_cast_from_code; - - // DexCache - points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - points->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - points->pResolveStringFromCode = art_quick_resolve_string_from_code; - - // Field - points->pSet32Instance = art_quick_set32_instance_from_code; - points->pSet32Static = art_quick_set32_static_from_code; - points->pSet64Instance = art_quick_set64_instance_from_code; - points->pSet64Static = art_quick_set64_static_from_code; - points->pSetObjInstance = art_quick_set_obj_instance_from_code; - points->pSetObjStatic = art_quick_set_obj_static_from_code; - points->pGet32Instance = art_quick_get32_instance_from_code; - points->pGet64Instance = art_quick_get64_instance_from_code; - points->pGetObjInstance = art_quick_get_obj_instance_from_code; - points->pGet32Static = art_quick_get32_static_from_code; - points->pGet64Static = art_quick_get64_static_from_code; - points->pGetObjStatic = art_quick_get_obj_static_from_code; - - // FillArray - points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; - - // JNI - points->pJniMethodStart = JniMethodStart; - points->pJniMethodStartSynchronized = JniMethodStartSynchronized; - points->pJniMethodEnd = JniMethodEnd; - points->pJniMethodEndSynchronized = JniMethodEndSynchronized; - points->pJniMethodEndWithReference = JniMethodEndWithReference; - points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - - // Locks - points->pLockObjectFromCode = art_quick_lock_object_from_code; - points->pUnlockObjectFromCode = art_quick_unlock_object_from_code; - - // Math - points->pCmpgDouble = 
CmpgDouble; - points->pCmpgFloat = CmpgFloat; - points->pCmplDouble = CmplDouble; - points->pCmplFloat = CmplFloat; - points->pFmod = fmod; - points->pSqrt = sqrt; - points->pL2d = __aeabi_l2d; - points->pFmodf = fmodf; - points->pL2f = __aeabi_l2f; - points->pD2iz = __aeabi_d2iz; - points->pF2iz = __aeabi_f2iz; - points->pIdivmod = __aeabi_idivmod; - points->pD2l = art_d2l; - points->pF2l = art_f2l; - points->pLdiv = __aeabi_ldivmod; - points->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3 - points->pLmul = art_quick_mul_long; - points->pShlLong = art_quick_shl_long; - points->pShrLong = art_quick_shr_long; - points->pUshrLong = art_quick_ushr_long; - - // Interpreter - points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - points->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - - // Intrinsics - points->pIndexOf = art_quick_indexof; - points->pMemcmp16 = __memcmp16; - points->pStringCompareTo = art_quick_string_compareto; - points->pMemcpy = memcpy; - - // Invocation - points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; - points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; - points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; - points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; - points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; - points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; - points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; - points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - points->pCheckSuspendFromCode = CheckSuspendFromCode; - points->pTestSuspendFromCode = art_quick_test_suspend; - - // Throws - points->pDeliverException = 
art_quick_deliver_exception_from_code; - points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; - points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; -}; - -} // namespace art diff --git a/runtime/arch/arm/thread_arm.cc b/runtime/arch/arm/thread_arm.cc new file mode 100644 index 0000000000..ea908be22c --- /dev/null +++ b/runtime/arch/arm/thread_arm.cc @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "thread.h" + +#include "asm_support_arm.h" +#include "base/logging.h" + +namespace art { + +void Thread::InitCpu() { + CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); + CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); +} + +} // namespace art diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S new file mode 100644 index 0000000000..8a34b9dbd0 --- /dev/null +++ b/runtime/arch/mips/asm_support_mips.S @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ +#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ + +#include "asm_support_mips.h" + + /* Cache alignment for function entry */ +.macro ENTRY name + .type \name, %function + .global \name + .balign 16 +\name: + .cfi_startproc +.endm + +.macro END name + .cfi_endproc + .size \name, .-\name +.endm + + /* Generates $gp for function calls */ +.macro GENERATE_GLOBAL_POINTER + .cpload $t9 +.endm + +#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_ diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h new file mode 100644 index 0000000000..9a66352ad1 --- /dev/null +++ b/runtime/arch/mips/asm_support_mips.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_ +#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_ + +#include "asm_support.h" + +// Register holding suspend check count down. +#define rSUSPEND $s0 +// Register holding Thread::Current(). 
+#define rSELF $s1 +// Offset of field Thread::suspend_count_ verified in InitCpu +#define THREAD_FLAGS_OFFSET 0 +// Offset of field Thread::exception_ verified in InitCpu +#define THREAD_EXCEPTION_OFFSET 12 + +#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_ diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc new file mode 100644 index 0000000000..0a62a4096d --- /dev/null +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -0,0 +1,242 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/entrypoint_utils.h" +#include "entrypoints/math_entrypoints.h" + +namespace art { + +// Alloc entrypoints. +extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); + +// Cast entrypoints. 
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, + const mirror::Class* ref_class); +extern "C" void art_quick_can_put_array_element_from_code(void*, void*); +extern "C" void art_quick_check_cast_from_code(void*, void*); + +// DexCache entrypoints. +extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); +extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); + +// Exception entrypoints. +extern "C" void* GetAndClearException(Thread*); + +// Field entrypoints. +extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); +extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_static_from_code(uint32_t); +extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); +extern "C" int64_t art_quick_get64_static_from_code(uint32_t); +extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); +extern "C" void* art_quick_get_obj_static_from_code(uint32_t); + +// FillArray entrypoint. +extern "C" void art_quick_handle_fill_data_from_code(void*, void*); + +// Lock entrypoints. +extern "C" void art_quick_lock_object_from_code(void*); +extern "C" void art_quick_unlock_object_from_code(void*); + +// Math entrypoints. 
+extern int32_t CmpgDouble(double a, double b); +extern int32_t CmplDouble(double a, double b); +extern int32_t CmpgFloat(float a, float b); +extern int32_t CmplFloat(float a, float b); +extern "C" int64_t artLmulFromCode(int64_t a, int64_t b); +extern "C" int64_t artLdivFromCode(int64_t a, int64_t b); +extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b); + +// Math conversions. +extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT +extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT +extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT +extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE +extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG +extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG + +// Single-precision FP arithmetics. +extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] + +// Double-precision FP arithmetics. +extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] + +// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] +extern "C" int64_t __divdi3(int64_t, int64_t); +extern "C" int64_t __moddi3(int64_t, int64_t); +extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); +extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); + +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Intrinsic entrypoints. +extern "C" int32_t __memcmp16(void*, void*, int32_t); +extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); +extern "C" int32_t art_quick_string_compareto(void*, void*); + +// Invoke entrypoints. 
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); + +// Thread entrypoints. +extern void CheckSuspendFromCode(Thread* thread); +extern "C" void art_quick_test_suspend(); + +// Throw entrypoints. +extern "C" void art_quick_deliver_exception_from_code(void*); +extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero_from_code(); +extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception_from_code(); +extern "C" void art_quick_throw_stack_overflow_from_code(void*); + +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { + // Alloc + qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; + qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; + qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; + qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; + qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; + qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = 
art_quick_check_and_alloc_array_from_code_with_access_check; + + // Cast + qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; + qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; + qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; + qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; + qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; + qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + + // Field + qpoints->pSet32Instance = art_quick_set32_instance_from_code; + qpoints->pSet32Static = art_quick_set32_static_from_code; + qpoints->pSet64Instance = art_quick_set64_instance_from_code; + qpoints->pSet64Static = art_quick_set64_static_from_code; + qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; + qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; + qpoints->pGet32Instance = art_quick_get32_instance_from_code; + qpoints->pGet64Instance = art_quick_get64_instance_from_code; + qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; + qpoints->pGet32Static = art_quick_get32_static_from_code; + qpoints->pGet64Static = art_quick_get64_static_from_code; + qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + + // FillArray + qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + + // Locks + qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; + 
qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + + // Math + qpoints->pCmpgDouble = CmpgDouble; + qpoints->pCmpgFloat = CmpgFloat; + qpoints->pCmplDouble = CmplDouble; + qpoints->pCmplFloat = CmplFloat; + qpoints->pFmod = fmod; + qpoints->pL2d = __floatdidf; + qpoints->pFmodf = fmodf; + qpoints->pL2f = __floatdisf; + qpoints->pD2iz = __fixdfsi; + qpoints->pF2iz = __fixsfsi; + qpoints->pIdivmod = NULL; + qpoints->pD2l = art_d2l; + qpoints->pF2l = art_f2l; + qpoints->pLdiv = artLdivFromCode; + qpoints->pLdivmod = artLdivmodFromCode; + qpoints->pLmul = artLmulFromCode; + qpoints->pShlLong = art_quick_shl_long; + qpoints->pShrLong = art_quick_shr_long; + qpoints->pUshrLong = art_quick_ushr_long; + + // Interpreter + qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; + qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; + + // Intrinsics + qpoints->pIndexOf = art_quick_indexof; + qpoints->pMemcmp16 = __memcmp16; + qpoints->pStringCompareTo = art_quick_string_compareto; + qpoints->pMemcpy = memcpy; + + // Invocation + qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; + qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; + qpoints->pTestSuspendFromCode = art_quick_test_suspend; + + // Throws + qpoints->pDeliverException = art_quick_deliver_exception_from_code; + 
qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; + qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; + qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; + qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; + qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; + + // Portable + ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; +}; + +} // namespace art diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S new file mode 100644 index 0000000000..fca6d777ab --- /dev/null +++ b/runtime/arch/mips/jni_entrypoints_mips.S @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_mips.S" + + .set noreorder + .balign 4 + + /* + * Jni dlsym lookup stub. 
+ */ + .extern artFindNativeMethod +ENTRY art_jni_dlsym_lookup_stub + GENERATE_GLOBAL_POINTER + addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra + .cfi_adjust_cfa_offset 32 + sw $ra, 16($sp) + .cfi_rel_offset 31, 16 + sw $a3, 12($sp) + .cfi_rel_offset 7, 12 + sw $a2, 8($sp) + .cfi_rel_offset 6, 8 + sw $a1, 4($sp) + .cfi_rel_offset 5, 4 + sw $a0, 0($sp) + .cfi_rel_offset 4, 0 + jal artFindNativeMethod # (Thread*) + move $a0, $s1 # pass Thread::Current() + lw $a0, 0($sp) # restore registers from stack + lw $a1, 4($sp) + lw $a2, 8($sp) + lw $a3, 12($sp) + lw $ra, 16($sp) + beq $v0, $zero, no_native_code_found + addiu $sp, $sp, 32 # restore the stack + .cfi_adjust_cfa_offset -32 + move $t9, $v0 # put method code result in $t9 + jr $t9 # leaf call to method's code + nop +no_native_code_found: + jr $ra + nop +END art_jni_dlsym_lookup_stub + + /* + * Entry point of native methods when JNI bug compatibility is enabled. + */ + .extern artWorkAroundAppJniBugs +ENTRY art_quick_work_around_app_jni_bugs + GENERATE_GLOBAL_POINTER + # save registers that may contain arguments and LR that will be crushed by a call + addiu $sp, $sp, -32 + .cfi_adjust_cfa_offset 32 + sw $ra, 28($sp) + .cfi_rel_offset 31, 28 + sw $a3, 24($sp) + .cfi_rel_offset 7, 28 + sw $a2, 20($sp) + .cfi_rel_offset 6, 28 + sw $a1, 16($sp) + .cfi_rel_offset 5, 28 + sw $a0, 12($sp) + .cfi_rel_offset 4, 28 + move $a0, rSELF # pass Thread::Current + jal artWorkAroundAppJniBugs # (Thread*, $sp) + move $a1, $sp # pass $sp + move $t9, $v0 # save target address + lw $a0, 12($sp) + lw $a1, 16($sp) + lw $a2, 20($sp) + lw $a3, 24($sp) + lw $ra, 28($sp) + jr $t9 # tail call into JNI routine + addiu $sp, $sp, 32 + .cfi_adjust_cfa_offset -32 +END art_quick_work_around_app_jni_bugs diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S new file mode 100644 index 0000000000..e7a9b0fb60 --- /dev/null +++ b/runtime/arch/mips/portable_entrypoints_mips.S @@ -0,0 
+1,73 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_mips.S" + + .set noreorder + .balign 4 + + .extern artPortableProxyInvokeHandler +ENTRY art_portable_proxy_invoke_handler + GENERATE_GLOBAL_POINTER + # Fake callee save ref and args frame set up, note portable doesn't use callee save frames. + # TODO: just save the registers that are needed in artPortableProxyInvokeHandler. + addiu $sp, $sp, -64 + .cfi_adjust_cfa_offset 64 + sw $ra, 60($sp) + .cfi_rel_offset 31, 60 + sw $s8, 56($sp) + .cfi_rel_offset 30, 56 + sw $gp, 52($sp) + .cfi_rel_offset 28, 52 + sw $s7, 48($sp) + .cfi_rel_offset 23, 48 + sw $s6, 44($sp) + .cfi_rel_offset 22, 44 + sw $s5, 40($sp) + .cfi_rel_offset 21, 40 + sw $s4, 36($sp) + .cfi_rel_offset 20, 36 + sw $s3, 32($sp) + .cfi_rel_offset 19, 32 + sw $s2, 28($sp) + .cfi_rel_offset 18, 28 + sw $a3, 12($sp) + .cfi_rel_offset 7, 12 + sw $a2, 8($sp) + .cfi_rel_offset 6, 8 + sw $a1, 4($sp) + .cfi_rel_offset 5, 4 + # Begin argument set up. + sw $a0, 0($sp) # place proxy method at bottom of frame + move $a2, rSELF # pass Thread::Current + jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP) + move $a3, $sp # pass $sp + lw $ra, 60($sp) # restore $ra + jr $ra + addiu $sp, $sp, 64 # pop frame + .cfi_adjust_cfa_offset -64 +END art_portable_proxy_invoke_handler + + /* + * Portable abstract method error stub. 
$a0 contains method* on entry. SP unused in portable. + */ + .extern artThrowAbstractMethodErrorFromCode +ENTRY art_portable_abstract_method_error_stub + GENERATE_GLOBAL_POINTER + la $t9, artThrowAbstractMethodErrorFromCode + jr $t9 # (Method*, Thread*, SP) + move $a1, $s1 # pass Thread::Current +END art_portable_abstract_method_error_stub diff --git a/runtime/arch/mips/quick_entrypoints_init_mips.cc b/runtime/arch/mips/quick_entrypoints_init_mips.cc deleted file mode 100644 index d494c65615..0000000000 --- a/runtime/arch/mips/quick_entrypoints_init_mips.cc +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "entrypoints/quick/quick_entrypoints.h" -#include "runtime_support.h" - -namespace art { - -// Alloc entrypoints. -extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); - -// Cast entrypoints. 
-extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, - const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); - -// DexCache entrypoints. -extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); - -// Exception entrypoints. -extern "C" void* GetAndClearException(Thread*); - -// Field entrypoints. -extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); - -// FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); - -// Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); - -// Math entrypoints. 
-extern int32_t CmpgDouble(double a, double b); -extern int32_t CmplDouble(double a, double b); -extern int32_t CmpgFloat(float a, float b); -extern int32_t CmplFloat(float a, float b); -extern "C" int64_t artLmulFromCode(int64_t a, int64_t b); -extern "C" int64_t artLdivFromCode(int64_t a, int64_t b); -extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b); - -// Math conversions. -extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT -extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT -extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT -extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE -extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG -extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG - -// Single-precision FP arithmetics. -extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR] - -// Double-precision FP arithmetics. -extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR] - -// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR] -extern "C" int64_t __divdi3(int64_t, int64_t); -extern "C" int64_t __moddi3(int64_t, int64_t); -extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t); -extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t); - -// Interpreter entrypoints. -extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - -// Intrinsic entrypoints. -extern "C" int32_t __memcmp16(void*, void*, int32_t); -extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); -extern "C" int32_t art_quick_string_compareto(void*, void*); - -// Invoke entrypoints. 
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); - -// Thread entrypoints. -extern void CheckSuspendFromCode(Thread* thread); -extern "C" void art_quick_test_suspend(); - -// Throw entrypoints. -extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(QuickEntryPoints* points) { - // Alloc - points->pAllocArrayFromCode = art_quick_alloc_array_from_code; - points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - points->pAllocObjectFromCode = art_quick_alloc_object_from_code; - points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; - 
- // Cast - points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode; - points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - points->pCheckCastFromCode = art_quick_check_cast_from_code; - - // DexCache - points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - points->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - points->pResolveStringFromCode = art_quick_resolve_string_from_code; - - // Field - points->pSet32Instance = art_quick_set32_instance_from_code; - points->pSet32Static = art_quick_set32_static_from_code; - points->pSet64Instance = art_quick_set64_instance_from_code; - points->pSet64Static = art_quick_set64_static_from_code; - points->pSetObjInstance = art_quick_set_obj_instance_from_code; - points->pSetObjStatic = art_quick_set_obj_static_from_code; - points->pGet32Instance = art_quick_get32_instance_from_code; - points->pGet64Instance = art_quick_get64_instance_from_code; - points->pGetObjInstance = art_quick_get_obj_instance_from_code; - points->pGet32Static = art_quick_get32_static_from_code; - points->pGet64Static = art_quick_get64_static_from_code; - points->pGetObjStatic = art_quick_get_obj_static_from_code; - - // FillArray - points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; - - // JNI - points->pJniMethodStart = JniMethodStart; - points->pJniMethodStartSynchronized = JniMethodStartSynchronized; - points->pJniMethodEnd = JniMethodEnd; - points->pJniMethodEndSynchronized = JniMethodEndSynchronized; - points->pJniMethodEndWithReference = JniMethodEndWithReference; - points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - - // Locks - points->pLockObjectFromCode = art_quick_lock_object_from_code; - points->pUnlockObjectFromCode = art_quick_unlock_object_from_code; - - // Math - points->pCmpgDouble = 
CmpgDouble; - points->pCmpgFloat = CmpgFloat; - points->pCmplDouble = CmplDouble; - points->pCmplFloat = CmplFloat; - points->pFmod = fmod; - points->pL2d = __floatdidf; - points->pFmodf = fmodf; - points->pL2f = __floatdisf; - points->pD2iz = __fixdfsi; - points->pF2iz = __fixsfsi; - points->pIdivmod = NULL; - points->pD2l = art_d2l; - points->pF2l = art_f2l; - points->pLdiv = artLdivFromCode; - points->pLdivmod = artLdivmodFromCode; - points->pLmul = artLmulFromCode; - points->pShlLong = art_quick_shl_long; - points->pShrLong = art_quick_shr_long; - points->pUshrLong = art_quick_ushr_long; - - // Interpreter - points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - points->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - - // Intrinsics - points->pIndexOf = art_quick_indexof; - points->pMemcmp16 = __memcmp16; - points->pStringCompareTo = art_quick_string_compareto; - points->pMemcpy = memcpy; - - // Invocation - points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; - points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; - points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; - points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; - points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; - points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; - points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; - points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - points->pCheckSuspendFromCode = CheckSuspendFromCode; - points->pTestSuspendFromCode = art_quick_test_suspend; - - // Throws - points->pDeliverException = art_quick_deliver_exception_from_code; - points->pThrowArrayBoundsFromCode = 
art_quick_throw_array_bounds_from_code; - points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; -}; - -} // namespace art diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index 45d583e097..d32a2b4a15 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "asm_support.h" +#include "asm_support_mips.S" .set noreorder .balign 4 @@ -24,25 +24,6 @@ /* Deliver an exception pending on a thread */ .extern artDeliverPendingExceptionFromCode - /* Cache alignment for function entry */ -.macro ENTRY name - .type \name, %function - .global \name - .balign 16 -\name: - .cfi_startproc -.endm - -.macro END name - .cfi_endproc - .size \name, .-\name -.endm - - /* Generates $gp for function calls */ -.macro GENERATE_GLOBAL_POINTER - .cpload $t9 -.endm - /* * Macro that sets up the callee save frame to conform with * Runtime::CreateCalleeSaveMethod(kSaveAll) @@ -480,39 +461,6 @@ ENTRY art_quick_invoke_stub END art_quick_invoke_stub .size art_portable_invoke_stub, .-art_portable_invoke_stub - /* - * Entry point of native methods when JNI bug compatibility is enabled. 
- */ - .extern artWorkAroundAppJniBugs -ENTRY art_quick_work_around_app_jni_bugs - GENERATE_GLOBAL_POINTER - # save registers that may contain arguments and LR that will be crushed by a call - addiu $sp, $sp, -32 - .cfi_adjust_cfa_offset 32 - sw $ra, 28($sp) - .cfi_rel_offset 31, 28 - sw $a3, 24($sp) - .cfi_rel_offset 7, 28 - sw $a2, 20($sp) - .cfi_rel_offset 6, 28 - sw $a1, 16($sp) - .cfi_rel_offset 5, 28 - sw $a0, 12($sp) - .cfi_rel_offset 4, 28 - move $a0, rSELF # pass Thread::Current - jal artWorkAroundAppJniBugs # (Thread*, $sp) - move $a1, $sp # pass $sp - move $t9, $v0 # save target address - lw $a0, 12($sp) - lw $a1, 16($sp) - lw $a2, 20($sp) - lw $a3, 24($sp) - lw $ra, 28($sp) - jr $t9 # tail call into JNI routine - addiu $sp, $sp, 32 - .cfi_adjust_cfa_offset -32 -END art_quick_work_around_app_jni_bugs - /* * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on * failure. @@ -912,20 +860,6 @@ ENTRY art_quick_test_suspend RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN END art_quick_test_suspend - .extern artPortableProxyInvokeHandler -ENTRY art_portable_proxy_invoke_handler - GENERATE_GLOBAL_POINTER - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME - sw $a0, 0($sp) # place proxy method at bottom of frame - move $a2, rSELF # pass Thread::Current - jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP) - move $a3, $sp # pass $sp - lw $ra, 60($sp) # restore $ra - jr $ra - addiu $sp, $sp, 64 # pop frame - .cfi_adjust_cfa_offset -64 -END art_portable_proxy_invoke_handler - /* * Called by managed code that is attempting to call a method on a proxy class. On entry * r0 holds the proxy method; r1, r2 and r3 may contain arguments. @@ -1043,17 +977,6 @@ ENTRY art_quick_deoptimize move $a1, $sp # pass $sp END art_quick_deoptimize - /* - * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable. 
- */ - .extern artThrowAbstractMethodErrorFromCode -ENTRY art_portable_abstract_method_error_stub - GENERATE_GLOBAL_POINTER - la $t9, artThrowAbstractMethodErrorFromCode - jr $t9 # (Method*, Thread*, SP) - move $a1, $s1 # pass Thread::Current -END art_portable_abstract_method_error_stub - /* * Quick abstract method error stub. $a0 contains method* on entry. */ @@ -1066,42 +989,6 @@ ENTRY art_quick_abstract_method_error_stub move $a2, $sp # pass SP END art_quick_abstract_method_error_stub - /* - * Jni dlsym lookup stub. - */ - .extern artFindNativeMethod -ENTRY art_jni_dlsym_lookup_stub - GENERATE_GLOBAL_POINTER - addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra - .cfi_adjust_cfa_offset 32 - sw $ra, 16($sp) - .cfi_rel_offset 31, 16 - sw $a3, 12($sp) - .cfi_rel_offset 7, 12 - sw $a2, 8($sp) - .cfi_rel_offset 6, 8 - sw $a1, 4($sp) - .cfi_rel_offset 5, 4 - sw $a0, 0($sp) - .cfi_rel_offset 4, 0 - jal artFindNativeMethod # (Thread*) - move $a0, $s1 # pass Thread::Current() - lw $a0, 0($sp) # restore registers from stack - lw $a1, 4($sp) - lw $a2, 8($sp) - lw $a3, 12($sp) - lw $ra, 16($sp) - beq $v0, $zero, no_native_code_found - addiu $sp, $sp, 32 # restore the stack - .cfi_adjust_cfa_offset -32 - move $t9, $v0 # put method code result in $t9 - jr $t9 # leaf call to method's code - nop -no_native_code_found: - jr $ra - nop -END art_jni_dlsym_lookup_stub - /* * Long integer shift. This is different from the generic 32/64-bit * binary operations because vAA/vBB are 64-bit but vCC (the shift diff --git a/runtime/arch/mips/thread_mips.cc b/runtime/arch/mips/thread_mips.cc new file mode 100644 index 0000000000..7364de067e --- /dev/null +++ b/runtime/arch/mips/thread_mips.cc @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "thread.h" + +#include "asm_support_mips.h" +#include "base/logging.h" + +namespace art { + +void Thread::InitCpu() { + CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); + CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); +} + +} // namespace art diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S new file mode 100644 index 0000000000..7e6dce9c6a --- /dev/null +++ b/runtime/arch/x86/asm_support_x86.S @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ +#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ + +#include "asm_support_x86.h" + +#if defined(__APPLE__) + // Mac OS' as(1) doesn't let you name macro parameters. 
+ #define MACRO0(macro_name) .macro macro_name + #define MACRO1(macro_name, macro_arg1) .macro macro_name + #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name + #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name + #define END_MACRO .endmacro + + // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names + // are mangled with an extra underscore prefix. The use of $x for arguments + // mean that literals need to be represented with $$x in macros. + #define SYMBOL(name) _ ## name + #define VAR(name,index) SYMBOL($index) + #define REG_VAR(name,index) %$index + #define CALL_MACRO(name,index) $index + #define LITERAL(value) $value + #define MACRO_LITERAL(value) $$value +#else + // Regular gas(1) lets you name macro parameters. + #define MACRO0(macro_name) .macro macro_name + #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 + #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 + #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 + #define END_MACRO .endm + + // Regular gas(1) uses \argument_name for macro arguments. + // We need to turn on alternate macro syntax so we can use & instead or the preprocessor + // will screw us by inserting a space between the \ and the name. Even in this mode there's + // no special meaning to $, so literals are still just $x. The use of altmacro means % is a + // special character meaning care needs to be taken when passing registers as macro arguments. 
+ .altmacro + #define SYMBOL(name) name + #define VAR(name,index) name& + #define REG_VAR(name,index) %name + #define CALL_MACRO(name,index) name& + #define LITERAL(value) $value + #define MACRO_LITERAL(value) $value +#endif + + /* Cache alignment for function entry */ +MACRO0(ALIGN_FUNCTION_ENTRY) + .balign 16 +END_MACRO + +MACRO1(DEFINE_FUNCTION, c_name) + .type VAR(c_name, 0), @function + .globl VAR(c_name, 0) + ALIGN_FUNCTION_ENTRY +VAR(c_name, 0): + .cfi_startproc +END_MACRO + +MACRO1(END_FUNCTION, c_name) + .cfi_endproc + .size \c_name, .-\c_name +END_MACRO + +MACRO1(PUSH, reg) + pushl REG_VAR(reg, 0) + .cfi_adjust_cfa_offset 4 + .cfi_rel_offset REG_VAR(reg, 0), 0 +END_MACRO + +MACRO1(POP, reg) + popl REG_VAR(reg,0) + .cfi_adjust_cfa_offset -4 + .cfi_restore REG_VAR(reg,0) +END_MACRO + +#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_ diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h new file mode 100644 index 0000000000..1092910d78 --- /dev/null +++ b/runtime/arch/x86/asm_support_x86.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_ +#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_ + +#include "asm_support.h" + +// Offset of field Thread::self_ verified in InitCpu +#define THREAD_SELF_OFFSET 40 +// Offset of field Thread::exception_ verified in InitCpu +#define THREAD_EXCEPTION_OFFSET 12 + +#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_ diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc new file mode 100644 index 0000000000..d47dfef047 --- /dev/null +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" +#include "entrypoints/entrypoint_utils.h" + +namespace art { + +// Alloc entrypoints. +extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); +extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); +extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); +extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); +extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); + +// Cast entrypoints. 
+extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass, + const mirror::Class* ref_class); +extern "C" void art_quick_can_put_array_element_from_code(void*, void*); +extern "C" void art_quick_check_cast_from_code(void*, void*); + +// DexCache entrypoints. +extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); +extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); +extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); + +// Field entrypoints. +extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); +extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); +extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); +extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); +extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); +extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); +extern "C" int32_t art_quick_get32_static_from_code(uint32_t); +extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); +extern "C" int64_t art_quick_get64_static_from_code(uint32_t); +extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); +extern "C" void* art_quick_get_obj_static_from_code(uint32_t); + +// FillArray entrypoint. +extern "C" void art_quick_handle_fill_data_from_code(void*, void*); + +// Lock entrypoints. +extern "C" void art_quick_lock_object_from_code(void*); +extern "C" void art_quick_unlock_object_from_code(void*); + +// Math entrypoints. 
+extern "C" double art_quick_fmod_from_code(double, double); +extern "C" float art_quick_fmodf_from_code(float, float); +extern "C" double art_quick_l2d_from_code(int64_t); +extern "C" float art_quick_l2f_from_code(int64_t); +extern "C" int64_t art_quick_d2l_from_code(double); +extern "C" int64_t art_quick_f2l_from_code(float); +extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t); +extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t); +extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t); +extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t); +extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t); +extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t); +extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t); + +// Interpreter entrypoints. +extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); +extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, + const DexFile::CodeItem* code_item, + ShadowFrame* shadow_frame, JValue* result); + +// Intrinsic entrypoints. +extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t); +extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); +extern "C" int32_t art_quick_string_compareto(void*, void*); +extern "C" void* art_quick_memcpy(void*, const void*, size_t); + +// Invoke entrypoints. 
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** sp, Thread* thread); +extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); +extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); +extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); + +// Thread entrypoints. +extern void CheckSuspendFromCode(Thread* thread); +extern "C" void art_quick_test_suspend(); + +// Throw entrypoints. +extern "C" void art_quick_deliver_exception_from_code(void*); +extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); +extern "C" void art_quick_throw_div_zero_from_code(); +extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); +extern "C" void art_quick_throw_null_pointer_exception_from_code(); +extern "C" void art_quick_throw_stack_overflow_from_code(void*); + +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) { + // Alloc + qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code; + qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; + qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code; + qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; + qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; + qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = 
art_quick_check_and_alloc_array_from_code_with_access_check; + + // Cast + qpoints->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code; + qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; + qpoints->pCheckCastFromCode = art_quick_check_cast_from_code; + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; + qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; + qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code; + qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code; + + // Field + qpoints->pSet32Instance = art_quick_set32_instance_from_code; + qpoints->pSet32Static = art_quick_set32_static_from_code; + qpoints->pSet64Instance = art_quick_set64_instance_from_code; + qpoints->pSet64Static = art_quick_set64_static_from_code; + qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code; + qpoints->pSetObjStatic = art_quick_set_obj_static_from_code; + qpoints->pGet32Instance = art_quick_get32_instance_from_code; + qpoints->pGet64Instance = art_quick_get64_instance_from_code; + qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code; + qpoints->pGet32Static = art_quick_get32_static_from_code; + qpoints->pGet64Static = art_quick_get64_static_from_code; + qpoints->pGetObjStatic = art_quick_get_obj_static_from_code; + + // FillArray + qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + + // Locks + qpoints->pLockObjectFromCode = art_quick_lock_object_from_code; + 
qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code; + + // Math + // points->pCmpgDouble = NULL; // Not needed on x86. + // points->pCmpgFloat = NULL; // Not needed on x86. + // points->pCmplDouble = NULL; // Not needed on x86. + // points->pCmplFloat = NULL; // Not needed on x86. + qpoints->pFmod = art_quick_fmod_from_code; + qpoints->pL2d = art_quick_l2d_from_code; + qpoints->pFmodf = art_quick_fmodf_from_code; + qpoints->pL2f = art_quick_l2f_from_code; + // points->pD2iz = NULL; // Not needed on x86. + // points->pF2iz = NULL; // Not needed on x86. + qpoints->pIdivmod = art_quick_idivmod_from_code; + qpoints->pD2l = art_quick_d2l_from_code; + qpoints->pF2l = art_quick_f2l_from_code; + qpoints->pLdiv = art_quick_ldiv_from_code; + qpoints->pLdivmod = art_quick_ldivmod_from_code; + qpoints->pLmul = art_quick_lmul_from_code; + qpoints->pShlLong = art_quick_lshl_from_code; + qpoints->pShrLong = art_quick_lshr_from_code; + qpoints->pUshrLong = art_quick_lushr_from_code; + + // Interpreter + qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; + qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry; + + // Intrinsics + qpoints->pIndexOf = art_quick_indexof; + qpoints->pMemcmp16 = art_quick_memcmp16; + qpoints->pStringCompareTo = art_quick_string_compareto; + qpoints->pMemcpy = art_quick_memcpy; + + // Invocation + qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; + qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; + 
qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pCheckSuspendFromCode = CheckSuspendFromCode; + qpoints->pTestSuspendFromCode = art_quick_test_suspend; + + // Throws + qpoints->pDeliverException = art_quick_deliver_exception_from_code; + qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; + qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; + qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; + qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; + qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; + + // Portable + ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; +}; + +} // namespace art diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S new file mode 100644 index 0000000000..e9c88fec02 --- /dev/null +++ b/runtime/arch/x86/jni_entrypoints_x86.S @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_x86.S" + + /* + * Portable resolution trampoline. 
+ */ +DEFINE_FUNCTION art_jni_dlsym_lookup_stub + subl LITERAL(8), %esp // align stack + .cfi_adjust_cfa_offset 8 + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + call SYMBOL(artFindNativeMethod) // (Thread*) + addl LITERAL(12), %esp // restore the stack + .cfi_adjust_cfa_offset -12 + cmpl LITERAL(0), %eax // check if returned method code is null + je no_native_code_found // if null, jump to return to handle + jmp *%eax // otherwise, tail call to intended method +no_native_code_found: + ret +END_FUNCTION art_jni_dlsym_lookup_stub diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S new file mode 100644 index 0000000000..a0fca6cee3 --- /dev/null +++ b/runtime/arch/x86/portable_entrypoints_x86.S @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "asm_support_x86.S" + + /* + * Portable invocation stub. 
+ * On entry: + * [sp] = return address + * [sp + 4] = method pointer + * [sp + 8] = argument array or NULL for no argument methods + * [sp + 12] = size of argument array in bytes + * [sp + 16] = (managed) thread pointer + * [sp + 20] = JValue* result + * [sp + 24] = result type char + */ +DEFINE_FUNCTION art_portable_invoke_stub + PUSH ebp // save ebp + PUSH ebx // save ebx + mov %esp, %ebp // copy value of stack pointer into base pointer + .cfi_def_cfa_register ebp + mov 20(%ebp), %ebx // get arg array size + addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame + andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes + subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp + subl %ebx, %esp // reserve stack space for argument array + lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy + pushl 20(%ebp) // push size of region to memcpy + pushl 16(%ebp) // push arg array as source of memcpy + pushl %eax // push stack pointer as destination of memcpy + call SYMBOL(memcpy) // (void*, const void*, size_t) + addl LITERAL(12), %esp // pop arguments to memcpy + mov 12(%ebp), %eax // move method pointer into eax + mov %eax, (%esp) // push method pointer onto stack + call *METHOD_CODE_OFFSET(%eax) // call the method + mov %ebp, %esp // restore stack pointer + POP ebx // pop ebx + POP ebp // pop ebp + mov 20(%esp), %ecx // get result pointer + cmpl LITERAL(68), 24(%esp) // test if result type char == 'D' + je return_double_portable + cmpl LITERAL(70), 24(%esp) // test if result type char == 'F' + je return_float_portable + mov %eax, (%ecx) // store the result + mov %edx, 4(%ecx) // store the other half of the result + ret +return_double_portable: + fstpl (%ecx) // store the floating point result as double + ret +return_float_portable: + fstps (%ecx) // store the floating point result as float + ret +END_FUNCTION art_portable_invoke_stub + +DEFINE_FUNCTION art_portable_proxy_invoke_handler + // 
Fake callee save ref and args frame set up, note portable doesn't use callee save frames. + // TODO: just save the registers that are needed in artPortableProxyInvokeHandler. + PUSH edi // Save callee saves + PUSH esi + PUSH ebp + PUSH ebx // Save args + PUSH edx + PUSH ecx + PUSH eax // Align stack, eax will be clobbered by Method* + // Begin argument set up. + PUSH esp // pass SP + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + .cfi_adjust_cfa_offset 4 + PUSH ecx // pass receiver + PUSH eax // pass proxy method + call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP) + movd %eax, %xmm0 // place return value also into floating point return value + movd %edx, %xmm1 + punpckldq %xmm1, %xmm0 + addl LITERAL(44), %esp // pop arguments + .cfi_adjust_cfa_offset -44 + ret +END_FUNCTION art_portable_proxy_invoke_handler + + /* + * Portable abstract method error stub. method* is at %esp + 4 on entry. + */ +DEFINE_FUNCTION art_portable_abstract_method_error_stub + PUSH ebp + movl %esp, %ebp // Remember SP. + .cfi_def_cfa_register ebp + subl LITERAL(12), %esp // Align stack. + PUSH esp // Pass sp (not used). + pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). + pushl 8(%ebp) // Pass Method*. + call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP) + leave // Restore the stack and %ebp. + .cfi_def_cfa esp, 4 + .cfi_restore ebp + ret // Return to caller to handle pending exception. +END_FUNCTION art_portable_abstract_method_error_stub diff --git a/runtime/arch/x86/quick_entrypoints_init_x86.cc b/runtime/arch/x86/quick_entrypoints_init_x86.cc deleted file mode 100644 index cced91619c..0000000000 --- a/runtime/arch/x86/quick_entrypoints_init_x86.cc +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "entrypoints/quick/quick_entrypoints.h" -#include "runtime_support.h" - -namespace art { - -// Alloc entrypoints. -extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); -extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method); -extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method); -extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t); -extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t); - -// Cast entrypoints. -extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass, - const mirror::Class* ref_class); -extern "C" void art_quick_can_put_array_element_from_code(void*, void*); -extern "C" void art_quick_check_cast_from_code(void*, void*); - -// DexCache entrypoints. -extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*); -extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*); -extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t); - -// Field entrypoints. 
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t); -extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t); -extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t); -extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t); -extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*); -extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*); -extern "C" int32_t art_quick_get32_static_from_code(uint32_t); -extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*); -extern "C" int64_t art_quick_get64_static_from_code(uint32_t); -extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*); -extern "C" void* art_quick_get_obj_static_from_code(uint32_t); - -// FillArray entrypoint. -extern "C" void art_quick_handle_fill_data_from_code(void*, void*); - -// Lock entrypoints. -extern "C" void art_quick_lock_object_from_code(void*); -extern "C" void art_quick_unlock_object_from_code(void*); - -// Math entrypoints. -extern "C" double art_quick_fmod_from_code(double, double); -extern "C" float art_quick_fmodf_from_code(float, float); -extern "C" double art_quick_l2d_from_code(int64_t); -extern "C" float art_quick_l2f_from_code(int64_t); -extern "C" int64_t art_quick_d2l_from_code(double); -extern "C" int64_t art_quick_f2l_from_code(float); -extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t); -extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t); -extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t); -extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t); -extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t); -extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t); -extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t); - -// Interpreter entrypoints. 
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); -extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh, - const DexFile::CodeItem* code_item, - ShadowFrame* shadow_frame, JValue* result); - -// Intrinsic entrypoints. -extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t); -extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t); -extern "C" int32_t art_quick_string_compareto(void*, void*); -extern "C" void* art_quick_memcpy(void*, const void*, size_t); - -// Invoke entrypoints. -extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** sp, Thread* thread); -extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*); -extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*); -extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*); - -// Thread entrypoints. -extern void CheckSuspendFromCode(Thread* thread); -extern "C" void art_quick_test_suspend(); - -// Throw entrypoints. 
-extern "C" void art_quick_deliver_exception_from_code(void*); -extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit); -extern "C" void art_quick_throw_div_zero_from_code(); -extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx); -extern "C" void art_quick_throw_null_pointer_exception_from_code(); -extern "C" void art_quick_throw_stack_overflow_from_code(void*); - -void InitEntryPoints(QuickEntryPoints* points) { - // Alloc - points->pAllocArrayFromCode = art_quick_alloc_array_from_code; - points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check; - points->pAllocObjectFromCode = art_quick_alloc_object_from_code; - points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check; - points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code; - points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check; - - // Cast - points->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code; - points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code; - points->pCheckCastFromCode = art_quick_check_cast_from_code; - - // DexCache - points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code; - points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code; - points->pInitializeTypeFromCode = art_quick_initialize_type_from_code; - points->pResolveStringFromCode = art_quick_resolve_string_from_code; - - // Field - points->pSet32Instance = art_quick_set32_instance_from_code; - points->pSet32Static = art_quick_set32_static_from_code; - points->pSet64Instance = art_quick_set64_instance_from_code; - points->pSet64Static = art_quick_set64_static_from_code; - points->pSetObjInstance = art_quick_set_obj_instance_from_code; - points->pSetObjStatic = art_quick_set_obj_static_from_code; - 
points->pGet32Instance = art_quick_get32_instance_from_code; - points->pGet64Instance = art_quick_get64_instance_from_code; - points->pGetObjInstance = art_quick_get_obj_instance_from_code; - points->pGet32Static = art_quick_get32_static_from_code; - points->pGet64Static = art_quick_get64_static_from_code; - points->pGetObjStatic = art_quick_get_obj_static_from_code; - - // FillArray - points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code; - - // JNI - points->pJniMethodStart = JniMethodStart; - points->pJniMethodStartSynchronized = JniMethodStartSynchronized; - points->pJniMethodEnd = JniMethodEnd; - points->pJniMethodEndSynchronized = JniMethodEndSynchronized; - points->pJniMethodEndWithReference = JniMethodEndWithReference; - points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - - // Locks - points->pLockObjectFromCode = art_quick_lock_object_from_code; - points->pUnlockObjectFromCode = art_quick_unlock_object_from_code; - - // Math - // points->pCmpgDouble = NULL; // Not needed on x86. - // points->pCmpgFloat = NULL; // Not needed on x86. - // points->pCmplDouble = NULL; // Not needed on x86. - // points->pCmplFloat = NULL; // Not needed on x86. - points->pFmod = art_quick_fmod_from_code; - points->pL2d = art_quick_l2d_from_code; - points->pFmodf = art_quick_fmodf_from_code; - points->pL2f = art_quick_l2f_from_code; - // points->pD2iz = NULL; // Not needed on x86. - // points->pF2iz = NULL; // Not needed on x86. 
- points->pIdivmod = art_quick_idivmod_from_code; - points->pD2l = art_quick_d2l_from_code; - points->pF2l = art_quick_f2l_from_code; - points->pLdiv = art_quick_ldiv_from_code; - points->pLdivmod = art_quick_ldivmod_from_code; - points->pLmul = art_quick_lmul_from_code; - points->pShlLong = art_quick_lshl_from_code; - points->pShrLong = art_quick_lshr_from_code; - points->pUshrLong = art_quick_lushr_from_code; - - // Interpreter - points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry; - points->pInterpreterToQuickEntry = artInterpreterToQuickEntry; - - // Intrinsics - points->pIndexOf = art_quick_indexof; - points->pMemcmp16 = art_quick_memcmp16; - points->pStringCompareTo = art_quick_string_compareto; - points->pMemcpy = art_quick_memcpy; - - // Invocation - points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline; - points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline; - points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check; - points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline; - points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check; - points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check; - points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check; - points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - points->pCheckSuspendFromCode = CheckSuspendFromCode; - points->pTestSuspendFromCode = art_quick_test_suspend; - - // Throws - points->pDeliverException = art_quick_deliver_exception_from_code; - points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code; - points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code; - points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code; - 
points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code; - points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code; -}; - -} // namespace art diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index ee6db0c3f8..89ea71a902 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -14,76 +14,7 @@ * limitations under the License. */ -#include "asm_support.h" - -#if defined(__APPLE__) - // Mac OS' as(1) doesn't let you name macro parameters. - #define MACRO0(macro_name) .macro macro_name - #define MACRO1(macro_name, macro_arg1) .macro macro_name - #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name - #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name - #define END_MACRO .endmacro - - // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names - // are mangled with an extra underscore prefix. The use of $x for arguments - // mean that literals need to be represented with $$x in macros. - #define SYMBOL(name) _ ## name - #define VAR(name,index) SYMBOL($index) - #define REG_VAR(name,index) %$index - #define CALL_MACRO(name,index) $index - #define LITERAL(value) $value - #define MACRO_LITERAL(value) $$value -#else - // Regular gas(1) lets you name macro parameters. - #define MACRO0(macro_name) .macro macro_name - #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1 - #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2 - #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3 - #define END_MACRO .endm - - // Regular gas(1) uses \argument_name for macro arguments. - // We need to turn on alternate macro syntax so we can use & instead or the preprocessor - // will screw us by inserting a space between the \ and the name. 
Even in this mode there's - // no special meaning to $, so literals are still just $x. The use of altmacro means % is a - // special character meaning care needs to be taken when passing registers as macro arguments. - .altmacro - #define SYMBOL(name) name - #define VAR(name,index) name& - #define REG_VAR(name,index) %name - #define CALL_MACRO(name,index) name& - #define LITERAL(value) $value - #define MACRO_LITERAL(value) $value -#endif - - /* Cache alignment for function entry */ -MACRO0(ALIGN_FUNCTION_ENTRY) - .balign 16 -END_MACRO - -MACRO1(DEFINE_FUNCTION, c_name) - .type VAR(c_name, 0), @function - .globl VAR(c_name, 0) - ALIGN_FUNCTION_ENTRY -VAR(c_name, 0): - .cfi_startproc -END_MACRO - -MACRO1(END_FUNCTION, c_name) - .cfi_endproc - .size \c_name, .-\c_name -END_MACRO - -MACRO1(PUSH, reg) - pushl REG_VAR(reg, 0) - .cfi_adjust_cfa_offset 4 - .cfi_rel_offset REG_VAR(reg, 0), 0 -END_MACRO - -MACRO1(POP, reg) - popl REG_VAR(reg,0) - .cfi_adjust_cfa_offset -4 - .cfi_restore REG_VAR(reg,0) -END_MACRO +#include "asm_support_x86.S" /* * Macro that sets up the callee save frame to conform with @@ -301,55 +232,6 @@ INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvok INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck - /* - * Portable invocation stub. 
- * On entry: - * [sp] = return address - * [sp + 4] = method pointer - * [sp + 8] = argument array or NULL for no argument methods - * [sp + 12] = size of argument array in bytes - * [sp + 16] = (managed) thread pointer - * [sp + 20] = JValue* result - * [sp + 24] = result type char - */ -DEFINE_FUNCTION art_portable_invoke_stub - PUSH ebp // save ebp - PUSH ebx // save ebx - mov %esp, %ebp // copy value of stack pointer into base pointer - .cfi_def_cfa_register ebp - mov 20(%ebp), %ebx // get arg array size - addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame - andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes - subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp - subl %ebx, %esp // reserve stack space for argument array - lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy - pushl 20(%ebp) // push size of region to memcpy - pushl 16(%ebp) // push arg array as source of memcpy - pushl %eax // push stack pointer as destination of memcpy - call SYMBOL(memcpy) // (void*, const void*, size_t) - addl LITERAL(12), %esp // pop arguments to memcpy - mov 12(%ebp), %eax // move method pointer into eax - mov %eax, (%esp) // push method pointer onto stack - call *METHOD_CODE_OFFSET(%eax) // call the method - mov %ebp, %esp // restore stack pointer - POP ebx // pop ebx - POP ebp // pop ebp - mov 20(%esp), %ecx // get result pointer - cmpl LITERAL(68), 24(%esp) // test if result type char == 'D' - je return_double_portable - cmpl LITERAL(70), 24(%esp) // test if result type char == 'F' - je return_float_portable - mov %eax, (%ecx) // store the result - mov %edx, 4(%ecx) // store the other half of the result - ret -return_double_portable: - fstpl (%ecx) // store the floating point result as double - ret -return_float_portable: - fstps (%ecx) // store the floating point result as float - ret -END_FUNCTION art_portable_invoke_stub - /* * Quick invocation stub. 
* On entry: @@ -920,22 +802,6 @@ DEFINE_FUNCTION art_quick_get_obj_static_from_code RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception END_FUNCTION art_quick_get_obj_static_from_code -DEFINE_FUNCTION art_portable_proxy_invoke_handler - SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method* - PUSH esp // pass SP - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - PUSH ecx // pass receiver - PUSH eax // pass proxy method - call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP) - movd %eax, %xmm0 // place return value also into floating point return value - movd %edx, %xmm1 - punpckldq %xmm1, %xmm0 - addl LITERAL(44), %esp // pop arguments - .cfi_adjust_cfa_offset -44 - ret -END_FUNCTION art_portable_proxy_invoke_handler - DEFINE_FUNCTION art_quick_proxy_invoke_handler SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method* PUSH esp // pass SP @@ -1053,24 +919,6 @@ DEFINE_FUNCTION art_quick_deoptimize int3 // Unreachable. END_FUNCTION art_quick_deoptimize - /* - * Portable abstract method error stub. method* is at %esp + 4 on entry. - */ -DEFINE_FUNCTION art_portable_abstract_method_error_stub - PUSH ebp - movl %esp, %ebp // Remember SP. - .cfi_def_cfa_register ebp - subl LITERAL(12), %esp // Align stack. - PUSH esp // Pass sp (not used). - pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current(). - pushl 8(%ebp) // Pass Method*. - call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP) - leave // Restore the stack and %ebp. - .cfi_def_cfa esp, 4 - .cfi_restore ebp - ret // Return to caller to handle pending exception. -END_FUNCTION art_portable_abstract_method_error_stub - /* * Quick abstract method error stub. %eax contains method* on entry. */ @@ -1086,24 +934,6 @@ DEFINE_FUNCTION art_quick_abstract_method_error_stub int3 // Unreachable. END_FUNCTION art_quick_abstract_method_error_stub - /* - * Portable resolution trampoline. 
- */ -DEFINE_FUNCTION art_jni_dlsym_lookup_stub - subl LITERAL(8), %esp // align stack - .cfi_adjust_cfa_offset 8 - pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() - .cfi_adjust_cfa_offset 4 - call SYMBOL(artFindNativeMethod) // (Thread*) - addl LITERAL(12), %esp // restore the stack - .cfi_adjust_cfa_offset -12 - cmpl LITERAL(0), %eax // check if returned method code is null - je no_native_code_found // if null, jump to return to handle - jmp *%eax // otherwise, tail call to intended method -no_native_code_found: - ret -END_FUNCTION art_jni_dlsym_lookup_stub - /* * String's indexOf. * diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc new file mode 100644 index 0000000000..dd3e7dd137 --- /dev/null +++ b/runtime/arch/x86/thread_x86.cc @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "thread.h" + +#include +#include + +#include "asm_support_x86.h" +#include "base/macros.h" +#include "thread.h" +#include "thread_list.h" + +#if defined(__APPLE__) +#include +#include +struct descriptor_table_entry_t { + uint16_t limit0; + uint16_t base0; + unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; + unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; +} __attribute__((packed)); +#define MODIFY_LDT_CONTENTS_DATA 0 +#else +#include +#endif + +namespace art { + +void Thread::InitCpu() { + static Mutex modify_ldt_lock("modify_ldt lock"); + MutexLock mu(Thread::Current(), modify_ldt_lock); + + const uintptr_t base = reinterpret_cast(this); + const size_t limit = kPageSize; + + const int contents = MODIFY_LDT_CONTENTS_DATA; + const int seg_32bit = 1; + const int read_exec_only = 0; + const int limit_in_pages = 0; + const int seg_not_present = 0; + const int useable = 1; + + int entry_number = -1; + +#if defined(__APPLE__) + descriptor_table_entry_t entry; + memset(&entry, 0, sizeof(entry)); + entry.limit0 = (limit & 0x0ffff); + entry.limit = (limit & 0xf0000) >> 16; + entry.base0 = (base & 0x0000ffff); + entry.base1 = (base & 0x00ff0000) >> 16; + entry.base2 = (base & 0xff000000) >> 24; + entry.type = ((read_exec_only ^ 1) << 1) | (contents << 2); + entry.s = 1; + entry.dpl = 0x3; + entry.p = seg_not_present ^ 1; + entry.avl = useable; + entry.l = 0; + entry.d = seg_32bit; + entry.g = limit_in_pages; + + entry_number = i386_set_ldt(LDT_AUTO_ALLOC, reinterpret_cast(&entry), 1); + if (entry_number == -1) { + PLOG(FATAL) << "i386_set_ldt failed"; + } +#else + // Read current LDT entries. + CHECK_EQ((size_t)LDT_ENTRY_SIZE, sizeof(uint64_t)); + std::vector ldt(LDT_ENTRIES); + size_t ldt_size(sizeof(uint64_t) * ldt.size()); + memset(&ldt[0], 0, ldt_size); + // TODO: why doesn't this return LDT_ENTRY_SIZE * LDT_ENTRIES for the main thread? + syscall(__NR_modify_ldt, 0, &ldt[0], ldt_size); + + // Find the first empty slot. 
+ for (entry_number = 0; entry_number < LDT_ENTRIES && ldt[entry_number] != 0; ++entry_number) { + } + if (entry_number >= LDT_ENTRIES) { + LOG(FATAL) << "Failed to find a free LDT slot"; + } + + // Update LDT entry. + user_desc ldt_entry; + memset(&ldt_entry, 0, sizeof(ldt_entry)); + ldt_entry.entry_number = entry_number; + ldt_entry.base_addr = base; + ldt_entry.limit = limit; + ldt_entry.seg_32bit = seg_32bit; + ldt_entry.contents = contents; + ldt_entry.read_exec_only = read_exec_only; + ldt_entry.limit_in_pages = limit_in_pages; + ldt_entry.seg_not_present = seg_not_present; + ldt_entry.useable = useable; + CHECK_EQ(0, syscall(__NR_modify_ldt, 1, &ldt_entry, sizeof(ldt_entry))); + entry_number = ldt_entry.entry_number; +#endif + + // Change %fs to be new LDT entry. + uint16_t table_indicator = 1 << 2; // LDT + uint16_t rpl = 3; // Requested privilege level + uint16_t selector = (entry_number << 3) | table_indicator | rpl; + // TODO: use our assembler to generate code + __asm__ __volatile__("movw %w0, %%fs" + : // output + : "q"(selector) // input + :); // clobber + + // Allow easy indirection back to Thread*. + self_ = this; + + // Sanity check that reads from %fs point to this Thread*. + Thread* self_check; + // TODO: use our assembler to generate code + CHECK_EQ(THREAD_SELF_OFFSET, OFFSETOF_MEMBER(Thread, self_)); + __asm__ __volatile__("movl %%fs:(%1), %0" + : "=r"(self_check) // output + : "r"(THREAD_SELF_OFFSET) // input + :); // clobber + CHECK_EQ(self_check, this); + + // Sanity check other offsets. + CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); +} + +} // namespace art diff --git a/runtime/asm_support.h b/runtime/asm_support.h index 7b20c7aee0..aca93a5552 100644 --- a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -30,29 +30,4 @@ // Offset of field Method::entry_point_from_compiled_code_ #define METHOD_CODE_OFFSET 40 -#if defined(__arm__) -// Register holding suspend check count down. 
-#define rSUSPEND r4 -// Register holding Thread::Current(). -#define rSELF r9 -// Offset of field Thread::suspend_count_ verified in InitCpu -#define THREAD_FLAGS_OFFSET 0 -// Offset of field Thread::exception_ verified in InitCpu -#define THREAD_EXCEPTION_OFFSET 12 -#elif defined(__mips__) -// Register holding suspend check count down. -#define rSUSPEND $s0 -// Register holding Thread::Current(). -#define rSELF $s1 -// Offset of field Thread::suspend_count_ verified in InitCpu -#define THREAD_FLAGS_OFFSET 0 -// Offset of field Thread::exception_ verified in InitCpu -#define THREAD_EXCEPTION_OFFSET 12 -#elif defined(__i386__) -// Offset of field Thread::self_ verified in InitCpu -#define THREAD_SELF_OFFSET 40 -// Offset of field Thread::exception_ verified in InitCpu -#define THREAD_EXCEPTION_OFFSET 12 -#endif - #endif // ART_RUNTIME_ASM_SUPPORT_H_ diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 72e0f48b3d..84f186d4b3 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -58,10 +58,7 @@ #include "object_utils.h" #include "os.h" #include "runtime.h" -#include "runtime_support.h" -#if defined(ART_USE_PORTABLE_COMPILER) -#include "runtime_support_llvm.h" -#endif +#include "entrypoints/entrypoint_utils.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" #include "sirt_ref.h" diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index 75886cf7f0..4659fd1982 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -22,6 +22,7 @@ #include "class_linker-inl.h" #include "common_test.h" #include "dex_file.h" +#include "entrypoints/entrypoint_utils.h" #include "gc/heap.h" #include "mirror/class-inl.h" #include "mirror/dex_cache.h" @@ -32,7 +33,6 @@ #include "mirror/object_array-inl.h" #include "mirror/proxy.h" #include "mirror/stack_trace_element.h" -#include "runtime_support.h" #include "sirt_ref.h" using ::art::mirror::AbstractMethod; diff --git a/runtime/common_test.h 
b/runtime/common_test.h index 2c233401d2..7ee6fe20b2 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -31,6 +31,7 @@ #include "class_linker.h" #include "compiler/driver/compiler_driver.h" #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "gc/heap.h" #include "gtest/gtest.h" #include "instruction_set.h" @@ -39,7 +40,6 @@ #include "object_utils.h" #include "os.h" #include "runtime.h" -#include "runtime_support.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "thread.h" diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc new file mode 100644 index 0000000000..c29784151c --- /dev/null +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -0,0 +1,407 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" + +#include "class_linker-inl.h" +#include "dex_file-inl.h" +#include "gc/accounting/card_table-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/class-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" +#include "mirror/object_array-inl.h" +#include "mirror/proxy.h" +#include "reflection.h" +#include "scoped_thread_state_change.h" +#include "ScopedLocalRef.h" +#include "well_known_classes.h" + +namespace art { + +// Helper function to allocate array for FILLED_NEW_ARRAY. 
+mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* referrer, + int32_t component_count, Thread* self, + bool access_check) { + if (UNLIKELY(component_count < 0)) { + ThrowNegativeArraySizeException(component_count); + return NULL; // Failure + } + mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->Get(type_idx); + if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve + klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer); + if (klass == NULL) { // Error + DCHECK(self->IsExceptionPending()); + return NULL; // Failure + } + } + if (UNLIKELY(klass->IsPrimitive() && !klass->IsPrimitiveInt())) { + if (klass->IsPrimitiveLong() || klass->IsPrimitiveDouble()) { + ThrowRuntimeException("Bad filled array request for type %s", + PrettyDescriptor(klass).c_str()); + } else { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(throw_location.GetMethod() == referrer); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/InternalError;", + "Found type %s; filled-new-array not implemented for anything but \'int\'", + PrettyDescriptor(klass).c_str()); + } + return NULL; // Failure + } else { + if (access_check) { + mirror::Class* referrer_klass = referrer->GetDeclaringClass(); + if (UNLIKELY(!referrer_klass->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer_klass, klass); + return NULL; // Failure + } + } + DCHECK(klass->IsArrayClass()) << PrettyClass(klass); + return mirror::Array::Alloc(self, klass, component_count); + } +} + +mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, + Thread* self, FindFieldType type, size_t expected_size, + bool access_check) { + bool is_primitive; + bool is_set; + bool is_static; + switch (type) { + case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; + case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; + case 
InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; + case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; + case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; + case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; + case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; + case StaticPrimitiveWrite: // Keep GCC happy by having a default handler, fall-through. + default: is_primitive = true; is_set = true; is_static = true; break; + } + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + mirror::Field* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static); + if (UNLIKELY(resolved_field == NULL)) { + DCHECK(self->IsExceptionPending()); // Throw exception and unwind. + return NULL; // Failure. + } + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + if (access_check) { + if (UNLIKELY(resolved_field->IsStatic() != is_static)) { + ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer); + return NULL; + } + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(fields_class) || + !referring_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()))) { + // The referring class can't access the resolved field, this may occur as a result of a + // protected field being made public by a sub-class. Resort to the dex file to determine + // the correct class for the access check. 
+ const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); + fields_class = class_linker->ResolveType(dex_file, + dex_file.GetFieldId(field_idx).class_idx_, + referring_class); + if (UNLIKELY(!referring_class->CanAccess(fields_class))) { + ThrowIllegalAccessErrorClass(referring_class, fields_class); + return NULL; // failure + } else if (UNLIKELY(!referring_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()))) { + ThrowIllegalAccessErrorField(referring_class, resolved_field); + return NULL; // failure + } + } + if (UNLIKELY(is_set && resolved_field->IsFinal() && (fields_class != referring_class))) { + ThrowIllegalAccessErrorFinalField(referrer, resolved_field); + return NULL; // failure + } else { + FieldHelper fh(resolved_field); + if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || + fh.FieldSize() != expected_size)) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(throw_location.GetMethod() == referrer); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;", + "Attempted read of %zd-bit %s on field '%s'", + expected_size * (32 / sizeof(int32_t)), + is_primitive ? "primitive" : "non-primitive", + PrettyField(resolved_field, true).c_str()); + return NULL; // failure + } + } + } + if (!is_static) { + // instance fields must be being accessed on an initialized class + return resolved_field; + } else { + // If the class is initialized we're done. + if (fields_class->IsInitialized()) { + return resolved_field; + } else if (Runtime::Current()->GetClassLinker()->EnsureInitialized(fields_class, true, true)) { + // Otherwise let's ensure the class is initialized before resolving the field. 
+ return resolved_field; + } else { + DCHECK(self->IsExceptionPending()); // Throw exception and unwind + return NULL; // failure + } + } +} + +// Slow path method resolution +mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* self, bool access_check, InvokeType type) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + bool is_direct = type == kStatic || type == kDirect; + mirror::AbstractMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type); + if (UNLIKELY(resolved_method == NULL)) { + DCHECK(self->IsExceptionPending()); // Throw exception and unwind. + return NULL; // Failure. + } else if (UNLIKELY(this_object == NULL && type != kStatic)) { + // Maintain interpreter-like semantics where NullPointerException is thrown + // after potential NoSuchMethodError from class linker. + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + DCHECK(referrer == throw_location.GetMethod()); + ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type); + return NULL; // Failure. + } else { + if (!access_check) { + if (is_direct) { + return resolved_method; + } else if (type == kInterface) { + mirror::AbstractMethod* interface_method = + this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); + if (UNLIKELY(interface_method == NULL)) { + ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, + referrer); + return NULL; // Failure. + } else { + return interface_method; + } + } else { + mirror::ObjectArray* vtable; + uint16_t vtable_index = resolved_method->GetMethodIndex(); + if (type == kSuper) { + vtable = referrer->GetDeclaringClass()->GetSuperClass()->GetVTable(); + } else { + vtable = this_object->GetClass()->GetVTable(); + } + // TODO: eliminate bounds check? 
+ return vtable->Get(vtable_index); + } + } else { + // Incompatible class change should have been handled in resolve method. + if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) { + ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method, + referrer); + return NULL; // Failure. + } + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(methods_class) || + !referring_class->CanAccessMember(methods_class, + resolved_method->GetAccessFlags()))) { + // The referring class can't access the resolved method, this may occur as a result of a + // protected method being made public by implementing an interface that re-declares the + // method public. Resort to the dex file to determine the correct class for the access check + const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); + methods_class = class_linker->ResolveType(dex_file, + dex_file.GetMethodId(method_idx).class_idx_, + referring_class); + if (UNLIKELY(!referring_class->CanAccess(methods_class))) { + ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class, + referrer, resolved_method, type); + return NULL; // Failure. + } else if (UNLIKELY(!referring_class->CanAccessMember(methods_class, + resolved_method->GetAccessFlags()))) { + ThrowIllegalAccessErrorMethod(referring_class, resolved_method); + return NULL; // Failure. + } + } + if (is_direct) { + return resolved_method; + } else if (type == kInterface) { + mirror::AbstractMethod* interface_method = + this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); + if (UNLIKELY(interface_method == NULL)) { + ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, + referrer); + return NULL; // Failure. 
+ } else { + return interface_method; + } + } else { + mirror::ObjectArray* vtable; + uint16_t vtable_index = resolved_method->GetMethodIndex(); + if (type == kSuper) { + mirror::Class* super_class = referring_class->GetSuperClass(); + if (LIKELY(super_class != NULL)) { + vtable = referring_class->GetSuperClass()->GetVTable(); + } else { + vtable = NULL; + } + } else { + vtable = this_object->GetClass()->GetVTable(); + } + if (LIKELY(vtable != NULL && + vtable_index < static_cast(vtable->GetLength()))) { + return vtable->GetWithoutChecks(vtable_index); + } else { + // Behavior to agree with that of the verifier. + MethodHelper mh(resolved_method); + ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), mh.GetName(), + mh.GetSignature()); + return NULL; // Failure. + } + } + } + } +} + +void ThrowStackOverflowError(Thread* self) { + CHECK(!self->IsHandlingStackOverflow()) << "Recursive stack overflow."; + + if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) { + // Remove extra entry pushed onto second stack during method tracing. + Runtime::Current()->GetInstrumentation()->PopMethodForUnwind(self, false); + } + + self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute. + JNIEnvExt* env = self->GetJniEnv(); + std::string msg("stack size "); + msg += PrettySize(self->GetStackSize()); + // Use low-level JNI routine and pre-baked error class to avoid class linking operations that + // would consume more stack. + int rc = ::art::ThrowNewException(env, WellKnownClasses::java_lang_StackOverflowError, + msg.c_str(), NULL); + if (rc != JNI_OK) { + // TODO: ThrowNewException failed presumably because of an OOME, we continue to throw the OOME + // or die in the CHECK below. We may want to throw a pre-baked StackOverflowError + // instead. 
+ LOG(ERROR) << "Couldn't throw new StackOverflowError because JNI ThrowNew failed."; + CHECK(self->IsExceptionPending()); + } + self->ResetDefaultStackEnd(); // Return to default stack size. +} + +JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, + jobject rcvr_jobj, jobject interface_method_jobj, + std::vector& args) { + DCHECK(soa.Env()->IsInstanceOf(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy)); + + // Build argument array possibly triggering GC. + soa.Self()->AssertThreadSuspensionIsAllowable(); + jobjectArray args_jobj = NULL; + const JValue zero; + if (args.size() > 0) { + args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL); + if (args_jobj == NULL) { + CHECK(soa.Self()->IsExceptionPending()); + return zero; + } + for (size_t i = 0; i < args.size(); ++i) { + if (shorty[i + 1] == 'L') { + jobject val = args.at(i).l; + soa.Env()->SetObjectArrayElement(args_jobj, i, val); + } else { + JValue jv; + jv.SetJ(args.at(i).j); + mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv); + if (val == NULL) { + CHECK(soa.Self()->IsExceptionPending()); + return zero; + } + soa.Decode* >(args_jobj)->Set(i, val); + } + } + } + + // Call InvocationHandler.invoke(Object proxy, Method method, Object[] args). + jobject inv_hand = soa.Env()->GetObjectField(rcvr_jobj, + WellKnownClasses::java_lang_reflect_Proxy_h); + jvalue invocation_args[3]; + invocation_args[0].l = rcvr_jobj; + invocation_args[1].l = interface_method_jobj; + invocation_args[2].l = args_jobj; + jobject result = + soa.Env()->CallObjectMethodA(inv_hand, + WellKnownClasses::java_lang_reflect_InvocationHandler_invoke, + invocation_args); + + // Unbox result and handle error conditions. + if (LIKELY(!soa.Self()->IsExceptionPending())) { + if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) { + // Do nothing. 
+ return zero; + } else { + mirror::Object* result_ref = soa.Decode(result); + mirror::Object* rcvr = soa.Decode(rcvr_jobj); + mirror::AbstractMethod* interface_method = + soa.Decode(interface_method_jobj); + mirror::Class* result_type = MethodHelper(interface_method).GetReturnType(); + mirror::AbstractMethod* proxy_method; + if (interface_method->GetDeclaringClass()->IsInterface()) { + proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); + } else { + // Proxy dispatch to a method defined in Object. + DCHECK(interface_method->GetDeclaringClass()->IsObjectClass()); + proxy_method = interface_method; + } + ThrowLocation throw_location(rcvr, proxy_method, -1); + JValue result_unboxed; + if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, result_unboxed)) { + DCHECK(soa.Self()->IsExceptionPending()); + return zero; + } + return result_unboxed; + } + } else { + // In the case of checked exceptions that aren't declared, the exception must be wrapped by + // a UndeclaredThrowableException. 
+ mirror::Throwable* exception = soa.Self()->GetException(NULL); + if (exception->IsCheckedException()) { + mirror::Object* rcvr = soa.Decode(rcvr_jobj); + mirror::SynthesizedProxyClass* proxy_class = + down_cast(rcvr->GetClass()); + mirror::AbstractMethod* interface_method = + soa.Decode(interface_method_jobj); + mirror::AbstractMethod* proxy_method = + rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); + int throws_index = -1; + size_t num_virt_methods = proxy_class->NumVirtualMethods(); + for (size_t i = 0; i < num_virt_methods; i++) { + if (proxy_class->GetVirtualMethod(i) == proxy_method) { + throws_index = i; + break; + } + } + CHECK_NE(throws_index, -1); + mirror::ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); + mirror::Class* exception_class = exception->GetClass(); + bool declares_exception = false; + for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { + mirror::Class* declared_exception = declared_exceptions->Get(i); + declares_exception = declared_exception->IsAssignableFrom(exception_class); + } + if (!declares_exception) { + ThrowLocation throw_location(rcvr, proxy_method, -1); + soa.Self()->ThrowNewWrappedException(throw_location, + "Ljava/lang/reflect/UndeclaredThrowableException;", + NULL); + } + } + return zero; + } +} + +} // namespace art diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h new file mode 100644 index 0000000000..3f28b5e41f --- /dev/null +++ b/runtime/entrypoints/entrypoint_utils.h @@ -0,0 +1,412 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_ +#define ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_ + +#include "class_linker.h" +#include "common_throws.h" +#include "dex_file.h" +#include "indirect_reference_table.h" +#include "invoke_type.h" +#include "jni_internal.h" +#include "mirror/abstract_method.h" +#include "mirror/array.h" +#include "mirror/class-inl.h" +#include "mirror/throwable.h" +#include "object_utils.h" +#include "thread.h" + +extern "C" void art_interpreter_invoke_handler(); +extern "C" void art_jni_dlsym_lookup_stub(); +extern "C" void art_portable_abstract_method_error_stub(); +extern "C" void art_portable_proxy_invoke_handler(); +extern "C" void art_quick_abstract_method_error_stub(); +extern "C" void art_quick_deoptimize(); +extern "C" void art_quick_instrumentation_entry_from_code(void*); +extern "C" void art_quick_instrumentation_exit_from_code(); +extern "C" void art_quick_interpreter_entry(void*); +extern "C" void art_quick_proxy_invoke_handler(); +extern "C" void art_work_around_app_jni_bugs(); + +namespace art { +namespace mirror { +class Class; +class Field; +class Object; +} + +// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it +// cannot be resolved, throw an error. If it can, use it to create an instance. +// When verification/compiler hasn't been able to verify access, optionally perform an access +// check. 
+static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + Thread* self, + bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); + Runtime* runtime = Runtime::Current(); + if (UNLIKELY(klass == NULL)) { + klass = runtime->GetClassLinker()->ResolveType(type_idx, method); + if (klass == NULL) { + DCHECK(self->IsExceptionPending()); + return NULL; // Failure + } + } + if (access_check) { + if (UNLIKELY(!klass->IsInstantiable())) { + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;", + PrettyDescriptor(klass).c_str()); + return NULL; // Failure + } + mirror::Class* referrer = method->GetDeclaringClass(); + if (UNLIKELY(!referrer->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer, klass); + return NULL; // Failure + } + } + if (!klass->IsInitialized() && + !runtime->GetClassLinker()->EnsureInitialized(klass, true, true)) { + DCHECK(self->IsExceptionPending()); + return NULL; // Failure + } + return klass->AllocObject(self); +} + +// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If +// it cannot be resolved, throw an error. If it can, use it to create an array. +// When verification/compiler hasn't been able to verify access, optionally perform an access +// check. 
+static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (UNLIKELY(component_count < 0)) { + ThrowNegativeArraySizeException(component_count); + return NULL; // Failure + } + mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); + if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve + klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); + if (klass == NULL) { // Error + DCHECK(Thread::Current()->IsExceptionPending()); + return NULL; // Failure + } + CHECK(klass->IsArrayClass()) << PrettyClass(klass); + } + if (access_check) { + mirror::Class* referrer = method->GetDeclaringClass(); + if (UNLIKELY(!referrer->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer, klass); + return NULL; // Failure + } + } + return mirror::Array::Alloc(self, klass, component_count); +} + +extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, + int32_t component_count, + Thread* self, bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// Type of find field operation for fast and slow case. +enum FindFieldType { + InstanceObjectRead, + InstanceObjectWrite, + InstancePrimitiveRead, + InstancePrimitiveWrite, + StaticObjectRead, + StaticObjectWrite, + StaticPrimitiveRead, + StaticPrimitiveWrite, +}; + +// Slow field find that can initialize classes and may throw exceptions. +extern mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, + Thread* self, FindFieldType type, size_t expected_size, + bool access_check) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// Fast path field resolution that can't initialize classes or throw exceptions. 
+static inline mirror::Field* FindFieldFast(uint32_t field_idx, + const mirror::AbstractMethod* referrer, + FindFieldType type, size_t expected_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* resolved_field = + referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); + if (UNLIKELY(resolved_field == NULL)) { + return NULL; + } + mirror::Class* fields_class = resolved_field->GetDeclaringClass(); + // Check class is initiliazed or initializing. + if (UNLIKELY(!fields_class->IsInitializing())) { + return NULL; + } + // Check for incompatible class change. + bool is_primitive; + bool is_set; + bool is_static; + switch (type) { + case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; + case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; + case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; + case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; + case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; + case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; + case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; + case StaticPrimitiveWrite: is_primitive = true; is_set = true; is_static = true; break; + default: + LOG(FATAL) << "UNREACHABLE"; // Assignment below to avoid GCC warnings. + is_primitive = true; + is_set = true; + is_static = true; + break; + } + if (UNLIKELY(resolved_field->IsStatic() != is_static)) { + // Incompatible class change. + return NULL; + } + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(fields_class) || + !referring_class->CanAccessMember(fields_class, + resolved_field->GetAccessFlags()) || + (is_set && resolved_field->IsFinal() && (fields_class != referring_class)))) { + // Illegal access. 
+ return NULL; + } + FieldHelper fh(resolved_field); + if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || + fh.FieldSize() != expected_size)) { + return NULL; + } + return resolved_field; +} + +// Fast path method resolution that can't throw exceptions. +static inline mirror::AbstractMethod* FindMethodFast(uint32_t method_idx, + mirror::Object* this_object, + const mirror::AbstractMethod* referrer, + bool access_check, InvokeType type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + bool is_direct = type == kStatic || type == kDirect; + if (UNLIKELY(this_object == NULL && !is_direct)) { + return NULL; + } + mirror::AbstractMethod* resolved_method = + referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx); + if (UNLIKELY(resolved_method == NULL)) { + return NULL; + } + if (access_check) { + // Check for incompatible class change errors and access. + bool icce = resolved_method->CheckIncompatibleClassChange(type); + if (UNLIKELY(icce)) { + return NULL; + } + mirror::Class* methods_class = resolved_method->GetDeclaringClass(); + mirror::Class* referring_class = referrer->GetDeclaringClass(); + if (UNLIKELY(!referring_class->CanAccess(methods_class) || + !referring_class->CanAccessMember(methods_class, + resolved_method->GetAccessFlags()))) { + // Potential illegal access, may need to refine the method's class. + return NULL; + } + } + if (type == kInterface) { // Most common form of slow path dispatch. 
+ return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method);
+ } else if (is_direct) {
+ return resolved_method;
+ } else if (type == kSuper) {
+ return referrer->GetDeclaringClass()->GetSuperClass()->GetVTable()->
+ Get(resolved_method->GetMethodIndex());
+ } else {
+ DCHECK(type == kVirtual);
+ return this_object->GetClass()->GetVTable()->Get(resolved_method->GetMethodIndex());
+ }
+}
+
+extern mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* self, bool access_check, InvokeType type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
+ const mirror::AbstractMethod* referrer,
+ Thread* self, bool can_run_clinit,
+ bool verify_access)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ mirror::Class* klass = class_linker->ResolveType(type_idx, referrer);
+ if (UNLIKELY(klass == NULL)) {
+ CHECK(self->IsExceptionPending());
+ return NULL; // Failure - Indicate to caller to deliver exception
+ }
+ // Perform access check if necessary.
+ mirror::Class* referring_class = referrer->GetDeclaringClass();
+ if (verify_access && UNLIKELY(!referring_class->CanAccess(klass))) {
+ ThrowIllegalAccessErrorClass(referring_class, klass);
+ return NULL; // Failure - Indicate to caller to deliver exception
+ }
+ // If we're just implementing const-class, we shouldn't call <clinit>.
+ if (!can_run_clinit) {
+ return klass;
+ }
+ // If we are the <clinit> of this class, just return our storage.
+ //
+ // Do not set the DexCache InitializedStaticStorage, since that implies <clinit> has finished
+ // running.
+ if (klass == referring_class && MethodHelper(referrer).IsClassInitializer()) { + return klass; + } + if (!class_linker->EnsureInitialized(klass, true, true)) { + CHECK(self->IsExceptionPending()); + return NULL; // Failure - Indicate to caller to deliver exception + } + referrer->GetDexCacheInitializedStaticStorage()->Set(type_idx, klass); + return klass; +} + +extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +static inline mirror::String* ResolveStringFromCode(const mirror::AbstractMethod* referrer, + uint32_t string_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + return class_linker->ResolveString(string_idx, referrer); +} + +static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + UNLOCK_FUNCTION(monitor_lock_) { + // Save any pending exception over monitor exit call. + mirror::Throwable* saved_exception = NULL; + ThrowLocation saved_throw_location; + if (UNLIKELY(self->IsExceptionPending())) { + saved_exception = self->GetException(&saved_throw_location); + self->ClearException(); + } + // Decode locked object and unlock, before popping local references. + self->DecodeJObject(locked)->MonitorExit(self); + if (UNLIKELY(self->IsExceptionPending())) { + LOG(FATAL) << "Synchronized JNI code returning with an exception:\n" + << saved_exception->Dump() + << "\nEncountered second exception during implicit MonitorExit:\n" + << self->GetException(NULL)->Dump(); + } + // Restore pending exception. 
+ if (saved_exception != NULL) { + self->SetException(saved_throw_location, saved_exception); + } +} + +static inline void CheckReferenceResult(mirror::Object* o, Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (o == NULL) { + return; + } + mirror::AbstractMethod* m = self->GetCurrentMethod(NULL); + if (o == kInvalidIndirectRefObject) { + JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str()); + } + // Make sure that the result is an instance of the type this method was expected to return. + mirror::Class* return_type = MethodHelper(m).GetReturnType(); + + if (!o->InstanceOf(return_type)) { + JniAbortF(NULL, "attempt to return an instance of %s from %s", + PrettyTypeOf(o).c_str(), PrettyMethod(m).c_str()); + } +} + +static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + for (;;) { + if (thread->ReadFlag(kCheckpointRequest)) { + thread->RunCheckpointFunction(); + thread->AtomicClearFlag(kCheckpointRequest); + } else if (thread->ReadFlag(kSuspendRequest)) { + thread->FullSuspendCheck(); + } else { + break; + } + } +} + +JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, + jobject rcvr_jobj, jobject interface_method_jobj, + std::vector& args) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + +// Entry point for deoptimization. +static inline uintptr_t GetDeoptimizationEntryPoint() { + return reinterpret_cast(art_quick_deoptimize); +} + +// Return address of instrumentation stub. +static inline void* GetInstrumentationEntryPoint() { + return reinterpret_cast(art_quick_instrumentation_entry_from_code); +} + +// The return_pc of instrumentation exit stub. +static inline uintptr_t GetInstrumentationExitPc() { + return reinterpret_cast(art_quick_instrumentation_exit_from_code); +} + +// Return address of interpreter stub. 
+static inline void* GetInterpreterEntryPoint() { + return reinterpret_cast(art_quick_interpreter_entry); +} + +static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) { + return class_linker->GetPortableResolutionTrampoline(); +} + +static inline const void* GetQuickResolutionTrampoline(ClassLinker* class_linker) { + return class_linker->GetQuickResolutionTrampoline(); +} + +// Return address of resolution trampoline stub for defined compiler. +static inline const void* GetResolutionTrampoline(ClassLinker* class_linker) { +#if defined(ART_USE_PORTABLE_COMPILER) + return GetPortableResolutionTrampoline(class_linker); +#else + return GetQuickResolutionTrampoline(class_linker); +#endif +} + +static inline void* GetPortableAbstractMethodErrorStub() { + return reinterpret_cast(art_portable_abstract_method_error_stub); +} + +static inline void* GetQuickAbstractMethodErrorStub() { + return reinterpret_cast(art_quick_abstract_method_error_stub); +} + +// Return address of abstract method error stub for defined compiler. +static inline void* GetAbstractMethodErrorStub() { +#if defined(ART_USE_PORTABLE_COMPILER) + return GetPortableAbstractMethodErrorStub(); +#else + return GetQuickAbstractMethodErrorStub(); +#endif +} + +static inline void* GetJniDlsymLookupStub() { + return reinterpret_cast(art_jni_dlsym_lookup_stub); +} + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_ diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc new file mode 100644 index 0000000000..98f7b1283c --- /dev/null +++ b/runtime/entrypoints/jni/jni_entrypoints.cc @@ -0,0 +1,46 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "base/logging.h" +#include "mirror/abstract_method.h" +#include "scoped_thread_state_change.h" +#include "thread.h" + +namespace art { + +// Used by the JNI dlsym stub to find the native method to invoke if none is registered. +extern "C" void* artFindNativeMethod(Thread* self) { + Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native. + DCHECK(Thread::Current() == self); + ScopedObjectAccess soa(self); + + mirror::AbstractMethod* method = self->GetCurrentMethod(NULL); + DCHECK(method != NULL); + + // Lookup symbol address for method, on failure we'll return NULL with an + // exception set, otherwise we return the address of the method we found. + void* native_code = soa.Vm()->FindCodeForNativeMethod(method); + if (native_code == NULL) { + DCHECK(self->IsExceptionPending()); + return NULL; + } else { + // Register so that future calls don't come here + method->RegisterNative(self, native_code); + return native_code; + } +} + +} // namespace art diff --git a/runtime/entrypoints/math_entrypoints.cc b/runtime/entrypoints/math_entrypoints.cc new file mode 100644 index 0000000000..31d13c8cd5 --- /dev/null +++ b/runtime/entrypoints/math_entrypoints.cc @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "math_entrypoints.h"
+
+namespace art {
+
+extern "C" double art_l2d(int64_t l) {
+ return static_cast<double>(l);
+}
+
+extern "C" float art_l2f(int64_t l) {
+ return static_cast<float>(l);
+}
+
+/*
+ * Float/double conversion requires clamping to min and max of integer form. If
+ * target doesn't support this normally, use these.
+ */
+extern "C" int64_t art_d2l(double d) {
+ static const double kMaxLong = static_cast<double>(static_cast<int64_t>(0x7fffffffffffffffULL));
+ static const double kMinLong = static_cast<double>(static_cast<int64_t>(0x8000000000000000ULL));
+ if (d >= kMaxLong) {
+ return static_cast<int64_t>(0x7fffffffffffffffULL);
+ } else if (d <= kMinLong) {
+ return static_cast<int64_t>(0x8000000000000000ULL);
+ } else if (d != d) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int64_t>(d);
+ }
+}
+
+extern "C" int64_t art_f2l(float f) {
+ static const float kMaxLong = static_cast<float>(static_cast<int64_t>(0x7fffffffffffffffULL));
+ static const float kMinLong = static_cast<float>(static_cast<int64_t>(0x8000000000000000ULL));
+ if (f >= kMaxLong) {
+ return static_cast<int64_t>(0x7fffffffffffffffULL);
+ } else if (f <= kMinLong) {
+ return static_cast<int64_t>(0x8000000000000000ULL);
+ } else if (f != f) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int64_t>(f);
+ }
+}
+
+extern "C" int32_t art_d2i(double d) {
+ static const double kMaxInt = static_cast<double>(static_cast<int32_t>(0x7fffffffUL));
+ static const double kMinInt = static_cast<double>(static_cast<int32_t>(0x80000000UL));
+ if (d >= kMaxInt) {
+ return static_cast<int32_t>(0x7fffffffUL);
+ } else if (d <= kMinInt) {
+ return static_cast<int32_t>(0x80000000UL);
+ } else if (d != d) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int32_t>(d);
+ }
+}
+
+extern "C" int32_t art_f2i(float f) {
+ static const float kMaxInt = static_cast<float>(static_cast<int32_t>(0x7fffffffUL));
+ static const float kMinInt = static_cast<float>(static_cast<int32_t>(0x80000000UL));
+ if (f >= kMaxInt) {
+ return static_cast<int32_t>(0x7fffffffUL);
+ } else if (f <= kMinInt) {
+ return static_cast<int32_t>(0x80000000UL);
+ } else if (f != f) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int32_t>(f);
+ }
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/math_entrypoints.h b/runtime/entrypoints/math_entrypoints.h
new file mode 100644
index 0000000000..717c7349bd
--- /dev/null
+++ b/runtime/entrypoints/math_entrypoints.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
+
+#include <stdint.h>
+
+extern "C" double art_l2d(int64_t l);
+extern "C" float art_l2f(int64_t l);
+extern "C" int64_t art_d2l(double d);
+extern "C" int32_t art_d2i(double d);
+extern "C" int64_t art_f2l(float f);
+extern "C" int32_t art_f2i(float f);
+
+#endif // ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/math_entrypoints_test.cc b/runtime/entrypoints/math_entrypoints_test.cc
new file mode 100644
index 0000000000..ca8b931309
--- /dev/null
+++ b/runtime/entrypoints/math_entrypoints_test.cc
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "math_entrypoints.h"
+
+#include "common_test.h"
+#include <limits>
+
+namespace art {
+
+class MathEntrypointsTest : public CommonTest {};
+
+TEST_F(MathEntrypointsTest, DoubleToLong) {
+ EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_d2l(1.85e19));
+ EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_d2l(-1.85e19));
+ EXPECT_EQ(0LL, art_d2l(0));
+ EXPECT_EQ(1LL, art_d2l(1.0));
+ EXPECT_EQ(10LL, art_d2l(10.0));
+ EXPECT_EQ(100LL, art_d2l(100.0));
+ EXPECT_EQ(-1LL, art_d2l(-1.0));
+ EXPECT_EQ(-10LL, art_d2l(-10.0));
+ EXPECT_EQ(-100LL, art_d2l(-100.0));
+}
+
+TEST_F(MathEntrypointsTest, FloatToLong) {
+ EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_f2l(1.85e19));
+ EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_f2l(-1.85e19));
+ EXPECT_EQ(0LL, art_f2l(0));
+ EXPECT_EQ(1LL, art_f2l(1.0));
+ EXPECT_EQ(10LL, art_f2l(10.0));
+ EXPECT_EQ(100LL, art_f2l(100.0));
+ EXPECT_EQ(-1LL, art_f2l(-1.0));
+ EXPECT_EQ(-10LL, art_f2l(-10.0));
+ EXPECT_EQ(-100LL, art_f2l(-100.0));
+}
+
+TEST_F(MathEntrypointsTest, DoubleToInt) {
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_d2i(4.3e9));
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_d2i(-4.3e9));
+ EXPECT_EQ(0L, art_d2i(0));
+ EXPECT_EQ(1L, art_d2i(1.0));
+ EXPECT_EQ(10L, art_d2i(10.0));
+ EXPECT_EQ(100L, art_d2i(100.0));
+ EXPECT_EQ(-1L, art_d2i(-1.0));
+ EXPECT_EQ(-10L, art_d2i(-10.0));
+ EXPECT_EQ(-100L, art_d2i(-100.0));
+}
+
+TEST_F(MathEntrypointsTest, FloatToInt) {
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_f2i(4.3e9));
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_f2i(-4.3e9));
+ EXPECT_EQ(0L, art_f2i(0));
+ EXPECT_EQ(1L, art_f2i(1.0));
+ EXPECT_EQ(10L, art_f2i(10.0));
+ EXPECT_EQ(100L, art_f2i(100.0));
+ EXPECT_EQ(-1L, art_f2i(-1.0));
+ EXPECT_EQ(-10L, art_f2i(-10.0));
+ EXPECT_EQ(-100L, art_f2i(-100.0));
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_alloc_entrypoints.cc b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
new file mode 100644
index 0000000000..286926909c
--- /dev/null
+++
b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" mirror::Object* art_portable_alloc_object_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocObjectFromCode(type_idx, referrer, thread, false); +} + +extern "C" mirror::Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocObjectFromCode(type_idx, referrer, thread, true); +} + +extern "C" mirror::Object* art_portable_alloc_array_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocArrayFromCode(type_idx, referrer, length, self, false); +} + +extern "C" mirror::Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return AllocArrayFromCode(type_idx, referrer, length, self, true); +} + +extern "C" mirror::Object* 
art_portable_check_and_alloc_array_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false); +} + +extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx, + mirror::AbstractMethod* referrer, + uint32_t length, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_argument_visitor.h b/runtime/entrypoints/portable/portable_argument_visitor.h new file mode 100644 index 0000000000..f268baf790 --- /dev/null +++ b/runtime/entrypoints/portable/portable_argument_visitor.h @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_ +#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_ + +#include "object_utils.h" + +namespace art { + +// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. +class PortableArgumentVisitor { + public: +// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame. +// Size of Runtime::kRefAndArgs callee save frame. 
+// Size of Method* and register parameters in out stack arguments. +#if defined(__arm__) +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8 +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48 +#define PORTABLE_STACK_ARG_SKIP 0 +#elif defined(__mips__) +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64 +#define PORTABLE_STACK_ARG_SKIP 16 +#elif defined(__i386__) +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32 +#define PORTABLE_STACK_ARG_SKIP 4 +#else +#error "Unsupported architecture" +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0 +#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0 +#define PORTABLE_STACK_ARG_SKIP 0 +#endif + + PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : + caller_mh_(caller_mh), + args_in_regs_(ComputeArgsInRegs(caller_mh)), + num_params_(caller_mh.NumArgs()), + reg_args_(reinterpret_cast(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET), + stack_args_(reinterpret_cast(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE + + PORTABLE_STACK_ARG_SKIP), + cur_args_(reg_args_), + cur_arg_index_(0), + param_index_(0) { + } + + virtual ~PortableArgumentVisitor() {} + + virtual void Visit() = 0; + + bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return caller_mh_.IsParamAReference(param_index_); + } + + bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return caller_mh_.IsParamALongOrDouble(param_index_); + } + + Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return caller_mh_.GetParamPrimitiveType(param_index_); + } + + byte* GetParamAddress() const { + return cur_args_ + (cur_arg_index_ * kPointerSize); + } + + void VisitArguments() 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) { +#if (defined(__arm__) || defined(__mips__)) + if (IsParamALongOrDouble() && cur_arg_index_ == 2) { + break; + } +#endif + Visit(); + cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); + param_index_++; + } + cur_args_ = stack_args_; + cur_arg_index_ = 0; + while (param_index_ < num_params_) { +#if (defined(__arm__) || defined(__mips__)) + if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) { + cur_arg_index_++; + } +#endif + Visit(); + cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); + param_index_++; + } + } + + private: + static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { +#if (defined(__i386__)) + return 0; +#else + size_t args_in_regs = 0; + size_t num_params = mh.NumArgs(); + for (size_t i = 0; i < num_params; i++) { + args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1); + if (args_in_regs > 3) { + args_in_regs = 3; + break; + } + } + return args_in_regs; +#endif + } + MethodHelper& caller_mh_; + const size_t args_in_regs_; + const size_t num_params_; + byte* const reg_args_; + byte* const stack_args_; + byte* cur_args_; + size_t cur_arg_index_; + size_t param_index_; +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_ diff --git a/runtime/entrypoints/portable/portable_cast_entrypoints.cc b/runtime/entrypoints/portable/portable_cast_entrypoints.cc new file mode 100644 index 0000000000..d343c5dc1f --- /dev/null +++ b/runtime/entrypoints/portable/portable_cast_entrypoints.cc @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "common_throws.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" int32_t art_portable_is_assignable_from_code(const mirror::Class* dest_type, + const mirror::Class* src_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(dest_type != NULL); + DCHECK(src_type != NULL); + return dest_type->IsAssignableFrom(src_type) ? 1 : 0; +} + +extern "C" void art_portable_check_cast_from_code(const mirror::Class* dest_type, + const mirror::Class* src_type) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); + DCHECK(src_type->IsClass()) << PrettyClass(src_type); + if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) { + ThrowClassCastException(dest_type, src_type); + } +} + +extern "C" void art_portable_check_put_array_element_from_code(const mirror::Object* element, + const mirror::Object* array) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (element == NULL) { + return; + } + DCHECK(array != NULL); + mirror::Class* array_class = array->GetClass(); + DCHECK(array_class != NULL); + mirror::Class* component_type = array_class->GetComponentType(); + mirror::Class* element_class = element->GetClass(); + if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) { + ThrowArrayStoreException(element_class, array_class); + } +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc new file mode 100644 index 
0000000000..bdab587797 --- /dev/null +++ b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "gc/accounting/card_table-inl.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" mirror::Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false); +} + +extern "C" mirror::Object* art_portable_initialize_type_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false); +} + +extern "C" mirror::Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Called when caller isn't guaranteed to have access to a type and the dex cache may be + // unpopulated + return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true); +} + +extern "C" mirror::Object* art_portable_resolve_string_from_code(mirror::AbstractMethod* referrer, + uint32_t string_idx) + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return ResolveStringFromCode(referrer, string_idx); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_entrypoints.h b/runtime/entrypoints/portable/portable_entrypoints.h new file mode 100644 index 0000000000..a229c76dbd --- /dev/null +++ b/runtime/entrypoints/portable/portable_entrypoints.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_ +#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_ + +#include "dex_file-inl.h" +#include "runtime.h" + +namespace art { +namespace mirror { + class AbstractMethod; + class Object; +} // namespace mirror +class Thread; + +#define PORTABLE_ENTRYPOINT_OFFSET(x) \ + (static_cast(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \ + static_cast(OFFSETOF_MEMBER(PortableEntryPoints, x))) + +// Pointers to functions that are called by code generated by compiler's adhering to the portable +// compiler ABI. 
+struct PACKED(4) PortableEntryPoints { + // Invocation + const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, + mirror::AbstractMethod**, Thread*); +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/portable/portable_field_entrypoints.cc b/runtime/entrypoints/portable/portable_field_entrypoints.cc new file mode 100644 index 0000000000..aa0f03ce8b --- /dev/null +++ b/runtime/entrypoints/portable/portable_field_entrypoints.cc @@ -0,0 +1,241 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/field-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" int32_t art_portable_set32_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + int32_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, + referrer, + StaticPrimitiveWrite, + sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + field->Set32(field->GetDeclaringClass(), new_value); + return 0; + } + field = FindFieldFromCode(field_idx, + referrer, + Thread::Current(), + StaticPrimitiveWrite, + sizeof(uint32_t), + true); + if (LIKELY(field != NULL)) { + field->Set32(field->GetDeclaringClass(), new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set64_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + int64_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + field->Set64(field->GetDeclaringClass(), new_value); + return 0; + } + field = FindFieldFromCode(field_idx, + referrer, + Thread::Current(), + StaticPrimitiveWrite, + sizeof(uint64_t), + true); + if (LIKELY(field != NULL)) { + field->Set64(field->GetDeclaringClass(), new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set_obj_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + field->SetObj(field->GetDeclaringClass(), new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticObjectWrite, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + 
field->SetObj(field->GetDeclaringClass(), new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_get32_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + return field->Get32(field->GetDeclaringClass()); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticPrimitiveRead, sizeof(uint32_t), true); + if (LIKELY(field != NULL)) { + return field->Get32(field->GetDeclaringClass()); + } + return 0; +} + +extern "C" int64_t art_portable_get64_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + return field->Get64(field->GetDeclaringClass()); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticPrimitiveRead, sizeof(uint64_t), true); + if (LIKELY(field != NULL)) { + return field->Get64(field->GetDeclaringClass()); + } + return 0; +} + +extern "C" mirror::Object* art_portable_get_obj_static_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + return field->GetObj(field->GetDeclaringClass()); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + StaticObjectRead, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + return field->GetObj(field->GetDeclaringClass()); + } + return 0; +} + +extern "C" int32_t art_portable_set32_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj, uint32_t new_value) + 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + field->Set32(obj, new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveWrite, sizeof(uint32_t), true); + if (LIKELY(field != NULL)) { + field->Set32(obj, new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set64_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj, int64_t new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + field->Set64(obj, new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveWrite, sizeof(uint64_t), true); + if (LIKELY(field != NULL)) { + field->Set64(obj, new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj, + mirror::Object* new_value) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + field->SetObj(obj, new_value); + return 0; + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstanceObjectWrite, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + field->SetObj(obj, new_value); + return 0; + } + return -1; +} + +extern "C" int32_t art_portable_get32_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t)); + if (LIKELY(field != NULL)) { + return 
field->Get32(obj); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveRead, sizeof(uint32_t), true); + if (LIKELY(field != NULL)) { + return field->Get32(obj); + } + return 0; +} + +extern "C" int64_t art_portable_get64_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t)); + if (LIKELY(field != NULL)) { + return field->Get64(obj); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstancePrimitiveRead, sizeof(uint64_t), true); + if (LIKELY(field != NULL)) { + return field->Get64(obj); + } + return 0; +} + +extern "C" mirror::Object* art_portable_get_obj_instance_from_code(uint32_t field_idx, + mirror::AbstractMethod* referrer, + mirror::Object* obj) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, + sizeof(mirror::Object*)); + if (LIKELY(field != NULL)) { + return field->GetObj(obj); + } + field = FindFieldFromCode(field_idx, referrer, Thread::Current(), + InstanceObjectRead, sizeof(mirror::Object*), true); + if (LIKELY(field != NULL)) { + return field->GetObj(obj); + } + return 0; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc new file mode 100644 index 0000000000..771608b604 --- /dev/null +++ b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dex_instruction.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" void art_portable_fill_array_data_from_code(mirror::AbstractMethod* method, + uint32_t dex_pc, + mirror::Array* array, + uint32_t payload_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DexFile::CodeItem* code_item = MethodHelper(method).GetCodeItem(); + const Instruction::ArrayDataPayload* payload = + reinterpret_cast(code_item->insns_ + payload_offset); + DCHECK_EQ(payload->ident, static_cast(Instruction::kArrayDataSignature)); + if (UNLIKELY(array == NULL)) { + ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA"); + return; // Error + } + DCHECK(array->IsArrayInstance() && !array->IsObjectArray()); + if (UNLIKELY(static_cast(payload->element_count) > array->GetLength())) { + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;", + "failed FILL_ARRAY_DATA; length=%d, index=%d", + array->GetLength(), payload->element_count - 1); + return; // Error + } + uint32_t size_in_bytes = payload->element_count * payload->element_width; + memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc new file mode 100644 index 
0000000000..5911ba3d8b --- /dev/null +++ b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/dex_cache-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +static mirror::AbstractMethod* FindMethodHelper(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* caller_method, + bool access_check, + InvokeType type, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + mirror::AbstractMethod* method = FindMethodFast(method_idx, + this_object, + caller_method, + access_check, + type); + if (UNLIKELY(method == NULL)) { + method = FindMethodFromCode(method_idx, this_object, caller_method, + thread, access_check, type); + if (UNLIKELY(method == NULL)) { + CHECK(thread->IsExceptionPending()); + return 0; // failure + } + } + DCHECK(!thread->IsExceptionPending()); + const void* code = method->GetEntryPointFromCompiledCode(); + + // When we return, the caller will branch to this address, so it had better not be 0! 
+ if (UNLIKELY(code == NULL)) { + MethodHelper mh(method); + LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method) + << " location: " << mh.GetDexFile().GetLocation(); + } + return method; +} + +extern "C" mirror::Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread); +} + +extern "C" mirror::Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread); +} + +extern "C" mirror::Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread); +} + +extern "C" mirror::Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread); +} + +extern "C" mirror::Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx, + mirror::Object* this_object, + mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread); +} + +extern "C" mirror::Object* art_portable_find_interface_method_from_code(uint32_t method_idx, + mirror::Object* this_object, + 
mirror::AbstractMethod* referrer, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_jni_entrypoints.cc b/runtime/entrypoints/portable/portable_jni_entrypoints.cc new file mode 100644 index 0000000000..8df16ae931 --- /dev/null +++ b/runtime/entrypoints/portable/portable_jni_entrypoints.cc @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "thread-inl.h" + +namespace art { + +// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_. 
+extern "C" uint32_t art_portable_jni_method_start(Thread* self) + UNLOCK_FUNCTION(Locks::mutator_lock_) { + JNIEnvExt* env = self->GetJniEnv(); + uint32_t saved_local_ref_cookie = env->local_ref_cookie; + env->local_ref_cookie = env->locals.GetSegmentState(); + self->TransitionFromRunnableToSuspended(kNative); + return saved_local_ref_cookie; +} + +extern "C" uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self) + UNLOCK_FUNCTION(Locks::mutator_lock_) { + self->DecodeJObject(to_lock)->MonitorEnter(self); + return art_portable_jni_method_start(self); +} + +static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { + JNIEnvExt* env = self->GetJniEnv(); + env->locals.SetSegmentState(env->local_ref_cookie); + env->local_ref_cookie = saved_local_ref_cookie; +} + +extern "C" void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + PopLocalReferences(saved_local_ref_cookie, self); +} + + +extern "C" void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, + jobject locked, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. + PopLocalReferences(saved_local_ref_cookie, self); +} + +extern "C" mirror::Object* art_portable_jni_method_end_with_reference(jobject result, + uint32_t saved_local_ref_cookie, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + mirror::Object* o = self->DecodeJObject(result); // Must decode before pop. + PopLocalReferences(saved_local_ref_cookie, self); + // Process result. 
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) { + if (self->IsExceptionPending()) { + return NULL; + } + CheckReferenceResult(o, self); + } + return o; +} + +extern "C" mirror::Object* art_portable_jni_method_end_with_reference_synchronized(jobject result, + uint32_t saved_local_ref_cookie, + jobject locked, + Thread* self) + SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { + self->TransitionFromSuspendedToRunnable(); + UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. + mirror::Object* o = self->DecodeJObject(result); + PopLocalReferences(saved_local_ref_cookie, self); + // Process result. + if (UNLIKELY(self->GetJniEnv()->check_jni)) { + if (self->IsExceptionPending()) { + return NULL; + } + CheckReferenceResult(o, self); + } + return o; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_lock_entrypoints.cc b/runtime/entrypoints/portable/portable_lock_entrypoints.cc new file mode 100644 index 0000000000..44d3da9897 --- /dev/null +++ b/runtime/entrypoints/portable/portable_lock_entrypoints.cc @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" void art_portable_lock_object_from_code(mirror::Object* obj, Thread* thread) + EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { + DCHECK(obj != NULL); // Assumed to have been checked before entry. 
+ obj->MonitorEnter(thread); // May block. + DCHECK(thread->HoldsLock(obj)); + // Only possible exception is NPE and is handled before entry. + DCHECK(!thread->IsExceptionPending()); +} + +extern "C" void art_portable_unlock_object_from_code(mirror::Object* obj, Thread* thread) + UNLOCK_FUNCTION(monitor_lock_) { + DCHECK(obj != NULL); // Assumed to have been checked before entry. + // MonitorExit may throw exception. + obj->MonitorExit(thread); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_proxy_entrypoints.cc b/runtime/entrypoints/portable/portable_proxy_entrypoints.cc new file mode 100644 index 0000000000..3db39cd0bd --- /dev/null +++ b/runtime/entrypoints/portable/portable_proxy_entrypoints.cc @@ -0,0 +1,109 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" +#include "portable_argument_visitor.h" +#include "scoped_thread_state_change.h" + +namespace art { + +// Visits arguments on the stack placing them into the args vector, Object* arguments are converted +// to jobjects. 
+class BuildPortableArgumentVisitor : public PortableArgumentVisitor { + public: + BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, + ScopedObjectAccessUnchecked& soa, std::vector& args) : + PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {} + + virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + jvalue val; + Primitive::Type type = GetParamPrimitiveType(); + switch (type) { + case Primitive::kPrimNot: { + mirror::Object* obj = *reinterpret_cast(GetParamAddress()); + val.l = soa_.AddLocalReference(obj); + break; + } + case Primitive::kPrimLong: // Fall-through. + case Primitive::kPrimDouble: + val.j = *reinterpret_cast(GetParamAddress()); + break; + case Primitive::kPrimBoolean: // Fall-through. + case Primitive::kPrimByte: // Fall-through. + case Primitive::kPrimChar: // Fall-through. + case Primitive::kPrimShort: // Fall-through. + case Primitive::kPrimInt: // Fall-through. + case Primitive::kPrimFloat: + val.i = *reinterpret_cast(GetParamAddress()); + break; + case Primitive::kPrimVoid: + LOG(FATAL) << "UNREACHABLE"; + val.j = 0; + break; + } + args_.push_back(val); + } + + private: + ScopedObjectAccessUnchecked& soa_; + std::vector& args_; + + DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor); +}; + +// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method +// which is responsible for recording callee save registers. We explicitly place into jobjects the +// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a +// field within the proxy object, which will box the primitive arguments and deal with error cases. +extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method, + mirror::Object* receiver, + Thread* self, mirror::AbstractMethod** sp) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Ensure we don't get thread suspension until the object arguments are safely in jobjects. 
+ const char* old_cause = + self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); + self->VerifyStack(); + // Start new JNI local reference state. + JNIEnvExt* env = self->GetJniEnv(); + ScopedObjectAccessUnchecked soa(env); + ScopedJniEnvLocalRefState env_state(env); + // Create local ref. copies of proxy method and the receiver. + jobject rcvr_jobj = soa.AddLocalReference(receiver); + + // Placing arguments into args vector and remove the receiver. + MethodHelper proxy_mh(proxy_method); + std::vector args; + BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args); + local_ref_visitor.VisitArguments(); + args.erase(args.begin()); + + // Convert proxy method into expected interface method. + mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); + DCHECK(interface_method != NULL); + DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); + jobject interface_method_jobj = soa.AddLocalReference(interface_method); + + // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code + // that performs allocations. + self->EndAssertNoThreadSuspension(old_cause); + JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(), + rcvr_jobj, interface_method_jobj, args); + return result.GetJ(); +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_stub_entrypoints.cc b/runtime/entrypoints/portable/portable_stub_entrypoints.cc new file mode 100644 index 0000000000..c510c653ba --- /dev/null +++ b/runtime/entrypoints/portable/portable_stub_entrypoints.cc @@ -0,0 +1,145 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "dex_instruction-inl.h" +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +// Lazily resolve a method for portable. Called by stub code. +extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, + mirror::Object* receiver, + mirror::AbstractMethod** called_addr, + Thread* thread) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + uint32_t dex_pc; + mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc); + + ClassLinker* linker = Runtime::Current()->GetClassLinker(); + InvokeType invoke_type; + bool is_range; + if (called->IsRuntimeMethod()) { + const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem(); + CHECK_LT(dex_pc, code->insns_size_in_code_units_); + const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); + Instruction::Code instr_code = instr->Opcode(); + switch (instr_code) { + case Instruction::INVOKE_DIRECT: + invoke_type = kDirect; + is_range = false; + break; + case Instruction::INVOKE_DIRECT_RANGE: + invoke_type = kDirect; + is_range = true; + break; + case Instruction::INVOKE_STATIC: + invoke_type = kStatic; + is_range = false; + break; + case Instruction::INVOKE_STATIC_RANGE: + invoke_type = kStatic; + is_range = true; + break; + case Instruction::INVOKE_SUPER: + invoke_type = kSuper; + is_range = false; + break; + case Instruction::INVOKE_SUPER_RANGE: + invoke_type = kSuper; + is_range = true; + break; + case Instruction::INVOKE_VIRTUAL: + invoke_type = kVirtual; + 
is_range = false; + break; + case Instruction::INVOKE_VIRTUAL_RANGE: + invoke_type = kVirtual; + is_range = true; + break; + case Instruction::INVOKE_INTERFACE: + invoke_type = kInterface; + is_range = false; + break; + case Instruction::INVOKE_INTERFACE_RANGE: + invoke_type = kInterface; + is_range = true; + break; + default: + LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); + // Avoid used uninitialized warnings. + invoke_type = kDirect; + is_range = true; + } + uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); + called = linker->ResolveMethod(dex_method_idx, caller, invoke_type); + // Refine called method based on receiver. + if (invoke_type == kVirtual) { + called = receiver->GetClass()->FindVirtualMethodForVirtual(called); + } else if (invoke_type == kInterface) { + called = receiver->GetClass()->FindVirtualMethodForInterface(called); + } + } else { + CHECK(called->IsStatic()) << PrettyMethod(called); + invoke_type = kStatic; + } + const void* code = NULL; + if (LIKELY(!thread->IsExceptionPending())) { + // Incompatible class change should have been handled in resolve method. + CHECK(!called->CheckIncompatibleClassChange(invoke_type)); + // Ensure that the called method's class is initialized. + mirror::Class* called_class = called->GetDeclaringClass(); + linker->EnsureInitialized(called_class, true, true); + if (LIKELY(called_class->IsInitialized())) { + code = called->GetEntryPointFromCompiledCode(); + // TODO: remove this after we solve the link issue. + { // for lazy link. + if (code == NULL) { + code = linker->GetOatCodeFor(called); + } + } + } else if (called_class->IsInitializing()) { + if (invoke_type == kStatic) { + // Class is still initializing, go to oat and grab code (trampoline must be left in place + // until class is initialized to stop races between threads). + code = linker->GetOatCodeFor(called); + } else { + // No trampoline for non-static methods. 
+ code = called->GetEntryPointFromCompiledCode(); + // TODO: remove this after we solve the link issue. + { // for lazy link. + if (code == NULL) { + code = linker->GetOatCodeFor(called); + } + } + } + } else { + DCHECK(called_class->IsErroneous()); + } + } + if (LIKELY(code != NULL)) { + // Expect class to at least be initializing. + DCHECK(called->GetDeclaringClass()->IsInitializing()); + // Don't want infinite recursion. + DCHECK(code != GetResolutionTrampoline(linker)); + // Set up entry into main method + *called_addr = called; + } + return code; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc new file mode 100644 index 0000000000..dac73885a5 --- /dev/null +++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method.h" +#include "mirror/object-inl.h" +#include "verifier/dex_gc_map.h" +#include "stack.h" + +namespace art { + +class ShadowFrameCopyVisitor : public StackVisitor { + public: + explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL), + top_frame_(NULL) {} + + bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + if (IsShadowFrame()) { + ShadowFrame* cur_frame = GetCurrentShadowFrame(); + size_t num_regs = cur_frame->NumberOfVRegs(); + mirror::AbstractMethod* method = cur_frame->GetMethod(); + uint32_t dex_pc = cur_frame->GetDexPC(); + ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc); + + const uint8_t* gc_map = method->GetNativeGcMap(); + uint32_t gc_map_length = static_cast((gc_map[0] << 24) | + (gc_map[1] << 16) | + (gc_map[2] << 8) | + (gc_map[3] << 0)); + verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length); + const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc); + for (size_t reg = 0; reg < num_regs; ++reg) { + if (TestBitmap(reg, reg_bitmap)) { + new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg)); + } else { + new_frame->SetVReg(reg, cur_frame->GetVReg(reg)); + } + } + + if (prev_frame_ != NULL) { + prev_frame_->SetLink(new_frame); + } else { + top_frame_ = new_frame; + } + prev_frame_ = new_frame; + } + return true; + } + + ShadowFrame* GetShadowFrameCopy() { + return top_frame_; + } + + private: + static bool TestBitmap(int reg, const uint8_t* reg_vector) { + return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0; + } + + ShadowFrame* prev_frame_; + ShadowFrame* top_frame_; +}; + +extern "C" void art_portable_test_suspend_from_code(Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + CheckSuspend(self); + if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) { + // Save out the shadow frame to the heap + ShadowFrameCopyVisitor 
visitor(self); + visitor.WalkStack(true); + self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy()); + self->SetDeoptimizationReturnValue(JValue()); + self->SetException(ThrowLocation(), reinterpret_cast(-1)); + } +} + +extern "C" ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread, + ShadowFrame* new_shadow_frame, + mirror::AbstractMethod* method, + uint32_t num_vregs) { + ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame); + new_shadow_frame->SetMethod(method); + new_shadow_frame->SetNumberOfVRegs(num_vregs); + return old_frame; +} + +} // namespace art diff --git a/runtime/entrypoints/portable/portable_throw_entrypoints.cc b/runtime/entrypoints/portable/portable_throw_entrypoints.cc new file mode 100644 index 0000000000..4b2b46b25f --- /dev/null +++ b/runtime/entrypoints/portable/portable_throw_entrypoints.cc @@ -0,0 +1,123 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "entrypoints/entrypoint_utils.h" +#include "mirror/abstract_method-inl.h" +#include "mirror/object-inl.h" + +namespace art { + +extern "C" void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowArithmeticExceptionDivideByZero(); +} + +extern "C" void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowArrayIndexOutOfBoundsException(index, length); +} + +extern "C" void art_portable_throw_no_such_method_from_code(int32_t method_idx) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowNoSuchMethodError(method_idx); +} + +extern "C" void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // TODO: remove dex_pc argument from caller. + UNUSED(dex_pc); + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + ThrowNullPointerExceptionFromDexPC(throw_location); +} + +extern "C" void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + ThrowStackOverflowError(Thread::Current()); +} + +extern "C" void art_portable_throw_exception_from_code(mirror::Throwable* exception) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self = Thread::Current(); + ThrowLocation throw_location = self->GetCurrentLocationForThrow(); + if (exception == NULL) { + ThrowNullPointerException(NULL, "throw with null exception"); + } else { + self->SetException(throw_location, exception); + } +} + +extern "C" void* art_portable_get_and_clear_exception(Thread* self) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(self->IsExceptionPending()); + // TODO: make this inline. 
+ mirror::Throwable* exception = self->GetException(NULL); + self->ClearException(); + return exception; +} + +extern "C" int32_t art_portable_find_catch_block_from_code(mirror::AbstractMethod* current_method, + uint32_t ti_offset) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + Thread* self = Thread::Current(); // TODO: make an argument. + ThrowLocation throw_location; + mirror::Throwable* exception = self->GetException(&throw_location); + // Check for special deoptimization exception. + if (UNLIKELY(reinterpret_cast(exception) == -1)) { + return -1; + } + mirror::Class* exception_type = exception->GetClass(); + MethodHelper mh(current_method); + const DexFile::CodeItem* code_item = mh.GetCodeItem(); + DCHECK_LT(ti_offset, code_item->tries_size_); + const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset); + + int iter_index = 0; + int result = -1; + uint32_t catch_dex_pc = -1; + // Iterate over the catch handlers associated with dex_pc + for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) { + uint16_t iter_type_idx = it.GetHandlerTypeIndex(); + // Catch all case + if (iter_type_idx == DexFile::kDexNoIndex16) { + catch_dex_pc = it.GetHandlerAddress(); + result = iter_index; + break; + } + // Does this catch exception type apply? + mirror::Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); + if (UNLIKELY(iter_exception_type == NULL)) { + // TODO: check, the verifier (class linker?) should take care of resolving all exception + // classes early. + LOG(WARNING) << "Unresolved exception class when finding catch block: " + << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); + } else if (iter_exception_type->IsAssignableFrom(exception_type)) { + catch_dex_pc = it.GetHandlerAddress(); + result = iter_index; + break; + } + ++iter_index; + } + if (result != -1) { + // Handler found. 
+ Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self, + throw_location, + current_method, + catch_dex_pc, + exception); + } + return result; +} + +} // namespace art diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc index f66fc848d5..9ed802a2bb 100644 --- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc @@ -15,11 +15,11 @@ */ #include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/class-inl.h" #include "mirror/abstract_method-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" -#include "runtime_support.h" namespace art { diff --git a/runtime/entrypoints/quick/quick_argument_visitor.h b/runtime/entrypoints/quick/quick_argument_visitor.h index 4f81151cd1..35fa97269c 100644 --- a/runtime/entrypoints/quick/quick_argument_visitor.h +++ b/runtime/entrypoints/quick/quick_argument_visitor.h @@ -21,116 +21,6 @@ namespace art { -// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. -class PortableArgumentVisitor { - public: -// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame. -// Size of Runtime::kRefAndArgs callee save frame. -// Size of Method* and register parameters in out stack arguments. 
-#if defined(__arm__) -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48 -#define PORTABLE_STACK_ARG_SKIP 0 -#elif defined(__mips__) -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64 -#define PORTABLE_STACK_ARG_SKIP 16 -#elif defined(__i386__) -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32 -#define PORTABLE_STACK_ARG_SKIP 4 -#else -#error "Unsupported architecture" -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0 -#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0 -#define PORTABLE_STACK_ARG_SKIP 0 -#endif - - PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : - caller_mh_(caller_mh), - args_in_regs_(ComputeArgsInRegs(caller_mh)), - num_params_(caller_mh.NumArgs()), - reg_args_(reinterpret_cast(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET), - stack_args_(reinterpret_cast(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE - + PORTABLE_STACK_ARG_SKIP), - cur_args_(reg_args_), - cur_arg_index_(0), - param_index_(0) { - } - - virtual ~PortableArgumentVisitor() {} - - virtual void Visit() = 0; - - bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.IsParamAReference(param_index_); - } - - bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.IsParamALongOrDouble(param_index_); - } - - Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return caller_mh_.GetParamPrimitiveType(param_index_); - } - - byte* GetParamAddress() const { - return cur_args_ + (cur_arg_index_ * kPointerSize); - } - - void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - for (cur_arg_index_ = 0; 
cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) { -#if (defined(__arm__) || defined(__mips__)) - if (IsParamALongOrDouble() && cur_arg_index_ == 2) { - break; - } -#endif - Visit(); - cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); - param_index_++; - } - cur_args_ = stack_args_; - cur_arg_index_ = 0; - while (param_index_ < num_params_) { -#if (defined(__arm__) || defined(__mips__)) - if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) { - cur_arg_index_++; - } -#endif - Visit(); - cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1); - param_index_++; - } - } - - private: - static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { -#if (defined(__i386__)) - return 0; -#else - size_t args_in_regs = 0; - size_t num_params = mh.NumArgs(); - for (size_t i = 0; i < num_params; i++) { - args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1); - if (args_in_regs > 3) { - args_in_regs = 3; - break; - } - } - return args_in_regs; -#endif - } - MethodHelper& caller_mh_; - const size_t args_in_regs_; - const size_t num_params_; - byte* const reg_args_; - byte* const stack_args_; - byte* cur_args_; - size_t cur_arg_index_; - size_t param_index_; -}; - // Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame. 
class QuickArgumentVisitor { public: diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc index fe91e617bb..b810bb70a6 100644 --- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc @@ -15,10 +15,10 @@ */ #include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "runtime_support.h" namespace art { diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc index 0af7a6281d..6400161b3e 100644 --- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc @@ -15,13 +15,13 @@ */ #include "callee_save_frame.h" -#include "gc/accounting/card_table-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "class_linker-inl.h" #include "dex_file-inl.h" +#include "gc/accounting/card_table-inl.h" #include "mirror/abstract_method-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" -#include "runtime_support.h" namespace art { diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h index 8692e9267e..74b8cfd09b 100644 --- a/runtime/entrypoints/quick/quick_entrypoints.h +++ b/runtime/entrypoints/quick/quick_entrypoints.h @@ -20,15 +20,15 @@ #include "dex_file-inl.h" #include "runtime.h" -#define ENTRYPOINT_OFFSET(x) \ - (static_cast(OFFSETOF_MEMBER(Thread, entrypoints_)) + \ +#define QUICK_ENTRYPOINT_OFFSET(x) \ + (static_cast(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \ static_cast(OFFSETOF_MEMBER(QuickEntryPoints, x))) namespace art { namespace mirror { -class AbstractMethod; -class Class; -class Object; + class AbstractMethod; + class Class; + class Object; } // namespace mirror class DvmDex; class MethodHelper; @@ -123,8 
+123,6 @@ struct PACKED(4) QuickEntryPoints { void* (*pMemcpy)(void*, const void*, size_t); // Invocation - const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, - mirror::AbstractMethod**, Thread*); const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*, mirror::AbstractMethod**, Thread*); void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*); @@ -167,9 +165,6 @@ extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result, jobject locked, Thread* self) SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR; -// Initialize an entry point data structure, architecture specific. -void InitEntryPoints(QuickEntryPoints* points); - } // namespace art #endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc index c20326c63e..a4e9dc9b27 100644 --- a/runtime/entrypoints/quick/quick_field_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc @@ -16,10 +16,10 @@ #include "callee_save_frame.h" #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/abstract_method-inl.h" #include "mirror/class-inl.h" #include "mirror/field-inl.h" -#include "runtime_support.h" #include diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc index a0b06fb521..b81ad12b7b 100644 --- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc @@ -52,7 +52,7 @@ extern "C" int artHandleFillArrayDataFromCode(mirror::Array* array, ThrowLocation throw_location = self->GetCurrentLocationForThrow(); self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;", "failed FILL_ARRAY_DATA; length=%d, index=%d", - array->GetLength(), payload->element_count); + array->GetLength(), 
payload->element_count - 1); return -1; // Error } uint32_t size_in_bytes = payload->element_count * payload->element_width; diff --git a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc index 6a95f3c8ff..53b3628e2f 100644 --- a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc @@ -16,12 +16,12 @@ #include "callee_save_frame.h" #include "dex_instruction-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/class-inl.h" #include "mirror/dex_cache-inl.h" #include "mirror/abstract_method-inl.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" -#include "runtime_support.h" namespace art { diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc index 2d31160a4b..23a28f9cce 100644 --- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc @@ -15,13 +15,13 @@ */ #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/class-inl.h" #include "mirror/abstract_method-inl.h" #include "mirror/object.h" #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "object_utils.h" -#include "runtime_support.h" #include "scoped_thread_state_change.h" #include "thread.h" diff --git a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc index e4ef45fdde..4e3d749e27 100644 --- a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc @@ -16,12 +16,12 @@ #include "quick_argument_visitor.h" #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/abstract_method-inl.h" #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "object_utils.h" #include "reflection.h" -#include "runtime_support.h" #include 
"scoped_thread_state_change.h" #include "thread.h" #include "well_known_classes.h" @@ -30,50 +30,6 @@ namespace art { -// Visits arguments on the stack placing them into the args vector, Object* arguments are converted -// to jobjects. -class BuildPortableArgumentVisitor : public PortableArgumentVisitor { - public: - BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp, - ScopedObjectAccessUnchecked& soa, std::vector& args) : - PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {} - - virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - jvalue val; - Primitive::Type type = GetParamPrimitiveType(); - switch (type) { - case Primitive::kPrimNot: { - mirror::Object* obj = *reinterpret_cast(GetParamAddress()); - val.l = soa_.AddLocalReference(obj); - break; - } - case Primitive::kPrimLong: // Fall-through. - case Primitive::kPrimDouble: - val.j = *reinterpret_cast(GetParamAddress()); - break; - case Primitive::kPrimBoolean: // Fall-through. - case Primitive::kPrimByte: // Fall-through. - case Primitive::kPrimChar: // Fall-through. - case Primitive::kPrimShort: // Fall-through. - case Primitive::kPrimInt: // Fall-through. - case Primitive::kPrimFloat: - val.i = *reinterpret_cast(GetParamAddress()); - break; - case Primitive::kPrimVoid: - LOG(FATAL) << "UNREACHABLE"; - val.j = 0; - break; - } - args_.push_back(val); - } - - private: - ScopedObjectAccessUnchecked& soa_; - std::vector& args_; - - DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor); -}; - // Visits arguments on the stack placing them into the args vector, Object* arguments are converted // to jobjects. class BuildQuickArgumentVisitor : public QuickArgumentVisitor { @@ -122,46 +78,6 @@ class BuildQuickArgumentVisitor : public QuickArgumentVisitor { DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor); }; -// Handler for invocation on proxy methods. 
On entry a frame will exist for the proxy object method -// which is responsible for recording callee save registers. We explicitly place into jobjects the -// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a -// field within the proxy object, which will box the primitive arguments and deal with error cases. -extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method, - mirror::Object* receiver, - Thread* self, mirror::AbstractMethod** sp) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Ensure we don't get thread suspension until the object arguments are safely in jobjects. - const char* old_cause = - self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); - self->VerifyStack(); - // Start new JNI local reference state. - JNIEnvExt* env = self->GetJniEnv(); - ScopedObjectAccessUnchecked soa(env); - ScopedJniEnvLocalRefState env_state(env); - // Create local ref. copies of proxy method and the receiver. - jobject rcvr_jobj = soa.AddLocalReference(receiver); - - // Placing arguments into args vector and remove the receiver. - MethodHelper proxy_mh(proxy_method); - std::vector args; - BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args); - local_ref_visitor.VisitArguments(); - args.erase(args.begin()); - - // Convert proxy method into expected interface method. - mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); - DCHECK(interface_method != NULL); - DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); - jobject interface_method_jobj = soa.AddLocalReference(interface_method); - - // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code - // that performs allocations. 
- self->EndAssertNoThreadSuspension(old_cause); - JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(), - rcvr_jobj, interface_method_jobj, args); - return result.GetJ(); -} - // Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method // which is responsible for recording callee save registers. We explicitly place into jobjects the // incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a diff --git a/runtime/entrypoints/quick/quick_stub_entrypoints.cc b/runtime/entrypoints/quick/quick_stub_entrypoints.cc index f2af6d28dc..d78bbf3bc8 100644 --- a/runtime/entrypoints/quick/quick_stub_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_stub_entrypoints.cc @@ -30,127 +30,6 @@ extern "C" void art_quick_deliver_exception_from_code(void*); namespace art { -// Lazily resolve a method for portable. Called by stub code. -extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called, - mirror::Object* receiver, - mirror::AbstractMethod** called_addr, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - uint32_t dex_pc; - mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc); - - ClassLinker* linker = Runtime::Current()->GetClassLinker(); - InvokeType invoke_type; - bool is_range; - if (called->IsRuntimeMethod()) { - const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem(); - CHECK_LT(dex_pc, code->insns_size_in_code_units_); - const Instruction* instr = Instruction::At(&code->insns_[dex_pc]); - Instruction::Code instr_code = instr->Opcode(); - switch (instr_code) { - case Instruction::INVOKE_DIRECT: - invoke_type = kDirect; - is_range = false; - break; - case Instruction::INVOKE_DIRECT_RANGE: - invoke_type = kDirect; - is_range = true; - break; - case Instruction::INVOKE_STATIC: - invoke_type = kStatic; - is_range = false; - break; - case Instruction::INVOKE_STATIC_RANGE: - invoke_type = kStatic; - is_range = true; 
- break; - case Instruction::INVOKE_SUPER: - invoke_type = kSuper; - is_range = false; - break; - case Instruction::INVOKE_SUPER_RANGE: - invoke_type = kSuper; - is_range = true; - break; - case Instruction::INVOKE_VIRTUAL: - invoke_type = kVirtual; - is_range = false; - break; - case Instruction::INVOKE_VIRTUAL_RANGE: - invoke_type = kVirtual; - is_range = true; - break; - case Instruction::INVOKE_INTERFACE: - invoke_type = kInterface; - is_range = false; - break; - case Instruction::INVOKE_INTERFACE_RANGE: - invoke_type = kInterface; - is_range = true; - break; - default: - LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL); - // Avoid used uninitialized warnings. - invoke_type = kDirect; - is_range = true; - } - uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c(); - called = linker->ResolveMethod(dex_method_idx, caller, invoke_type); - // Refine called method based on receiver. - if (invoke_type == kVirtual) { - called = receiver->GetClass()->FindVirtualMethodForVirtual(called); - } else if (invoke_type == kInterface) { - called = receiver->GetClass()->FindVirtualMethodForInterface(called); - } - } else { - CHECK(called->IsStatic()) << PrettyMethod(called); - invoke_type = kStatic; - } - const void* code = NULL; - if (LIKELY(!thread->IsExceptionPending())) { - // Incompatible class change should have been handled in resolve method. - CHECK(!called->CheckIncompatibleClassChange(invoke_type)); - // Ensure that the called method's class is initialized. - mirror::Class* called_class = called->GetDeclaringClass(); - linker->EnsureInitialized(called_class, true, true); - if (LIKELY(called_class->IsInitialized())) { - code = called->GetEntryPointFromCompiledCode(); - // TODO: remove this after we solve the link issue. - { // for lazy link. 
- if (code == NULL) { - code = linker->GetOatCodeFor(called); - } - } - } else if (called_class->IsInitializing()) { - if (invoke_type == kStatic) { - // Class is still initializing, go to oat and grab code (trampoline must be left in place - // until class is initialized to stop races between threads). - code = linker->GetOatCodeFor(called); - } else { - // No trampoline for non-static methods. - code = called->GetEntryPointFromCompiledCode(); - // TODO: remove this after we solve the link issue. - { // for lazy link. - if (code == NULL) { - code = linker->GetOatCodeFor(called); - } - } - } - } else { - DCHECK(called_class->IsErroneous()); - } - } - if (LIKELY(code != NULL)) { - // Expect class to at least be initializing. - DCHECK(called->GetDeclaringClass()->IsInitializing()); - // Don't want infinite recursion. - DCHECK(code != GetResolutionTrampoline(linker)); - // Set up entry into main method - *called_addr = called; - } - return code; -} - // Lazily resolve a method for quick. Called by stub code. extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called, mirror::Object* receiver, @@ -413,26 +292,4 @@ extern "C" void artThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* meth self->QuickDeliverException(); } -// Used by the JNI dlsym stub to find the native method to invoke if none is registered. -extern "C" void* artFindNativeMethod(Thread* self) { - Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native. - DCHECK(Thread::Current() == self); - ScopedObjectAccess soa(self); - - mirror::AbstractMethod* method = self->GetCurrentMethod(NULL); - DCHECK(method != NULL); - - // Lookup symbol address for method, on failure we'll return NULL with an - // exception set, otherwise we return the address of the method we found. 
- void* native_code = soa.Vm()->FindCodeForNativeMethod(method); - if (native_code == NULL) { - DCHECK(self->IsExceptionPending()); - return NULL; - } else { - // Register so that future calls don't come here - method->RegisterNative(self, native_code); - return native_code; - } -} - } // namespace art diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc index e7117147a9..b4d6c0ba8d 100644 --- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc @@ -15,7 +15,7 @@ */ #include "callee_save_frame.h" -#include "runtime_support.h" +#include "entrypoints/entrypoint_utils.h" #include "thread.h" #include "thread_list.h" diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc index 9588698bb2..3bfa2f2611 100644 --- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc @@ -15,9 +15,9 @@ */ #include "callee_save_frame.h" +#include "entrypoints/entrypoint_utils.h" #include "mirror/object.h" #include "object_utils.h" -#include "runtime_support.h" #include "thread.h" #include "well_known_classes.h" diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 37c45fa6ec..ef4b95c037 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -24,6 +24,7 @@ #include "dex_file-inl.h" #include "dex_instruction-inl.h" #include "dex_instruction.h" +#include "entrypoints/entrypoint_utils.h" #include "gc/accounting/card_table-inl.h" #include "invoke_arg_array_builder.h" #include "nth_caller_visitor.h" @@ -35,7 +36,6 @@ #include "mirror/object-inl.h" #include "mirror/object_array-inl.h" #include "object_utils.h" -#include "runtime_support.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" #include "thread.h" diff --git 
a/runtime/mirror/abstract_method-inl.h b/runtime/mirror/abstract_method-inl.h index 2df1367637..d235e3eed8 100644 --- a/runtime/mirror/abstract_method-inl.h +++ b/runtime/mirror/abstract_method-inl.h @@ -20,9 +20,9 @@ #include "abstract_method.h" #include "dex_file.h" +#include "entrypoints/entrypoint_utils.h" #include "object_array.h" #include "runtime.h" -#include "runtime_support.h" namespace art { namespace mirror { diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc index 53a1df95a6..540ff9f68e 100644 --- a/runtime/mirror/object_test.cc +++ b/runtime/mirror/object_test.cc @@ -26,6 +26,7 @@ #include "class_linker-inl.h" #include "common_test.h" #include "dex_file.h" +#include "entrypoints/entrypoint_utils.h" #include "field-inl.h" #include "gc/accounting/card_table-inl.h" #include "gc/heap.h" @@ -33,7 +34,6 @@ #include "abstract_method-inl.h" #include "object-inl.h" #include "object_array-inl.h" -#include "runtime_support.h" #include "sirt_ref.h" #include "UniquePtr.h" diff --git a/runtime/runtime_support.cc b/runtime/runtime_support.cc deleted file mode 100644 index d28aad1e8f..0000000000 --- a/runtime/runtime_support.cc +++ /dev/null @@ -1,475 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "runtime_support.h" - -#include "class_linker-inl.h" -#include "dex_file-inl.h" -#include "gc/accounting/card_table-inl.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/field-inl.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "mirror/proxy.h" -#include "reflection.h" -#include "scoped_thread_state_change.h" -#include "ScopedLocalRef.h" -#include "well_known_classes.h" - -double art_l2d(int64_t l) { - return static_cast(l); -} - -float art_l2f(int64_t l) { - return static_cast(l); -} - -/* - * Float/double conversion requires clamping to min and max of integer form. If - * target doesn't support this normally, use these. - */ -int64_t art_d2l(double d) { - static const double kMaxLong = static_cast(static_cast(0x7fffffffffffffffULL)); - static const double kMinLong = static_cast(static_cast(0x8000000000000000ULL)); - if (d >= kMaxLong) { - return static_cast(0x7fffffffffffffffULL); - } else if (d <= kMinLong) { - return static_cast(0x8000000000000000ULL); - } else if (d != d) { // NaN case - return 0; - } else { - return static_cast(d); - } -} - -int64_t art_f2l(float f) { - static const float kMaxLong = static_cast(static_cast(0x7fffffffffffffffULL)); - static const float kMinLong = static_cast(static_cast(0x8000000000000000ULL)); - if (f >= kMaxLong) { - return static_cast(0x7fffffffffffffffULL); - } else if (f <= kMinLong) { - return static_cast(0x8000000000000000ULL); - } else if (f != f) { // NaN case - return 0; - } else { - return static_cast(f); - } -} - -int32_t art_d2i(double d) { - static const double kMaxInt = static_cast(static_cast(0x7fffffffUL)); - static const double kMinInt = static_cast(static_cast(0x80000000UL)); - if (d >= kMaxInt) { - return static_cast(0x7fffffffUL); - } else if (d <= kMinInt) { - return static_cast(0x80000000UL); - } else if (d != d) { // NaN case - return 0; - } else { - return static_cast(d); - } -} - -int32_t art_f2i(float f) { 
- static const float kMaxInt = static_cast(static_cast(0x7fffffffUL)); - static const float kMinInt = static_cast(static_cast(0x80000000UL)); - if (f >= kMaxInt) { - return static_cast(0x7fffffffUL); - } else if (f <= kMinInt) { - return static_cast(0x80000000UL); - } else if (f != f) { // NaN case - return 0; - } else { - return static_cast(f); - } -} - -namespace art { - -// Helper function to allocate array for FILLED_NEW_ARRAY. -mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* referrer, - int32_t component_count, Thread* self, - bool access_check) { - if (UNLIKELY(component_count < 0)) { - ThrowNegativeArraySizeException(component_count); - return NULL; // Failure - } - mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->Get(type_idx); - if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve - klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer); - if (klass == NULL) { // Error - DCHECK(self->IsExceptionPending()); - return NULL; // Failure - } - } - if (UNLIKELY(klass->IsPrimitive() && !klass->IsPrimitiveInt())) { - if (klass->IsPrimitiveLong() || klass->IsPrimitiveDouble()) { - ThrowRuntimeException("Bad filled array request for type %s", - PrettyDescriptor(klass).c_str()); - } else { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - DCHECK(throw_location.GetMethod() == referrer); - self->ThrowNewExceptionF(throw_location, "Ljava/lang/InternalError;", - "Found type %s; filled-new-array not implemented for anything but \'int\'", - PrettyDescriptor(klass).c_str()); - } - return NULL; // Failure - } else { - if (access_check) { - mirror::Class* referrer_klass = referrer->GetDeclaringClass(); - if (UNLIKELY(!referrer_klass->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer_klass, klass); - return NULL; // Failure - } - } - DCHECK(klass->IsArrayClass()) << PrettyClass(klass); - return mirror::Array::Alloc(self, klass, component_count); - } -} - 
-mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, - Thread* self, FindFieldType type, size_t expected_size, - bool access_check) { - bool is_primitive; - bool is_set; - bool is_static; - switch (type) { - case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; - case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; - case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; - case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; - case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; - case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; - case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; - case StaticPrimitiveWrite: // Keep GCC happy by having a default handler, fall-through. - default: is_primitive = true; is_set = true; is_static = true; break; - } - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - mirror::Field* resolved_field = class_linker->ResolveField(field_idx, referrer, is_static); - if (UNLIKELY(resolved_field == NULL)) { - DCHECK(self->IsExceptionPending()); // Throw exception and unwind. - return NULL; // Failure. - } - mirror::Class* fields_class = resolved_field->GetDeclaringClass(); - if (access_check) { - if (UNLIKELY(resolved_field->IsStatic() != is_static)) { - ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer); - return NULL; - } - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(fields_class) || - !referring_class->CanAccessMember(fields_class, - resolved_field->GetAccessFlags()))) { - // The referring class can't access the resolved field, this may occur as a result of a - // protected field being made public by a sub-class. 
Resort to the dex file to determine - // the correct class for the access check. - const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); - fields_class = class_linker->ResolveType(dex_file, - dex_file.GetFieldId(field_idx).class_idx_, - referring_class); - if (UNLIKELY(!referring_class->CanAccess(fields_class))) { - ThrowIllegalAccessErrorClass(referring_class, fields_class); - return NULL; // failure - } else if (UNLIKELY(!referring_class->CanAccessMember(fields_class, - resolved_field->GetAccessFlags()))) { - ThrowIllegalAccessErrorField(referring_class, resolved_field); - return NULL; // failure - } - } - if (UNLIKELY(is_set && resolved_field->IsFinal() && (fields_class != referring_class))) { - ThrowIllegalAccessErrorFinalField(referrer, resolved_field); - return NULL; // failure - } else { - FieldHelper fh(resolved_field); - if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || - fh.FieldSize() != expected_size)) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - DCHECK(throw_location.GetMethod() == referrer); - self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;", - "Attempted read of %zd-bit %s on field '%s'", - expected_size * (32 / sizeof(int32_t)), - is_primitive ? "primitive" : "non-primitive", - PrettyField(resolved_field, true).c_str()); - return NULL; // failure - } - } - } - if (!is_static) { - // instance fields must be being accessed on an initialized class - return resolved_field; - } else { - // If the class is initialized we're done. - if (fields_class->IsInitialized()) { - return resolved_field; - } else if (Runtime::Current()->GetClassLinker()->EnsureInitialized(fields_class, true, true)) { - // Otherwise let's ensure the class is initialized before resolving the field. 
- return resolved_field; - } else { - DCHECK(self->IsExceptionPending()); // Throw exception and unwind - return NULL; // failure - } - } -} - -// Slow path method resolution -mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, - mirror::AbstractMethod* referrer, - Thread* self, bool access_check, InvokeType type) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - bool is_direct = type == kStatic || type == kDirect; - mirror::AbstractMethod* resolved_method = class_linker->ResolveMethod(method_idx, referrer, type); - if (UNLIKELY(resolved_method == NULL)) { - DCHECK(self->IsExceptionPending()); // Throw exception and unwind. - return NULL; // Failure. - } else if (UNLIKELY(this_object == NULL && type != kStatic)) { - // Maintain interpreter-like semantics where NullPointerException is thrown - // after potential NoSuchMethodError from class linker. - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - DCHECK(referrer == throw_location.GetMethod()); - ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type); - return NULL; // Failure. - } else { - if (!access_check) { - if (is_direct) { - return resolved_method; - } else if (type == kInterface) { - mirror::AbstractMethod* interface_method = - this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); - if (UNLIKELY(interface_method == NULL)) { - ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, - referrer); - return NULL; // Failure. - } else { - return interface_method; - } - } else { - mirror::ObjectArray* vtable; - uint16_t vtable_index = resolved_method->GetMethodIndex(); - if (type == kSuper) { - vtable = referrer->GetDeclaringClass()->GetSuperClass()->GetVTable(); - } else { - vtable = this_object->GetClass()->GetVTable(); - } - // TODO: eliminate bounds check? 
- return vtable->Get(vtable_index); - } - } else { - // Incompatible class change should have been handled in resolve method. - if (UNLIKELY(resolved_method->CheckIncompatibleClassChange(type))) { - ThrowIncompatibleClassChangeError(type, resolved_method->GetInvokeType(), resolved_method, - referrer); - return NULL; // Failure. - } - mirror::Class* methods_class = resolved_method->GetDeclaringClass(); - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(methods_class) || - !referring_class->CanAccessMember(methods_class, - resolved_method->GetAccessFlags()))) { - // The referring class can't access the resolved method, this may occur as a result of a - // protected method being made public by implementing an interface that re-declares the - // method public. Resort to the dex file to determine the correct class for the access check - const DexFile& dex_file = *referring_class->GetDexCache()->GetDexFile(); - methods_class = class_linker->ResolveType(dex_file, - dex_file.GetMethodId(method_idx).class_idx_, - referring_class); - if (UNLIKELY(!referring_class->CanAccess(methods_class))) { - ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class, - referrer, resolved_method, type); - return NULL; // Failure. - } else if (UNLIKELY(!referring_class->CanAccessMember(methods_class, - resolved_method->GetAccessFlags()))) { - ThrowIllegalAccessErrorMethod(referring_class, resolved_method); - return NULL; // Failure. - } - } - if (is_direct) { - return resolved_method; - } else if (type == kInterface) { - mirror::AbstractMethod* interface_method = - this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); - if (UNLIKELY(interface_method == NULL)) { - ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method, this_object, - referrer); - return NULL; // Failure. 
- } else { - return interface_method; - } - } else { - mirror::ObjectArray* vtable; - uint16_t vtable_index = resolved_method->GetMethodIndex(); - if (type == kSuper) { - mirror::Class* super_class = referring_class->GetSuperClass(); - if (LIKELY(super_class != NULL)) { - vtable = referring_class->GetSuperClass()->GetVTable(); - } else { - vtable = NULL; - } - } else { - vtable = this_object->GetClass()->GetVTable(); - } - if (LIKELY(vtable != NULL && - vtable_index < static_cast(vtable->GetLength()))) { - return vtable->GetWithoutChecks(vtable_index); - } else { - // Behavior to agree with that of the verifier. - MethodHelper mh(resolved_method); - ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(), mh.GetName(), - mh.GetSignature()); - return NULL; // Failure. - } - } - } - } -} - -void ThrowStackOverflowError(Thread* self) { - CHECK(!self->IsHandlingStackOverflow()) << "Recursive stack overflow."; - - if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) { - // Remove extra entry pushed onto second stack during method tracing. - Runtime::Current()->GetInstrumentation()->PopMethodForUnwind(self, false); - } - - self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute. - JNIEnvExt* env = self->GetJniEnv(); - std::string msg("stack size "); - msg += PrettySize(self->GetStackSize()); - // Use low-level JNI routine and pre-baked error class to avoid class linking operations that - // would consume more stack. - int rc = ::art::ThrowNewException(env, WellKnownClasses::java_lang_StackOverflowError, - msg.c_str(), NULL); - if (rc != JNI_OK) { - // TODO: ThrowNewException failed presumably because of an OOME, we continue to throw the OOME - // or die in the CHECK below. We may want to throw a pre-baked StackOverflowError - // instead. 
- LOG(ERROR) << "Couldn't throw new StackOverflowError because JNI ThrowNew failed."; - CHECK(self->IsExceptionPending()); - } - self->ResetDefaultStackEnd(); // Return to default stack size. -} - -JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, - jobject rcvr_jobj, jobject interface_method_jobj, - std::vector& args) { - DCHECK(soa.Env()->IsInstanceOf(rcvr_jobj, WellKnownClasses::java_lang_reflect_Proxy)); - - // Build argument array possibly triggering GC. - soa.Self()->AssertThreadSuspensionIsAllowable(); - jobjectArray args_jobj = NULL; - const JValue zero; - if (args.size() > 0) { - args_jobj = soa.Env()->NewObjectArray(args.size(), WellKnownClasses::java_lang_Object, NULL); - if (args_jobj == NULL) { - CHECK(soa.Self()->IsExceptionPending()); - return zero; - } - for (size_t i = 0; i < args.size(); ++i) { - if (shorty[i + 1] == 'L') { - jobject val = args.at(i).l; - soa.Env()->SetObjectArrayElement(args_jobj, i, val); - } else { - JValue jv; - jv.SetJ(args.at(i).j); - mirror::Object* val = BoxPrimitive(Primitive::GetType(shorty[i + 1]), jv); - if (val == NULL) { - CHECK(soa.Self()->IsExceptionPending()); - return zero; - } - soa.Decode* >(args_jobj)->Set(i, val); - } - } - } - - // Call InvocationHandler.invoke(Object proxy, Method method, Object[] args). - jobject inv_hand = soa.Env()->GetObjectField(rcvr_jobj, - WellKnownClasses::java_lang_reflect_Proxy_h); - jvalue invocation_args[3]; - invocation_args[0].l = rcvr_jobj; - invocation_args[1].l = interface_method_jobj; - invocation_args[2].l = args_jobj; - jobject result = - soa.Env()->CallObjectMethodA(inv_hand, - WellKnownClasses::java_lang_reflect_InvocationHandler_invoke, - invocation_args); - - // Unbox result and handle error conditions. - if (LIKELY(!soa.Self()->IsExceptionPending())) { - if (shorty[0] == 'V' || (shorty[0] == 'L' && result == NULL)) { - // Do nothing. 
- return zero; - } else { - mirror::Object* result_ref = soa.Decode(result); - mirror::Object* rcvr = soa.Decode(rcvr_jobj); - mirror::AbstractMethod* interface_method = - soa.Decode(interface_method_jobj); - mirror::Class* result_type = MethodHelper(interface_method).GetReturnType(); - mirror::AbstractMethod* proxy_method; - if (interface_method->GetDeclaringClass()->IsInterface()) { - proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); - } else { - // Proxy dispatch to a method defined in Object. - DCHECK(interface_method->GetDeclaringClass()->IsObjectClass()); - proxy_method = interface_method; - } - ThrowLocation throw_location(rcvr, proxy_method, -1); - JValue result_unboxed; - if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, result_unboxed)) { - DCHECK(soa.Self()->IsExceptionPending()); - return zero; - } - return result_unboxed; - } - } else { - // In the case of checked exceptions that aren't declared, the exception must be wrapped by - // a UndeclaredThrowableException. 
- mirror::Throwable* exception = soa.Self()->GetException(NULL); - if (exception->IsCheckedException()) { - mirror::Object* rcvr = soa.Decode(rcvr_jobj); - mirror::SynthesizedProxyClass* proxy_class = - down_cast(rcvr->GetClass()); - mirror::AbstractMethod* interface_method = - soa.Decode(interface_method_jobj); - mirror::AbstractMethod* proxy_method = - rcvr->GetClass()->FindVirtualMethodForInterface(interface_method); - int throws_index = -1; - size_t num_virt_methods = proxy_class->NumVirtualMethods(); - for (size_t i = 0; i < num_virt_methods; i++) { - if (proxy_class->GetVirtualMethod(i) == proxy_method) { - throws_index = i; - break; - } - } - CHECK_NE(throws_index, -1); - mirror::ObjectArray* declared_exceptions = proxy_class->GetThrows()->Get(throws_index); - mirror::Class* exception_class = exception->GetClass(); - bool declares_exception = false; - for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) { - mirror::Class* declared_exception = declared_exceptions->Get(i); - declares_exception = declared_exception->IsAssignableFrom(exception_class); - } - if (!declares_exception) { - ThrowLocation throw_location(rcvr, proxy_method, -1); - soa.Self()->ThrowNewWrappedException(throw_location, - "Ljava/lang/reflect/UndeclaredThrowableException;", - NULL); - } - } - return zero; - } -} - -} // namespace art diff --git a/runtime/runtime_support.h b/runtime/runtime_support.h deleted file mode 100644 index 43c678428b..0000000000 --- a/runtime/runtime_support.h +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_RUNTIME_SUPPORT_H_ -#define ART_RUNTIME_RUNTIME_SUPPORT_H_ - -#include "class_linker.h" -#include "common_throws.h" -#include "dex_file.h" -#include "indirect_reference_table.h" -#include "invoke_type.h" -#include "jni_internal.h" -#include "mirror/abstract_method.h" -#include "mirror/array.h" -#include "mirror/class-inl.h" -#include "mirror/throwable.h" -#include "object_utils.h" -#include "thread.h" - -extern "C" void art_interpreter_invoke_handler(); -extern "C" void art_jni_dlsym_lookup_stub(); -extern "C" void art_portable_abstract_method_error_stub(); -extern "C" void art_portable_proxy_invoke_handler(); -extern "C" void art_quick_abstract_method_error_stub(); -extern "C" void art_quick_deoptimize(); -extern "C" void art_quick_instrumentation_entry_from_code(void*); -extern "C" void art_quick_instrumentation_exit_from_code(); -extern "C" void art_quick_interpreter_entry(void*); -extern "C" void art_quick_proxy_invoke_handler(); -extern "C" void art_work_around_app_jni_bugs(); - -extern "C" double art_l2d(int64_t l); -extern "C" float art_l2f(int64_t l); -extern "C" int64_t art_d2l(double d); -extern "C" int32_t art_d2i(double d); -extern "C" int64_t art_f2l(float f); -extern "C" int32_t art_f2i(float f); - -namespace art { -namespace mirror { -class Class; -class Field; -class Object; -} - -// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it -// cannot be resolved, throw an error. If it can, use it to create an instance. 
-// When verification/compiler hasn't been able to verify access, optionally perform an access -// check. -static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - Thread* self, - bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); - Runtime* runtime = Runtime::Current(); - if (UNLIKELY(klass == NULL)) { - klass = runtime->GetClassLinker()->ResolveType(type_idx, method); - if (klass == NULL) { - DCHECK(self->IsExceptionPending()); - return NULL; // Failure - } - } - if (access_check) { - if (UNLIKELY(!klass->IsInstantiable())) { - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;", - PrettyDescriptor(klass).c_str()); - return NULL; // Failure - } - mirror::Class* referrer = method->GetDeclaringClass(); - if (UNLIKELY(!referrer->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer, klass); - return NULL; // Failure - } - } - if (!klass->IsInitialized() && - !runtime->GetClassLinker()->EnsureInitialized(klass, true, true)) { - DCHECK(self->IsExceptionPending()); - return NULL; // Failure - } - return klass->AllocObject(self); -} - -// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If -// it cannot be resolved, throw an error. If it can, use it to create an array. -// When verification/compiler hasn't been able to verify access, optionally perform an access -// check. 
-static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - int32_t component_count, - Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (UNLIKELY(component_count < 0)) { - ThrowNegativeArraySizeException(component_count); - return NULL; // Failure - } - mirror::Class* klass = method->GetDexCacheResolvedTypes()->Get(type_idx); - if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve - klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method); - if (klass == NULL) { // Error - DCHECK(Thread::Current()->IsExceptionPending()); - return NULL; // Failure - } - CHECK(klass->IsArrayClass()) << PrettyClass(klass); - } - if (access_check) { - mirror::Class* referrer = method->GetDeclaringClass(); - if (UNLIKELY(!referrer->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referrer, klass); - return NULL; // Failure - } - } - return mirror::Array::Alloc(self, klass, component_count); -} - -extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::AbstractMethod* method, - int32_t component_count, - Thread* self, bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -// Type of find field operation for fast and slow case. -enum FindFieldType { - InstanceObjectRead, - InstanceObjectWrite, - InstancePrimitiveRead, - InstancePrimitiveWrite, - StaticObjectRead, - StaticObjectWrite, - StaticPrimitiveRead, - StaticPrimitiveWrite, -}; - -// Slow field find that can initialize classes and may throw exceptions. -extern mirror::Field* FindFieldFromCode(uint32_t field_idx, const mirror::AbstractMethod* referrer, - Thread* self, FindFieldType type, size_t expected_size, - bool access_check) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -// Fast path field resolution that can't initialize classes or throw exceptions. 
-static inline mirror::Field* FindFieldFast(uint32_t field_idx, - const mirror::AbstractMethod* referrer, - FindFieldType type, size_t expected_size) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - mirror::Field* resolved_field = - referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx); - if (UNLIKELY(resolved_field == NULL)) { - return NULL; - } - mirror::Class* fields_class = resolved_field->GetDeclaringClass(); - // Check class is initiliazed or initializing. - if (UNLIKELY(!fields_class->IsInitializing())) { - return NULL; - } - // Check for incompatible class change. - bool is_primitive; - bool is_set; - bool is_static; - switch (type) { - case InstanceObjectRead: is_primitive = false; is_set = false; is_static = false; break; - case InstanceObjectWrite: is_primitive = false; is_set = true; is_static = false; break; - case InstancePrimitiveRead: is_primitive = true; is_set = false; is_static = false; break; - case InstancePrimitiveWrite: is_primitive = true; is_set = true; is_static = false; break; - case StaticObjectRead: is_primitive = false; is_set = false; is_static = true; break; - case StaticObjectWrite: is_primitive = false; is_set = true; is_static = true; break; - case StaticPrimitiveRead: is_primitive = true; is_set = false; is_static = true; break; - case StaticPrimitiveWrite: is_primitive = true; is_set = true; is_static = true; break; - default: - LOG(FATAL) << "UNREACHABLE"; // Assignment below to avoid GCC warnings. - is_primitive = true; - is_set = true; - is_static = true; - break; - } - if (UNLIKELY(resolved_field->IsStatic() != is_static)) { - // Incompatible class change. - return NULL; - } - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(fields_class) || - !referring_class->CanAccessMember(fields_class, - resolved_field->GetAccessFlags()) || - (is_set && resolved_field->IsFinal() && (fields_class != referring_class)))) { - // Illegal access. 
- return NULL; - } - FieldHelper fh(resolved_field); - if (UNLIKELY(fh.IsPrimitiveType() != is_primitive || - fh.FieldSize() != expected_size)) { - return NULL; - } - return resolved_field; -} - -// Fast path method resolution that can't throw exceptions. -static inline mirror::AbstractMethod* FindMethodFast(uint32_t method_idx, - mirror::Object* this_object, - const mirror::AbstractMethod* referrer, - bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - bool is_direct = type == kStatic || type == kDirect; - if (UNLIKELY(this_object == NULL && !is_direct)) { - return NULL; - } - mirror::AbstractMethod* resolved_method = - referrer->GetDeclaringClass()->GetDexCache()->GetResolvedMethod(method_idx); - if (UNLIKELY(resolved_method == NULL)) { - return NULL; - } - if (access_check) { - // Check for incompatible class change errors and access. - bool icce = resolved_method->CheckIncompatibleClassChange(type); - if (UNLIKELY(icce)) { - return NULL; - } - mirror::Class* methods_class = resolved_method->GetDeclaringClass(); - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (UNLIKELY(!referring_class->CanAccess(methods_class) || - !referring_class->CanAccessMember(methods_class, - resolved_method->GetAccessFlags()))) { - // Potential illegal access, may need to refine the method's class. - return NULL; - } - } - if (type == kInterface) { // Most common form of slow path dispatch. 
- return this_object->GetClass()->FindVirtualMethodForInterface(resolved_method); - } else if (is_direct) { - return resolved_method; - } else if (type == kSuper) { - return referrer->GetDeclaringClass()->GetSuperClass()->GetVTable()-> - Get(resolved_method->GetMethodIndex()); - } else { - DCHECK(type == kVirtual); - return this_object->GetClass()->GetVTable()->Get(resolved_method->GetMethodIndex()); - } -} - -extern mirror::AbstractMethod* FindMethodFromCode(uint32_t method_idx, mirror::Object* this_object, - mirror::AbstractMethod* referrer, - Thread* self, bool access_check, InvokeType type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx, - const mirror::AbstractMethod* referrer, - Thread* self, bool can_run_clinit, - bool verify_access) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - mirror::Class* klass = class_linker->ResolveType(type_idx, referrer); - if (UNLIKELY(klass == NULL)) { - CHECK(self->IsExceptionPending()); - return NULL; // Failure - Indicate to caller to deliver exception - } - // Perform access check if necessary. - mirror::Class* referring_class = referrer->GetDeclaringClass(); - if (verify_access && UNLIKELY(!referring_class->CanAccess(klass))) { - ThrowIllegalAccessErrorClass(referring_class, klass); - return NULL; // Failure - Indicate to caller to deliver exception - } - // If we're just implementing const-class, we shouldn't call . - if (!can_run_clinit) { - return klass; - } - // If we are the of this class, just return our storage. - // - // Do not set the DexCache InitializedStaticStorage, since that implies has finished - // running. 
- if (klass == referring_class && MethodHelper(referrer).IsClassInitializer()) { - return klass; - } - if (!class_linker->EnsureInitialized(klass, true, true)) { - CHECK(self->IsExceptionPending()); - return NULL; // Failure - Indicate to caller to deliver exception - } - referrer->GetDexCacheInitializedStaticStorage()->Set(type_idx, klass); - return klass; -} - -extern void ThrowStackOverflowError(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -static inline mirror::String* ResolveStringFromCode(const mirror::AbstractMethod* referrer, - uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - return class_linker->ResolveString(string_idx, referrer); -} - -static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - UNLOCK_FUNCTION(monitor_lock_) { - // Save any pending exception over monitor exit call. - mirror::Throwable* saved_exception = NULL; - ThrowLocation saved_throw_location; - if (UNLIKELY(self->IsExceptionPending())) { - saved_exception = self->GetException(&saved_throw_location); - self->ClearException(); - } - // Decode locked object and unlock, before popping local references. - self->DecodeJObject(locked)->MonitorExit(self); - if (UNLIKELY(self->IsExceptionPending())) { - LOG(FATAL) << "Synchronized JNI code returning with an exception:\n" - << saved_exception->Dump() - << "\nEncountered second exception during implicit MonitorExit:\n" - << self->GetException(NULL)->Dump(); - } - // Restore pending exception. 
- if (saved_exception != NULL) { - self->SetException(saved_throw_location, saved_exception); - } -} - -static inline void CheckReferenceResult(mirror::Object* o, Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (o == NULL) { - return; - } - mirror::AbstractMethod* m = self->GetCurrentMethod(NULL); - if (o == kInvalidIndirectRefObject) { - JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str()); - } - // Make sure that the result is an instance of the type this method was expected to return. - mirror::Class* return_type = MethodHelper(m).GetReturnType(); - - if (!o->InstanceOf(return_type)) { - JniAbortF(NULL, "attempt to return an instance of %s from %s", - PrettyTypeOf(o).c_str(), PrettyMethod(m).c_str()); - } -} - -static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - for (;;) { - if (thread->ReadFlag(kCheckpointRequest)) { - thread->RunCheckpointFunction(); - thread->AtomicClearFlag(kCheckpointRequest); - } else if (thread->ReadFlag(kSuspendRequest)) { - thread->FullSuspendCheck(); - } else { - break; - } - } -} - -JValue InvokeProxyInvocationHandler(ScopedObjectAccessUnchecked& soa, const char* shorty, - jobject rcvr_jobj, jobject interface_method_jobj, - std::vector& args) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - -// Entry point for deoptimization. -static inline uintptr_t GetDeoptimizationEntryPoint() { - return reinterpret_cast(art_quick_deoptimize); -} - -// Return address of instrumentation stub. -static inline void* GetInstrumentationEntryPoint() { - return reinterpret_cast(art_quick_instrumentation_entry_from_code); -} - -// The return_pc of instrumentation exit stub. -static inline uintptr_t GetInstrumentationExitPc() { - return reinterpret_cast(art_quick_instrumentation_exit_from_code); -} - -// Return address of interpreter stub. 
-static inline void* GetInterpreterEntryPoint() { - return reinterpret_cast(art_quick_interpreter_entry); -} - -static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) { - return class_linker->GetPortableResolutionTrampoline(); -} - -static inline const void* GetQuickResolutionTrampoline(ClassLinker* class_linker) { - return class_linker->GetQuickResolutionTrampoline(); -} - -// Return address of resolution trampoline stub for defined compiler. -static inline const void* GetResolutionTrampoline(ClassLinker* class_linker) { -#if defined(ART_USE_PORTABLE_COMPILER) - return GetPortableResolutionTrampoline(class_linker); -#else - return GetQuickResolutionTrampoline(class_linker); -#endif -} - -static inline void* GetPortableAbstractMethodErrorStub() { - return reinterpret_cast(art_portable_abstract_method_error_stub); -} - -static inline void* GetQuickAbstractMethodErrorStub() { - return reinterpret_cast(art_quick_abstract_method_error_stub); -} - -// Return address of abstract method error stub for defined compiler. -static inline void* GetAbstractMethodErrorStub() { -#if defined(ART_USE_PORTABLE_COMPILER) - return GetPortableAbstractMethodErrorStub(); -#else - return GetQuickAbstractMethodErrorStub(); -#endif -} - -static inline void* GetJniDlsymLookupStub() { - return reinterpret_cast(art_jni_dlsym_lookup_stub); -} - -} // namespace art - -#endif // ART_RUNTIME_RUNTIME_SUPPORT_H_ diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc deleted file mode 100644 index 93396d6a96..0000000000 --- a/runtime/runtime_support_llvm.cc +++ /dev/null @@ -1,930 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "runtime_support_llvm.h" - -#include "ScopedLocalRef.h" -#include "asm_support.h" -#include "class_linker.h" -#include "class_linker-inl.h" -#include "dex_file-inl.h" -#include "dex_instruction.h" -#include "mirror/abstract_method-inl.h" -#include "mirror/class-inl.h" -#include "mirror/dex_cache-inl.h" -#include "mirror/field-inl.h" -#include "mirror/object.h" -#include "mirror/object-inl.h" -#include "mirror/object_array-inl.h" -#include "nth_caller_visitor.h" -#include "object_utils.h" -#include "reflection.h" -#include "runtime_support.h" -#include "scoped_thread_state_change.h" -#include "thread.h" -#include "thread_list.h" -#include "verifier/dex_gc_map.h" -#include "verifier/method_verifier.h" -#include "well_known_classes.h" - -#include -#include -#include -#include -#include - -namespace art { - -using ::art::mirror::AbstractMethod; - -class ShadowFrameCopyVisitor : public StackVisitor { - public: - explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL), - top_frame_(NULL) {} - - bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (IsShadowFrame()) { - ShadowFrame* cur_frame = GetCurrentShadowFrame(); - size_t num_regs = cur_frame->NumberOfVRegs(); - AbstractMethod* method = cur_frame->GetMethod(); - uint32_t dex_pc = cur_frame->GetDexPC(); - ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc); - - const uint8_t* gc_map = method->GetNativeGcMap(); - uint32_t gc_map_length = static_cast((gc_map[0] << 24) | - (gc_map[1] << 16) | - (gc_map[2] << 8) 
| - (gc_map[3] << 0)); - verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length); - const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc); - for (size_t reg = 0; reg < num_regs; ++reg) { - if (TestBitmap(reg, reg_bitmap)) { - new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg)); - } else { - new_frame->SetVReg(reg, cur_frame->GetVReg(reg)); - } - } - - if (prev_frame_ != NULL) { - prev_frame_->SetLink(new_frame); - } else { - top_frame_ = new_frame; - } - prev_frame_ = new_frame; - } - return true; - } - - ShadowFrame* GetShadowFrameCopy() { - return top_frame_; - } - - private: - static bool TestBitmap(int reg, const uint8_t* reg_vector) { - return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0; - } - - ShadowFrame* prev_frame_; - ShadowFrame* top_frame_; -}; - -} // namespace art - -extern "C" { -using ::art::CatchHandlerIterator; -using ::art::DexFile; -using ::art::FindFieldFast; -using ::art::FindMethodFast; -using ::art::InstanceObjectRead; -using ::art::InstanceObjectWrite; -using ::art::InstancePrimitiveRead; -using ::art::InstancePrimitiveWrite; -using ::art::Instruction; -using ::art::InvokeType; -using ::art::JNIEnvExt; -using ::art::JValue; -using ::art::Locks; -using ::art::MethodHelper; -using ::art::PrettyClass; -using ::art::PrettyMethod; -using ::art::Primitive; -using ::art::ResolveStringFromCode; -using ::art::Runtime; -using ::art::ScopedJniEnvLocalRefState; -using ::art::ScopedObjectAccessUnchecked; -using ::art::ShadowFrame; -using ::art::ShadowFrameCopyVisitor; -using ::art::StaticObjectRead; -using ::art::StaticObjectWrite; -using ::art::StaticPrimitiveRead; -using ::art::StaticPrimitiveWrite; -using ::art::Thread; -using ::art::Thread; -using ::art::ThrowArithmeticExceptionDivideByZero; -using ::art::ThrowArrayIndexOutOfBoundsException; -using ::art::ThrowArrayStoreException; -using ::art::ThrowClassCastException; -using ::art::ThrowLocation; -using ::art::ThrowNoSuchMethodError; -using 
::art::ThrowNullPointerException; -using ::art::ThrowNullPointerExceptionFromDexPC; -using ::art::ThrowStackOverflowError; -using ::art::kDirect; -using ::art::kInterface; -using ::art::kNative; -using ::art::kStatic; -using ::art::kSuper; -using ::art::kVirtual; -using ::art::mirror::AbstractMethod; -using ::art::mirror::Array; -using ::art::mirror::Class; -using ::art::mirror::Field; -using ::art::mirror::Object; -using ::art::mirror::Throwable; - -//---------------------------------------------------------------------------- -// Thread -//---------------------------------------------------------------------------- - -Thread* art_portable_get_current_thread_from_code() { -#if defined(__arm__) || defined(__i386__) - LOG(FATAL) << "UNREACHABLE"; -#endif - return Thread::Current(); -} - -void* art_portable_set_current_thread_from_code(void* thread_object_addr) { - // Hijacked to set r9 on ARM. - LOG(FATAL) << "UNREACHABLE"; - return NULL; -} - -void art_portable_lock_object_from_code(Object* obj, Thread* thread) - EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) { - DCHECK(obj != NULL); // Assumed to have been checked before entry - obj->MonitorEnter(thread); // May block - DCHECK(thread->HoldsLock(obj)); - // Only possible exception is NPE and is handled before entry - DCHECK(!thread->IsExceptionPending()); -} - -void art_portable_unlock_object_from_code(Object* obj, Thread* thread) - UNLOCK_FUNCTION(monitor_lock_) { - DCHECK(obj != NULL); // Assumed to have been checked before entry - // MonitorExit may throw exception - obj->MonitorExit(thread); -} - -void art_portable_test_suspend_from_code(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CheckSuspend(self); - if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) { - // Save out the shadow frame to the heap - ShadowFrameCopyVisitor visitor(self); - visitor.WalkStack(true); - self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy()); - 
self->SetDeoptimizationReturnValue(JValue()); - self->SetException(ThrowLocation(), reinterpret_cast(-1)); - } -} - -ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread, - ShadowFrame* new_shadow_frame, - AbstractMethod* method, - uint32_t num_vregs) { - ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame); - new_shadow_frame->SetMethod(method); - new_shadow_frame->SetNumberOfVRegs(num_vregs); - return old_frame; -} - -void art_portable_pop_shadow_frame_from_code(void*) { - LOG(FATAL) << "Implemented by IRBuilder."; -} - -void art_portable_mark_gc_card_from_code(void *, void*) { - LOG(FATAL) << "Implemented by IRBuilder."; -} - -//---------------------------------------------------------------------------- -// Exception -//---------------------------------------------------------------------------- - -bool art_portable_is_exception_pending_from_code() { - LOG(FATAL) << "Implemented by IRBuilder."; - return false; -} - -void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowArithmeticExceptionDivideByZero(); -} - -void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowArrayIndexOutOfBoundsException(index, length); -} - -void art_portable_throw_no_such_method_from_code(int32_t method_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowNoSuchMethodError(method_idx); -} - -void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // TODO: remove dex_pc argument from caller. 
- UNUSED(dex_pc); - Thread* self = Thread::Current(); - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - ThrowNullPointerExceptionFromDexPC(throw_location); -} - -void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - ThrowStackOverflowError(Thread::Current()); -} - -void art_portable_throw_exception_from_code(Throwable* exception) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Thread* self = Thread::Current(); - ThrowLocation throw_location = self->GetCurrentLocationForThrow(); - if (exception == NULL) { - ThrowNullPointerException(NULL, "throw with null exception"); - } else { - self->SetException(throw_location, exception); - } -} - -void* art_portable_get_and_clear_exception(Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(self->IsExceptionPending()); - // TODO: make this inline. - Throwable* exception = self->GetException(NULL); - self->ClearException(); - return exception; -} - -int32_t art_portable_find_catch_block_from_code(AbstractMethod* current_method, - uint32_t ti_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Thread* self = Thread::Current(); // TODO: make an argument. - ThrowLocation throw_location; - Throwable* exception = self->GetException(&throw_location); - // Check for special deoptimization exception. 
- if (UNLIKELY(reinterpret_cast(exception) == -1)) { - return -1; - } - Class* exception_type = exception->GetClass(); - MethodHelper mh(current_method); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - DCHECK_LT(ti_offset, code_item->tries_size_); - const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset); - - int iter_index = 0; - int result = -1; - uint32_t catch_dex_pc = -1; - // Iterate over the catch handlers associated with dex_pc - for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) { - uint16_t iter_type_idx = it.GetHandlerTypeIndex(); - // Catch all case - if (iter_type_idx == DexFile::kDexNoIndex16) { - catch_dex_pc = it.GetHandlerAddress(); - result = iter_index; - break; - } - // Does this catch exception type apply? - Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx); - if (UNLIKELY(iter_exception_type == NULL)) { - // TODO: check, the verifier (class linker?) should take care of resolving all exception - // classes early. - LOG(WARNING) << "Unresolved exception class when finding catch block: " - << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx); - } else if (iter_exception_type->IsAssignableFrom(exception_type)) { - catch_dex_pc = it.GetHandlerAddress(); - result = iter_index; - break; - } - ++iter_index; - } - if (result != -1) { - // Handler found. - Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self, - throw_location, - current_method, - catch_dex_pc, - exception); - // If the catch block has no move-exception then clear the exception for it. 
- const Instruction* first_catch_instr = - Instruction::At(&mh.GetCodeItem()->insns_[catch_dex_pc]); - if (first_catch_instr->Opcode() != Instruction::MOVE_EXCEPTION) { - self->ClearException(); - } - } - return result; -} - - -//---------------------------------------------------------------------------- -// Object Space -//---------------------------------------------------------------------------- - -Object* art_portable_alloc_object_from_code(uint32_t type_idx, AbstractMethod* referrer, Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocObjectFromCode(type_idx, referrer, thread, false); -} - -Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocObjectFromCode(type_idx, referrer, thread, true); -} - -Object* art_portable_alloc_array_from_code(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocArrayFromCode(type_idx, referrer, length, self, false); -} - -Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* self) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return AllocArrayFromCode(type_idx, referrer, length, self, true); -} - -Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false); -} - -Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx, - AbstractMethod* referrer, - uint32_t length, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true); -} - -static AbstractMethod* FindMethodHelper(uint32_t 
method_idx, - Object* this_object, - AbstractMethod* caller_method, - bool access_check, - InvokeType type, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - AbstractMethod* method = FindMethodFast(method_idx, - this_object, - caller_method, - access_check, - type); - if (UNLIKELY(method == NULL)) { - method = FindMethodFromCode(method_idx, this_object, caller_method, - thread, access_check, type); - if (UNLIKELY(method == NULL)) { - CHECK(thread->IsExceptionPending()); - return 0; // failure - } - } - DCHECK(!thread->IsExceptionPending()); - const void* code = method->GetEntryPointFromCompiledCode(); - - // When we return, the caller will branch to this address, so it had better not be 0! - if (UNLIKELY(code == NULL)) { - MethodHelper mh(method); - LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method) - << " location: " << mh.GetDexFile().GetLocation(); - } - return method; -} - -Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread); -} - -Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread); -} - -Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread); -} - -Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread); -} - -Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread); -} - -Object* art_portable_find_interface_method_from_code(uint32_t method_idx, - Object* this_object, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread); -} - -Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false); -} - -Object* art_portable_initialize_type_from_code(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false); -} - -Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx, - AbstractMethod* referrer, - Thread* thread) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Called when caller isn't guaranteed to have access to a type and the dex cache may be - // unpopulated - return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true); -} - -Object* art_portable_resolve_string_from_code(AbstractMethod* referrer, uint32_t string_idx) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return ResolveStringFromCode(referrer, string_idx); -} - -int32_t art_portable_set32_static_from_code(uint32_t field_idx, - AbstractMethod* referrer, - int32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, - referrer, - 
StaticPrimitiveWrite, - sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - field->Set32(field->GetDeclaringClass(), new_value); - return 0; - } - field = FindFieldFromCode(field_idx, - referrer, - Thread::Current(), - StaticPrimitiveWrite, - sizeof(uint32_t), - true); - if (LIKELY(field != NULL)) { - field->Set32(field->GetDeclaringClass(), new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set64_static_from_code(uint32_t field_idx, - AbstractMethod* referrer, - int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - field->Set64(field->GetDeclaringClass(), new_value); - return 0; - } - field = FindFieldFromCode(field_idx, - referrer, - Thread::Current(), - StaticPrimitiveWrite, - sizeof(uint64_t), - true); - if (LIKELY(field != NULL)) { - field->Set64(field->GetDeclaringClass(), new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set_obj_static_from_code(uint32_t field_idx, - AbstractMethod* referrer, - Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*)); - if (LIKELY(field != NULL)) { - field->SetObj(field->GetDeclaringClass(), new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticObjectWrite, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - field->SetObj(field->GetDeclaringClass(), new_value); - return 0; - } - return -1; -} - -int32_t art_portable_get32_static_from_code(uint32_t field_idx, AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - return field->Get32(field->GetDeclaringClass()); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticPrimitiveRead, 
sizeof(uint32_t), true); - if (LIKELY(field != NULL)) { - return field->Get32(field->GetDeclaringClass()); - } - return 0; -} - -int64_t art_portable_get64_static_from_code(uint32_t field_idx, AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - return field->Get64(field->GetDeclaringClass()); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticPrimitiveRead, sizeof(uint64_t), true); - if (LIKELY(field != NULL)) { - return field->Get64(field->GetDeclaringClass()); - } - return 0; -} - -Object* art_portable_get_obj_static_from_code(uint32_t field_idx, AbstractMethod* referrer) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*)); - if (LIKELY(field != NULL)) { - return field->GetObj(field->GetDeclaringClass()); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - StaticObjectRead, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - return field->GetObj(field->GetDeclaringClass()); - } - return 0; -} - -int32_t art_portable_set32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, uint32_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - field->Set32(obj, new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveWrite, sizeof(uint32_t), true); - if (LIKELY(field != NULL)) { - field->Set32(obj, new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, int64_t new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, 
InstancePrimitiveWrite, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - field->Set64(obj, new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveWrite, sizeof(uint64_t), true); - if (LIKELY(field != NULL)) { - field->Set64(obj, new_value); - return 0; - } - return -1; -} - -int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, - Object* obj, Object* new_value) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*)); - if (LIKELY(field != NULL)) { - field->SetObj(obj, new_value); - return 0; - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstanceObjectWrite, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - field->SetObj(obj, new_value); - return 0; - } - return -1; -} - -int32_t art_portable_get32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t)); - if (LIKELY(field != NULL)) { - return field->Get32(obj); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveRead, sizeof(uint32_t), true); - if (LIKELY(field != NULL)) { - return field->Get32(obj); - } - return 0; -} - -int64_t art_portable_get64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t)); - if (LIKELY(field != NULL)) { - return field->Get64(obj); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstancePrimitiveRead, sizeof(uint64_t), true); - if (LIKELY(field != NULL)) { - return field->Get64(obj); - } - return 0; -} - -Object* art_portable_get_obj_instance_from_code(uint32_t field_idx, 
AbstractMethod* referrer, Object* obj) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*)); - if (LIKELY(field != NULL)) { - return field->GetObj(obj); - } - field = FindFieldFromCode(field_idx, referrer, Thread::Current(), - InstanceObjectRead, sizeof(Object*), true); - if (LIKELY(field != NULL)) { - return field->GetObj(obj); - } - return 0; -} - -void art_portable_fill_array_data_from_code(AbstractMethod* method, uint32_t dex_pc, - Array* array, uint32_t payload_offset) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - // Test: Is array equal to null? (Guard NullPointerException) - if (UNLIKELY(array == NULL)) { - art_portable_throw_null_pointer_exception_from_code(dex_pc); - return; - } - - // Find the payload from the CodeItem - MethodHelper mh(method); - const DexFile::CodeItem* code_item = mh.GetCodeItem(); - - DCHECK_GT(code_item->insns_size_in_code_units_, payload_offset); - - const Instruction::ArrayDataPayload* payload = - reinterpret_cast( - code_item->insns_ + payload_offset); - - DCHECK_EQ(payload->ident, - static_cast(Instruction::kArrayDataSignature)); - - // Test: Is array big enough? 
- uint32_t array_len = static_cast(array->GetLength()); - if (UNLIKELY(array_len < payload->element_count)) { - int32_t last_index = payload->element_count - 1; - art_portable_throw_array_bounds_from_code(array_len, last_index); - return; - } - - // Copy the data - size_t size = payload->element_width * payload->element_count; - memcpy(array->GetRawData(payload->element_width), payload->data, size); -} - - - -//---------------------------------------------------------------------------- -// Type checking, in the nature of casting -//---------------------------------------------------------------------------- - -int32_t art_portable_is_assignable_from_code(const Class* dest_type, const Class* src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(dest_type != NULL); - DCHECK(src_type != NULL); - return dest_type->IsAssignableFrom(src_type) ? 1 : 0; -} - -void art_portable_check_cast_from_code(const Class* dest_type, const Class* src_type) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - DCHECK(dest_type->IsClass()) << PrettyClass(dest_type); - DCHECK(src_type->IsClass()) << PrettyClass(src_type); - if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) { - ThrowClassCastException(dest_type, src_type); - } -} - -void art_portable_check_put_array_element_from_code(const Object* element, - const Object* array) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (element == NULL) { - return; - } - DCHECK(array != NULL); - Class* array_class = array->GetClass(); - DCHECK(array_class != NULL); - Class* component_type = array_class->GetComponentType(); - Class* element_class = element->GetClass(); - if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) { - ThrowArrayStoreException(element_class, array_class); - } - return; -} - -//---------------------------------------------------------------------------- -// JNI -//---------------------------------------------------------------------------- - -// Called on entry to JNI, transition out of Runnable 
and release share of mutator_lock_. -uint32_t art_portable_jni_method_start(Thread* self) - UNLOCK_FUNCTION(GlobalSynchronizatio::mutator_lock_) { - JNIEnvExt* env = self->GetJniEnv(); - uint32_t saved_local_ref_cookie = env->local_ref_cookie; - env->local_ref_cookie = env->locals.GetSegmentState(); - self->TransitionFromRunnableToSuspended(kNative); - return saved_local_ref_cookie; -} - -uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self) - UNLOCK_FUNCTION(Locks::mutator_lock_) { - self->DecodeJObject(to_lock)->MonitorEnter(self); - return art_portable_jni_method_start(self); -} - -static inline void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) { - JNIEnvExt* env = self->GetJniEnv(); - env->locals.SetSegmentState(env->local_ref_cookie); - env->local_ref_cookie = saved_local_ref_cookie; -} - -void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - PopLocalReferences(saved_local_ref_cookie, self); -} - - -void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie, - jobject locked, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. - PopLocalReferences(saved_local_ref_cookie, self); -} - -Object* art_portable_jni_method_end_with_reference(jobject result, - uint32_t saved_local_ref_cookie, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - Object* o = self->DecodeJObject(result); // Must decode before pop. - PopLocalReferences(saved_local_ref_cookie, self); - // Process result. 
- if (UNLIKELY(self->GetJniEnv()->check_jni)) { - if (self->IsExceptionPending()) { - return NULL; - } - CheckReferenceResult(o, self); - } - return o; -} - -Object* art_portable_jni_method_end_with_reference_synchronized(jobject result, - uint32_t saved_local_ref_cookie, - jobject locked, - Thread* self) - SHARED_LOCK_FUNCTION(Locks::mutator_lock_) { - self->TransitionFromSuspendedToRunnable(); - UnlockJniSynchronizedMethod(locked, self); // Must decode before pop. - Object* o = self->DecodeJObject(result); - PopLocalReferences(saved_local_ref_cookie, self); - // Process result. - if (UNLIKELY(self->GetJniEnv()->check_jni)) { - if (self->IsExceptionPending()) { - return NULL; - } - CheckReferenceResult(o, self); - } - return o; -} - -// Handler for invocation on proxy methods. Create a boxed argument array and invoke the invocation -// handler which is a field within the proxy object receiver. The var args encode the arguments -// with the last argument being a pointer to a JValue to store the result in. -void art_portable_proxy_invoke_handler_from_code(AbstractMethod* proxy_method, ...) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - va_list ap; - va_start(ap, proxy_method); - - Object* receiver = va_arg(ap, Object*); - Thread* self = va_arg(ap, Thread*); - MethodHelper proxy_mh(proxy_method); - - // Ensure we don't get thread suspension until the object arguments are safely in jobjects. - const char* old_cause = - self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments"); - self->VerifyStack(); - - // Start new JNI local reference state. - JNIEnvExt* env = self->GetJniEnv(); - ScopedObjectAccessUnchecked soa(env); - ScopedJniEnvLocalRefState env_state(env); - - // Create local ref. copies of the receiver. - jobject rcvr_jobj = soa.AddLocalReference(receiver); - - // Convert proxy method into expected interface method. 
- AbstractMethod* interface_method = proxy_method->FindOverriddenMethod(); - DCHECK(interface_method != NULL); - DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method); - jobject interface_method_jobj = soa.AddLocalReference(interface_method); - - // Record arguments and turn Object* arguments into jobject to survive GC. - std::vector args; - const size_t num_params = proxy_mh.NumArgs(); - for (size_t i = 1; i < num_params; ++i) { - jvalue val; - switch (proxy_mh.GetParamPrimitiveType(i)) { - case Primitive::kPrimNot: - val.l = soa.AddLocalReference(va_arg(ap, Object*)); - break; - case Primitive::kPrimBoolean: // Fall-through. - case Primitive::kPrimByte: // Fall-through. - case Primitive::kPrimChar: // Fall-through. - case Primitive::kPrimShort: // Fall-through. - case Primitive::kPrimInt: // Fall-through. - val.i = va_arg(ap, jint); - break; - case Primitive::kPrimFloat: - // TODO: should this be jdouble? Floats aren't passed to var arg routines. - val.i = va_arg(ap, jint); - break; - case Primitive::kPrimDouble: - val.d = (va_arg(ap, jdouble)); - break; - case Primitive::kPrimLong: - val.j = (va_arg(ap, jlong)); - break; - case Primitive::kPrimVoid: - LOG(FATAL) << "UNREACHABLE"; - val.j = 0; - break; - } - args.push_back(val); - } - self->EndAssertNoThreadSuspension(old_cause); - JValue* result_location = NULL; - const char* shorty = proxy_mh.GetShorty(); - if (shorty[0] != 'V') { - result_location = va_arg(ap, JValue*); - } - va_end(ap); - JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args); - if (result_location != NULL) { - *result_location = result; - } -} - -//---------------------------------------------------------------------------- -// Memory barrier -//---------------------------------------------------------------------------- - -void art_portable_constructor_barrier() { - LOG(FATAL) << "Implemented by IRBuilder."; -} -} // extern "C" diff --git a/runtime/runtime_support_llvm.h 
b/runtime/runtime_support_llvm.h deleted file mode 100644 index 43ea953a96..0000000000 --- a/runtime/runtime_support_llvm.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ -#define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ - -extern "C" { -//---------------------------------------------------------------------------- -// Runtime Support Function Lookup Callback -//---------------------------------------------------------------------------- -void* art_portable_find_runtime_support_func(void* context, const char* name); -} // extern "C" - -#endif // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_ diff --git a/runtime/runtime_support_test.cc b/runtime/runtime_support_test.cc deleted file mode 100644 index b827813146..0000000000 --- a/runtime/runtime_support_test.cc +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (C) 2012 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "runtime_support.h" - -#include "common_test.h" -#include - -namespace art { - -class RuntimeSupportTest : public CommonTest {}; - -TEST_F(RuntimeSupportTest, DoubleToLong) { - EXPECT_EQ(std::numeric_limits::max(), art_d2l(1.85e19)); - EXPECT_EQ(std::numeric_limits::min(), art_d2l(-1.85e19)); - EXPECT_EQ(0LL, art_d2l(0)); - EXPECT_EQ(1LL, art_d2l(1.0)); - EXPECT_EQ(10LL, art_d2l(10.0)); - EXPECT_EQ(100LL, art_d2l(100.0)); - EXPECT_EQ(-1LL, art_d2l(-1.0)); - EXPECT_EQ(-10LL, art_d2l(-10.0)); - EXPECT_EQ(-100LL, art_d2l(-100.0)); -} - -TEST_F(RuntimeSupportTest, FloatToLong) { - EXPECT_EQ(std::numeric_limits::max(), art_f2l(1.85e19)); - EXPECT_EQ(std::numeric_limits::min(), art_f2l(-1.85e19)); - EXPECT_EQ(0LL, art_f2l(0)); - EXPECT_EQ(1LL, art_f2l(1.0)); - EXPECT_EQ(10LL, art_f2l(10.0)); - EXPECT_EQ(100LL, art_f2l(100.0)); - EXPECT_EQ(-1LL, art_f2l(-1.0)); - EXPECT_EQ(-10LL, art_f2l(-10.0)); - EXPECT_EQ(-100LL, art_f2l(-100.0)); -} - -TEST_F(RuntimeSupportTest, DoubleToInt) { - EXPECT_EQ(std::numeric_limits::max(), art_d2i(4.3e9)); - EXPECT_EQ(std::numeric_limits::min(), art_d2i(-4.3e9)); - EXPECT_EQ(0L, art_d2i(0)); - EXPECT_EQ(1L, art_d2i(1.0)); - EXPECT_EQ(10L, art_d2i(10.0)); - EXPECT_EQ(100L, art_d2i(100.0)); - EXPECT_EQ(-1L, art_d2i(-1.0)); - EXPECT_EQ(-10L, art_d2i(-10.0)); - EXPECT_EQ(-100L, art_d2i(-100.0)); -} - -TEST_F(RuntimeSupportTest, FloatToInt) { - EXPECT_EQ(std::numeric_limits::max(), art_f2i(4.3e9)); - EXPECT_EQ(std::numeric_limits::min(), art_f2i(-4.3e9)); - EXPECT_EQ(0L, art_f2i(0)); - EXPECT_EQ(1L, art_f2i(1.0)); - EXPECT_EQ(10L, art_f2i(10.0)); - EXPECT_EQ(100L, art_f2i(100.0)); - EXPECT_EQ(-1L, art_f2i(-1.0)); - EXPECT_EQ(-10L, art_f2i(-10.0)); - EXPECT_EQ(-100L, art_f2i(-100.0)); -} - -} // namespace art diff --git a/runtime/thread.cc b/runtime/thread.cc index d5fdd20400..97a1410892 100644 --- a/runtime/thread.cc +++ 
b/runtime/thread.cc @@ -38,6 +38,7 @@ #include "cutils/atomic-inline.h" #include "debugger.h" #include "dex_file-inl.h" +#include "entrypoints/entrypoint_utils.h" #include "gc_map.h" #include "gc/accounting/card_table-inl.h" #include "gc/heap.h" @@ -54,7 +55,6 @@ #include "object_utils.h" #include "reflection.h" #include "runtime.h" -#include "runtime_support.h" #include "scoped_thread_state_change.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" @@ -86,16 +86,23 @@ static void UnimplementedEntryPoint() { } #endif +void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints); + void Thread::InitFunctionPointers() { #if !defined(__APPLE__) // The Mac GCC is too old to accept this code. // Insert a placeholder so we can easily tell if we call an unimplemented entry point. - uintptr_t* begin = reinterpret_cast(&entrypoints_); - uintptr_t* end = reinterpret_cast(reinterpret_cast(begin) + sizeof(entrypoints_)); + uintptr_t* begin = reinterpret_cast(&quick_entrypoints_); + uintptr_t* end = reinterpret_cast(reinterpret_cast(begin) + sizeof(quick_entrypoints_)); + for (uintptr_t* it = begin; it != end; ++it) { + *it = reinterpret_cast(UnimplementedEntryPoint); + } + begin = reinterpret_cast(&portable_entrypoints_); + end = reinterpret_cast(reinterpret_cast(begin) + sizeof(portable_entrypoints_)); for (uintptr_t* it = begin; it != end; ++it) { *it = reinterpret_cast(UnimplementedEntryPoint); } #endif - InitEntryPoints(&entrypoints_); + InitEntryPoints(&quick_entrypoints_, &portable_entrypoints_); } void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) { @@ -1582,86 +1589,87 @@ struct EntryPointInfo { uint32_t offset; const char* name; }; -#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x } +#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x), #x } +#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x), #x } static const EntryPointInfo gThreadEntryPointInfo[] = { - ENTRY_POINT_INFO(pAllocArrayFromCode), - 
ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck), - ENTRY_POINT_INFO(pAllocObjectFromCode), - ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck), - ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode), - ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck), - ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode), - ENTRY_POINT_INFO(pCanPutArrayElementFromCode), - ENTRY_POINT_INFO(pCheckCastFromCode), - ENTRY_POINT_INFO(pInitializeStaticStorage), - ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode), - ENTRY_POINT_INFO(pInitializeTypeFromCode), - ENTRY_POINT_INFO(pResolveStringFromCode), - ENTRY_POINT_INFO(pSet32Instance), - ENTRY_POINT_INFO(pSet32Static), - ENTRY_POINT_INFO(pSet64Instance), - ENTRY_POINT_INFO(pSet64Static), - ENTRY_POINT_INFO(pSetObjInstance), - ENTRY_POINT_INFO(pSetObjStatic), - ENTRY_POINT_INFO(pGet32Instance), - ENTRY_POINT_INFO(pGet32Static), - ENTRY_POINT_INFO(pGet64Instance), - ENTRY_POINT_INFO(pGet64Static), - ENTRY_POINT_INFO(pGetObjInstance), - ENTRY_POINT_INFO(pGetObjStatic), - ENTRY_POINT_INFO(pHandleFillArrayDataFromCode), - ENTRY_POINT_INFO(pJniMethodStart), - ENTRY_POINT_INFO(pJniMethodStartSynchronized), - ENTRY_POINT_INFO(pJniMethodEnd), - ENTRY_POINT_INFO(pJniMethodEndSynchronized), - ENTRY_POINT_INFO(pJniMethodEndWithReference), - ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized), - ENTRY_POINT_INFO(pLockObjectFromCode), - ENTRY_POINT_INFO(pUnlockObjectFromCode), - ENTRY_POINT_INFO(pCmpgDouble), - ENTRY_POINT_INFO(pCmpgFloat), - ENTRY_POINT_INFO(pCmplDouble), - ENTRY_POINT_INFO(pCmplFloat), - ENTRY_POINT_INFO(pFmod), - ENTRY_POINT_INFO(pSqrt), - ENTRY_POINT_INFO(pL2d), - ENTRY_POINT_INFO(pFmodf), - ENTRY_POINT_INFO(pL2f), - ENTRY_POINT_INFO(pD2iz), - ENTRY_POINT_INFO(pF2iz), - ENTRY_POINT_INFO(pIdivmod), - ENTRY_POINT_INFO(pD2l), - ENTRY_POINT_INFO(pF2l), - ENTRY_POINT_INFO(pLdiv), - ENTRY_POINT_INFO(pLdivmod), - ENTRY_POINT_INFO(pLmul), - ENTRY_POINT_INFO(pShlLong), - ENTRY_POINT_INFO(pShrLong), - 
ENTRY_POINT_INFO(pUshrLong), - ENTRY_POINT_INFO(pInterpreterToInterpreterEntry), - ENTRY_POINT_INFO(pInterpreterToQuickEntry), - ENTRY_POINT_INFO(pIndexOf), - ENTRY_POINT_INFO(pMemcmp16), - ENTRY_POINT_INFO(pStringCompareTo), - ENTRY_POINT_INFO(pMemcpy), - ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode), - ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode), - ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeInterfaceTrampoline), - ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck), - ENTRY_POINT_INFO(pCheckSuspendFromCode), - ENTRY_POINT_INFO(pTestSuspendFromCode), - ENTRY_POINT_INFO(pDeliverException), - ENTRY_POINT_INFO(pThrowArrayBoundsFromCode), - ENTRY_POINT_INFO(pThrowDivZeroFromCode), - ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode), - ENTRY_POINT_INFO(pThrowNullPointerFromCode), - ENTRY_POINT_INFO(pThrowStackOverflowFromCode), + QUICK_ENTRY_POINT_INFO(pAllocArrayFromCode), + QUICK_ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pAllocObjectFromCode), + QUICK_ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode), + QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode), + QUICK_ENTRY_POINT_INFO(pCanPutArrayElementFromCode), + QUICK_ENTRY_POINT_INFO(pCheckCastFromCode), + QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage), + QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode), + QUICK_ENTRY_POINT_INFO(pInitializeTypeFromCode), + QUICK_ENTRY_POINT_INFO(pResolveStringFromCode), + QUICK_ENTRY_POINT_INFO(pSet32Instance), + QUICK_ENTRY_POINT_INFO(pSet32Static), + QUICK_ENTRY_POINT_INFO(pSet64Instance), + QUICK_ENTRY_POINT_INFO(pSet64Static), + 
QUICK_ENTRY_POINT_INFO(pSetObjInstance), + QUICK_ENTRY_POINT_INFO(pSetObjStatic), + QUICK_ENTRY_POINT_INFO(pGet32Instance), + QUICK_ENTRY_POINT_INFO(pGet32Static), + QUICK_ENTRY_POINT_INFO(pGet64Instance), + QUICK_ENTRY_POINT_INFO(pGet64Static), + QUICK_ENTRY_POINT_INFO(pGetObjInstance), + QUICK_ENTRY_POINT_INFO(pGetObjStatic), + QUICK_ENTRY_POINT_INFO(pHandleFillArrayDataFromCode), + QUICK_ENTRY_POINT_INFO(pJniMethodStart), + QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized), + QUICK_ENTRY_POINT_INFO(pJniMethodEnd), + QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized), + QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference), + QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized), + QUICK_ENTRY_POINT_INFO(pLockObjectFromCode), + QUICK_ENTRY_POINT_INFO(pUnlockObjectFromCode), + QUICK_ENTRY_POINT_INFO(pCmpgDouble), + QUICK_ENTRY_POINT_INFO(pCmpgFloat), + QUICK_ENTRY_POINT_INFO(pCmplDouble), + QUICK_ENTRY_POINT_INFO(pCmplFloat), + QUICK_ENTRY_POINT_INFO(pFmod), + QUICK_ENTRY_POINT_INFO(pSqrt), + QUICK_ENTRY_POINT_INFO(pL2d), + QUICK_ENTRY_POINT_INFO(pFmodf), + QUICK_ENTRY_POINT_INFO(pL2f), + QUICK_ENTRY_POINT_INFO(pD2iz), + QUICK_ENTRY_POINT_INFO(pF2iz), + QUICK_ENTRY_POINT_INFO(pIdivmod), + QUICK_ENTRY_POINT_INFO(pD2l), + QUICK_ENTRY_POINT_INFO(pF2l), + QUICK_ENTRY_POINT_INFO(pLdiv), + QUICK_ENTRY_POINT_INFO(pLdivmod), + QUICK_ENTRY_POINT_INFO(pLmul), + QUICK_ENTRY_POINT_INFO(pShlLong), + QUICK_ENTRY_POINT_INFO(pShrLong), + QUICK_ENTRY_POINT_INFO(pUshrLong), + QUICK_ENTRY_POINT_INFO(pInterpreterToInterpreterEntry), + QUICK_ENTRY_POINT_INFO(pInterpreterToQuickEntry), + QUICK_ENTRY_POINT_INFO(pIndexOf), + QUICK_ENTRY_POINT_INFO(pMemcmp16), + QUICK_ENTRY_POINT_INFO(pStringCompareTo), + QUICK_ENTRY_POINT_INFO(pMemcpy), + QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode), + QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline), + 
QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck), + QUICK_ENTRY_POINT_INFO(pCheckSuspendFromCode), + QUICK_ENTRY_POINT_INFO(pTestSuspendFromCode), + QUICK_ENTRY_POINT_INFO(pDeliverException), + QUICK_ENTRY_POINT_INFO(pThrowArrayBoundsFromCode), + QUICK_ENTRY_POINT_INFO(pThrowDivZeroFromCode), + QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode), + QUICK_ENTRY_POINT_INFO(pThrowNullPointerFromCode), + QUICK_ENTRY_POINT_INFO(pThrowStackOverflowFromCode), + PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode), }; -#undef ENTRY_POINT_INFO +#undef QUICK_ENTRY_POINT_INFO void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) { CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets. @@ -1686,8 +1694,9 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_ #undef DO_THREAD_OFFSET size_t entry_point_count = arraysize(gThreadEntryPointInfo); - CHECK_EQ(entry_point_count * size_of_pointers, sizeof(QuickEntryPoints)); - uint32_t expected_offset = OFFSETOF_MEMBER(Thread, entrypoints_); + CHECK_EQ(entry_point_count * size_of_pointers, + sizeof(QuickEntryPoints) + sizeof(PortableEntryPoints)); + uint32_t expected_offset = OFFSETOF_MEMBER(Thread, quick_entrypoints_); for (size_t i = 0; i < entry_point_count; ++i) { CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name; expected_offset += size_of_pointers; diff --git a/runtime/thread.h b/runtime/thread.h index d02ab361a9..ff0fe228c0 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -26,9 +26,10 @@ #include #include "base/macros.h" +#include "entrypoints/portable/portable_entrypoints.h" +#include "entrypoints/quick/quick_entrypoints.h" #include "globals.h" #include "jvalue.h" -#include 
"entrypoints/quick/quick_entrypoints.h" #include "locks.h" #include "offsets.h" #include "root_visitor.h" @@ -773,9 +774,10 @@ class PACKED(4) Thread { Closure* checkpoint_function_; public: - // Runtime support function pointers + // Entrypoint function pointers // TODO: move this near the top, since changing its offset requires all oats to be recompiled! - QuickEntryPoints entrypoints_; + QuickEntryPoints quick_entrypoints_; + PortableEntryPoints portable_entrypoints_; private: // How many times has our pthread key's destructor been called? diff --git a/runtime/thread_arm.cc b/runtime/thread_arm.cc deleted file mode 100644 index 0ef26bff5e..0000000000 --- a/runtime/thread_arm.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "thread.h" - -#include "asm_support.h" -#include "base/macros.h" - -namespace art { - -void Thread::InitCpu() { - CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); - CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); -} - -} // namespace art diff --git a/runtime/thread_mips.cc b/runtime/thread_mips.cc deleted file mode 100644 index 0ef26bff5e..0000000000 --- a/runtime/thread_mips.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "thread.h" - -#include "asm_support.h" -#include "base/macros.h" - -namespace art { - -void Thread::InitCpu() { - CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_)); - CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); -} - -} // namespace art diff --git a/runtime/thread_x86.cc b/runtime/thread_x86.cc deleted file mode 100644 index c398b2877a..0000000000 --- a/runtime/thread_x86.cc +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "thread.h" - -#include -#include - -#include "asm_support.h" -#include "base/macros.h" -#include "thread.h" -#include "thread_list.h" - -#if defined(__APPLE__) -#include -#include -struct descriptor_table_entry_t { - uint16_t limit0; - uint16_t base0; - unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1; - unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8; -} __attribute__((packed)); -#define MODIFY_LDT_CONTENTS_DATA 0 -#else -#include -#endif - -namespace art { - -void Thread::InitCpu() { - static Mutex modify_ldt_lock("modify_ldt lock"); - MutexLock mu(Thread::Current(), modify_ldt_lock); - - const uintptr_t base = reinterpret_cast(this); - const size_t limit = kPageSize; - - const int contents = MODIFY_LDT_CONTENTS_DATA; - const int seg_32bit = 1; - const int read_exec_only = 0; - const int limit_in_pages = 0; - const int seg_not_present = 0; - const int useable = 1; - - int entry_number = -1; - -#if defined(__APPLE__) - descriptor_table_entry_t entry; - memset(&entry, 0, sizeof(entry)); - entry.limit0 = (limit & 0x0ffff); - entry.limit = (limit & 0xf0000) >> 16; - entry.base0 = (base & 0x0000ffff); - entry.base1 = (base & 0x00ff0000) >> 16; - entry.base2 = (base & 0xff000000) >> 24; - entry.type = ((read_exec_only ^ 1) << 1) | (contents << 2); - entry.s = 1; - entry.dpl = 0x3; - entry.p = seg_not_present ^ 1; - entry.avl = useable; - entry.l = 0; - entry.d = seg_32bit; - entry.g = limit_in_pages; - - entry_number = i386_set_ldt(LDT_AUTO_ALLOC, reinterpret_cast(&entry), 1); - if (entry_number == -1) { - PLOG(FATAL) << 
"i386_set_ldt failed"; - } -#else - // Read current LDT entries. - CHECK_EQ((size_t)LDT_ENTRY_SIZE, sizeof(uint64_t)); - std::vector ldt(LDT_ENTRIES); - size_t ldt_size(sizeof(uint64_t) * ldt.size()); - memset(&ldt[0], 0, ldt_size); - // TODO: why doesn't this return LDT_ENTRY_SIZE * LDT_ENTRIES for the main thread? - syscall(__NR_modify_ldt, 0, &ldt[0], ldt_size); - - // Find the first empty slot. - for (entry_number = 0; entry_number < LDT_ENTRIES && ldt[entry_number] != 0; ++entry_number) { - } - if (entry_number >= LDT_ENTRIES) { - LOG(FATAL) << "Failed to find a free LDT slot"; - } - - // Update LDT entry. - user_desc ldt_entry; - memset(&ldt_entry, 0, sizeof(ldt_entry)); - ldt_entry.entry_number = entry_number; - ldt_entry.base_addr = base; - ldt_entry.limit = limit; - ldt_entry.seg_32bit = seg_32bit; - ldt_entry.contents = contents; - ldt_entry.read_exec_only = read_exec_only; - ldt_entry.limit_in_pages = limit_in_pages; - ldt_entry.seg_not_present = seg_not_present; - ldt_entry.useable = useable; - CHECK_EQ(0, syscall(__NR_modify_ldt, 1, &ldt_entry, sizeof(ldt_entry))); - entry_number = ldt_entry.entry_number; -#endif - - // Change %fs to be new LDT entry. - uint16_t table_indicator = 1 << 2; // LDT - uint16_t rpl = 3; // Requested privilege level - uint16_t selector = (entry_number << 3) | table_indicator | rpl; - // TODO: use our assembler to generate code - __asm__ __volatile__("movw %w0, %%fs" - : // output - : "q"(selector) // input - :); // clobber - - // Allow easy indirection back to Thread*. - self_ = this; - - // Sanity check that reads from %fs point to this Thread*. - Thread* self_check; - // TODO: use our assembler to generate code - CHECK_EQ(THREAD_SELF_OFFSET, OFFSETOF_MEMBER(Thread, self_)); - __asm__ __volatile__("movl %%fs:(%1), %0" - : "=r"(self_check) // output - : "r"(THREAD_SELF_OFFSET) // input - :); // clobber - CHECK_EQ(self_check, this); - - // Sanity check other offsets. 
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_)); -} - -} // namespace art -- cgit v1.2.3-59-g8ed1b