x86 JNI compiler and unit tests.
Change-Id: I4c2e10328961a2e8e27c90777fe2a93737b21143
diff --git a/src/assembler.cc b/src/assembler.cc
index 689e0c8..b5a34d9 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -8,6 +8,10 @@
namespace art {
+std::ostream& operator<<(std::ostream& os, const Offset& offs) {
+ return os << offs.Int32Value();
+}
+
static byte* NewContents(size_t capacity) {
byte* result = new byte[capacity];
#if defined(DEBUG)
@@ -30,7 +34,7 @@
gap_ = ComputeGap();
// Make sure that extending the capacity leaves a big enough gap
// for any kind of instruction.
- CHECK(gap_ >= kMinimumGap);
+ CHECK_GE(gap_, kMinimumGap);
// Mark the buffer as having ensured the capacity.
CHECK(!buffer->HasEnsuredCapacity()); // Cannot nest.
buffer->has_ensured_capacity_ = true;
@@ -43,7 +47,7 @@
// Make sure the generated instruction doesn't take up more
// space than the minimum gap.
int delta = gap_ - ComputeGap();
- CHECK(delta <= kMinimumGap);
+ CHECK_LE(delta, kMinimumGap);
}
#endif
diff --git a/src/assembler.h b/src/assembler.h
index 228612e..84462c5 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -13,6 +13,40 @@
class AssemblerBuffer;
class AssemblerFixup;
+// Allow the meaning of offsets to be strongly typed
+class Offset {
+ public:
+ explicit Offset(size_t val) : val_(val) {}
+ int32_t Int32Value() const {
+ return static_cast<int32_t>(val_);
+ }
+ uint32_t Uint32Value() const {
+ return static_cast<uint32_t>(val_);
+ }
+ protected:
+ size_t val_;
+};
+std::ostream& operator<<(std::ostream& os, const Offset& offs);
+
+// Offsets relative to the current frame
+class FrameOffset : public Offset {
+ public:
+ explicit FrameOffset(size_t val) : Offset(val) {}
+ bool operator>(FrameOffset other) const { return val_ > other.val_; }
+ bool operator<(FrameOffset other) const { return val_ < other.val_; }
+};
+
+// Offsets relative to the current running thread
+class ThreadOffset : public Offset {
+ public:
+ explicit ThreadOffset(size_t val) : Offset(val) {}
+};
+
+// Offsets relative to an object
+class MemberOffset : public Offset {
+ public:
+ explicit MemberOffset(size_t val) : Offset(val) {}
+};
class Label {
public:
@@ -160,7 +194,7 @@
// Make sure the generated instruction doesn't take up more
// space than the minimum gap.
int delta = gap_ - ComputeGap();
- CHECK(delta <= kMinimumGap);
+ CHECK_LE(delta, kMinimumGap);
}
private:
diff --git a/src/assembler_arm.cc b/src/assembler_arm.cc
index 3ce5199..91c259e 100644
--- a/src/assembler_arm.cc
+++ b/src/assembler_arm.cc
@@ -2,6 +2,7 @@
#include "src/assembler.h"
#include "src/logging.h"
+#include "src/utils.h"
namespace art {
@@ -69,7 +70,7 @@
if (rhs >= R0 && rhs <= PC) {
os << kRegisterNames[rhs];
} else {
- os << "Register[" << int(rhs) << "]";
+ os << "Register[" << static_cast<int>(rhs) << "]";
}
return os;
}
@@ -77,9 +78,9 @@
std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
if (rhs >= S0 && rhs < kNumberOfSRegisters) {
- os << "s" << int(rhs);
+ os << "s" << static_cast<int>(rhs);
} else {
- os << "SRegister[" << int(rhs) << "]";
+ os << "SRegister[" << static_cast<int>(rhs) << "]";
}
return os;
}
@@ -87,22 +88,23 @@
std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
if (rhs >= D0 && rhs < kNumberOfDRegisters) {
- os << "d" << int(rhs);
+ os << "d" << static_cast<int>(rhs);
} else {
- os << "DRegister[" << int(rhs) << "]";
+ os << "DRegister[" << static_cast<int>(rhs) << "]";
}
return os;
}
static const char* kConditionNames[] = {
- "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT", "LE", "AL",
+ "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
+ "LE", "AL",
};
std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
if (rhs >= EQ && rhs <= AL) {
os << kConditionNames[rhs];
} else {
- os << "Condition[" << int(rhs) << "]";
+ os << "Condition[" << static_cast<int>(rhs) << "]";
}
return os;
}
@@ -1076,6 +1078,7 @@
tst(R0, ShifterOperand(data), MI);
}
+
int32_t Assembler::EncodeBranchOffset(int offset, int32_t inst) {
// The offset is off by 8 due to the way the ARM CPUs read PC.
offset -= 8;
@@ -1094,4 +1097,415 @@
return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
}
+void Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
+ AddConstant(rd, rd, value, cond);
+}
+
+
+void Assembler::AddConstant(Register rd, Register rn, int32_t value,
+ Condition cond) {
+ if (value == 0) {
+ if (rd != rn) {
+ mov(rd, ShifterOperand(rn), cond);
+ }
+ return;
+ }
+ // We prefer to select the shorter code sequence rather than selecting add for
+ // positive values and sub for negatives ones, which would slightly improve
+ // the readability of generated code for some constants.
+ ShifterOperand shifter_op;
+ if (ShifterOperand::CanHold(value, &shifter_op)) {
+ add(rd, rn, shifter_op, cond);
+ } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
+ sub(rd, rn, shifter_op, cond);
+ } else {
+ CHECK(rn != IP);
+ if (ShifterOperand::CanHold(~value, &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ add(rd, rn, ShifterOperand(IP), cond);
+ } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ sub(rd, rn, ShifterOperand(IP), cond);
+ } else {
+ movw(IP, Low16Bits(value), cond);
+ uint16_t value_high = High16Bits(value);
+ if (value_high != 0) {
+ movt(IP, value_high, cond);
+ }
+ add(rd, rn, ShifterOperand(IP), cond);
+ }
+ }
+}
+
+
+void Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
+ Condition cond) {
+ ShifterOperand shifter_op;
+ if (ShifterOperand::CanHold(value, &shifter_op)) {
+ adds(rd, rn, shifter_op, cond);
+ } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
+ subs(rd, rn, shifter_op, cond);
+ } else {
+ CHECK(rn != IP);
+ if (ShifterOperand::CanHold(~value, &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ adds(rd, rn, ShifterOperand(IP), cond);
+ } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
+ mvn(IP, shifter_op, cond);
+ subs(rd, rn, ShifterOperand(IP), cond);
+ } else {
+ movw(IP, Low16Bits(value), cond);
+ uint16_t value_high = High16Bits(value);
+ if (value_high != 0) {
+ movt(IP, value_high, cond);
+ }
+ adds(rd, rn, ShifterOperand(IP), cond);
+ }
+ }
+}
+
+
+void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
+ ShifterOperand shifter_op;
+ if (ShifterOperand::CanHold(value, &shifter_op)) {
+ mov(rd, shifter_op, cond);
+ } else if (ShifterOperand::CanHold(~value, &shifter_op)) {
+ mvn(rd, shifter_op, cond);
+ } else {
+ movw(rd, Low16Bits(value), cond);
+ uint16_t value_high = High16Bits(value);
+ if (value_high != 0) {
+ movt(rd, value_high, cond);
+ }
+ }
+}
+
+
+bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) {
+ switch (type) {
+ case kLoadSignedByte:
+ case kLoadSignedHalfword:
+ case kLoadUnsignedHalfword:
+ case kLoadWordPair:
+ return IsAbsoluteUint(8, offset); // Addressing mode 3.
+ case kLoadUnsignedByte:
+ case kLoadWord:
+ return IsAbsoluteUint(12, offset); // Addressing mode 2.
+ case kLoadSWord:
+ case kLoadDWord:
+ return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ return false;
+ }
+}
+
+
+bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) {
+ switch (type) {
+ case kStoreHalfword:
+ case kStoreWordPair:
+ return IsAbsoluteUint(8, offset); // Addressing mode 3.
+ case kStoreByte:
+ case kStoreWord:
+ return IsAbsoluteUint(12, offset); // Addressing mode 2.
+ case kStoreSWord:
+ case kStoreDWord:
+ return IsAbsoluteUint(10, offset); // VFP addressing mode.
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ return false;
+ }
+}
+
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset.
+void Assembler::LoadFromOffset(LoadOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldLoadOffset(type, offset)) {
+ CHECK(base != IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldLoadOffset(type, offset));
+ switch (type) {
+ case kLoadSignedByte:
+ ldrsb(reg, Address(base, offset), cond);
+ break;
+ case kLoadUnsignedByte:
+ ldrb(reg, Address(base, offset), cond);
+ break;
+ case kLoadSignedHalfword:
+ ldrsh(reg, Address(base, offset), cond);
+ break;
+ case kLoadUnsignedHalfword:
+ ldrh(reg, Address(base, offset), cond);
+ break;
+ case kLoadWord:
+ ldr(reg, Address(base, offset), cond);
+ break;
+ case kLoadWordPair:
+ ldrd(reg, Address(base, offset), cond);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset.
+void Assembler::StoreToOffset(StoreOperandType type,
+ Register reg,
+ Register base,
+ int32_t offset,
+ Condition cond) {
+ if (!Address::CanHoldStoreOffset(type, offset)) {
+ CHECK(reg != IP);
+ CHECK(base != IP);
+ LoadImmediate(IP, offset, cond);
+ add(IP, IP, ShifterOperand(base), cond);
+ base = IP;
+ offset = 0;
+ }
+ CHECK(Address::CanHoldStoreOffset(type, offset));
+ switch (type) {
+ case kStoreByte:
+ strb(reg, Address(base, offset), cond);
+ break;
+ case kStoreHalfword:
+ strh(reg, Address(base, offset), cond);
+ break;
+ case kStoreWord:
+ str(reg, Address(base, offset), cond);
+ break;
+ case kStoreWordPair:
+ strd(reg, Address(base, offset), cond);
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+}
+
+// Emit code that will create an activation on the stack
+void Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg) {
+ CHECK(IsAligned(frame_size, 16));
+ // TODO: use stm/ldm
+ StoreToOffset(kStoreWord, LR, SP, 0);
+ StoreToOffset(kStoreWord, method_reg.AsCoreRegister(), SP, -4);
+ AddConstant(SP, -frame_size);
+}
+
+// Emit code that will remove an activation from the stack
+void Assembler::RemoveFrame(size_t frame_size) {
+ CHECK(IsAligned(frame_size, 16));
+ LoadFromOffset(kLoadWord, LR, SP, 0);
+ AddConstant(SP, frame_size);
+ mov(PC, ShifterOperand(LR));
+}
+
+void Assembler::IncreaseFrameSize(size_t adjust) {
+ CHECK(IsAligned(adjust, 16));
+ AddConstant(SP, -adjust);
+}
+
+void Assembler::DecreaseFrameSize(size_t adjust) {
+ CHECK(IsAligned(adjust, 16));
+ AddConstant(SP, adjust);
+}
+
+// Store bytes from the given register onto the stack
+void Assembler::Store(FrameOffset dest, ManagedRegister src, size_t size) {
+ if (src.IsCoreRegister()) {
+ CHECK_EQ(4u, size);
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+ } else {
+ // VFP
+ LOG(FATAL) << "TODO";
+ }
+}
+
+void Assembler::StoreRef(FrameOffset dest, ManagedRegister src) {
+ CHECK(src.IsCoreRegister());
+ StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void Assembler::CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister scratch) {
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void Assembler::LoadRef(ManagedRegister dest, ManagedRegister base,
+ MemberOffset offs) {
+ CHECK(dest.IsCoreRegister() && base.IsCoreRegister());
+ LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
+ base.AsCoreRegister(), offs.Int32Value());
+}
+
+void Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister scratch) {
+ CHECK(scratch.IsCoreRegister());
+ LoadImmediate(scratch.AsCoreRegister(), imm);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister scratch) {
+ CHECK(scratch.IsCoreRegister());
+ LoadImmediate(scratch.AsCoreRegister(), imm);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
+}
+
+void Assembler::Load(ManagedRegister dest, FrameOffset src, size_t size) {
+ if (dest.IsCoreRegister()) {
+ CHECK_EQ(4u, size);
+ LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value());
+ } else {
+ // TODO: VFP
+ LOG(FATAL) << "Unimplemented";
+ }
+}
+
+void Assembler::LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset offs) {
+ CHECK(dest.IsCoreRegister());
+ LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
+ TR, offs.Int32Value());
+}
+
+void Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+ ManagedRegister scratch) {
+ CHECK(scratch.IsCoreRegister());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ TR, thr_offs.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ SP, fr_offs.Int32Value());
+}
+
+void Assembler::CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch) {
+ CHECK(scratch.IsCoreRegister());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ SP, fr_offs.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ TR, thr_offs.Int32Value());
+}
+
+void Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) {
+ CHECK(scratch.IsCoreRegister());
+ AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ TR, thr_offs.Int32Value());
+}
+
+void Assembler::Move(ManagedRegister dest, ManagedRegister src) {
+ if (dest.IsCoreRegister()) {
+ CHECK(src.IsCoreRegister());
+ mov(dest.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
+ } else {
+ // TODO: VFP
+ LOG(FATAL) << "Unimplemented";
+ }
+}
+
+void Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch,
+ size_t size) {
+ CHECK(scratch.IsCoreRegister());
+ if (size == 4) {
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ SP, src.Int32Value());
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+ SP, dest.Int32Value());
+ } else {
+ // TODO: size != 4
+ LOG(FATAL) << "Unimplemented";
+ }
+}
+
+void Assembler::CreateStackHandle(ManagedRegister out_reg,
+ FrameOffset handle_offset,
+ ManagedRegister in_reg, bool null_allowed) {
+ CHECK(in_reg.IsCoreRegister());
+ CHECK(out_reg.IsCoreRegister());
+ if (null_allowed) {
+ // Null values get a handle value of 0. Otherwise, the handle value is
+ // the address in the stack handle block holding the reference.
+ // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
+ cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+ }
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_offset.Int32Value(), NE);
+ } else {
+ AddConstant(out_reg.AsCoreRegister(), SP, handle_offset.Int32Value(), AL);
+ }
+}
+
+void Assembler::CreateStackHandle(FrameOffset out_off,
+ FrameOffset handle_offset,
+ ManagedRegister scratch, bool null_allowed) {
+ CHECK(scratch.IsCoreRegister());
+ if (null_allowed) {
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
+ handle_offset.Int32Value());
+ // Null values get a handle value of 0. Otherwise, the handle value is
+ // the address in the stack handle block holding the reference.
+ // e.g. scratch = (handle == 0) ? 0 : (SP+handle_offset)
+ cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+ AddConstant(scratch.AsCoreRegister(), SP, handle_offset.Int32Value(), NE);
+ } else {
+ AddConstant(scratch.AsCoreRegister(), SP, handle_offset.Int32Value(), AL);
+ }
+ StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
+}
+
+void Assembler::LoadReferenceFromStackHandle(ManagedRegister out_reg,
+ ManagedRegister in_reg,
+ FrameOffset shb_offset) {
+ CHECK(out_reg.IsCoreRegister());
+ CHECK(in_reg.IsCoreRegister());
+ Label null_arg;
+ cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+ if (!out_reg.Equals(in_reg)) {
+ LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+ }
+ LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(), in_reg.AsCoreRegister(),
+ shb_offset.Int32Value(), NE);
+}
+
+void Assembler::ValidateRef(ManagedRegister src, bool could_be_null) {
+ // TODO: not validating references
+}
+
+void Assembler::ValidateRef(FrameOffset src, bool could_be_null) {
+ // TODO: not validating references
+}
+
+void Assembler::Call(ManagedRegister base, MemberOffset offset,
+ ManagedRegister scratch) {
+ CHECK(base.IsCoreRegister());
+ CHECK(scratch.IsCoreRegister());
+ LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+ base.AsCoreRegister(), offset.Int32Value());
+ blx(scratch.AsCoreRegister());
+ // TODO: place reference map on call
+}
+
+// Emit code that will lock the reference in the given register
+void Assembler::LockReferenceOnStack(FrameOffset fr_offs) {
+ LOG(FATAL) << "TODO";
+}
+// Emit code that will unlock the reference in the given register
+void Assembler::UnLockReferenceOnStack(FrameOffset fr_offs) {
+ LOG(FATAL) << "TODO";
+}
+
} // namespace art
diff --git a/src/assembler_arm.h b/src/assembler_arm.h
index b625249..a20a40b 100644
--- a/src/assembler_arm.h
+++ b/src/assembler_arm.h
@@ -3,7 +3,8 @@
#ifndef ART_SRC_ASSEMBLER_ARM_H_
#define ART_SRC_ASSEMBLER_ARM_H_
-#include "src/constants_arm.h"
+#include "src/constants.h"
+#include "src/managed_register.h"
#include "src/logging.h"
#include "src/utils.h"
@@ -165,7 +166,7 @@
uint32_t encoding3() const {
const uint32_t offset_mask = (1 << 12) - 1;
uint32_t offset = encoding_ & offset_mask;
- CHECK(offset < 256);
+ CHECK_LT(offset, 256u);
return (encoding_ & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
}
@@ -412,12 +413,74 @@
void Untested(const char* message);
void Unreachable(const char* message);
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size, ManagedRegister method_reg);
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size);
+
+ void IncreaseFrameSize(size_t adjust);
+ void DecreaseFrameSize(size_t adjust);
+
+ // Store bytes from the given register onto the stack
+ void Store(FrameOffset dest, ManagedRegister src, size_t size);
+
+ void StoreRef(FrameOffset dest, ManagedRegister src);
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch);
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs);
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+ void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ void Load(ManagedRegister dest, FrameOffset src, size_t size);
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset offs);
+ void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+ ManagedRegister scratch);
+ void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ void StoreStackOffsetToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ void Move(ManagedRegister dest, ManagedRegister src);
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch,
+ size_t size);
+ void CreateStackHandle(ManagedRegister out_reg, FrameOffset handle_offset,
+ ManagedRegister in_reg, bool null_allowed);
+
+ void CreateStackHandle(FrameOffset out_off, FrameOffset handle_offset,
+ ManagedRegister scratch, bool null_allowed);
+
+ void LoadReferenceFromStackHandle(ManagedRegister dst, ManagedRegister src,
+ FrameOffset shb_offset);
+
+ void ValidateRef(ManagedRegister src, bool could_be_null);
+
+ void ValidateRef(FrameOffset src, bool could_be_null);
+
+ void Call(ManagedRegister base, MemberOffset offset, ManagedRegister scratch);
+
+ // Emit code that will lock the reference in the given frame location
+ void LockReferenceOnStack(FrameOffset fr_offs);
+
+ // Emit code that will unlock the reference in the given frame location
+ void UnLockReferenceOnStack(FrameOffset fr_offs);
+
// Emit data (e.g. encoded instruction or immediate) to the
// instruction stream.
void Emit(int32_t value);
void Bind(Label* label);
+ size_t CodeSize() const { return buffer_.Size(); }
+
+ void FinalizeInstructions(const MemoryRegion& region) {
+ buffer_.FinalizeInstructions(region);
+ }
+
private:
AssemblerBuffer buffer_;
@@ -501,6 +564,6 @@
}
};
-} // namespace art
+} // namespace art
#endif // ART_SRC_ASSEMBLER_ARM_H_
diff --git a/src/assembler_x86.cc b/src/assembler_x86.cc
index 0c44c08..6f833ad 100644
--- a/src/assembler_x86.cc
+++ b/src/assembler_x86.cc
@@ -4,7 +4,6 @@
#include "src/assembler.h"
#include "src/casts.h"
#include "src/globals.h"
-#include "src/assembler.h"
#include "src/memory_region.h"
namespace art {
@@ -27,11 +26,18 @@
if (rhs >= EAX && rhs <= EDI) {
os << kRegisterNames[rhs];
} else {
- os << "Register[" << int(rhs) << "]";
+ os << "Register[" << static_cast<int>(rhs) << "]";
}
return os;
}
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg) {
+ return os << "XMM" << static_cast<int>(reg);
+}
+
+std::ostream& operator<<(std::ostream& os, const X87Register& reg) {
+ return os << "ST" << static_cast<int>(reg);
+}
void Assembler::InitializeMemoryWithBreakpoints(byte* data, size_t length) {
memset(reinterpret_cast<void*>(data), Instr::kBreakPointInstruction, length);
@@ -234,19 +240,19 @@
}
-void Assembler::cmovs(Register dst, Register src) {
+void Assembler::cmovl(Condition condition, Register dst, Register src) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
- EmitUint8(0x48);
+ EmitUint8(0x40 + condition);
EmitRegisterOperand(dst, src);
}
-void Assembler::cmovns(Register dst, Register src) {
+void Assembler::setb(Condition condition, Register dst) {
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x0F);
- EmitUint8(0x49);
- EmitRegisterOperand(dst, src);
+ EmitUint8(0x90 + condition);
+ EmitOperand(0, Operand(dst));
}
@@ -1179,6 +1185,11 @@
EmitOperand(reg, address);
}
+void Assembler::fs() {
+ // TODO: fs is a prefix and not an instruction
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x64);
+}
void Assembler::AddImmediate(Register reg, const Immediate& imm) {
int value = imm.value();
@@ -1358,4 +1369,220 @@
EmitOperand(rm, Operand(operand));
}
-} // namespace art
+// Emit code that will create an activation on the stack
+void Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg) {
+ CHECK(IsAligned(frame_size, 16));
+ // return address then method on stack
+ addl(ESP, Immediate(-frame_size + 4 /*method*/ + 4 /*return address*/));
+ pushl(method_reg.AsCpuRegister());
+}
+
+// Emit code that will remove an activation from the stack
+void Assembler::RemoveFrame(size_t frame_size) {
+ CHECK(IsAligned(frame_size, 16));
+ addl(ESP, Immediate(frame_size - 4));
+ ret();
+}
+
+void Assembler::IncreaseFrameSize(size_t adjust) {
+ CHECK(IsAligned(adjust, 16));
+ addl(ESP, Immediate(-adjust));
+}
+
+void Assembler::DecreaseFrameSize(size_t adjust) {
+ CHECK(IsAligned(adjust, 16));
+ addl(ESP, Immediate(adjust));
+}
+
+// Store bytes from the given register onto the stack
+void Assembler::Store(FrameOffset offs, ManagedRegister src, size_t size) {
+ if (src.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ movl(Address(ESP, offs), src.AsCpuRegister());
+ } else if (src.IsXmmRegister()) {
+ if (size == 4) {
+ movss(Address(ESP, offs), src.AsXmmRegister());
+ } else {
+ movsd(Address(ESP, offs), src.AsXmmRegister());
+ }
+ }
+}
+
+void Assembler::StoreRef(FrameOffset dest, ManagedRegister src) {
+ CHECK(src.IsCpuRegister());
+ movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void Assembler::CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister scratch) {
+ CHECK(scratch.IsCpuRegister());
+ movl(scratch.AsCpuRegister(), Address(ESP, src));
+ movl(Address(ESP, dest), scratch.AsCpuRegister());
+}
+
+void Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister) {
+ movl(Address(ESP, dest), Immediate(imm));
+}
+
+void Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister) {
+ fs();
+ movl(Address::Absolute(dest), Immediate(imm));
+}
+
+void Assembler::Load(ManagedRegister dest, FrameOffset src, size_t size) {
+ if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ movl(dest.AsCpuRegister(), Address(ESP, src));
+ } else {
+ // TODO: x87, SSE
+ LOG(FATAL) << "Unimplemented";
+ }
+}
+
+void Assembler::LoadRef(ManagedRegister dest, FrameOffset src) {
+ CHECK(dest.IsCpuRegister());
+ movl(dest.AsCpuRegister(), Address(ESP, src));
+}
+
+void Assembler::LoadRef(ManagedRegister dest, ManagedRegister base,
+ MemberOffset offs) {
+ CHECK(dest.IsCpuRegister() && base.IsCpuRegister());
+ movl(dest.AsCpuRegister(), Address(base.AsCpuRegister(), offs));
+}
+
+void Assembler::LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset offs) {
+ CHECK(dest.IsCpuRegister());
+ fs();
+ movl(dest.AsCpuRegister(), Address::Absolute(offs));
+}
+
+void Assembler::CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+ ManagedRegister scratch) {
+ CHECK(scratch.IsCpuRegister());
+ fs();
+ movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
+ Store(fr_offs, scratch, 4);
+}
+
+void Assembler::CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch) {
+ CHECK(scratch.IsCpuRegister());
+ Load(scratch, fr_offs, 4);
+ fs();
+ movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) {
+ CHECK(scratch.IsCpuRegister());
+ leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
+ fs();
+ movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void Assembler::Move(ManagedRegister dest, ManagedRegister src) {
+ if (!dest.Equals(src)) {
+ if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+ movl(dest.AsCpuRegister(), src.AsCpuRegister());
+ } else {
+ // TODO: x87, SSE
+ LOG(FATAL) << "Unimplemented";
+ }
+ }
+}
+
+void Assembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch,
+ size_t size) {
+ if (scratch.IsCpuRegister() && size == 8) {
+ Load(scratch, src, 4);
+ Store(dest, scratch, 4);
+ Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+ Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+ } else {
+ Load(scratch, src, size);
+ Store(dest, scratch, size);
+ }
+}
+
+void Assembler::CreateStackHandle(ManagedRegister out_reg,
+ FrameOffset handle_offset,
+ ManagedRegister in_reg, bool null_allowed) {
+ CHECK(in_reg.IsCpuRegister());
+ CHECK(out_reg.IsCpuRegister());
+ ValidateRef(in_reg, null_allowed);
+ if (null_allowed) {
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ j(ZERO, &null_arg);
+ leal(out_reg.AsCpuRegister(), Address(ESP, handle_offset));
+ Bind(&null_arg);
+ } else {
+ leal(out_reg.AsCpuRegister(), Address(ESP, handle_offset));
+ }
+}
+
+void Assembler::CreateStackHandle(FrameOffset out_off,
+ FrameOffset handle_offset,
+ ManagedRegister scratch, bool null_allowed) {
+ CHECK(scratch.IsCpuRegister());
+ if (null_allowed) {
+ Label null_arg;
+ movl(scratch.AsCpuRegister(), Address(ESP, handle_offset));
+ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+ j(ZERO, &null_arg);
+ leal(scratch.AsCpuRegister(), Address(ESP, handle_offset));
+ Bind(&null_arg);
+ } else {
+ leal(scratch.AsCpuRegister(), Address(ESP, handle_offset));
+ }
+ Store(out_off, scratch, 4);
+}
+
+// Given a stack handle, load the associated reference.
+void Assembler::LoadReferenceFromStackHandle(ManagedRegister out_reg,
+ ManagedRegister in_reg,
+ FrameOffset shb_offset) {
+ CHECK(out_reg.IsCpuRegister());
+ CHECK(in_reg.IsCpuRegister());
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ j(ZERO, &null_arg);
+ movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+ Bind(&null_arg);
+}
+
+void Assembler::ValidateRef(ManagedRegister src, bool could_be_null) {
+ // TODO: not validating references
+}
+
+void Assembler::ValidateRef(FrameOffset src, bool could_be_null) {
+ // TODO: not validating references
+}
+
+void Assembler::Call(ManagedRegister base, MemberOffset offset,
+ ManagedRegister) {
+ CHECK(base.IsCpuRegister());
+ call(Address(base.AsCpuRegister(), offset));
+ // TODO: place reference map on call
+}
+
+// Emit code that will lock the reference in the given register
+void Assembler::LockReferenceOnStack(FrameOffset fr_offs) {
+ LOG(FATAL) << "TODO";
+}
+// Emit code that will unlock the reference in the given register
+void Assembler::UnLockReferenceOnStack(FrameOffset fr_offs) {
+ LOG(FATAL) << "TODO";
+}
+
+
+} // namespace art
diff --git a/src/assembler_x86.h b/src/assembler_x86.h
index e2445c5..981141a 100644
--- a/src/assembler_x86.h
+++ b/src/assembler_x86.h
@@ -6,7 +6,8 @@
#include <stdint.h>
#include <string.h>
#include "src/assembler.h"
-#include "src/constants_x86.h"
+#include "src/constants.h"
+#include "src/managed_register.h"
#include "src/macros.h"
#include "src/utils.h"
@@ -120,6 +121,19 @@
class Address : public Operand {
public:
Address(Register base, int32_t disp) {
+ Init(base, disp);
+ }
+
+ Address(Register base, FrameOffset disp) {
+ CHECK_EQ(base, ESP);
+ Init(ESP, disp.Int32Value());
+ }
+
+ Address(Register base, MemberOffset disp) {
+ Init(base, disp.Int32Value());
+ }
+
+ void Init(Register base, int32_t disp) {
if (disp == 0 && base != EBP) {
SetModRM(0, base);
if (base == ESP) SetSIB(TIMES_1, ESP, base);
@@ -134,6 +148,7 @@
}
}
+
Address(Register index, ScaleFactor scale, int32_t disp) {
CHECK_NE(index, ESP); // Illegal addressing mode.
SetModRM(0, ESP);
@@ -164,6 +179,10 @@
return result;
}
+ static Address Absolute(ThreadOffset addr) {
+ return Absolute(addr.Int32Value());
+ }
+
private:
Address() {}
@@ -214,8 +233,9 @@
void leal(Register dst, const Address& src);
- void cmovs(Register dst, Register src);
- void cmovns(Register dst, Register src);
+ void cmovl(Condition condition, Register dst, Register src);
+
+ void setb(Condition condition, Register dst);
void movss(XmmRegister dst, const Address& src);
void movss(const Address& dst, XmmRegister src);
@@ -381,9 +401,74 @@
void lock();
void cmpxchgl(const Address& address, Register reg);
- /*
- * Macros for High-level operations.
- */
+ void fs();
+
+ //
+ // Macros for High-level operations.
+ //
+
+ // Emit code that will create an activation on the stack
+ void BuildFrame(size_t frame_size, ManagedRegister method_reg);
+
+ // Emit code that will remove an activation from the stack
+ void RemoveFrame(size_t frame_size);
+
+ void IncreaseFrameSize(size_t adjust);
+ void DecreaseFrameSize(size_t adjust);
+
+ // Store bytes from the given register onto the stack
+ void Store(FrameOffset offs, ManagedRegister src, size_t size);
+ void StoreRef(FrameOffset dest, ManagedRegister src);
+
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch);
+
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ void Load(ManagedRegister dest, FrameOffset src, size_t size);
+
+ void LoadRef(ManagedRegister dest, FrameOffset src);
+
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs);
+
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset offs);
+
+ void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+ ManagedRegister scratch);
+
+ void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ void StoreStackOffsetToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch);
+ void Move(ManagedRegister dest, ManagedRegister src);
+
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch,
+ size_t size);
+
+ void CreateStackHandle(ManagedRegister out_reg, FrameOffset handle_offset,
+ ManagedRegister in_reg, bool null_allowed);
+
+ void CreateStackHandle(FrameOffset out_off, FrameOffset handle_offset,
+ ManagedRegister scratch, bool null_allowed);
+
+ void LoadReferenceFromStackHandle(ManagedRegister dst, ManagedRegister src,
+ FrameOffset shb_offset);
+
+ void ValidateRef(ManagedRegister src, bool could_be_null);
+ void ValidateRef(FrameOffset src, bool could_be_null);
+
+ void Call(ManagedRegister base, MemberOffset offset, ManagedRegister scratch);
+
+ // Emit code that will lock the reference in the given frame location
+ void LockReferenceOnStack(FrameOffset fr_offs);
+
+ // Emit code that will unlock the reference in the given frame location
+ void UnLockReferenceOnStack(FrameOffset fr_offs);
+
void AddImmediate(Register reg, const Immediate& imm);
void LoadDoubleConstant(XmmRegister dst, double value);
@@ -398,14 +483,14 @@
cmpxchgl(address, reg);
}
- /*
- * Misc. functionality
- */
+ //
+ // Misc. functionality
+ //
int PreferredLoopAlignment() { return 16; }
void Align(int alignment, int offset);
void Bind(Label* label);
- int CodeSize() const { return buffer_.Size(); }
+ size_t CodeSize() const { return buffer_.Size(); }
void FinalizeInstructions(const MemoryRegion& region) {
buffer_.FinalizeInstructions(region);
diff --git a/src/assembler_x86_test.cc b/src/assembler_x86_test.cc
index e49e91f..c0d1495 100644
--- a/src/assembler_x86_test.cc
+++ b/src/assembler_x86_test.cc
@@ -1,10 +1,19 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: cshapiro@google.com (Carl Shapiro)
+
#include "src/assembler_x86.h"
-#include <stdio.h>
#include "gtest/gtest.h"
-TEST(AssemblerX86Test, Init) {
- ASSERT_TRUE(true);
+namespace art {
+
+TEST(AssemblerX86, CreateBuffer) {
+ AssemblerBuffer buffer;
+ AssemblerBuffer::EnsureCapacity ensured(&buffer);
+ buffer.Emit<uint8_t>(0x42);
+ ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
+ buffer.Emit<int32_t>(42);
+ ASSERT_EQ(static_cast<size_t>(5), buffer.Size());
}
+
+} // namespace art
diff --git a/src/calling_convention.cc b/src/calling_convention.cc
new file mode 100644
index 0000000..4760a8c
--- /dev/null
+++ b/src/calling_convention.cc
@@ -0,0 +1,120 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: irogers@google.com (Ian Rogers)
+
+#include "src/calling_convention.h"
+#include "src/logging.h"
+#include "src/utils.h"
+
+namespace art {
+
+// Managed-runtime frame size computation is not yet implemented; reaching
+// here aborts the process.
+size_t ManagedRuntimeCallingConvention::FrameSize() {
+ LOG(FATAL) << "Unimplemented";
+ return 0;
+}
+
+// True while the iterator has not yet passed the method's last argument.
+bool ManagedRuntimeCallingConvention::HasNext() {
+ return itr_position_ < GetMethod()->NumArgs();
+}
+
+// Advance the iterator by one argument, counting long/double arguments seen
+// so far (they occupy an extra slot when computing stack offsets).
+void ManagedRuntimeCallingConvention::Next() {
+ CHECK(HasNext());
+ // Argument 0 of a non-static method is the 'this' reference, which can
+ // never be a long or double; only query the type for other positions.
+ if (((itr_position_ != 0) || GetMethod()->IsStatic()) &&
+ GetMethod()->IsParamALongOrDouble(itr_position_)) {
+ itr_longs_and_doubles_++;
+ }
+ itr_position_++;
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamPossiblyNull() {
+ // For a virtual method the 'this' argument (position 0) is never NULL;
+ // any other reference argument may be.
+ return GetMethod()->IsStatic() || (itr_position_ != 0);
+}
+
+// Size in bytes of the argument at the current iterator position.
+size_t ManagedRuntimeCallingConvention::CurrentParamSizeInBytes() {
+ return GetMethod()->ParamSizeInBytes(itr_position_);
+}
+
+// Whether the argument at the current iterator position is a reference.
+bool ManagedRuntimeCallingConvention::IsCurrentParamAReference() {
+ return GetMethod()->IsParamAReference(itr_position_);
+}
+
+
+// Frame size for the JNI bridge: fixed data (return address, Method*) plus
+// the stack handle block, rounded up to 16-byte alignment.
+size_t JniCallingConvention::FrameSize() {
+ // Return address and Method*
+ size_t frame_data_size = 2 * kPointerSize;
+ // Handles plus 2 words for SHB header
+ size_t handle_area_size = (HandleCount() + 2) * kPointerSize;
+ return RoundUp(frame_data_size + handle_area_size, 16);
+}
+
+// Space for outgoing stack arguments of the native call, 16-byte aligned.
+size_t JniCallingConvention::OutArgSize() {
+ return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, 16);
+}
+
+// One handle per reference argument, plus one for the jclass of a static
+// method (non-static methods reuse the 'this' reference argument).
+size_t JniCallingConvention::HandleCount() {
+ const Method* method = GetMethod();
+ return method->NumReferenceArgs() + (method->IsStatic() ? 1 : 0);
+}
+
+// Positions 0 (JNIEnv*) and 1 (jobject/jclass) always exist; beyond that,
+// map the iterator position back to a managed argument index.
+bool JniCallingConvention::HasNext() {
+ if (itr_position_ <= kObjectOrClass) {
+ return true;
+ } else {
+ // Skip JNIEnv* (and the synthesized jclass for static methods); for
+ // non-static methods position 1 is 'this', i.e. managed argument 0.
+ unsigned int arg_pos = itr_position_ - (GetMethod()->IsStatic() ? 2 : 1);
+ return arg_pos < GetMethod()->NumArgs();
+ }
+}
+
+// Advance the iterator, counting long/double arguments seen so far.
+void JniCallingConvention::Next() {
+ CHECK(HasNext());
+ if (itr_position_ > kObjectOrClass) {
+ int arg_pos = itr_position_ - (GetMethod()->IsStatic() ? 2 : 1);
+ if (GetMethod()->IsParamALongOrDouble(arg_pos)) {
+ itr_longs_and_doubles_++;
+ }
+ }
+ itr_position_++;
+}
+
+bool JniCallingConvention::IsCurrentParamAReference() {
+ switch (itr_position_) {
+ case kJniEnv:
+ return false; // JNIEnv*
+ case kObjectOrClass:
+ return true; // jobject or jclass
+ default: {
+ int arg_pos = itr_position_ - (GetMethod()->IsStatic() ? 2 : 1);
+ return GetMethod()->IsParamAReference(arg_pos);
+ }
+ }
+}
+
+// Return position of handle holding reference at the current iterator position
+FrameOffset JniCallingConvention::CurrentParamHandleOffset() {
+ CHECK(IsCurrentParamAReference());
+ // Sanity: the link field lies above the reference count in the SHB header,
+ // so the first handle sits one word past the link.
+ CHECK_GT(ShbLinkOffset(), ShbNumRefsOffset());
+ // Address of 1st handle
+ int result = ShbLinkOffset().Int32Value() + kPointerSize;
+ if (itr_position_ != kObjectOrClass) {
+ bool is_static = GetMethod()->IsStatic();
+ int arg_pos = itr_position_ - (is_static ? 2 : 1);
+ int previous_refs = GetMethod()->NumReferenceArgsBefore(arg_pos);
+ if (is_static) {
+ previous_refs++; // account for jclass
+ }
+ result += previous_refs * kPointerSize;
+ }
+ CHECK_GT(result, ShbLinkOffset().Int32Value());
+ return FrameOffset(result);
+}
+
+// Size in bytes of the native-call argument at the current position.
+unsigned int JniCallingConvention::CurrentParamSizeInBytes() {
+ if (itr_position_ <= kObjectOrClass) {
+ return kPointerSize; // JNIEnv or jobject/jclass
+ } else {
+ int arg_pos = itr_position_ - (GetMethod()->IsStatic() ? 2 : 1);
+ return GetMethod()->ParamSizeInBytes(arg_pos);
+ }
+}
+
+} // namespace art
diff --git a/src/calling_convention.h b/src/calling_convention.h
new file mode 100644
index 0000000..cf12692
--- /dev/null
+++ b/src/calling_convention.h
@@ -0,0 +1,145 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: irogers@google.com (Ian Rogers)
+
+#ifndef ART_SRC_CALLING_CONVENTION_H_
+#define ART_SRC_CALLING_CONVENTION_H_
+
+#include "src/managed_register.h"
+#include "src/object.h"
+#include "src/thread.h"
+
+namespace art {
+
+// Top-level abstraction for different calling conventions
+class CallingConvention {
+ public:
+ CallingConvention* GetCallingConvention(Method* method);
+
+ // Whether the method being bridged returns a reference type
+ bool IsReturnAReference() const { return method_->IsReturnAReference(); }
+
+ // Register that holds the incoming method argument
+ ManagedRegister MethodRegister();
+ // Register that holds result of this method
+ ManagedRegister ReturnRegister();
+ // Register reserved for scratch usage during procedure calls
+ ManagedRegister InterproceduralScratchRegister();
+
+ // Iterator interface
+
+ // Place iterator at start of arguments. The displacement is applied to
+ // frame offset methods to account for frames which may be on the stack
+ // below the one being iterated over.
+ void ResetIterator(FrameOffset displacement) {
+ displacement_ = displacement;
+ itr_position_ = 0;
+ itr_longs_and_doubles_ = 0;
+ }
+
+ protected:
+ // Initialize the iterator state here as well so the object is well defined
+ // even if queried before ResetIterator is called; initializers are listed
+ // in member declaration order so they run in the order written.
+ explicit CallingConvention(Method* method)
+ : itr_position_(0), itr_longs_and_doubles_(0), displacement_(0),
+ method_(method) {}
+ const Method* GetMethod() const { return method_; }
+
+ // position along argument list
+ unsigned int itr_position_;
+ // number of longs and doubles seen along argument list
+ unsigned int itr_longs_and_doubles_;
+ // Space for frames below this on the stack
+ FrameOffset displacement_;
+
+ private:
+ const Method* method_;
+};
+
+// Abstraction for managed code's calling conventions
+class ManagedRuntimeCallingConvention : public CallingConvention {
+ public:
+ explicit ManagedRuntimeCallingConvention(Method* method) :
+ CallingConvention(method) {}
+
+ // Size of the managed frame (currently unimplemented in the .cc)
+ size_t FrameSize();
+
+ // Iterator interface
+ bool HasNext();
+ void Next();
+ bool IsCurrentParamAReference();
+ bool IsCurrentParamInRegister();
+ bool IsCurrentParamOnStack();
+ bool IsCurrentParamPossiblyNull();
+ size_t CurrentParamSizeInBytes();
+ ManagedRegister CurrentParamRegister();
+ FrameOffset CurrentParamStackOffset();
+
+ private:
+ // Keep the copy-disabling declarations private, matching the placement in
+ // JniCallingConvention, so misuse is a compile-time error.
+ DISALLOW_COPY_AND_ASSIGN(ManagedRuntimeCallingConvention);
+};
+
+// Abstraction for JNI calling conventions
+// | incoming stack args | <-- Prior SP
+// | { Spilled registers |
+// | & return address } |
+// | { Saved JNI Env Data } |
+// | { Stack Handle Block |
+// | ... |
+// | length/link } | (here to prior SP is frame size)
+// | Method* | <-- Anchor SP written to thread
+// | { Outgoing stack args |
+// | ... } | <-- SP at point of call
+// | Native frame |
+class JniCallingConvention : public CallingConvention {
+ public:
+ explicit JniCallingConvention(Method* native_method) :
+ CallingConvention(native_method) {}
+
+ // Size of frame excluding space for outgoing args (it's assumed Method* is
+ // always at the bottom of a frame, but this doesn't work for outgoing
+ // native args). Includes alignment.
+ size_t FrameSize();
+ // Size of outgoing arguments, including alignment
+ size_t OutArgSize();
+ // Number of handles in stack handle block
+ size_t HandleCount();
+
+ // Iterator interface; positions 0 and 1 are the implicit JNIEnv* and
+ // jobject/jclass arguments (see IteratorPos below)
+ bool HasNext();
+ void Next();
+ bool IsCurrentParamAReference();
+ bool IsCurrentParamInRegister();
+ bool IsCurrentParamOnStack();
+ size_t CurrentParamSizeInBytes();
+ ManagedRegister CurrentParamRegister();
+ FrameOffset CurrentParamStackOffset();
+
+ // Iterator interface extension for JNI
+ FrameOffset CurrentParamHandleOffset();
+
+ // Position of stack handle block and interior fields; all are frame
+ // offsets adjusted by the current iterator displacement
+ FrameOffset ShbOffset() {
+ return FrameOffset(displacement_.Int32Value() +
+ kPointerSize); // above Method*
+ }
+ FrameOffset ShbNumRefsOffset() {
+ return FrameOffset(ShbOffset().Int32Value() +
+ StackHandleBlock::NumberOfReferencesOffset());
+ }
+ FrameOffset ShbLinkOffset() {
+ return FrameOffset(ShbOffset().Int32Value() +
+ StackHandleBlock::LinkOffset());
+ }
+
+ private:
+ // Named iterator positions
+ enum IteratorPos {
+ kJniEnv = 0,
+ kObjectOrClass = 1
+ };
+
+ // Number of stack slots for outgoing arguments, above which handles are
+ // located
+ size_t NumberOfOutgoingStackArgs();
+
+ DISALLOW_COPY_AND_ASSIGN(JniCallingConvention);
+};
+
+} // namespace art
+
+#endif // ART_SRC_CALLING_CONVENTION_H_
diff --git a/src/calling_convention_arm.cc b/src/calling_convention_arm.cc
new file mode 100644
index 0000000..233905a
--- /dev/null
+++ b/src/calling_convention_arm.cc
@@ -0,0 +1,83 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: irogers@google.com (Ian Rogers)
+
+#include "src/calling_convention.h"
+#include "src/logging.h"
+
+namespace art {
+
+// The Method* argument arrives in R0 (first AAPCS argument register).
+ManagedRegister CallingConvention::MethodRegister() {
+ return ManagedRegister::FromCoreRegister(R0);
+}
+
+// R12 is IP, the ARM intra-procedure-call scratch register, so it is free
+// to clobber around calls.
+ManagedRegister CallingConvention::InterproceduralScratchRegister() {
+ return ManagedRegister::FromCoreRegister(R12);
+}
+
+// Register(s) holding this method's return value, chosen by return type:
+// S0 for float, D0 for double, R0/R1 pair for long, R0 otherwise.
+ManagedRegister CallingConvention::ReturnRegister() {
+ if (GetMethod()->IsReturnAFloat()) {
+ return ManagedRegister::FromSRegister(S0);
+ } else if (GetMethod()->IsReturnADouble()) {
+ // Doubles come back in D0; testing IsReturnAFloat() again here (as the
+ // code previously did) would make this branch unreachable.
+ return ManagedRegister::FromDRegister(D0);
+ } else if (GetMethod()->IsReturnALong()) {
+ return ManagedRegister::FromRegisterPair(R0_R1);
+ } else {
+ return ManagedRegister::FromCoreRegister(R0);
+ }
+}
+
+// Managed runtime calling convention
+
+// The Method* occupies R0, leaving R1-R3 for the first three argument
+// slots; everything after that is passed on the stack.
+// NOTE(review): long/double arguments are not register-pair aligned here —
+// confirm whether a long straddling R3/stack is ever produced.
+bool ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+ return itr_position_ < 3;
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+ return itr_position_ >= 3;
+}
+
+// Core registers used for the first three managed argument slots.
+static const Register kManagedArgumentRegisters[] = {
+ R1, R2, R3
+};
+ManagedRegister ManagedRuntimeCallingConvention::CurrentParamRegister() {
+ CHECK_LT(itr_position_, 3u);
+ return
+ ManagedRegister::FromCoreRegister(kManagedArgumentRegisters[itr_position_]);
+}
+
+// Frame offset of a stack-passed argument; each long/double already seen
+// consumes one extra slot.
+FrameOffset ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+ CHECK_GE(itr_position_, 3u);
+ return FrameOffset(displacement_.Int32Value() +
+ ((itr_position_ + itr_longs_and_doubles_ - 3) * kPointerSize));
+}
+
+// JNI calling convention
+
+// AAPCS passes the first four argument slots in R0-R3; the rest go on the
+// outgoing stack area.
+bool JniCallingConvention::IsCurrentParamInRegister() {
+ return itr_position_ < 4;
+}
+
+bool JniCallingConvention::IsCurrentParamOnStack() {
+ return itr_position_ >= 4;
+}
+
+// Core registers for the first four native argument slots (JNIEnv*,
+// jobject/jclass, then method arguments).
+static const Register kJniArgumentRegisters[] = {
+ R0, R1, R2, R3
+};
+ManagedRegister JniCallingConvention::CurrentParamRegister() {
+ CHECK_LT(itr_position_, 4u);
+ return
+ ManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_position_]);
+}
+
+// Offset of a stack-passed native argument, below the frame by OutArgSize.
+FrameOffset JniCallingConvention::CurrentParamStackOffset() {
+ CHECK_GE(itr_position_, 4u);
+ return FrameOffset(displacement_.Int32Value() - OutArgSize()
+ + ((itr_position_ + itr_longs_and_doubles_ - 4) * kPointerSize));
+}
+
+// NOTE(review): total native slots are NumArgs + long/double extras plus the
+// implicit JNIEnv* and (for static) jclass, minus the 4 register slots. The
+// formula below matches the static case exactly and appears to over-count by
+// one slot for non-static methods (harmless over-allocation) — confirm.
+size_t JniCallingConvention::NumberOfOutgoingStackArgs() {
+ return GetMethod()->NumArgs() + GetMethod()->NumLongOrDoubleArgs() - 2;
+}
+
+} // namespace art
diff --git a/src/calling_convention_x86.cc b/src/calling_convention_x86.cc
new file mode 100644
index 0000000..a72f361
--- /dev/null
+++ b/src/calling_convention_x86.cc
@@ -0,0 +1,73 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: irogers@google.com (Ian Rogers)
+
+#include "src/calling_convention.h"
+#include "src/logging.h"
+#include "src/utils.h"
+
+namespace art {
+
+// The Method* argument is materialized in EDI for managed code.
+ManagedRegister CallingConvention::MethodRegister() {
+ return ManagedRegister::FromCpuRegister(EDI);
+}
+
+// ECX is caller-save under cdecl, so it is free for scratch around calls.
+ManagedRegister CallingConvention::InterproceduralScratchRegister() {
+ return ManagedRegister::FromCpuRegister(ECX);
+}
+
+// Return register per cdecl: x87 ST0 for float/double, EAX:EDX pair for
+// 64-bit longs, EAX for everything else.
+ManagedRegister CallingConvention::ReturnRegister() {
+ if (GetMethod()->IsReturnAFloatOrDouble()) {
+ return ManagedRegister::FromX87Register(ST0);
+ } else if (GetMethod()->IsReturnALong()) {
+ return ManagedRegister::FromRegisterPair(EAX_EDX);
+ } else {
+ return ManagedRegister::FromCpuRegister(EAX);
+ }
+}
+
+// Managed runtime calling convention
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+ return false; // Everything is passed by stack
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+ return true; // Everything is passed by stack
+}
+
+// Unreachable: no managed arguments are register-allocated on x86.
+ManagedRegister ManagedRuntimeCallingConvention::CurrentParamRegister() {
+ LOG(FATAL) << "Should not reach here";
+ return ManagedRegister::NoRegister();
+}
+
+// Slot index is the argument position plus one extra slot per long/double
+// already passed.
+FrameOffset ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+ int num_slots = itr_position_ + itr_longs_and_doubles_;
+ return FrameOffset(displacement_.Int32Value() + (num_slots * kPointerSize));
+}
+
+// JNI calling convention
+
+bool JniCallingConvention::IsCurrentParamInRegister() {
+ return false; // Everything is passed by stack
+}
+
+bool JniCallingConvention::IsCurrentParamOnStack() {
+ return true; // Everything is passed by stack
+}
+
+// Unreachable: cdecl passes all native arguments on the stack.
+ManagedRegister JniCallingConvention::CurrentParamRegister() {
+ LOG(FATAL) << "Should not reach here";
+ return ManagedRegister::NoRegister();
+}
+
+// Offset within the outgoing argument area (which sits below the frame,
+// hence the subtraction of OutArgSize).
+FrameOffset JniCallingConvention::CurrentParamStackOffset() {
+ int num_slots = itr_position_ + itr_longs_and_doubles_;
+ return FrameOffset(displacement_.Int32Value() - OutArgSize() +
+ (num_slots * kPointerSize));
+}
+
+// NOTE(review): does not add slots for the implicit JNIEnv* and jclass
+// arguments — confirm NumArgs already accounts for them on this path.
+size_t JniCallingConvention::NumberOfOutgoingStackArgs() {
+ return GetMethod()->NumArgs() + GetMethod()->NumLongOrDoubleArgs();
+}
+
+} // namespace art
diff --git a/src/common_test.h b/src/common_test.h
index f341407..4531b39 100644
--- a/src/common_test.h
+++ b/src/common_test.h
@@ -135,4 +135,33 @@
"EAAAAgAAAMgBAAACIAAADwAAAN4BAAADIAAABQAAAE0CAAAAIAAAAQAAAHICAAAAEAAAAQAAAIwC"
"AAA=";
+// javac MyClass.java && dx --dex --output=MyClass.dex
+// --core-library MyClass.class java/lang/Object.class && base64 MyClass.dex
+// package java.lang;
+// public class Object {}
+// class MyClass {
+// native void foo();
+// native int fooI(int x);
+// native int fooII(int x, int y);
+// native double fooDD(double x, double y);
+// native Object fooIOO(int x, Object y, Object z);
+// static native Object fooSIOO(int x, Object y, Object z);
+// }
+static const char kMyClassNativesDex[] =
+ "ZGV4CjAzNQDuVKYsovltNxptaisjQasgppGak46k/n0sAwAAcAAAAHhWNBIAAAAAAAAAAJgCAAAS"
+ "AAAAcAAAAAUAAAC4AAAABQAAAMwAAAAAAAAAAAAAAAgAAAAIAQAAAgAAAEgBAACkAQAAiAEAANYB"
+ "AADeAQAA4QEAAOYBAADpAQAA7QEAAPIBAAD4AQAAAwIAABcCAAAlAgAAMgIAADUCAAA6AgAAQQIA"
+ "AEcCAABOAgAAVgIAAAEAAAADAAAABwAAAAgAAAALAAAAAgAAAAAAAAC0AQAABAAAAAEAAAC8AQAA"
+ "BQAAAAEAAADEAQAABgAAAAMAAADMAQAACwAAAAQAAAAAAAAAAgAEAAAAAAACAAQADAAAAAIAAAAN"
+ "AAAAAgABAA4AAAACAAIADwAAAAIAAwAQAAAAAgADABEAAAADAAQAAAAAAAMAAAABAAAA/////wAA"
+ "AAAKAAAAAAAAAGkCAAAAAAAAAgAAAAAAAAADAAAAAAAAAAkAAAAAAAAAcwIAAAAAAAABAAEAAAAA"
+ "AF8CAAABAAAADgAAAAEAAQABAAAAZAIAAAQAAABwEAcAAAAOAAIAAAAAAAAAAQAAAAEAAAACAAAA"
+ "AQABAAMAAAABAAMAAwAGPGluaXQ+AAFEAANEREQAAUkAAklJAANJSUkABExJTEwACUxNeUNsYXNz"
+ "OwASTGphdmEvbGFuZy9PYmplY3Q7AAxNeUNsYXNzLmphdmEAC09iamVjdC5qYXZhAAFWAANmb28A"
+ "BWZvb0REAARmb29JAAVmb29JSQAGZm9vSU9PAAdmb29TSU9PAAMABw4AAQAHDgAAAAEAB4GABIgD"
+ "AAACBQCAgAScAwaIAgABgAIAAYACAAGAAgABgAIAAYACAAAAAAwAAAAAAAAAAQAAAAAAAAABAAAA"
+ "EgAAAHAAAAACAAAABQAAALgAAAADAAAABQAAAMwAAAAFAAAACAAAAAgBAAAGAAAAAgAAAEgBAAAB"
+ "IAAAAgAAAIgBAAABEAAABAAAALQBAAACIAAAEgAAANYBAAADIAAAAgAAAF8CAAAAIAAAAgAAAGkC"
+ "AAAAEAAAAQAAAJgCAAA=";
+
} // namespace art
diff --git a/src/constants.h b/src/constants.h
new file mode 100644
index 0000000..22a4043
--- /dev/null
+++ b/src/constants.h
@@ -0,0 +1,12 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#ifndef ART_SRC_CONSTANTS_H_
+#define ART_SRC_CONSTANTS_H_
+
+#if defined(__i386__)
+#include "src/constants_x86.h"
+#elif defined(__arm__)
+#include "src/constants_arm.h"
+#endif
+
+#endif // ART_SRC_CONSTANTS_H_
diff --git a/src/constants_arm.h b/src/constants_arm.h
index bb693d3..c2aed30 100644
--- a/src/constants_arm.h
+++ b/src/constants_arm.h
@@ -3,9 +3,10 @@
#ifndef ART_SRC_CONSTANTS_ARM_H_
#define ART_SRC_CONSTANTS_ARM_H_
-#include <iosfwd>
#include <stdint.h>
+#include <iosfwd>
#include "src/casts.h"
+#include "src/globals.h"
#include "src/logging.h"
namespace art {
@@ -47,6 +48,7 @@
R13 = 13,
R14 = 14,
R15 = 15,
+ TR = 9, // thread register
FP = 11,
IP = 12,
SP = 13,
@@ -451,8 +453,8 @@
// See "ARM Architecture Reference Manual ARMv7-A and ARMv7-R edition",
// section A5.1 "ARM instruction set encoding".
inline bool IsDataProcessing() const {
- CHECK(ConditionField() != kSpecialCondition);
- CHECK(Bits(26, 2) == 0); // Type 0 or 1.
+ CHECK_NE(ConditionField(), kSpecialCondition);
+ CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1.
return ((Bits(20, 5) & 0x19) != 0x10) &&
((Bit(25) == 1) || // Data processing immediate.
(Bit(4) == 0) || // Data processing register.
@@ -463,11 +465,11 @@
// as well as multiplications, synchronization primitives, and miscellaneous).
// Can only be called for a type 0 or 1 instruction.
inline bool IsMiscellaneous() const {
- CHECK(Bits(26, 2) == 0); // Type 0 or 1.
+ CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1.
return ((Bit(25) == 0) && ((Bits(20, 5) & 0x19) == 0x10) && (Bit(7) == 0));
}
inline bool IsMultiplyOrSyncPrimitive() const {
- CHECK(Bits(26, 2) == 0); // Type 0 or 1.
+ CHECK_EQ(Bits(26, 2), 0); // Type 0 or 1.
return ((Bit(25) == 0) && (Bits(4, 4) == 9));
}
@@ -503,8 +505,8 @@
// Test for VFP data processing or single transfer instructions of type 7.
inline bool IsVFPDataProcessingOrSingleTransfer() const {
- CHECK(ConditionField() != kSpecialCondition);
- CHECK(TypeField() == 7);
+ CHECK_NE(ConditionField(), kSpecialCondition);
+ CHECK_EQ(TypeField(), 7);
return ((Bit(24) == 0) && (Bits(9, 3) == 5));
// Bit(4) == 0: Data Processing
// Bit(4) == 1: 8, 16, or 32-bit Transfer between ARM Core and VFP
@@ -512,16 +514,16 @@
// Test for VFP 64-bit transfer instructions of type 6.
inline bool IsVFPDoubleTransfer() const {
- CHECK(ConditionField() != kSpecialCondition);
- CHECK(TypeField() == 6);
+ CHECK_NE(ConditionField(), kSpecialCondition);
+ CHECK_EQ(TypeField(), 6);
return ((Bits(21, 4) == 2) && (Bits(9, 3) == 5) &&
((Bits(4, 4) & 0xd) == 1));
}
// Test for VFP load and store instructions of type 6.
inline bool IsVFPLoadStore() const {
- CHECK(ConditionField() != kSpecialCondition);
- CHECK(TypeField() == 6);
+ CHECK_NE(ConditionField(), kSpecialCondition);
+ CHECK_EQ(TypeField(), 6);
return ((Bits(20, 5) & 0x12) == 0x10) && (Bits(9, 3) == 5);
}
diff --git a/src/constants_x86.h b/src/constants_x86.h
index d96e6dc..c7457c9 100644
--- a/src/constants_x86.h
+++ b/src/constants_x86.h
@@ -3,12 +3,11 @@
#ifndef ART_SRC_CONSTANTS_X86_H_
#define ART_SRC_CONSTANTS_X86_H_
+#include <iosfwd>
#include "src/globals.h"
#include "src/logging.h"
#include "src/macros.h"
-#include <iosfwd>
-
namespace art {
enum Register {
@@ -26,7 +25,6 @@
};
std::ostream& operator<<(std::ostream& os, const Register& rhs);
-
enum ByteRegister {
AL = 0,
CL = 1,
@@ -52,7 +50,21 @@
kNumberOfXmmRegisters = 8,
kNoXmmRegister = -1 // Signals an illegal register.
};
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg);
+enum X87Register {
+ ST0 = 0,
+ ST1 = 1,
+ ST2 = 2,
+ ST3 = 3,
+ ST4 = 4,
+ ST5 = 5,
+ ST6 = 6,
+ ST7 = 7,
+ kNumberOfX87Registers = 8,
+ kNoX87Register = -1 // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const X87Register& reg);
enum ScaleFactor {
TIMES_1 = 0,
diff --git a/src/jni_compiler.cc b/src/jni_compiler.cc
new file mode 100644
index 0000000..67f8bc8
--- /dev/null
+++ b/src/jni_compiler.cc
@@ -0,0 +1,290 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: irogers@google.com (Ian Rogers)
+#include "src/jni_compiler.h"
+#include <sys/mman.h>
+#include "src/assembler.h"
+#include "src/calling_convention.h"
+#include "src/macros.h"
+#include "src/managed_register.h"
+#include "src/logging.h"
+#include "src/thread.h"
+
+namespace art {
+
+// Generate the JNI bridge for the given method, general contract:
+// - Arguments are in the managed runtime format, either on stack or in
+// registers, a reference to the method object is supplied as part of this
+// convention.
+// On success the generated code is finalized into the compiler's code
+// region and installed on the method via SetCode.
+void JniCompiler::Compile(Assembler* jni_asm, Method* native_method) {
+ CHECK(native_method->IsNative());
+ JniCallingConvention jni_conv(native_method);
+ ManagedRuntimeCallingConvention mr_conv(native_method);
+ const bool is_static = native_method->IsStatic();
+
+ // 1. Build the frame
+ const size_t frame_size(jni_conv.FrameSize());
+ jni_asm->BuildFrame(frame_size, mr_conv.MethodRegister());
+
+ // 2. Save callee save registers that aren't callee save in the native code
+ // TODO: implement computing the difference of the callee saves
+ // and saving
+
+ // 3. Set up the StackHandleBlock
+ mr_conv.ResetIterator(FrameOffset(frame_size));
+ jni_conv.ResetIterator(FrameOffset(0));
+ jni_asm->StoreImmediateToFrame(jni_conv.ShbNumRefsOffset(),
+ jni_conv.HandleCount(),
+ mr_conv.InterproceduralScratchRegister());
+ jni_asm->CopyRawPtrFromThread(jni_conv.ShbLinkOffset(),
+ Thread::TopShbOffset(),
+ mr_conv.InterproceduralScratchRegister());
+ jni_asm->StoreStackOffsetToThread(Thread::TopShbOffset(),
+ jni_conv.ShbOffset(),
+ mr_conv.InterproceduralScratchRegister());
+
+ // 4. Place incoming reference arguments into handle block
+ jni_conv.Next(); // Skip JNIEnv*
+ // 4.5. Create Class argument for static methods out of passed method
+ if (is_static) {
+ FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset();
+ // Check handle offset is within frame
+ CHECK_LT(handle_offset.Uint32Value(), frame_size);
+ jni_asm->LoadRef(jni_conv.InterproceduralScratchRegister(),
+ mr_conv.MethodRegister(), Method::ClassOffset());
+ jni_asm->ValidateRef(jni_conv.InterproceduralScratchRegister(), false);
+ jni_asm->StoreRef(handle_offset, jni_conv.InterproceduralScratchRegister());
+ jni_conv.Next(); // handlerized so move to next argument
+ }
+ while (mr_conv.HasNext()) {
+ CHECK(jni_conv.HasNext());
+ bool ref_param = jni_conv.IsCurrentParamAReference();
+ CHECK(!ref_param || mr_conv.IsCurrentParamAReference());
+ // References need handlerization and the handle address passing
+ if (ref_param) {
+ // Compute handle offset, note null is handlerized but its boxed value
+ // must be NULL
+ FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset();
+ // Check handle offset is within frame
+ CHECK_LT(handle_offset.Uint32Value(), frame_size);
+ bool input_in_reg = mr_conv.IsCurrentParamInRegister();
+ CHECK(input_in_reg || mr_conv.IsCurrentParamOnStack());
+ if (input_in_reg) {
+ LOG(FATAL) << "UNTESTED";
+ ManagedRegister in_reg = mr_conv.CurrentParamRegister();
+ jni_asm->ValidateRef(in_reg, mr_conv.IsCurrentParamPossiblyNull());
+ jni_asm->StoreRef(handle_offset, in_reg);
+ } else {
+ FrameOffset in_off = mr_conv.CurrentParamStackOffset();
+ jni_asm->ValidateRef(in_off, mr_conv.IsCurrentParamPossiblyNull());
+ jni_asm->CopyRef(handle_offset, in_off,
+ mr_conv.InterproceduralScratchRegister());
+ }
+ }
+ mr_conv.Next();
+ jni_conv.Next();
+ }
+
+ // 5. Acquire lock for synchronized methods. Done here as references are held
+ // live in handle block but we're in managed code and can work on
+ // references
+ if (native_method->IsSynchronized()) {
+ jni_conv.ResetIterator(FrameOffset(0));
+ jni_conv.Next(); // skip JNI environment
+ // Position 1 is 'this' for non-static methods, jclass for static ones.
+ jni_asm->LockReferenceOnStack(jni_conv.CurrentParamHandleOffset());
+ }
+
+ // 6. Transition from being in managed to native code
+ // TODO: write out anchor, ensure the transition to native follows a store
+ // fence.
+ jni_asm->StoreImmediateToThread(Thread::StateOffset(), Thread::kNative,
+ mr_conv.InterproceduralScratchRegister());
+
+ // 7. Move frame down to allow space for out going args. Do for as short a
+ // time as possible to aid profiling.
+ const size_t out_arg_size = jni_conv.OutArgSize();
+ jni_asm->IncreaseFrameSize(out_arg_size);
+
+ // 8. Iterate over arguments placing values from managed calling convention in
+ // to the convention required for a native call (shuffling). For references
+ // place an index/pointer to the reference after checking whether it is
+ // NULL (which must be encoded as NULL).
+ // NB. we do this prior to materializing the JNIEnv* and static's jclass to
+ // give as many free registers for the shuffle as possible
+ mr_conv.ResetIterator(FrameOffset(frame_size+out_arg_size));
+ jni_conv.ResetIterator(FrameOffset(out_arg_size));
+ jni_conv.Next(); // Skip JNIEnv*
+ if (is_static) {
+ FrameOffset handle_offset = jni_conv.CurrentParamHandleOffset();
+ if (jni_conv.IsCurrentParamOnStack()) {
+ FrameOffset out_off = jni_conv.CurrentParamStackOffset();
+ jni_asm->CreateStackHandle(out_off, handle_offset,
+ mr_conv.InterproceduralScratchRegister(),
+ false);
+ } else {
+ ManagedRegister out_reg = jni_conv.CurrentParamRegister();
+ jni_asm->CreateStackHandle(out_reg, handle_offset,
+ ManagedRegister::NoRegister(), false);
+ }
+ jni_conv.Next();
+ }
+ while (mr_conv.HasNext()) {
+ CHECK(jni_conv.HasNext());
+ bool input_in_reg = mr_conv.IsCurrentParamInRegister();
+ bool output_in_reg = jni_conv.IsCurrentParamInRegister();
+ FrameOffset handle_offset(0);
+ bool null_allowed = false;
+ bool ref_param = jni_conv.IsCurrentParamAReference();
+ CHECK(!ref_param || mr_conv.IsCurrentParamAReference());
+ CHECK(input_in_reg || mr_conv.IsCurrentParamOnStack());
+ CHECK(output_in_reg || jni_conv.IsCurrentParamOnStack());
+ // References need handlerization and the handle address passing
+ if (ref_param) {
+ null_allowed = mr_conv.IsCurrentParamPossiblyNull();
+ // Compute handle offset. Note null is placed in the SHB but the jobject
+ // passed to the native code must be null (not a pointer into the SHB
+ // as with regular references).
+ handle_offset = jni_conv.CurrentParamHandleOffset();
+ // Check handle offset is within frame.
+ CHECK_LT(handle_offset.Uint32Value(), (frame_size+out_arg_size));
+ }
+ // Four cases: register->register, stack->stack, stack->register and
+ // register->stack; only stack->stack is exercised on x86 today.
+ if (input_in_reg && output_in_reg) {
+ LOG(FATAL) << "UNTESTED";
+ ManagedRegister in_reg = mr_conv.CurrentParamRegister();
+ ManagedRegister out_reg = jni_conv.CurrentParamRegister();
+ if (ref_param) {
+ jni_asm->CreateStackHandle(out_reg, handle_offset, in_reg,
+ null_allowed);
+ } else {
+ jni_asm->Move(out_reg, in_reg);
+ }
+ } else if (!input_in_reg && !output_in_reg) {
+ FrameOffset out_off = jni_conv.CurrentParamStackOffset();
+ if (ref_param) {
+ jni_asm->CreateStackHandle(out_off, handle_offset,
+ mr_conv.InterproceduralScratchRegister(),
+ null_allowed);
+ } else {
+ FrameOffset in_off = mr_conv.CurrentParamStackOffset();
+ size_t param_size = mr_conv.CurrentParamSizeInBytes();
+ CHECK_EQ(param_size, jni_conv.CurrentParamSizeInBytes());
+ jni_asm->Copy(out_off, in_off, mr_conv.InterproceduralScratchRegister(),
+ param_size);
+ }
+ } else if (!input_in_reg && output_in_reg) {
+ LOG(FATAL) << "UNTESTED";
+ FrameOffset in_off = mr_conv.CurrentParamStackOffset();
+ ManagedRegister out_reg = jni_conv.CurrentParamRegister();
+ // Check that incoming stack arguments are above the current stack frame.
+ CHECK_GT(in_off.Uint32Value(), frame_size);
+ if (ref_param) {
+ jni_asm->CreateStackHandle(out_reg, handle_offset,
+ ManagedRegister::NoRegister(), null_allowed);
+ } else {
+ unsigned int param_size = mr_conv.CurrentParamSizeInBytes();
+ CHECK_EQ(param_size, jni_conv.CurrentParamSizeInBytes());
+ jni_asm->Load(out_reg, in_off, param_size);
+ }
+ } else {
+ LOG(FATAL) << "UNTESTED";
+ CHECK(input_in_reg && !output_in_reg);
+ ManagedRegister in_reg = mr_conv.CurrentParamRegister();
+ FrameOffset out_off = jni_conv.CurrentParamStackOffset();
+ // Check outgoing argument is within frame
+ CHECK_LT(out_off.Uint32Value(), frame_size);
+ if (ref_param) {
+ // TODO: recycle value in in_reg rather than reload from handle
+ jni_asm->CreateStackHandle(out_off, handle_offset,
+ mr_conv.InterproceduralScratchRegister(),
+ null_allowed);
+ } else {
+ size_t param_size = mr_conv.CurrentParamSizeInBytes();
+ CHECK_EQ(param_size, jni_conv.CurrentParamSizeInBytes());
+ jni_asm->Store(out_off, in_reg, param_size);
+ }
+ }
+ mr_conv.Next();
+ jni_conv.Next();
+ }
+ // 9. Create 1st argument, the JNI environment ptr
+ jni_conv.ResetIterator(FrameOffset(out_arg_size));
+ if (jni_conv.IsCurrentParamInRegister()) {
+ jni_asm->LoadRawPtrFromThread(jni_conv.CurrentParamRegister(),
+ Thread::JniEnvOffset());
+ } else {
+ jni_asm->CopyRawPtrFromThread(jni_conv.CurrentParamStackOffset(),
+ Thread::JniEnvOffset(),
+ jni_conv.InterproceduralScratchRegister());
+ }
+
+ // 10. Plant call to native code associated with method
+ jni_asm->Call(mr_conv.MethodRegister(), Method::NativeMethodOffset(),
+ mr_conv.InterproceduralScratchRegister());
+
+ // 11. Release outgoing argument area
+ jni_asm->DecreaseFrameSize(out_arg_size);
+
+ // 12. Transition from being in native to managed code, possibly entering a
+ // safepoint
+ jni_asm->StoreImmediateToThread(Thread::StateOffset(), Thread::kRunnable,
+ mr_conv.InterproceduralScratchRegister());
+ // TODO: check for safepoint transition
+
+ // 13. Move to first handle offset
+ jni_conv.ResetIterator(FrameOffset(0));
+ jni_conv.Next(); // skip JNI environment
+
+ // 14. Release lock for synchronized methods (done in the managed state so
+ // references can be touched)
+ if (native_method->IsSynchronized()) {
+ jni_asm->UnLockReferenceOnStack(jni_conv.CurrentParamHandleOffset());
+ }
+
+ // 15. Place result in correct register possibly dehandlerizing
+ // NOTE(review): this reuses the handle slot at position kObjectOrClass for
+ // the returned reference — confirm returned jobjects always resolve there.
+ if (jni_conv.IsReturnAReference()) {
+ jni_asm->LoadReferenceFromStackHandle(mr_conv.ReturnRegister(),
+ jni_conv.ReturnRegister(),
+ jni_conv.CurrentParamHandleOffset());
+ } else {
+ jni_asm->Move(mr_conv.ReturnRegister(), jni_conv.ReturnRegister());
+ }
+
+ // 16. Remove stack handle block from thread
+ jni_asm->CopyRawPtrToThread(Thread::TopShbOffset(), jni_conv.ShbLinkOffset(),
+ jni_conv.InterproceduralScratchRegister());
+
+ // 17. Remove activation
+ jni_asm->RemoveFrame(frame_size);
+
+ // 18. Finalize code generation
+ size_t cs = jni_asm->CodeSize();
+ MemoryRegion code(AllocateCode(cs), cs);
+ jni_asm->FinalizeInstructions(code);
+ native_method->SetCode(code.pointer());
+}
+
+// Bump-pointer allocation out of the mmapped code region; aborts when the
+// region is exhausted. Returned memory is never freed individually.
+void* JniCompiler::AllocateCode(size_t size) {
+ // Use <= so an allocation may consume the remaining space exactly; a
+ // strict < would waste the region's final byte(s).
+ CHECK_LE(((jni_code_top_ - jni_code_) + size), jni_code_size_);
+ void* result = jni_code_top_;
+ jni_code_top_ += size;
+ return result;
+}
+
+// Maps a single page-aligned region for generated bridges.
+// NOTE(review): the mapping is simultaneously writable and executable
+// (no W^X); acceptable for this bootstrap code cache but worth revisiting.
+JniCompiler::JniCompiler() {
+ // TODO: this shouldn't be managed by the JniCompiler, we should have a
+ // code cache.
+ jni_code_size_ = 4096;
+ jni_code_ = static_cast<byte*>(mmap(NULL, jni_code_size_,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
+ CHECK_NE(MAP_FAILED, jni_code_);
+ jni_code_top_ = jni_code_;
+}
+
+// Releases the code region; any generated bridges become invalid after this.
+JniCompiler::~JniCompiler() {
+ // TODO: this shouldn't be managed by the JniCompiler, we should have a
+ // code cache.
+ CHECK_EQ(0, munmap(jni_code_, jni_code_size_));
+}
+
+} // namespace art
diff --git a/src/jni_compiler.h b/src/jni_compiler.h
new file mode 100644
index 0000000..4a6b1ba
--- /dev/null
+++ b/src/jni_compiler.h
@@ -0,0 +1,40 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: irogers@google.com (Ian Rogers)
+#ifndef ART_SRC_JNI_COMPILER_H_
+#define ART_SRC_JNI_COMPILER_H_
+
+#include "globals.h"
+#include "macros.h"
+
+namespace art {
+
+class Assembler;
+class Method;
+
+// A JNI compiler generates code that acts as the bridge between managed code
+// and native code.
+// TODO: move the responsibility of managing memory to somewhere else
+class JniCompiler {
+ public:
+ JniCompiler();
+ ~JniCompiler();
+ void Compile(Assembler* jni_asm, Method* method);
+ private:
+ // A poor man's code cache
+ void* AllocateCode(size_t size);
+
+ // Base of memory region for allocated code
+ byte* jni_code_;
+
+ // Allocated code size
+ size_t jni_code_size_;
+
+ // Pointer to the free space
+ byte* jni_code_top_;
+
+ DISALLOW_COPY_AND_ASSIGN(JniCompiler);
+};
+
+} // namespace art
+
+#endif // ART_SRC_JNI_COMPILER_H_
diff --git a/src/jni_compiler_test.cc b/src/jni_compiler_test.cc
new file mode 100644
index 0000000..59d8c2b
--- /dev/null
+++ b/src/jni_compiler_test.cc
@@ -0,0 +1,390 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: irogers@google.com (Ian Rogers)
+
+#include <sys/mman.h>
+#include "src/assembler.h"
+#include "src/class_linker.h"
+#include "src/common_test.h"
+#include "src/dex_file.h"
+#include "src/jni_compiler.h"
+#include "src/runtime.h"
+#include "src/thread.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+class JniCompilerTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ // Create runtime and attach thread
+ runtime_ = Runtime::Create();
+ CHECK(runtime_->AttachCurrentThread());
+ // Create thunk code that performs the native to managed transition
+ thunk_code_size_ = 4096;
+ thunk_ = mmap(NULL, thunk_code_size_, PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ CHECK_NE(MAP_FAILED, thunk_);
+ Assembler thk_asm;
+ // TODO: shouldn't have machine specific code in a general purpose file
+#if defined(__i386__)
+ thk_asm.pushl(EDI); // preserve EDI
+ thk_asm.movl(EAX, Address(ESP, 8)); // EAX = method->GetCode()
+ thk_asm.movl(EDI, Address(ESP, 12)); // EDI = method
+ thk_asm.pushl(Immediate(0)); // push pad
+ thk_asm.pushl(Immediate(0)); // push pad
+ thk_asm.pushl(Address(ESP, 40)); // push pad or jlong high
+ thk_asm.pushl(Address(ESP, 40)); // push jint or jlong low
+ thk_asm.pushl(Address(ESP, 40)); // push jint or jlong high
+ thk_asm.pushl(Address(ESP, 40)); // push jint or jlong low
+ thk_asm.pushl(Address(ESP, 40)); // push jobject
+ thk_asm.call(EAX); // Continue in method->GetCode()
+ thk_asm.addl(ESP, Immediate(28)); // pop arguments
+ thk_asm.popl(EDI); // restore EDI
+ thk_asm.ret();
+#else
+ LOG(FATAL) << "Unimplemented";
+#endif
+ size_t cs = thk_asm.CodeSize();
+ MemoryRegion code(thunk_, cs);
+ thk_asm.FinalizeInstructions(code);
+ thunk_entry1_ = reinterpret_cast<jint (*)(const void*, art::Method*,
+ jobject, jint, jint, jint)
+ >(code.pointer());
+ thunk_entry2_ = reinterpret_cast<jdouble (*)(const void*, art::Method*,
+ jobject, jdouble, jdouble)
+ >(code.pointer());
+ }
+
+ virtual void TearDown() {
+ // Release thunk code
+ CHECK(runtime_->DetachCurrentThread());
+ CHECK_EQ(0, munmap(thunk_, thunk_code_size_));
+ }
+
+ // Run generated code associated with method passing and returning int size
+ // arguments
+ jvalue RunMethod(Method* method, jvalue a, jvalue b, jvalue c, jvalue d) {
+ jvalue result;
+ // sanity checks
+ EXPECT_NE(static_cast<void*>(NULL), method->GetCode());
+ EXPECT_EQ(0u, Thread::Current()->NumShbHandles());
+ EXPECT_EQ(Thread::kRunnable, Thread::Current()->GetState());
+ // perform call
+ result.i = (*thunk_entry1_)(method->GetCode(), method, a.l, b.i, c.i, d.i);
+ // sanity check post-call
+ EXPECT_EQ(0u, Thread::Current()->NumShbHandles());
+ EXPECT_EQ(Thread::kRunnable, Thread::Current()->GetState());
+ return result;
+ }
+
+ // Run generated code associated with method passing and returning double size
+ // arguments
+ jvalue RunMethodD(Method* method, jvalue a, jvalue b, jvalue c) {
+ jvalue result;
+ // sanity checks
+ EXPECT_NE(static_cast<void*>(NULL), method->GetCode());
+ EXPECT_EQ(0u, Thread::Current()->NumShbHandles());
+ EXPECT_EQ(Thread::kRunnable, Thread::Current()->GetState());
+ // perform call
+ result.d = (*thunk_entry2_)(method->GetCode(), method, a.l, b.d, c.d);
+ // sanity check post-call
+ EXPECT_EQ(0u, Thread::Current()->NumShbHandles());
+ EXPECT_EQ(Thread::kRunnable, Thread::Current()->GetState());
+ return result;
+ }
+
+ Runtime* runtime_;
+ void* thunk_;
+ size_t thunk_code_size_;
+ jint (*thunk_entry1_)(const void*, Method*, jobject, jint, jint, jint);
+ jdouble (*thunk_entry2_)(const void*, Method*, jobject, jdouble, jdouble);
+};
+
+int gJava_MyClass_foo_calls = 0;
+void Java_MyClass_foo(JNIEnv*, jobject) {
+ EXPECT_EQ(1u, Thread::Current()->NumShbHandles());
+ EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
+ gJava_MyClass_foo_calls++;
+}
+
+int gJava_MyClass_fooI_calls = 0;
+jint Java_MyClass_fooI(JNIEnv*, jobject, jint x) {
+ EXPECT_EQ(1u, Thread::Current()->NumShbHandles());
+ EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
+ gJava_MyClass_fooI_calls++;
+ return x;
+}
+
+int gJava_MyClass_fooII_calls = 0;
+jint Java_MyClass_fooII(JNIEnv*, jobject, jint x, jint y) {
+ EXPECT_EQ(1u, Thread::Current()->NumShbHandles());
+ EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
+ gJava_MyClass_fooII_calls++;
+ return x - y; // non-commutative operator
+}
+
+int gJava_MyClass_fooDD_calls = 0;
+jdouble Java_MyClass_fooDD(JNIEnv*, jobject, jdouble x, jdouble y) {
+ EXPECT_EQ(1u, Thread::Current()->NumShbHandles());
+ EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
+ gJava_MyClass_fooDD_calls++;
+ return x - y; // non-commutative operator
+}
+
+int gJava_MyClass_fooIOO_calls = 0;
+jobject Java_MyClass_fooIOO(JNIEnv*, jobject thisObject, jint x, jobject y,
+ jobject z) {
+ EXPECT_EQ(3u, Thread::Current()->NumShbHandles());
+ EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
+ gJava_MyClass_fooIOO_calls++;
+ switch (x) {
+ case 1:
+ return y;
+ case 2:
+ return z;
+ default:
+ return thisObject;
+ }
+}
+
+int gJava_MyClass_fooSIOO_calls = 0;
+jobject Java_MyClass_fooSIOO(JNIEnv*, jclass klass, jint x, jobject y,
+ jobject z) {
+ EXPECT_EQ(3u, Thread::Current()->NumShbHandles());
+ EXPECT_EQ(Thread::kNative, Thread::Current()->GetState());
+ gJava_MyClass_fooSIOO_calls++;
+ switch (x) {
+ case 1:
+ return y;
+ case 2:
+ return z;
+ default:
+ return klass;
+ }
+}
+
+TEST_F(JniCompilerTest, CompileAndRunNoArgMethod) {
+ scoped_ptr<DexFile> dex(OpenDexFileBase64(kMyClassNativesDex));
+ scoped_ptr<ClassLinker> linker(ClassLinker::Create());
+ linker->AppendToClassPath(dex.get());
+ Class* klass = linker->FindClass("LMyClass;", NULL);
+ Method* method = klass->FindVirtualMethod("foo");
+
+ Assembler jni_asm;
+ JniCompiler jni_compiler;
+ jni_compiler.Compile(&jni_asm, method);
+
+ // TODO: should really use JNIEnv to RegisterNative, but missing a
+ // complete story on this, so hack the RegisterNative below
+ // JNIEnv* env = Thread::Current()->GetJniEnv();
+ // JNINativeMethod methods[] = {{"foo", "()V", (void*)&Java_MyClass_foo}};
+ method->RegisterNative(reinterpret_cast<void*>(&Java_MyClass_foo));
+
+ jvalue a;
+ a.l = (jobject)NULL;
+ EXPECT_EQ(0, gJava_MyClass_foo_calls);
+ RunMethod(method, a, a, a, a);
+ EXPECT_EQ(1, gJava_MyClass_foo_calls);
+ RunMethod(method, a, a, a, a);
+ EXPECT_EQ(2, gJava_MyClass_foo_calls);
+}
+
+TEST_F(JniCompilerTest, CompileAndRunIntMethod) {
+ scoped_ptr<DexFile> dex(OpenDexFileBase64(kMyClassNativesDex));
+ scoped_ptr<ClassLinker> linker(ClassLinker::Create());
+ linker->AppendToClassPath(dex.get());
+ Class* klass = linker->FindClass("LMyClass;", NULL);
+ Method* method = klass->FindVirtualMethod("fooI");
+
+ Assembler jni_asm;
+ JniCompiler jni_compiler;
+ jni_compiler.Compile(&jni_asm, method);
+
+ // TODO: should really use JNIEnv to RegisterNative, but missing a
+ // complete story on this, so hack the RegisterNative below
+ method->RegisterNative(reinterpret_cast<void*>(&Java_MyClass_fooI));
+
+ jvalue a, b, c;
+ a.l = (jobject)NULL;
+ b.i = 42;
+ EXPECT_EQ(0, gJava_MyClass_fooI_calls);
+ c = RunMethod(method, a, b, a, a);
+ ASSERT_EQ(42, c.i);
+ EXPECT_EQ(1, gJava_MyClass_fooI_calls);
+  b.i = (jint)0xCAFED00D;
+ c = RunMethod(method, a, b, a, a);
+ ASSERT_EQ((jint)0xCAFED00D, c.i);
+ EXPECT_EQ(2, gJava_MyClass_fooI_calls);
+}
+
+TEST_F(JniCompilerTest, CompileAndRunIntIntMethod) {
+ scoped_ptr<DexFile> dex(OpenDexFileBase64(kMyClassNativesDex));
+ scoped_ptr<ClassLinker> linker(ClassLinker::Create());
+ linker->AppendToClassPath(dex.get());
+ Class* klass = linker->FindClass("LMyClass;", NULL);
+ Method* method = klass->FindVirtualMethod("fooII");
+
+ Assembler jni_asm;
+ JniCompiler jni_compiler;
+ jni_compiler.Compile(&jni_asm, method);
+
+ // TODO: should really use JNIEnv to RegisterNative, but missing a
+ // complete story on this, so hack the RegisterNative below
+ method->RegisterNative(reinterpret_cast<void*>(&Java_MyClass_fooII));
+
+ jvalue a, b, c, d;
+ a.l = (jobject)NULL;
+ b.i = 99;
+ c.i = 10;
+ EXPECT_EQ(0, gJava_MyClass_fooII_calls);
+ d = RunMethod(method, a, b, c, a);
+ ASSERT_EQ(99 - 10, d.i);
+ EXPECT_EQ(1, gJava_MyClass_fooII_calls);
+  b.i = (jint)0xCAFEBABE;
+  c.i = (jint)0xCAFED00D;
+ d = RunMethod(method, a, b, c, a);
+ ASSERT_EQ((jint)(0xCAFEBABE - 0xCAFED00D), d.i);
+ EXPECT_EQ(2, gJava_MyClass_fooII_calls);
+}
+
+
+TEST_F(JniCompilerTest, CompileAndRunDoubleDoubleMethod) {
+ scoped_ptr<DexFile> dex(OpenDexFileBase64(kMyClassNativesDex));
+ scoped_ptr<ClassLinker> linker(ClassLinker::Create());
+ linker->AppendToClassPath(dex.get());
+ Class* klass = linker->FindClass("LMyClass;", NULL);
+ Method* method = klass->FindVirtualMethod("fooDD");
+
+ Assembler jni_asm;
+ JniCompiler jni_compiler;
+ jni_compiler.Compile(&jni_asm, method);
+
+ // TODO: should really use JNIEnv to RegisterNative, but missing a
+ // complete story on this, so hack the RegisterNative below
+ method->RegisterNative(reinterpret_cast<void*>(&Java_MyClass_fooDD));
+
+ jvalue a, b, c, d;
+ a.l = (jobject)NULL;
+ b.d = 99;
+ c.d = 10;
+ EXPECT_EQ(0, gJava_MyClass_fooDD_calls);
+ d = RunMethodD(method, a, b, c);
+ ASSERT_EQ(b.d - c.d, d.d);
+ EXPECT_EQ(1, gJava_MyClass_fooDD_calls);
+ b.d = 3.14159265358979323846;
+ c.d = 0.69314718055994530942;
+ d = RunMethodD(method, a, b, c);
+ ASSERT_EQ(b.d - c.d, d.d);
+ EXPECT_EQ(2, gJava_MyClass_fooDD_calls);
+}
+
+TEST_F(JniCompilerTest, CompileAndRunIntObjectObjectMethod) {
+ scoped_ptr<DexFile> dex(OpenDexFileBase64(kMyClassNativesDex));
+ scoped_ptr<ClassLinker> linker(ClassLinker::Create());
+ linker->AppendToClassPath(dex.get());
+ Class* klass = linker->FindClass("LMyClass;", NULL);
+ Method* method = klass->FindVirtualMethod("fooIOO");
+
+ Assembler jni_asm;
+ JniCompiler jni_compiler;
+ jni_compiler.Compile(&jni_asm, method);
+
+ // TODO: should really use JNIEnv to RegisterNative, but missing a
+ // complete story on this, so hack the RegisterNative below
+ method->RegisterNative(reinterpret_cast<void*>(&Java_MyClass_fooIOO));
+
+ jvalue a, b, c, d, e;
+ a.l = (jobject)NULL;
+ b.i = 0;
+ c.l = (jobject)NULL;
+ d.l = (jobject)NULL;
+ EXPECT_EQ(0, gJava_MyClass_fooIOO_calls);
+ e = RunMethod(method, a, b, c, d);
+ ASSERT_EQ((jobject)NULL, e.l);
+ EXPECT_EQ(1, gJava_MyClass_fooIOO_calls);
+ a.l = (jobject)8;
+ b.i = 0;
+ c.l = (jobject)NULL;
+ d.l = (jobject)16;
+ e = RunMethod(method, a, b, c, d);
+ ASSERT_EQ((jobject)8, e.l);
+ EXPECT_EQ(2, gJava_MyClass_fooIOO_calls);
+ b.i = 1;
+ e = RunMethod(method, a, b, c, d);
+ ASSERT_EQ((jobject)NULL, e.l);
+ EXPECT_EQ(3, gJava_MyClass_fooIOO_calls);
+ b.i = 2;
+ e = RunMethod(method, a, b, c, d);
+ ASSERT_EQ((jobject)16, e.l);
+ EXPECT_EQ(4, gJava_MyClass_fooIOO_calls);
+ a.l = (jobject)8;
+ b.i = 0;
+ c.l = (jobject)16;
+ d.l = (jobject)NULL;
+ e = RunMethod(method, a, b, c, d);
+ ASSERT_EQ((jobject)8, e.l);
+ EXPECT_EQ(5, gJava_MyClass_fooIOO_calls);
+ b.i = 1;
+ e = RunMethod(method, a, b, c, d);
+ ASSERT_EQ((jobject)16, e.l);
+ EXPECT_EQ(6, gJava_MyClass_fooIOO_calls);
+ b.i = 2;
+ e = RunMethod(method, a, b, c, d);
+ ASSERT_EQ((jobject)NULL, e.l);
+ EXPECT_EQ(7, gJava_MyClass_fooIOO_calls);
+}
+
+TEST_F(JniCompilerTest, CompileAndRunStaticIntObjectObjectMethod) {
+ scoped_ptr<DexFile> dex(OpenDexFileBase64(kMyClassNativesDex));
+ scoped_ptr<ClassLinker> linker(ClassLinker::Create());
+ linker->AppendToClassPath(dex.get());
+ Class* klass = linker->FindClass("LMyClass;", NULL);
+ Method* method = klass->FindDirectMethod("fooSIOO");
+
+ Assembler jni_asm;
+ JniCompiler jni_compiler;
+ jni_compiler.Compile(&jni_asm, method);
+
+ // TODO: should really use JNIEnv to RegisterNative, but missing a
+ // complete story on this, so hack the RegisterNative below
+ method->RegisterNative(reinterpret_cast<void*>(&Java_MyClass_fooSIOO));
+
+ jvalue a, b, c, d;
+ a.i = 0;
+ b.l = (jobject)NULL;
+ c.l = (jobject)NULL;
+ EXPECT_EQ(0, gJava_MyClass_fooSIOO_calls);
+ d = RunMethod(method, a, b, c, a);
+ ASSERT_EQ((jobject)method->GetClass(), d.l);
+ EXPECT_EQ(1, gJava_MyClass_fooSIOO_calls);
+ a.i = 0;
+ b.l = (jobject)NULL;
+ c.l = (jobject)16;
+ d = RunMethod(method, a, b, c, a);
+ ASSERT_EQ((jobject)method->GetClass(), d.l);
+ EXPECT_EQ(2, gJava_MyClass_fooSIOO_calls);
+ a.i = 1;
+ d = RunMethod(method, a, b, c, a);
+ ASSERT_EQ((jobject)NULL, d.l);
+ EXPECT_EQ(3, gJava_MyClass_fooSIOO_calls);
+ a.i = 2;
+ d = RunMethod(method, a, b, c, a);
+ ASSERT_EQ((jobject)16, d.l);
+ EXPECT_EQ(4, gJava_MyClass_fooSIOO_calls);
+ a.i = 0;
+ b.l = (jobject)16;
+ c.l = (jobject)NULL;
+ d = RunMethod(method, a, b, c, a);
+ ASSERT_EQ((jobject)method->GetClass(), d.l);
+ EXPECT_EQ(5, gJava_MyClass_fooSIOO_calls);
+ a.i = 1;
+ d = RunMethod(method, a, b, c, a);
+ ASSERT_EQ((jobject)16, d.l);
+ EXPECT_EQ(6, gJava_MyClass_fooSIOO_calls);
+ a.i = 2;
+ d = RunMethod(method, a, b, c, a);
+ ASSERT_EQ((jobject)NULL, d.l);
+ EXPECT_EQ(7, gJava_MyClass_fooSIOO_calls);
+}
+
+} // namespace art
diff --git a/src/managed_register.h b/src/managed_register.h
new file mode 100644
index 0000000..7e077a0
--- /dev/null
+++ b/src/managed_register.h
@@ -0,0 +1,14 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#ifndef ART_SRC_MANAGED_REGISTER_H_
+#define ART_SRC_MANAGED_REGISTER_H_
+
+#if defined(__i386__)
+#include "src/managed_register_x86.h"
+#elif defined(__arm__)
+#include "src/managed_register_arm.h"
+#else
+#error Unknown architecture.
+#endif
+
+#endif // ART_SRC_MANAGED_REGISTER_H_
diff --git a/src/managed_register_arm.cc b/src/managed_register_arm.cc
new file mode 100644
index 0000000..d203689
--- /dev/null
+++ b/src/managed_register_arm.cc
@@ -0,0 +1,93 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#include "src/globals.h"
+#include "src/calling_convention.h"
+#include "src/managed_register.h"
+
+namespace art {
+
+// We need all registers for caching of locals.
+// Register R9 .. R15 are reserved.
+static const int kNumberOfAvailableCoreRegisters = (R8 - R0) + 1;
+static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters;
+static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
+static const int kNumberOfAvailableOverlappingDRegisters =
+ kNumberOfOverlappingDRegisters;
+static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+
+// Returns true if this managed-register overlaps the other managed-register.
+bool ManagedRegister::Overlaps(const ManagedRegister& other) const {
+ if (IsNoRegister() || other.IsNoRegister()) return false;
+ if (Equals(other)) return true;
+ if (IsRegisterPair()) {
+ Register low = AsRegisterPairLow();
+ Register high = AsRegisterPairHigh();
+ return ManagedRegister::FromCoreRegister(low).Overlaps(other) ||
+ ManagedRegister::FromCoreRegister(high).Overlaps(other);
+ }
+ if (IsOverlappingDRegister()) {
+ if (other.IsDRegister()) return Equals(other);
+ if (other.IsSRegister()) {
+ SRegister low = AsOverlappingDRegisterLow();
+ SRegister high = AsOverlappingDRegisterHigh();
+ SRegister other_sreg = other.AsSRegister();
+ return (low == other_sreg) || (high == other_sreg);
+ }
+ return false;
+ }
+ if (other.IsRegisterPair() || other.IsOverlappingDRegister()) {
+ return other.Overlaps(*this);
+ }
+ return false;
+}
+
+
+int ManagedRegister::AllocIdLow() const {
+ CHECK(IsOverlappingDRegister() || IsRegisterPair());
+ const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+ int low;
+ if (r < kNumberOfOverlappingDRegIds) {
+ CHECK(IsOverlappingDRegister());
+ low = (r * 2) + kNumberOfCoreRegIds; // Return a SRegister.
+ } else {
+ CHECK(IsRegisterPair());
+ low = (r - kNumberOfDRegIds) * 2; // Return a Register.
+ }
+ return low;
+}
+
+
+int ManagedRegister::AllocIdHigh() const {
+ return AllocIdLow() + 1;
+}
+
+
+void ManagedRegister::Print(std::ostream& os) const {
+ if (!IsValidManagedRegister()) {
+ os << "No Register";
+ } else if (IsCoreRegister()) {
+ os << "Core: " << static_cast<int>(AsCoreRegister());
+ } else if (IsRegisterPair()) {
+ os << "Pair: " << static_cast<int>(AsRegisterPairLow()) << ", "
+ << static_cast<int>(AsRegisterPairHigh());
+ } else if (IsSRegister()) {
+ os << "SRegister: " << static_cast<int>(AsSRegister());
+ } else if (IsDRegister()) {
+ os << "DRegister: " << static_cast<int>(AsDRegister());
+ } else {
+ os << "??: " << RegId();
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const ManagedRegister& reg) {
+ reg.Print(os);
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& r) {
+ os << ManagedRegister::FromRegisterPair(r);
+ return os;
+}
+
+} // namespace art
diff --git a/src/managed_register_arm.h b/src/managed_register_arm.h
new file mode 100644
index 0000000..843095c
--- /dev/null
+++ b/src/managed_register_arm.h
@@ -0,0 +1,266 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#ifndef ART_SRC_MANAGED_REGISTER_ARM_H_
+#define ART_SRC_MANAGED_REGISTER_ARM_H_
+
+#include "src/constants.h"
+#include "src/logging.h"
+
+namespace art {
+
+// Values for register pairs.
+enum RegisterPair {
+ R0_R1 = 0,
+ R2_R3 = 1,
+ R4_R5 = 2,
+ R6_R7 = 3,
+ kNumberOfRegisterPairs = 4,
+ kNoRegisterPair = -1,
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCoreRegIds = kNumberOfCoreRegisters;
+const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters;
+
+const int kNumberOfSRegIds = kNumberOfSRegisters;
+const int kNumberOfSAllocIds = kNumberOfSRegisters;
+
+const int kNumberOfDRegIds = kNumberOfDRegisters;
+const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters;
+const int kNumberOfDAllocIds = kNumberOfDRegIds - kNumberOfOverlappingDRegIds;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfSRegIds +
+ kNumberOfDRegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds =
+ kNumberOfCoreAllocIds + kNumberOfSAllocIds + kNumberOfDAllocIds;
+
+// Register ids map:
+// [0..R[ core registers (enum Register)
+// [R..S[ single precision VFP registers (enum SRegister)
+// [S..D[ double precision VFP registers (enum DRegister)
+// [D..P[ core register pairs (enum RegisterPair)
+// where
+// R = kNumberOfCoreRegIds
+// S = R + kNumberOfSRegIds
+// D = S + kNumberOfDRegIds
+// P = D + kNumberOfRegisterPairs
+
+// Allocation ids map:
+// [0..R[ core registers (enum Register)
+// [R..S[ single precision VFP registers (enum SRegister)
+// [S..N[ non-overlapping double precision VFP registers (16-31 in enum
+// DRegister, VFPv3-D32 only)
+// where
+// R = kNumberOfCoreAllocIds
+// S = R + kNumberOfSAllocIds
+// N = S + kNumberOfDAllocIds
+
+
+// An instance of class 'ManagedRegister' represents a single ARM register or a
+// pair of core ARM registers (enum RegisterPair). A single register is either a
+// core register (enum Register), a VFP single precision register
+// (enum SRegister), or a VFP double precision register (enum DRegister).
+// 'ManagedRegister::NoRegister()' returns an invalid ManagedRegister.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class ManagedRegister {
+ public:
+ // ManagedRegister is a value class. There exists no method to change the
+ // internal state. We therefore allow a copy constructor and an
+ // assignment-operator.
+ ManagedRegister(const ManagedRegister& other) : id_(other.id_) { }
+
+ ManagedRegister& operator=(const ManagedRegister& other) {
+ id_ = other.id_;
+ return *this;
+ }
+
+ Register AsCoreRegister() const {
+ CHECK(IsCoreRegister());
+ return static_cast<Register>(id_);
+ }
+
+ SRegister AsSRegister() const {
+ CHECK(IsSRegister());
+ return static_cast<SRegister>(id_ - kNumberOfCoreRegIds);
+ }
+
+ DRegister AsDRegister() const {
+ CHECK(IsDRegister());
+ return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
+ }
+
+ SRegister AsOverlappingDRegisterLow() const {
+ CHECK(IsOverlappingDRegister());
+ DRegister d_reg = AsDRegister();
+ return static_cast<SRegister>(d_reg * 2);
+ }
+
+ SRegister AsOverlappingDRegisterHigh() const {
+ CHECK(IsOverlappingDRegister());
+ DRegister d_reg = AsDRegister();
+ return static_cast<SRegister>(d_reg * 2 + 1);
+ }
+
+ RegisterPair AsRegisterPair() const {
+ CHECK(IsRegisterPair());
+ Register reg_low = AsRegisterPairLow();
+ return static_cast<RegisterPair>(reg_low / 2);
+ }
+
+ Register AsRegisterPairLow() const {
+ CHECK(IsRegisterPair());
+ // Appropriate mapping of register ids allows to use AllocIdLow().
+ return FromRegId(AllocIdLow()).AsCoreRegister();
+ }
+
+ Register AsRegisterPairHigh() const {
+ CHECK(IsRegisterPair());
+ // Appropriate mapping of register ids allows to use AllocIdHigh().
+ return FromRegId(AllocIdHigh()).AsCoreRegister();
+ }
+
+ bool IsCoreRegister() const {
+ CHECK(IsValidManagedRegister());
+ return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
+ }
+
+ bool IsSRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - kNumberOfCoreRegIds;
+ return (0 <= test) && (test < kNumberOfSRegIds);
+ }
+
+ bool IsDRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+ return (0 <= test) && (test < kNumberOfDRegIds);
+ }
+
+ // Returns true if this DRegister overlaps SRegisters.
+ bool IsOverlappingDRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+ return (0 <= test) && (test < kNumberOfOverlappingDRegIds);
+ }
+
+ bool IsRegisterPair() const {
+ CHECK(IsValidManagedRegister());
+ const int test =
+ id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds + kNumberOfDRegIds);
+ return (0 <= test) && (test < kNumberOfPairRegIds);
+ }
+
+ bool IsSameType(ManagedRegister test) const {
+ CHECK(IsValidManagedRegister() && test.IsValidManagedRegister());
+ return
+ (IsCoreRegister() && test.IsCoreRegister()) ||
+ (IsSRegister() && test.IsSRegister()) ||
+ (IsDRegister() && test.IsDRegister()) ||
+ (IsRegisterPair() && test.IsRegisterPair());
+ }
+
+ bool IsNoRegister() const {
+ return id_ == kNoRegister;
+ }
+
+ // It is valid to invoke Equals on and with a NoRegister.
+ bool Equals(const ManagedRegister& other) const {
+ return id_ == other.id_;
+ }
+
+ // Returns true if the two managed-registers ('this' and 'other') overlap.
+ // Either managed-register may be the NoRegister. If both are the NoRegister
+ // then false is returned.
+ bool Overlaps(const ManagedRegister& other) const;
+
+ void Print(std::ostream& os) const;
+
+ static ManagedRegister NoRegister() {
+ return ManagedRegister();
+ }
+
+ static ManagedRegister FromCoreRegister(Register r) {
+ CHECK_NE(r, kNoRegister);
+ return FromRegId(r);
+ }
+
+ static ManagedRegister FromSRegister(SRegister r) {
+ CHECK_NE(r, kNoSRegister);
+ return FromRegId(r + kNumberOfCoreRegIds);
+ }
+
+ static ManagedRegister FromDRegister(DRegister r) {
+ CHECK_NE(r, kNoDRegister);
+ return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfSRegIds));
+ }
+
+ static ManagedRegister FromRegisterPair(RegisterPair r) {
+ CHECK_NE(r, kNoRegisterPair);
+ return FromRegId(r + (kNumberOfCoreRegIds +
+ kNumberOfSRegIds + kNumberOfDRegIds));
+ }
+
+ // Return a RegisterPair consisting of Register r_low and r_low + 1.
+ static ManagedRegister FromCoreRegisterPair(Register r_low) {
+ CHECK_NE(r_low, kNoRegister);
+ CHECK_EQ(0, (r_low % 2));
+ const int r = r_low / 2;
+ CHECK_LT(r, kNumberOfPairRegIds);
+ return FromRegisterPair(static_cast<RegisterPair>(r));
+ }
+
+ // Return a DRegister overlapping SRegister r_low and r_low + 1.
+ static ManagedRegister FromSRegisterPair(SRegister r_low) {
+ CHECK_NE(r_low, kNoSRegister);
+ CHECK_EQ(0, (r_low % 2));
+ const int r = r_low / 2;
+ CHECK_LT(r, kNumberOfOverlappingDRegIds);
+ return FromDRegister(static_cast<DRegister>(r));
+ }
+
+ private:
+ static const int kNoRegister = -1;
+
+ ManagedRegister() : id_(kNoRegister) { }
+
+ bool IsValidManagedRegister() const {
+ return (0 <= id_) && (id_ < kNumberOfRegIds);
+ }
+
+ int RegId() const {
+ CHECK(!IsNoRegister());
+ return id_;
+ }
+
+ int AllocId() const {
+ CHECK(IsValidManagedRegister() &&
+ !IsOverlappingDRegister() && !IsRegisterPair());
+ int r = id_;
+ if ((kNumberOfDAllocIds > 0) && IsDRegister()) { // VFPv3-D32 only.
+ r -= kNumberOfOverlappingDRegIds;
+ }
+ CHECK_LT(r, kNumberOfAllocIds);
+ return r;
+ }
+
+ int AllocIdLow() const;
+ int AllocIdHigh() const;
+
+ static ManagedRegister FromRegId(int reg_id) {
+ ManagedRegister reg;
+ reg.id_ = reg_id;
+ CHECK(reg.IsValidManagedRegister());
+ return reg;
+ }
+
+ int id_;
+};
+
+std::ostream& operator<<(std::ostream& os, const ManagedRegister& reg);
+
+} // namespace art
+
+#endif // ART_SRC_MANAGED_REGISTER_ARM_H_
diff --git a/src/managed_register_arm_test.cc b/src/managed_register_arm_test.cc
new file mode 100644
index 0000000..7ffe894
--- /dev/null
+++ b/src/managed_register_arm_test.cc
@@ -0,0 +1,739 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#include "src/globals.h"
+#include "src/managed_register_arm.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(ManagedRegister, NoRegister) {  // The no-register sentinel satisfies IsNoRegister and overlaps nothing.
+ ManagedRegister reg = ManagedRegister::NoRegister();
+ EXPECT_TRUE(reg.IsNoRegister());
+ EXPECT_TRUE(!reg.Overlaps(reg));  // NoRegister does not even overlap itself.
+}
+
+TEST(ManagedRegister, CoreRegister) {  // Core registers answer only the core-register predicates.
+ ManagedRegister reg = ManagedRegister::FromCoreRegister(R0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());  // NOTE(review): this R0 stanza omits the IsOverlappingDRegister() check present in the stanzas below — confirm intentional.
+ EXPECT_EQ(R0, reg.AsCoreRegister());
+
+ reg = ManagedRegister::FromCoreRegister(R1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(R1, reg.AsCoreRegister());
+
+ reg = ManagedRegister::FromCoreRegister(R8);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(R8, reg.AsCoreRegister());
+
+ reg = ManagedRegister::FromCoreRegister(R15);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(R15, reg.AsCoreRegister());
+}
+
+
+TEST(ManagedRegister, SRegister) {  // Single-precision VFP registers answer only the S-register predicates.
+ ManagedRegister reg = ManagedRegister::FromSRegister(S0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S0, reg.AsSRegister());
+
+ reg = ManagedRegister::FromSRegister(S1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S1, reg.AsSRegister());
+
+ reg = ManagedRegister::FromSRegister(S3);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S3, reg.AsSRegister());
+
+ reg = ManagedRegister::FromSRegister(S15);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S15, reg.AsSRegister());
+
+ reg = ManagedRegister::FromSRegister(S30);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S30, reg.AsSRegister());
+
+ reg = ManagedRegister::FromSRegister(S31);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(S31, reg.AsSRegister());
+}
+
+
+TEST(ManagedRegister, DRegister) {  // D0-D15 overlap S-register pairs (S2n/S2n+1); D16+ (VFPv3-D32) do not.
+ ManagedRegister reg = ManagedRegister::FromDRegister(D0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D0, reg.AsDRegister());
+ EXPECT_EQ(S0, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(S1, reg.AsOverlappingDRegisterHigh());
+ EXPECT_TRUE(reg.Equals(ManagedRegister::FromSRegisterPair(S0)));
+
+ reg = ManagedRegister::FromDRegister(D1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D1, reg.AsDRegister());
+ EXPECT_EQ(S2, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(S3, reg.AsOverlappingDRegisterHigh());
+ EXPECT_TRUE(reg.Equals(ManagedRegister::FromSRegisterPair(S2)));
+
+ reg = ManagedRegister::FromDRegister(D6);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D6, reg.AsDRegister());
+ EXPECT_EQ(S12, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(S13, reg.AsOverlappingDRegisterHigh());
+ EXPECT_TRUE(reg.Equals(ManagedRegister::FromSRegisterPair(S12)));
+
+ reg = ManagedRegister::FromDRegister(D14);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D14, reg.AsDRegister());
+ EXPECT_EQ(S28, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(S29, reg.AsOverlappingDRegisterHigh());
+ EXPECT_TRUE(reg.Equals(ManagedRegister::FromSRegisterPair(S28)));
+
+ reg = ManagedRegister::FromDRegister(D15);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D15, reg.AsDRegister());
+ EXPECT_EQ(S30, reg.AsOverlappingDRegisterLow());
+ EXPECT_EQ(S31, reg.AsOverlappingDRegisterHigh());
+ EXPECT_TRUE(reg.Equals(ManagedRegister::FromSRegisterPair(S30)));
+
+#ifdef VFPv3_D32
+ reg = ManagedRegister::FromDRegister(D16);  // D16-D31 exist only on VFPv3-D32 and have no S-register aliases.
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D16, reg.AsDRegister());
+
+ reg = ManagedRegister::FromDRegister(D18);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D18, reg.AsDRegister());
+
+ reg = ManagedRegister::FromDRegister(D30);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D30, reg.AsDRegister());
+
+ reg = ManagedRegister::FromDRegister(D31);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(D31, reg.AsDRegister());
+#endif // VFPv3_D32
+}
+
+
+TEST(ManagedRegister, Pair) {  // Register pairs expose low/high halves and equal the even-aligned core pair.
+ ManagedRegister reg = ManagedRegister::FromRegisterPair(R0_R1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(R0_R1, reg.AsRegisterPair());
+ EXPECT_EQ(R0, reg.AsRegisterPairLow());
+ EXPECT_EQ(R1, reg.AsRegisterPairHigh());
+ EXPECT_TRUE(reg.Equals(ManagedRegister::FromCoreRegisterPair(R0)));
+
+ reg = ManagedRegister::FromRegisterPair(R2_R3);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(R2_R3, reg.AsRegisterPair());
+ EXPECT_EQ(R2, reg.AsRegisterPairLow());
+ EXPECT_EQ(R3, reg.AsRegisterPairHigh());
+ EXPECT_TRUE(reg.Equals(ManagedRegister::FromCoreRegisterPair(R2)));
+
+ reg = ManagedRegister::FromRegisterPair(R4_R5);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(R4_R5, reg.AsRegisterPair());
+ EXPECT_EQ(R4, reg.AsRegisterPairLow());
+ EXPECT_EQ(R5, reg.AsRegisterPairHigh());
+ EXPECT_TRUE(reg.Equals(ManagedRegister::FromCoreRegisterPair(R4)));
+
+ reg = ManagedRegister::FromRegisterPair(R6_R7);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCoreRegister());
+ EXPECT_TRUE(!reg.IsSRegister());
+ EXPECT_TRUE(!reg.IsDRegister());
+ EXPECT_TRUE(!reg.IsOverlappingDRegister());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(R6_R7, reg.AsRegisterPair());
+ EXPECT_EQ(R6, reg.AsRegisterPairLow());
+ EXPECT_EQ(R7, reg.AsRegisterPairHigh());
+ EXPECT_TRUE(reg.Equals(ManagedRegister::FromCoreRegisterPair(R6)));
+}
+
+
+TEST(ManagedRegister, Equals) {  // Equals is exact-identity: a register only equals the same kind and index.
+ ManagedRegister no_reg = ManagedRegister::NoRegister();
+ EXPECT_TRUE(no_reg.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!no_reg.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!no_reg.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!no_reg.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!no_reg.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!no_reg.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+ ManagedRegister reg_R0 = ManagedRegister::FromCoreRegister(R0);
+ EXPECT_TRUE(!reg_R0.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(reg_R0.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_R0.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_R0.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R0.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R0.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+ ManagedRegister reg_R1 = ManagedRegister::FromCoreRegister(R1);
+ EXPECT_TRUE(!reg_R1.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_R1.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(reg_R1.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_R1.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R1.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R1.Equals(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_R1.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_R1.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+ ManagedRegister reg_R8 = ManagedRegister::FromCoreRegister(R8);
+ EXPECT_TRUE(!reg_R8.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_R8.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(reg_R8.Equals(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg_R8.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R8.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R8.Equals(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_R8.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_R8.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+ ManagedRegister reg_S0 = ManagedRegister::FromSRegister(S0);
+ EXPECT_TRUE(!reg_S0.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_S0.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_S0.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(reg_S0.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_S0.Equals(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_S0.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_S0.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_S0.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+ ManagedRegister reg_S1 = ManagedRegister::FromSRegister(S1);
+ EXPECT_TRUE(!reg_S1.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_S1.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_S1.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_S1.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg_S1.Equals(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg_S1.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_S1.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_S1.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+ ManagedRegister reg_S31 = ManagedRegister::FromSRegister(S31);
+ EXPECT_TRUE(!reg_S31.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_S31.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_S31.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_S31.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg_S31.Equals(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_S31.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_S31.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_S31.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+ ManagedRegister reg_D0 = ManagedRegister::FromDRegister(D0);
+ EXPECT_TRUE(!reg_D0.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D0.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_D0.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_D0.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D0.Equals(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(reg_D0.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D0.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_D0.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+ ManagedRegister reg_D15 = ManagedRegister::FromDRegister(D15);
+ EXPECT_TRUE(!reg_D15.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D15.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_D15.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_D15.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D15.Equals(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_D15.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D15.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(reg_D15.Equals(ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg_D15.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+#ifdef VFPv3_D32
+ ManagedRegister reg_D16 = ManagedRegister::FromDRegister(D16);
+ EXPECT_TRUE(!reg_D16.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D16.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_D16.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_D16.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D16.Equals(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_D16.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D16.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_D16.Equals(ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(reg_D16.Equals(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg_D16.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+ ManagedRegister reg_D30 = ManagedRegister::FromDRegister(D30);
+ EXPECT_TRUE(!reg_D30.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D30.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_D30.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_D30.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D30.Equals(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_D30.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D30.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_D30.Equals(ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg_D30.Equals(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(reg_D30.Equals(ManagedRegister::FromDRegister(D30)));
+ EXPECT_TRUE(!reg_D30.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+
+ ManagedRegister reg_D31 = ManagedRegister::FromDRegister(D31);  // Fixed: was FromDRegister(D30), contradicting the D30/D31 expectations below.
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::FromDRegister(D30)));
+ EXPECT_TRUE(reg_D31.Equals(ManagedRegister::FromDRegister(D31)));
+ EXPECT_TRUE(!reg_D31.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+#endif // VFPv3_D32
+
+ ManagedRegister reg_R0R1 = ManagedRegister::FromRegisterPair(R0_R1);
+ EXPECT_TRUE(!reg_R0R1.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_R0R1.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(reg_R0R1.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg_R0R1.Equals(ManagedRegister::FromRegisterPair(R2_R3)));
+
+ ManagedRegister reg_R4R5 = ManagedRegister::FromRegisterPair(R4_R5);
+ EXPECT_TRUE(!reg_R4R5.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_R4R5.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(reg_R4R5.Equals(ManagedRegister::FromRegisterPair(R4_R5)));
+ EXPECT_TRUE(!reg_R4R5.Equals(ManagedRegister::FromRegisterPair(R6_R7)));
+
+ ManagedRegister reg_R6R7 = ManagedRegister::FromRegisterPair(R6_R7);
+ EXPECT_TRUE(!reg_R6R7.Equals(ManagedRegister::NoRegister()));
+ EXPECT_TRUE(!reg_R6R7.Equals(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg_R6R7.Equals(ManagedRegister::FromRegisterPair(R4_R5)));
+ EXPECT_TRUE(reg_R6R7.Equals(ManagedRegister::FromRegisterPair(R6_R7)));
+}
+
+
+TEST(ManagedRegister, Overlaps) {
+ ManagedRegister reg = ManagedRegister::FromCoreRegister(R0);
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromCoreRegister(R1);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromCoreRegister(R7);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromSRegister(S0);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromSRegister(S1);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromSRegister(S15);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromSRegister(S31);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromDRegister(D0);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromDRegister(D7);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromDRegister(D15);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+#ifdef VFPv3_D32
+ reg = ManagedRegister::FromDRegister(D16);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromDRegister(D31);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+#endif // VFPv3_D32
+
+ reg = ManagedRegister::FromRegisterPair(R0_R1);
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+
+ reg = ManagedRegister::FromRegisterPair(R4_R5);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCoreRegister(R8)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S2)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S15)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S30)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromSRegister(S31)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D1)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D16)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromDRegister(D31)));
+#endif // VFPv3_D32
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(R0_R1)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(R4_R5)));
+}
+
+} // namespace art
diff --git a/src/managed_register_x86.cc b/src/managed_register_x86.cc
new file mode 100644
index 0000000..414c51b
--- /dev/null
+++ b/src/managed_register_x86.cc
@@ -0,0 +1,112 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#include "src/globals.h"
+#include "src/calling_convention.h"
+#include "src/managed_register.h"
+
+namespace art {
+
// These cpu registers are never available for allocation.
static const Register kReservedCpuRegistersArray[] = { EBP, ESP };


// We reduce the number of available registers for allocation in debug-code
// mode in order to increase register pressure.
// NOTE(review): the constants below are unconditional; the debug-mode
// reduction described above is not implemented here — confirm intent.

// We need all registers for caching.
static const int kNumberOfAvailableCpuRegisters = kNumberOfCpuRegisters;
static const int kNumberOfAvailableXmmRegisters = kNumberOfXmmRegisters;
static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+
// Define register pairs.
// This list must be kept in sync with the RegisterPair enum.
#define REGISTER_PAIR_LIST(P) \
  P(EAX, EDX) \
  P(EAX, ECX) \
  P(EAX, EBX) \
  P(EAX, EDI) \
  P(EDX, ECX) \
  P(EDX, EBX) \
  P(EDX, EDI) \
  P(ECX, EBX) \
  P(ECX, EDI) \
  P(EBX, EDI)


// Descriptor for one entry of kRegisterPairs below.
struct RegisterPairDescriptor {
  RegisterPair reg;  // Used to verify that the enum is in sync.
  Register low;      // Cpu register holding the low word of the pair.
  Register high;     // Cpu register holding the high word of the pair.
};


// X-macro expansion of REGISTER_PAIR_LIST: one descriptor per pair, in the
// same order as the RegisterPair enum (checked via the 'reg' field).
static const RegisterPairDescriptor kRegisterPairs[] = {
#define REGISTER_PAIR_ENUMERATION(low, high) { low##_##high, low, high },
  REGISTER_PAIR_LIST(REGISTER_PAIR_ENUMERATION)
#undef REGISTER_PAIR_ENUMERATION
};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) {
+ os << ManagedRegister::FromRegisterPair(reg);
+ return os;
+}
+
// Returns true if the two managed-registers share any underlying machine
// register. A NoRegister never overlaps anything, including itself.
bool ManagedRegister::Overlaps(const ManagedRegister& other) const {
  if (IsNoRegister() || other.IsNoRegister()) return false;
  CHECK(IsValidManagedRegister());
  CHECK(other.IsValidManagedRegister());
  if (Equals(other)) return true;
  if (IsRegisterPair()) {
    // A pair overlaps whatever either of its cpu-register halves overlaps.
    Register low = AsRegisterPairLow();
    Register high = AsRegisterPairHigh();
    return ManagedRegister::FromCpuRegister(low).Overlaps(other) ||
        ManagedRegister::FromCpuRegister(high).Overlaps(other);
  }
  if (other.IsRegisterPair()) {
    // Symmetric case: let the pair side do the decomposition.
    return other.Overlaps(*this);
  }
  // Distinct non-pair registers never share machine state.
  return false;
}
+
+
// Returns the allocation id of the cpu register holding the low word of this
// register pair.
int ManagedRegister::AllocIdLow() const {
  CHECK(IsRegisterPair());
  // Map the pair's register id back to an index into kRegisterPairs.
  const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
                           kNumberOfX87RegIds);
  CHECK_EQ(r, kRegisterPairs[r].reg);  // Table must be in sync with the enum.
  return kRegisterPairs[r].low;
}
+
+
// Returns the allocation id of the cpu register holding the high word of this
// register pair.
int ManagedRegister::AllocIdHigh() const {
  CHECK(IsRegisterPair());
  // Map the pair's register id back to an index into kRegisterPairs.
  const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
                           kNumberOfX87RegIds);
  CHECK_EQ(r, kRegisterPairs[r].reg);  // Table must be in sync with the enum.
  return kRegisterPairs[r].high;
}
+
+
+void ManagedRegister::Print(std::ostream& os) const {
+ if (!IsValidManagedRegister()) {
+ os << "No Register";
+ } else if (IsXmmRegister()) {
+ os << "XMM: " << static_cast<int>(AsXmmRegister());
+ } else if (IsX87Register()) {
+ os << "X87: " << static_cast<int>(AsX87Register());
+ } else if (IsCpuRegister()) {
+ os << "CPU: " << static_cast<int>(AsCpuRegister());
+ } else if (IsRegisterPair()) {
+ os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh();
+ } else {
+ os << "??: " << RegId();
+ }
+}
+
// Streams a managed register via its Print() member.
std::ostream& operator<<(std::ostream& os, const ManagedRegister& reg) {
  reg.Print(os);
  return os;
}
+
+} // namespace art
diff --git a/src/managed_register_x86.h b/src/managed_register_x86.h
new file mode 100644
index 0000000..5d906c3
--- /dev/null
+++ b/src/managed_register_x86.h
@@ -0,0 +1,214 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#ifndef ART_SRC_MANAGED_REGISTER_X86_H_
+#define ART_SRC_MANAGED_REGISTER_X86_H_
+
+#include "src/constants_x86.h"
+
+namespace art {
+
+// Values for register pairs.
+// The registers in kReservedCpuRegistersArray in managed_register_x86.cc are
+// not used in pairs. The table kRegisterPairs in managed_register_x86.cc must
+// be kept in sync with this enum.
enum RegisterPair {
  EAX_EDX = 0,
  EAX_ECX = 1,
  EAX_EBX = 2,
  EAX_EDI = 3,
  EDX_ECX = 4,
  EDX_EBX = 5,
  EDX_EDI = 6,
  ECX_EBX = 7,
  ECX_EDI = 8,
  EBX_EDI = 9,
  kNumberOfRegisterPairs = 10,  // Count of real pair values above.
  kNoRegisterPair = -1,         // Sentinel meaning "no pair".
};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCpuRegIds = kNumberOfCpuRegisters;
+const int kNumberOfCpuAllocIds = kNumberOfCpuRegisters;
+
+const int kNumberOfXmmRegIds = kNumberOfXmmRegisters;
+const int kNumberOfXmmAllocIds = kNumberOfXmmRegisters;
+
+const int kNumberOfX87RegIds = kNumberOfX87Registers;
+const int kNumberOfX87AllocIds = kNumberOfX87Registers;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+ kNumberOfX87RegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds +
+ kNumberOfX87RegIds;
+
+// Register ids map:
+// [0..R[ cpu registers (enum Register)
+// [R..X[ xmm registers (enum XmmRegister)
+// [X..S[ x87 registers (enum X87Register)
+// [S..P[ register pairs (enum RegisterPair)
+// where
+// R = kNumberOfCpuRegIds
+// X = R + kNumberOfXmmRegIds
+// S = X + kNumberOfX87RegIds
+// P = S + kNumberOfRegisterPairs
+
+// Allocation ids map:
+// [0..R[ cpu registers (enum Register)
+// [R..X[ xmm registers (enum XmmRegister)
+// [X..S[ x87 registers (enum X87Register)
+// where
+// R = kNumberOfCpuRegIds
+// X = R + kNumberOfXmmRegIds
+// S = X + kNumberOfX87RegIds
+
+
+// An instance of class 'ManagedRegister' represents a single cpu register
+// (enum Register), an xmm register (enum XmmRegister), an x87 register
+// (enum X87Register), or a pair of cpu registers (enum RegisterPair).
+// 'ManagedRegister::NoRegister()' provides an invalid register.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class ManagedRegister {
+ public:
+ // ManagedRegister is a value class. There exists no method to change the
+ // internal state. We therefore allow a copy constructor and an
+ // assignment-operator.
+ ManagedRegister(const ManagedRegister& other) : id_(other.id_) { }
+
+ ManagedRegister& operator=(const ManagedRegister& other) {
+ id_ = other.id_;
+ return *this;
+ }
+
+ Register AsCpuRegister() const {
+ CHECK(IsCpuRegister());
+ return static_cast<Register>(id_);
+ }
+
+ XmmRegister AsXmmRegister() const {
+ CHECK(IsXmmRegister());
+ return static_cast<XmmRegister>(id_ - kNumberOfCpuRegIds);
+ }
+
+ X87Register AsX87Register() const {
+ CHECK(IsX87Register());
+ return static_cast<X87Register>(id_ -
+ (kNumberOfCpuRegIds + kNumberOfXmmRegIds));
+ }
+
+ Register AsRegisterPairLow() const {
+ CHECK(IsRegisterPair());
+ // Appropriate mapping of register ids allows to use AllocIdLow().
+ return FromRegId(AllocIdLow()).AsCpuRegister();
+ }
+
+ Register AsRegisterPairHigh() const {
+ CHECK(IsRegisterPair());
+ // Appropriate mapping of register ids allows to use AllocIdHigh().
+ return FromRegId(AllocIdHigh()).AsCpuRegister();
+ }
+
+ bool IsCpuRegister() const {
+ CHECK(IsValidManagedRegister());
+ return (0 <= id_) && (id_ < kNumberOfCpuRegIds);
+ }
+
+ bool IsXmmRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - kNumberOfCpuRegIds;
+ return (0 <= test) && (test < kNumberOfXmmRegIds);
+ }
+
+ bool IsX87Register() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - (kNumberOfCpuRegIds + kNumberOfXmmRegIds);
+ return (0 <= test) && (test < kNumberOfXmmRegIds);
+ }
+
+ bool IsRegisterPair() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ -
+ (kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds);
+ return (0 <= test) && (test < kNumberOfPairRegIds);
+ }
+
+ bool IsNoRegister() const {
+ return id_ == kNoRegister;
+ }
+
+ void Print(std::ostream& os) const;
+
+ // It is valid to invoke Equals on and with a NoRegister.
+ bool Equals(const ManagedRegister& other) const {
+ return id_ == other.id_;
+ }
+
+ // Returns true if the two managed-registers ('this' and 'other') overlap.
+ // Either managed-register may be the NoRegister. If both are the NoRegister
+ // then false is returned.
+ bool Overlaps(const ManagedRegister& other) const;
+
+ static ManagedRegister NoRegister() {
+ return ManagedRegister();
+ }
+
+ static ManagedRegister FromCpuRegister(Register r) {
+ CHECK_NE(r, kNoRegister);
+ return FromRegId(r);
+ }
+
+ static ManagedRegister FromXmmRegister(XmmRegister r) {
+ CHECK_NE(r, kNoXmmRegister);
+ return FromRegId(r + kNumberOfCpuRegIds);
+ }
+
+ static ManagedRegister FromX87Register(X87Register r) {
+ CHECK_NE(r, kNoX87Register);
+ return FromRegId(r + kNumberOfCpuRegIds + kNumberOfXmmRegIds);
+ }
+
+ static ManagedRegister FromRegisterPair(RegisterPair r) {
+ CHECK_NE(r, kNoRegisterPair);
+ return FromRegId(r + (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+ kNumberOfX87RegIds));
+ }
+
+ private:
+ static const int kNoRegister = -1;
+
+ ManagedRegister() : id_(kNoRegister) { }
+
+ bool IsValidManagedRegister() const {
+ return (0 <= id_) && (id_ < kNumberOfRegIds);
+ }
+
+ int RegId() const {
+ CHECK(!IsNoRegister());
+ return id_;
+ }
+
+ int AllocId() const {
+ CHECK(IsValidManagedRegister() && !IsRegisterPair());
+ CHECK_LT(id_, kNumberOfAllocIds);
+ return id_;
+ }
+
+ int AllocIdLow() const;
+ int AllocIdHigh() const;
+
+ static ManagedRegister FromRegId(int reg_id) {
+ ManagedRegister reg;
+ reg.id_ = reg_id;
+ CHECK(reg.IsValidManagedRegister());
+ return reg;
+ }
+
+ int id_;
+};
+
+std::ostream& operator<<(std::ostream& os, const ManagedRegister& reg);
+
+} // namespace art
+
+#endif // ART_SRC_MANAGED_REGISTER_X86_H_
diff --git a/src/managed_register_x86_test.cc b/src/managed_register_x86_test.cc
new file mode 100644
index 0000000..3192bb3
--- /dev/null
+++ b/src/managed_register_x86_test.cc
@@ -0,0 +1,343 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#include "src/globals.h"
+#include "src/managed_register.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(ManagedRegister, NoRegister) {
+ ManagedRegister reg = ManagedRegister::NoRegister();
+ EXPECT_TRUE(reg.IsNoRegister());
+ EXPECT_TRUE(!reg.Overlaps(reg));
+}
+
+TEST(ManagedRegister, CpuRegister) {
+ ManagedRegister reg = ManagedRegister::FromCpuRegister(EAX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(EAX, reg.AsCpuRegister());
+
+ reg = ManagedRegister::FromCpuRegister(EBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(EBX, reg.AsCpuRegister());
+
+ reg = ManagedRegister::FromCpuRegister(ECX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ECX, reg.AsCpuRegister());
+
+ reg = ManagedRegister::FromCpuRegister(EDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(EDI, reg.AsCpuRegister());
+}
+
+TEST(ManagedRegister, XmmRegister) {
+ ManagedRegister reg = ManagedRegister::FromXmmRegister(XMM0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(XMM0, reg.AsXmmRegister());
+
+ reg = ManagedRegister::FromXmmRegister(XMM1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(XMM1, reg.AsXmmRegister());
+
+ reg = ManagedRegister::FromXmmRegister(XMM7);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(XMM7, reg.AsXmmRegister());
+}
+
+TEST(ManagedRegister, X87Register) {
+ ManagedRegister reg = ManagedRegister::FromX87Register(ST0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ST0, reg.AsX87Register());
+
+ reg = ManagedRegister::FromX87Register(ST1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ST1, reg.AsX87Register());
+
+ reg = ManagedRegister::FromX87Register(ST7);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ST7, reg.AsX87Register());
+}
+
+TEST(ManagedRegister, RegisterPair) {
+ ManagedRegister reg = ManagedRegister::FromRegisterPair(EAX_EDX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EDX, reg.AsRegisterPairHigh());
+
+ reg = ManagedRegister::FromRegisterPair(EAX_ECX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(ECX, reg.AsRegisterPairHigh());
+
+ reg = ManagedRegister::FromRegisterPair(EAX_EBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+ reg = ManagedRegister::FromRegisterPair(EAX_EDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+ reg = ManagedRegister::FromRegisterPair(EDX_ECX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+ EXPECT_EQ(ECX, reg.AsRegisterPairHigh());
+
+ reg = ManagedRegister::FromRegisterPair(EDX_EBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+ reg = ManagedRegister::FromRegisterPair(EDX_EDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+ reg = ManagedRegister::FromRegisterPair(ECX_EBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(ECX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+ reg = ManagedRegister::FromRegisterPair(ECX_EDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(ECX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+ reg = ManagedRegister::FromRegisterPair(EBX_EDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(EBX, reg.AsRegisterPairLow());
+ EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+}
+
+TEST(ManagedRegister, Equals) {
+ ManagedRegister reg_eax = ManagedRegister::FromCpuRegister(EAX);
+ EXPECT_TRUE(reg_eax.Equals(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg_eax.Equals(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg_eax.Equals(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg_eax.Equals(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_eax.Equals(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg_eax.Equals(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_eax.Equals(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg_eax.Equals(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg_eax.Equals(ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ ManagedRegister reg_xmm0 = ManagedRegister::FromXmmRegister(XMM0);
+ EXPECT_TRUE(!reg_xmm0.Equals(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg_xmm0.Equals(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg_xmm0.Equals(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(reg_xmm0.Equals(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_xmm0.Equals(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg_xmm0.Equals(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_xmm0.Equals(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg_xmm0.Equals(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg_xmm0.Equals(ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ ManagedRegister reg_st0 = ManagedRegister::FromX87Register(ST0);
+ EXPECT_TRUE(!reg_st0.Equals(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg_st0.Equals(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg_st0.Equals(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg_st0.Equals(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_st0.Equals(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(reg_st0.Equals(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_st0.Equals(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg_st0.Equals(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg_st0.Equals(ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ ManagedRegister reg_pair = ManagedRegister::FromRegisterPair(EAX_EDX);
+ EXPECT_TRUE(!reg_pair.Equals(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg_pair.Equals(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg_pair.Equals(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg_pair.Equals(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_pair.Equals(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg_pair.Equals(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_pair.Equals(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg_pair.Equals(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg_pair.Equals(ManagedRegister::FromRegisterPair(EBX_EDI)));
+}
+
+TEST(ManagedRegister, Overlaps) {
+ ManagedRegister reg = ManagedRegister::FromCpuRegister(EAX);
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = ManagedRegister::FromCpuRegister(EDX);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = ManagedRegister::FromCpuRegister(EDI);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = ManagedRegister::FromCpuRegister(EBX);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = ManagedRegister::FromXmmRegister(XMM0);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = ManagedRegister::FromX87Register(ST0);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = ManagedRegister::FromRegisterPair(EAX_EDX);
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(EDX_ECX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+ reg = ManagedRegister::FromRegisterPair(EBX_EDI);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(EBX_EDI)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(EDX_EBX)));
+
+ reg = ManagedRegister::FromRegisterPair(EDX_ECX);
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EAX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EBX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromCpuRegister(EDI)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(EAX_EDX)));
+ EXPECT_TRUE(!reg.Overlaps(ManagedRegister::FromRegisterPair(EBX_EDI)));
+ EXPECT_TRUE(reg.Overlaps(ManagedRegister::FromRegisterPair(EDX_EBX)));
+}
+
+} // namespace art
diff --git a/src/object.cc b/src/object.cc
index c2c09ca..95e8b67 100644
--- a/src/object.cc
+++ b/src/object.cc
@@ -1,16 +1,17 @@
// Copyright 2011 Google Inc. All Rights Reserved.
+#include "src/object.h"
+#include <algorithm>
+#include <string.h>
#include "src/globals.h"
#include "src/logging.h"
-#include "src/object.h"
#include "src/dex_file.h"
#include "src/raw_dex_file.h"
-#include <algorithm>
-
namespace art {
-bool Class::IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2) {
+bool Class::IsInSamePackage(const StringPiece& descriptor1,
+ const StringPiece& descriptor2) {
size_t i = 0;
while (descriptor1[i] != '\0' && descriptor1[i] == descriptor2[i]) {
++i;
@@ -24,10 +25,12 @@
}
#if 0
-bool Class::IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2) {
+bool Class::IsInSamePackage(const StringPiece& descriptor1,
+ const StringPiece& descriptor2) {
size_t size = std::min(descriptor1.size(), descriptor2.size());
std::pair<StringPiece::const_iterator, StringPiece::const_iterator> pos;
- pos = std::mismatch(descriptor1.begin(), descriptor1.begin() + size, descriptor2.begin());
+ pos = std::mismatch(descriptor1.begin(), descriptor1.begin() + size,
+ descriptor2.begin());
return !(*(pos.second).rfind('/') != npos && descriptor2.rfind('/') != npos);
}
#endif
@@ -67,6 +70,79 @@
return num_registers;
}
+// The number of reference arguments to this method including implicit this
+// pointer
+size_t Method::NumReferenceArgs() const {
+  // Non-static methods take an implicit this pointer, which is a reference.
+  size_t result = IsStatic() ? 0 : 1;
+  // shorty_[0] is the return type; declared arguments start at index 1.
+  for (int i = 1; i < shorty_.length(); i++) {
+    if ((shorty_[i] == 'L') || (shorty_[i] == '[')) {  // object or array type
+      result++;
+    }
+  }
+  return result;
+}
+
+// The number of long or double arguments
+// Counts declared arguments (shorty_[1] onwards; shorty_[0] is the return
+// type) whose shorty code is 'J' (long) or 'D' (double).
+size_t Method::NumLongOrDoubleArgs() const {
+  size_t count = 0;
+  for (int i = 1; i < shorty_.length(); i++) {
+    char c = shorty_[i];
+    if (c == 'D' || c == 'J') {
+      count++;
+    }
+  }
+  return count;
+}
+
+// The number of reference arguments to this method before the given parameter
+// index
+size_t Method::NumReferenceArgsBefore(unsigned int param) const {
+  CHECK_LT(param, NumArgs());
+  // The implicit this pointer of a non-static method counts as a reference.
+  unsigned int result = IsStatic() ? 0 : 1;
+  // shorty_[0] is the return type, so declared arguments start at index 1.
+  // static_cast (not a C-style cast) for consistency with the rest of the
+  // codebase's cast style.
+  unsigned int limit = static_cast<unsigned int>(shorty_.length());
+  for (unsigned int i = 1; i < limit && i <= param; i++) {
+    if ((shorty_[i] == 'L') || (shorty_[i] == '[')) {
+      result++;
+    }
+  }
+  return result;
+}
+
+// Is the given method parameter a reference?
+bool Method::IsParamAReference(unsigned int param) const {
+  CHECK_LT(param, NumArgs());
+  if (IsStatic()) {
+    param++;  // 0th argument must skip return value at start of the shorty
+  } else if (param == 0) {
+    return true;  // this argument
+  }
+  // 'L' is an object (class) reference, '[' an array reference.
+  return ((shorty_[param] == 'L') || (shorty_[param] == '['));
+}
+
+// Is the given method parameter a long or double?
+bool Method::IsParamALongOrDouble(unsigned int param) const {
+  CHECK_LT(param, NumArgs());
+  if (IsStatic()) {
+    param++;  // 0th argument must skip return value at start of the shorty
+  } else if (param == 0) {
+    // The implicit this argument is a reference, never a long or double.
+    // Without this check (cf. IsParamAReference and ParamSizeInBytes), a
+    // non-static method whose *return* type is 'J' or 'D' would wrongly
+    // report its this argument as wide, since shorty_[0] is the return type.
+    return false;
+  }
+  return (shorty_[param] == 'J') || (shorty_[param] == 'D');
+}
+
+// Size in bytes that parameter |param| occupies when passed to this method.
+// References and this are pointer-sized; longs and doubles take 8 bytes;
+// every other primitive ('B', 'C', 'F', 'I', 'S', 'Z') takes 4.
+size_t Method::ParamSizeInBytes(unsigned int param) const {
+  CHECK_LT(param, NumArgs());
+  if (IsStatic()) {
+    param++;  // 0th argument must skip return value at start of the shorty
+  } else if (param == 0) {
+    return kPointerSize;  // this argument
+  }
+  switch (shorty_[param]) {
+    case '[':  // fall-through: arrays and objects are both references
+    case 'L': return kPointerSize;
+    case 'D':  // fall-through: doubles and longs are both 64-bit
+    case 'J': return 8;
+    default: return 4;
+  }
+}
+
bool Method::HasSameArgumentTypes(const Method* that) const {
const RawDexFile* raw1 = this->GetClass()->GetDexFile()->GetRaw();
const RawDexFile::ProtoId& proto1 = raw1->GetProtoId(this->proto_idx_);
@@ -111,6 +187,30 @@
return (strcmp(type1, type2) == 0);
}
+// Returns the first direct method declared by this class whose name matches
+// |name|, or NULL when no such method exists.
+Method* Class::FindDirectMethod(const StringPiece& name) const {
+  for (size_t i = 0; i < NumDirectMethods(); i++) {
+    Method* method = GetDirectMethod(i);
+    if (method->GetName().compare(name) == 0) {
+      return method;  // Found a match - no need to scan further.
+    }
+  }
+  return NULL;
+}
+
+// Returns the first virtual method declared by this class whose name matches
+// |name|, or NULL when no such method exists.
+Method* Class::FindVirtualMethod(const StringPiece& name) const {
+  for (size_t i = 0; i < NumVirtualMethods(); i++) {
+    Method* method = GetVirtualMethod(i);
+    if (method->GetName().compare(name) == 0) {
+      return method;  // Found a match - no need to scan further.
+    }
+  }
+  return NULL;
+}
+
Method* Class::FindDirectMethodLocally(const StringPiece& name,
const StringPiece& descriptor) const {
return NULL; // TODO
@@ -131,7 +231,7 @@
if (rhs >= Class::kStatusError && rhs <= Class::kStatusInitialized) {
os << kClassStatusNames[rhs - 1];
} else {
- os << "Class::Status[" << int(rhs) << "]";
+ os << "Class::Status[" << static_cast<int>(rhs) << "]";
}
return os;
}
diff --git a/src/object.h b/src/object.h
index da64d1b..31d4390 100644
--- a/src/object.h
+++ b/src/object.h
@@ -3,8 +3,11 @@
#ifndef ART_SRC_OBJECT_H_
#define ART_SRC_OBJECT_H_
+#include "src/assembler.h"
+#include "src/constants.h"
#include "src/dex_file.h"
#include "src/globals.h"
+#include "src/logging.h"
#include "src/macros.h"
#include "src/stringpiece.h"
#include "src/monitor.h"
@@ -95,7 +98,7 @@
* Return an offset, given a bit number as returned from CLZ.
*/
#define CLASS_OFFSET_FROM_CLZ(rshift) \
- (((int)(rshift) * CLASS_OFFSET_ALIGNMENT) + CLASS_SMALLEST_OFFSET)
+ ((static_cast<int>(rshift) * CLASS_OFFSET_ALIGNMENT) + CLASS_SMALLEST_OFFSET)
class Object {
@@ -149,7 +152,7 @@
class ObjectLock {
public:
- ObjectLock(Object* object) : obj_(object) {
+ explicit ObjectLock(Object* object) : obj_(object) {
CHECK(object != NULL);
obj_->MonitorEnter();
}
@@ -306,8 +309,13 @@
return declaring_class_;
}
+  static MemberOffset ClassOffset() {
+    // Offset of klass_ within Method, used by generated code (like the
+    // other *Offset helpers in this file).
+    return MemberOffset(OFFSETOF_MEMBER(Method, klass_));
+  }
+
// const char* GetReturnTypeDescriptor() const {
- // return declaring_class_->GetDexFile_->GetRaw()->dexStringByTypeIdx(proto_id_.return_type_id_);
+ // return declaring_class_->GetDexFile_->GetRaw()
+ // ->dexStringByTypeIdx(proto_id_.return_type_id_);
// }
// Returns true if the method is declared public.
@@ -386,6 +394,67 @@
uint32_t instance_data_[METHOD_FIELD_SLOTS];
#undef METHOD_FIELD_SLOTS
+  bool IsReturnAReference() const {
+    // shorty_[0] is the return type: 'L' = object, '[' = array.
+    return (shorty_[0] == 'L') || (shorty_[0] == '[');
+  }
+
+  bool IsReturnAFloatOrDouble() const {
+    // shorty_[0] is the return type: 'F' = float, 'D' = double.
+    return (shorty_[0] == 'F') || (shorty_[0] == 'D');
+  }
+
+  bool IsReturnAFloat() const {
+    // shorty_[0] is the return type: 'F' = float.
+    return shorty_[0] == 'F';
+  }
+
+  bool IsReturnADouble() const {
+    // shorty_[0] is the return type: 'D' = double.
+    return shorty_[0] == 'D';
+  }
+
+  bool IsReturnALong() const {
+    // shorty_[0] is the return type: 'J' = long.
+    return shorty_[0] == 'J';
+  }
+
+ // The number of arguments that should be supplied to this method
+  size_t NumArgs() const {
+    // shorty_.length() counts the return type at shorty_[0], hence the -1;
+    // non-static methods additionally take an implicit this argument.
+    return (IsStatic() ? 0 : 1) + shorty_.length() - 1;
+  }
+
+ // The number of reference arguments to this method including implicit this
+ // pointer
+ size_t NumReferenceArgs() const;
+
+ // The number of long or double arguments
+ size_t NumLongOrDoubleArgs() const;
+
+ // The number of reference arguments to this method before the given
+ // parameter index
+ size_t NumReferenceArgsBefore(unsigned int param) const;
+
+ // Is the given method parameter a reference?
+ bool IsParamAReference(unsigned int param) const;
+
+ // Is the given method parameter a long or double?
+ bool IsParamALongOrDouble(unsigned int param) const;
+
+ size_t ParamSizeInBytes(unsigned int param) const;
+
+  void SetCode(const void* code) {
+    // Installs the compiled-code pointer returned later by GetCode().
+    code_ = code;
+  }
+
+  const void* GetCode() const {
+    // Compiled code associated with this method (whatever SetCode stored).
+    return code_;
+  }
+
+  void RegisterNative(const void* native_method) {
+    // Stores the native function pointer; no validation or unregistration
+    // happens here.  NOTE(review): assumes native_method outlives this
+    // Method - confirm against callers.
+    native_method_ = native_method;
+  }
+
+  static MemberOffset NativeMethodOffset() {
+    // Offset of native_method_ within Method, used by generated code (like
+    // the other *Offset helpers in this file).
+    return MemberOffset(OFFSETOF_MEMBER(Method, native_method_));
+  }
+
+ public: // TODO: private/const
// the class we are a part of
Class* declaring_class_;
@@ -421,6 +490,12 @@
private:
Method();
+
+ // Compiled code associated with this method
+ const void* code_;
+
+ // Any native method registered with this method
+ const void* native_method_;
};
// Class objects.
@@ -494,7 +569,8 @@
// Returns true if this class is in the same packages as that class.
bool IsInSamePackage(const Class* that) const;
- static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2);
+ static bool IsInSamePackage(const StringPiece& descriptor1,
+ const StringPiece& descriptor2);
// Returns true if this class represents an array class.
bool IsArray() const {
@@ -539,7 +615,8 @@
// Returns the size in bytes of a class object instance with the
// given number of static fields.
// static size_t Size(size_t num_sfields) {
- // return OFFSETOF_MEMBER(Class, sfields_) + sizeof(StaticField) * num_sfields;
+ // return OFFSETOF_MEMBER(Class, sfields_) +
+ // sizeof(StaticField) * num_sfields;
// }
// Returns the number of static, private, and constructor methods.
@@ -592,10 +669,14 @@
reference_offsets_ = new_reference_offsets;
}
+ Method* FindDirectMethod(const StringPiece& name) const;
+
+ Method* FindVirtualMethod(const StringPiece& name) const;
+
Method* FindDirectMethodLocally(const StringPiece& name,
const StringPiece& descriptor) const;
- public: // TODO: private
+ public: // TODO: private
// leave space for instance data; we could access fields directly if
// we freeze the definition of java/lang/Class
#define CLASS_FIELD_SLOTS 1
@@ -654,7 +735,7 @@
// initiating class loader list
// NOTE: for classes with low serialNumber, these are unused, and the
// values are kept in a table in gDvm.
- //InitiatingLoaderList initiating_loader_list_;
+ // InitiatingLoaderList initiating_loader_list_;
// array of interfaces this class implements directly
size_t interface_count_;
diff --git a/src/object_test.cc b/src/object_test.cc
index 839d61b..334461a 100644
--- a/src/object_test.cc
+++ b/src/object_test.cc
@@ -48,7 +48,7 @@
ASSERT_EQ(4U, klass->NumVirtualMethods());
- Method* m1 = klass->GetVirtualMethod(0);
+ Method* m1 = klass->GetVirtualMethod(0u);
ASSERT_EQ("m1", m1->GetName());
Method* m2 = klass->GetVirtualMethod(1);
@@ -110,7 +110,7 @@
Class* klass2 = linker2->FindClass("LProtoCompare2;", NULL);
ASSERT_TRUE(klass2 != NULL);
- Method* m1_1 = klass1->GetVirtualMethod(0);
+ Method* m1_1 = klass1->GetVirtualMethod(0u);
ASSERT_EQ("m1", m1_1->GetName());
Method* m2_1 = klass1->GetVirtualMethod(1);
ASSERT_EQ("m2", m2_1->GetName());
@@ -119,7 +119,7 @@
Method* m4_1 = klass1->GetVirtualMethod(3);
ASSERT_EQ("m4", m4_1->GetName());
- Method* m1_2 = klass2->GetVirtualMethod(0);
+ Method* m1_2 = klass2->GetVirtualMethod(0u);
ASSERT_EQ("m1", m1_2->GetName());
Method* m2_2 = klass2->GetVirtualMethod(1);
ASSERT_EQ("m2", m2_2->GetName());
diff --git a/src/runtime.cc b/src/runtime.cc
index 0f768c5..28af9dd 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -65,14 +65,13 @@
return true;
}
-bool AttachCurrentThread() {
- LOG(FATAL) << "Unimplemented";
- return false;
+bool Runtime::AttachCurrentThread() {
+  // Succeeds iff Thread::Attach produced a Thread for the calling thread.
+  return Thread::Attach() != NULL;
+}
-bool DetachCurrentThread() {
- LOG(FATAL) << "Unimplemented";
- return false;
+bool Runtime::DetachCurrentThread() {
+  // Stub: logs instead of aborting (the previous version was LOG(FATAL))
+  // and optimistically reports success so callers can proceed.
+  // NOTE(review): returning true without detaching may mask errors once
+  // callers start relying on real detach semantics.
+  LOG(WARNING) << "Unimplemented: Runtime::DetachCurrentThread";
+  return true;
+}
} // namespace art
diff --git a/src/thread.cc b/src/thread.cc
index 60e977f..7c76f4c 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -2,11 +2,11 @@
#include "src/thread.h"
+#include <pthread.h>
+#include <sys/mman.h>
#include <algorithm>
#include <cerrno>
#include <list>
-#include <pthread.h>
-#include <sys/mman.h>
#include "src/runtime.h"
#include "src/utils.h"
@@ -18,7 +18,7 @@
Mutex* Mutex::Create(const char* name) {
Mutex* mu = new Mutex(name);
int result = pthread_mutex_init(&mu->lock_impl_, NULL);
- CHECK(result == 0);
+ CHECK_EQ(0, result);
return mu;
}
@@ -79,6 +79,8 @@
result = pthread_attr_destroy(&attr);
CHECK_EQ(result, 0);
+ InitCpu();
+
return new_thread;
}
@@ -100,6 +102,8 @@
PLOG(FATAL) << "pthread_setspecific failed";
}
+ InitCpu();
+
return thread;
}
@@ -125,6 +129,24 @@
return true;
}
+static const char* kStateNames[] = {
+  // Must stay in the same order as the Thread::State enum (kNew through
+  // kTerminated); operator<< below indexes this array by state - kNew.
+  "New",
+  "Runnable",
+  "Blocked",
+  "Waiting",
+  "TimedWaiting",
+  "Native",
+  "Terminated",
+};
+// Pretty-prints a Thread::State for logging.
+std::ostream& operator<<(std::ostream& os, const Thread::State& state) {
+  if (state >= Thread::kNew && state <= Thread::kTerminated) {
+    os << kStateNames[state-Thread::kNew];
+  } else {
+    // Out-of-range values (e.g. corrupted state) are printed numerically.
+    os << "State[" << static_cast<int>(state) << "]";
+  }
+  return os;
+}
+
ThreadList* ThreadList::Create() {
return new ThreadList;
}
diff --git a/src/thread.h b/src/thread.h
index a862e0a..ab0bacc 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -4,19 +4,23 @@
#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_
-#include <list>
#include <pthread.h>
+#include <list>
#include "src/globals.h"
+#include "src/heap.h"
#include "src/logging.h"
#include "src/macros.h"
#include "src/runtime.h"
+#include "jni.h"
+
namespace art {
class Heap;
class Object;
class Runtime;
+class StackHandleBlock;
class Thread;
class ThreadList;
@@ -62,6 +66,39 @@
DISALLOW_COPY_AND_ASSIGN(MutexLock);
};
+// Stack handle blocks are allocated within the bridge frame between managed
+// and native code.
+class StackHandleBlock {
+ public:
+  // Number of references contained within this SHB.
+  // const: a pure query, callable through const pointers/references.
+  size_t NumberOfReferences() const {
+    return number_of_references_;
+  }
+
+  // Link to previous SHB or NULL.
+  size_t is not returned here; const for the same reason as above.
+  StackHandleBlock* Link() const {
+    return link_;
+  }
+
+  // Offset of length within SHB, used by generated code
+  static size_t NumberOfReferencesOffset() {
+    return OFFSETOF_MEMBER(StackHandleBlock, number_of_references_);
+  }
+
+  // Offset of link within SHB, used by generated code
+  static size_t LinkOffset() {
+    return OFFSETOF_MEMBER(StackHandleBlock, link_);
+  }
+
+ private:
+  // SHBs are laid out in the managed/native bridge frame, never constructed
+  // directly from C++, so the constructor is private.
+  StackHandleBlock() {}
+
+  size_t number_of_references_;
+  StackHandleBlock* link_;
+
+  DISALLOW_COPY_AND_ASSIGN(StackHandleBlock);
+};
+
class Thread {
public:
enum State {
@@ -71,6 +108,7 @@
kBlocked,
kWaiting,
kTimedWaiting,
+ kNative,
kTerminated,
};
@@ -131,10 +169,63 @@
state_ = new_state;
}
+ // Offset of state within Thread, used by generated code
+ static ThreadOffset StateOffset() {
+ return ThreadOffset(OFFSETOF_MEMBER(Thread, state_));
+ }
+
+ Heap* GetHeap() {
+ return heap_;
+ }
+
+ // JNI methods
+ JNIEnv* GetJniEnv() const {
+ return jni_env_;
+ }
+
+ // Offset of JNI environment within Thread, used by generated code
+ static ThreadOffset JniEnvOffset() {
+ return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
+ }
+
+ // Offset of top stack handle block within Thread, used by generated code
+ static ThreadOffset TopShbOffset() {
+ return ThreadOffset(OFFSETOF_MEMBER(Thread, top_shb_));
+ }
+
+ // Number of references allocated in StackHandleBlocks on this thread
+  size_t NumShbHandles() {
+    // Walks the linked list of stack handle blocks starting at top_shb_,
+    // summing the reference count of each block.
+    size_t count = 0;
+    for (StackHandleBlock* cur = top_shb_; cur; cur = cur->Link()) {
+      count += cur->NumberOfReferences();
+    }
+    return count;
+  }
+
private:
- Thread() : id_(1234), exception_(NULL) {}
+  Thread() :
+      thread_id_(1234), top_shb_(NULL),
+      jni_env_(reinterpret_cast<JNIEnv*>(0xEBADC0DE)), exception_(NULL) {
+    // 0xEBADC0DE is presumably a poison value to catch use of jni_env_
+    // before a real JNIEnv is installed - confirm.
+    // NOTE(review): self_, heap_ and state_ are not initialized here; self_
+    // is only assigned in InitCpu, which is a no-op on ARM - confirm.
+    // NOTE(review): the pre-existing id_ member still appears below and is
+    // no longer initialized now that thread_id_ was introduced - confirm
+    // that id_ is removed or initialized elsewhere.
+  }
~Thread() {}
+ void InitCpu();
+
+ // Initialized to "this". On certain architectures (such as x86) reading
+ // off of Thread::Current is easy but getting the address of Thread::Current
+ // is hard. This field can be read off of Thread::Current to give the address.
+ Thread* self_;
+
+ uint32_t thread_id_;
+
+ Heap* heap_;
+
+ // Top of linked list of stack handle blocks or NULL for none
+ StackHandleBlock* top_shb_;
+
+ // Every thread may have an associated JNI environment
+ JNIEnv* jni_env_;
+
State state_;
uint32_t id_;
@@ -152,6 +243,7 @@
DISALLOW_COPY_AND_ASSIGN(Thread);
};
+std::ostream& operator<<(std::ostream& os, const Thread::State& state);
class ThreadList {
public:
diff --git a/src/thread_arm.cc b/src/thread_arm.cc
new file mode 100644
index 0000000..4b5eab7
--- /dev/null
+++ b/src/thread_arm.cc
@@ -0,0 +1,11 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#include "src/macros.h"
+#include "src/thread.h"
+
+namespace art {
+
+void Thread::InitCpu() {
+  // Intentionally empty: the x86 version points the FS segment at this
+  // Thread, but presumably no equivalent per-thread CPU setup is needed on
+  // ARM yet - TODO: confirm (note that self_ is left unset here).
+}
+
+} // namespace art
diff --git a/src/thread_x86.cc b/src/thread_x86.cc
new file mode 100644
index 0000000..14cb2b2
--- /dev/null
+++ b/src/thread_x86.cc
@@ -0,0 +1,60 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+#include "src/thread.h"
+#include <asm/ldt.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include "src/macros.h"
+
+namespace art {
+
+// Points the x86 FS segment register at this Thread via a fresh LDT entry,
+// so generated code can reach the current Thread with an FS-relative load.
+void Thread::InitCpu() {
+  // Read LDT
+  CHECK_EQ(static_cast<size_t>(LDT_ENTRY_SIZE), sizeof(uint64_t));
+  // Zero-initialize: modify_ldt(0, ...) only fills in entries the LDT
+  // currently contains, so without this the free-slot scan below would
+  // read uninitialized stack memory.
+  uint64_t ldt_[LDT_ENTRIES] = {};
+  syscall(SYS_modify_ldt, 0, ldt_, sizeof(ldt_));
+  // Create empty slot to point at current Thread*
+  struct user_desc ldt_entry;
+  // -1 wraps to a huge unsigned value, so the >= LDT_ENTRIES check below
+  // fires when no free slot was found.
+  ldt_entry.entry_number = -1;
+  ldt_entry.base_addr = reinterpret_cast<unsigned int>(this);
+  ldt_entry.limit = 4096;
+  ldt_entry.seg_32bit = 1;
+  ldt_entry.contents = MODIFY_LDT_CONTENTS_DATA;
+  ldt_entry.read_exec_only = 0;
+  ldt_entry.limit_in_pages = 0;
+  ldt_entry.seg_not_present = 0;
+  ldt_entry.useable = 1;
+  for (int i = 0; i < LDT_ENTRIES; i++) {
+    if (ldt_[i] == 0) {  // An all-zero descriptor is an unused slot.
+      ldt_entry.entry_number = i;
+      break;
+    }
+  }
+  if (ldt_entry.entry_number >= LDT_ENTRIES) {
+    LOG(FATAL) << "Failed to find available LDT slot";
+  }
+  // Update LDT
+  CHECK_EQ(0, syscall(SYS_modify_ldt, 1, &ldt_entry, sizeof(ldt_entry)));
+  // Change FS to be new LDT entry
+  uint16_t table_indicator = 1 << 2;  // LDT
+  uint16_t rpl = 3;  // Requested privilege level
+  uint16_t selector = (ldt_entry.entry_number << 3) | table_indicator | rpl;
+  // TODO: use our assembler to generate code
+  asm("movw %w0, %%fs"
+      :  // output
+      : "q"(selector)  // input
+      :);  // clobber
+  // Allow easy indirection back to Thread*
+  self_ = this;
+  // Sanity check reads from FS goes to this Thread*
+  CHECK_EQ(0U, OFFSETOF_MEMBER(Thread, self_));
+  Thread* self_check;
+  // TODO: use our assembler to generate code
+  asm("movl %%fs:0, %0"
+      : "=r"(self_check)  // output
+      :  // input
+      :);  // clobber
+  CHECK_EQ(self_check, this);
+}
+
+} // namespace art
diff --git a/src/utils.h b/src/utils.h
index cbf0cff..217bd69 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -12,14 +12,12 @@
return (x & (x - 1)) == 0;
}
-
template<typename T>
static inline bool IsAligned(T x, int n) {
CHECK(IsPowerOfTwo(n));
return (x & (n - 1)) == 0;
}
-
template<typename T>
static inline bool IsAligned(T* x, int n) {
return IsAligned(reinterpret_cast<uintptr_t>(x), n);
@@ -33,7 +31,6 @@
return (-limit <= value) && (value < limit);
}
-
static inline bool IsUint(int N, word value) {
CHECK_LT(0, N);
CHECK_LT(N, kBitsPerWord);
@@ -41,7 +38,6 @@
return (0 <= value) && (value < limit);
}
-
static inline bool IsAbsoluteUint(int N, word value) {
CHECK_LT(0, N);
CHECK_LT(N, kBitsPerWord);
@@ -49,12 +45,18 @@
return IsUint(N, value);
}
+static inline int32_t Low16Bits(int32_t value) {
+  // Zero-extended low half: the result is always in [0, 0xffff].
+  return static_cast<int32_t>(value & 0xffff);
+}
+
+static inline int32_t High16Bits(int32_t value) {
+  // Right shift of a signed value: sign-extends for negative inputs on
+  // gcc/clang (implementation-defined in general), unlike Low16Bits which
+  // is always zero-extended.
+  return static_cast<int32_t>(value >> 16);
+}
static inline int32_t Low32Bits(int64_t value) {
return static_cast<int32_t>(value);
}
-
static inline int32_t High32Bits(int64_t value) {
return static_cast<int32_t>(value >> 32);
}