ART: Mips64 runtime support
Interpret-only Mips64 runtime support.
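Add Mips64 argument-marshalling constants to the quick argument visitor, advancing GPR and FPR indices in lockstep (kGprFprLockstep), and marshal native-call arguments through GPRs only, widening 32-bit integers to 64 bits as the N64 ABI requires.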
Change-Id: Iee22d0c8c77105d9b2f03a67dc4e09957fe0ab0a
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index ac640b4..cd34d88 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -64,6 +64,7 @@
static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = !kArm32QuickCodeUseSoftFloat;
static constexpr size_t kNumQuickGprArgs = 3;
static constexpr size_t kNumQuickFprArgs = kArm32QuickCodeUseSoftFloat ? 0 : 16;
+ static constexpr bool kGprFprLockstep = false;
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
@@ -99,6 +100,7 @@
static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
+ static constexpr bool kGprFprLockstep = false;
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
@@ -108,7 +110,7 @@
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
-#elif defined(__mips__)
+#elif defined(__mips__) && !defined(__LP64__)
// The callee save frame is pointed to by SP.
// | argN | |
// | ... | |
@@ -128,12 +130,60 @@
static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 0; // 0 arguments passed in FPRs.
+ static constexpr bool kGprFprLockstep = false;
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0; // Offset of first FPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4; // Offset of first GPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 60; // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
+#elif defined(__mips__) && defined(__LP64__)
+ // The callee save frame is pointed to by SP.
+ // | argN | |
+ // | ... | |
+ // | arg4 | |
+ // | arg3 spill | | Caller's frame
+ // | arg2 spill | |
+ // | arg1 spill | |
+ // | Method* | ---
+ // | RA |
+ // | ... | callee saves
+ // | A7 | arg7
+ // | A6 | arg6
+ // | A5 | arg5
+ // | A4 | arg4
+ // | A3 | arg3
+ // | A2 | arg2
+ // | A1 | arg1
+ // | F7 | f_arg7
+ // | F6 | f_arg6
+ // | F5 | f_arg5
+ // | F4 | f_arg4
+ // | F3 | f_arg3
+ // | F2 | f_arg2
+ // | F1 | f_arg1
+ // | F0 | f_arg0
+ // | | padding
+ // | A0/Method* | <- sp
+ // NOTE: for Mips64, when A0 is skipped, F0 is also skipped.
+ static constexpr bool kAlignPairRegister = false;
+ static constexpr bool kQuickSoftFloatAbi = false;
+ static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
+ // GPR and FPR argument registers are assigned in lockstep on Mips64: when a GPR is
+ // consumed, the corresponding FPR is skipped as well, and vice versa. VisitArguments()
+ // handles this via kGprFprLockstep below.
+ static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
+ static constexpr size_t kNumQuickFprArgs = 7; // 7 arguments passed in FPRs.
+ static constexpr bool kGprFprLockstep = true;
+
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24; // Offset of first FPR arg (F1).
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80; // Offset of first GPR arg (A1).
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200; // Offset of return address.
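+ // With the layout above, f_arg0 (F0) sits at SP + 16, so the first visited FPR arg
+ // (F1) is at SP + 24; A1 is at SP + 80 and the return address at SP + 200 in the
+ // 208-byte frame.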
+ static size_t GprIndexToGprOffset(uint32_t gpr_index) {
+ return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
+ }
#elif defined(__i386__)
// The callee save frame is pointed to by SP.
// | argN | |
@@ -154,6 +204,7 @@
static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 0; // 0 arguments passed in FPRs.
+ static constexpr bool kGprFprLockstep = false;
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0; // Offset of first FPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4; // Offset of first GPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28; // Offset of return address.
@@ -193,6 +244,7 @@
static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
+ static constexpr bool kGprFprLockstep = false;
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8; // Offset of first GPR arg.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8; // Offset of return address.
@@ -314,6 +366,20 @@
+ stack_index_ * kBytesStackArgLocation);
}
+ void IncGprIndex() {
+ gpr_index_++;
+ if (kGprFprLockstep) {
+ fpr_index_++;
+ }
+ }
+
+ void IncFprIndex() {
+ fpr_index_++;
+ if (kGprFprLockstep) {
+ gpr_index_++;
+ }
+ }
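+ // With kGprFprLockstep (Mips64), each of these increments consumes one GPR slot and
+ // one FPR slot together. For example, an instance method with shorty "VIJFD" is
+ // visited as: receiver in A1, I in A2, J in A3, F in f_arg4 (F4), D in f_arg5 (F5).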
+
void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// (a) 'stack_args_' should point to the first method's argument
// (b) whatever the argument type it is, the 'stack_index_' should
@@ -330,7 +396,7 @@
Visit();
stack_index_++;
if (kNumQuickGprArgs > 0) {
- gpr_index_++;
+ IncGprIndex();
}
}
for (uint32_t shorty_index = 1; shorty_index < shorty_len_; ++shorty_index) {
@@ -346,7 +412,7 @@
Visit();
stack_index_++;
if (gpr_index_ < kNumQuickGprArgs) {
- gpr_index_++;
+ IncGprIndex();
}
break;
case Primitive::kPrimFloat:
@@ -355,11 +421,11 @@
stack_index_++;
if (kQuickSoftFloatAbi) {
if (gpr_index_ < kNumQuickGprArgs) {
- gpr_index_++;
+ IncGprIndex();
}
} else {
if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
- fpr_index_++;
+ IncFprIndex();
if (kQuickDoubleRegAlignedFloatBackFilled) {
// Double should not overlap with float.
// For example, if fpr_index_ = 3, fpr_double_index_ should be at least 4.
@@ -378,7 +444,7 @@
if (cur_type_ == Primitive::kPrimLong && kAlignPairRegister && gpr_index_ == 0) {
// Currently, this is only for ARM, where the first available parameter register
// is R1. So we skip it, and use R2 instead.
- gpr_index_++;
+ IncGprIndex();
}
is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
((gpr_index_ + 1) == kNumQuickGprArgs);
@@ -390,10 +456,10 @@
stack_index_++;
}
if (gpr_index_ < kNumQuickGprArgs) {
- gpr_index_++;
+ IncGprIndex();
if (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) {
if (gpr_index_ < kNumQuickGprArgs) {
- gpr_index_++;
+ IncGprIndex();
}
}
}
@@ -416,10 +482,10 @@
}
}
} else if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
- fpr_index_++;
+ IncFprIndex();
if (GetBytesPerFprSpillLocation(kRuntimeISA) == 4) {
if (fpr_index_ + 1 < kNumQuickFprArgs + 1) {
- fpr_index_++;
+ IncFprIndex();
}
}
}
@@ -939,7 +1005,8 @@
static constexpr size_t kRegistersNeededForLong = 2;
static constexpr size_t kRegistersNeededForDouble = 2;
static constexpr bool kMultiRegistersAligned = true;
- static constexpr bool kMultiRegistersWidened = false;
+ static constexpr bool kMultiFPRegistersWidened = false;
+ static constexpr bool kMultiGPRegistersWidened = false;
static constexpr bool kAlignLongOnStack = true;
static constexpr bool kAlignDoubleOnStack = true;
#elif defined(__aarch64__)
@@ -950,10 +1017,11 @@
static constexpr size_t kRegistersNeededForLong = 1;
static constexpr size_t kRegistersNeededForDouble = 1;
static constexpr bool kMultiRegistersAligned = false;
- static constexpr bool kMultiRegistersWidened = false;
+ static constexpr bool kMultiFPRegistersWidened = false;
+ static constexpr bool kMultiGPRegistersWidened = false;
static constexpr bool kAlignLongOnStack = false;
static constexpr bool kAlignDoubleOnStack = false;
-#elif defined(__mips__)
+#elif defined(__mips__) && !defined(__LP64__)
static constexpr bool kNativeSoftFloatAbi = true; // This is a hard float ABI.
static constexpr size_t kNumNativeGprArgs = 4; // 4 arguments passed in GPRs.
static constexpr size_t kNumNativeFprArgs = 0; // 0 arguments passed in FPRs.
@@ -961,9 +1029,23 @@
static constexpr size_t kRegistersNeededForLong = 2;
static constexpr size_t kRegistersNeededForDouble = 2;
static constexpr bool kMultiRegistersAligned = true;
- static constexpr bool kMultiRegistersWidened = true;
+ static constexpr bool kMultiFPRegistersWidened = true;
+ static constexpr bool kMultiGPRegistersWidened = false;
static constexpr bool kAlignLongOnStack = true;
static constexpr bool kAlignDoubleOnStack = true;
+#elif defined(__mips__) && defined(__LP64__)
+ // Let the code prepare GPRs only; the FPRs will be loaded with the same data.
+ static constexpr bool kNativeSoftFloatAbi = true;
+ static constexpr size_t kNumNativeGprArgs = 8;
+ static constexpr size_t kNumNativeFprArgs = 0;
+
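+ // Under the N64 calling convention longs and doubles each occupy a single 64-bit
+ // register or stack slot, and 32-bit integer arguments are passed sign-extended to
+ // 64 bits (hence kMultiGPRegistersWidened below).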
+ static constexpr size_t kRegistersNeededForLong = 1;
+ static constexpr size_t kRegistersNeededForDouble = 1;
+ static constexpr bool kMultiRegistersAligned = false;
+ static constexpr bool kMultiFPRegistersWidened = false;
+ static constexpr bool kMultiGPRegistersWidened = true;
+ static constexpr bool kAlignLongOnStack = false;
+ static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__i386__)
// TODO: Check these!
static constexpr bool kNativeSoftFloatAbi = false; // Not using int registers for fp
@@ -973,7 +1055,8 @@
static constexpr size_t kRegistersNeededForLong = 2;
static constexpr size_t kRegistersNeededForDouble = 2;
static constexpr bool kMultiRegistersAligned = false; // x86 not using regs, anyways
- static constexpr bool kMultiRegistersWidened = false;
+ static constexpr bool kMultiFPRegistersWidened = false;
+ static constexpr bool kMultiGPRegistersWidened = false;
static constexpr bool kAlignLongOnStack = false;
static constexpr bool kAlignDoubleOnStack = false;
#elif defined(__x86_64__)
@@ -984,7 +1067,8 @@
static constexpr size_t kRegistersNeededForLong = 1;
static constexpr size_t kRegistersNeededForDouble = 1;
static constexpr bool kMultiRegistersAligned = false;
- static constexpr bool kMultiRegistersWidened = false;
+ static constexpr bool kMultiFPRegistersWidened = false;
+ static constexpr bool kMultiGPRegistersWidened = false;
static constexpr bool kAlignLongOnStack = false;
static constexpr bool kAlignDoubleOnStack = false;
#else
@@ -1043,10 +1127,20 @@
void AdvanceInt(uint32_t val) {
if (HaveIntGpr()) {
gpr_index_--;
- PushGpr(val);
+ if (kMultiGPRegistersWidened) {
+ DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
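+ // Sign-extend the 32-bit value: e.g. 0xFFFFFFFF (-1) must be pushed as
+ // 0xFFFFFFFFFFFFFFFF, not 0x00000000FFFFFFFF.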
+ PushGpr(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
+ } else {
+ PushGpr(val);
+ }
} else {
stack_entries_++;
- PushStack(val);
+ if (kMultiGPRegistersWidened) {
+ DCHECK_EQ(sizeof(uintptr_t), sizeof(int64_t));
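+ // Stack argument slots are also 64 bits wide here, so the same widening applies.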
+ PushStack(static_cast<int64_t>(bit_cast<int32_t, uint32_t>(val)));
+ } else {
+ PushStack(val);
+ }
gpr_index_ = 0;
}
}
@@ -1108,7 +1202,7 @@
if (HaveFloatFpr()) {
fpr_index_--;
if (kRegistersNeededForDouble == 1) {
- if (kMultiRegistersWidened) {
+ if (kMultiFPRegistersWidened) {
PushFpr8(bit_cast<double, uint64_t>(val));
} else {
// No widening, just use the bits.
@@ -1119,7 +1213,7 @@
}
} else {
stack_entries_++;
- if (kRegistersNeededForDouble == 1 && kMultiRegistersWidened) {
+ if (kRegistersNeededForDouble == 1 && kMultiFPRegistersWidened) {
// Need to widen before storing: Note the "double" in the template instantiation.
// Note: We need to jump through those hoops to make the compiler happy.
DCHECK_EQ(sizeof(uintptr_t), sizeof(uint64_t));