AArch64: JNI compiler fixes
This patch fixes some of the issues with the ARM64 assembler and JNI
compiler.
The JNI compiler is not yet enabled by default. To enable it, change
line 1884 in compiler/driver/compiler_driver.cc, removing kArm64 from
the GenericJNI list.
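
For orientation, the selection being changed is roughly of the following
shape (a hedged sketch, not the actual source; the enum and function name
are illustrative stand-ins for ART's InstructionSet handling):

    // Hypothetical sketch only -- the real check in compiler_driver.cc may
    // be shaped differently. Removing kArm64 from the condition routes
    // ARM64 native methods through the quick JNI compiler instead of the
    // GenericJNI trampoline.
    enum InstructionSet { kNone, kArm, kThumb2, kArm64, kX86, kX86_64, kMips };

    static bool UseGenericJniTrampoline(InstructionSet isa) {
      return isa == kX86_64 || isa == kArm64;  // drop kArm64 to enable the JNI compiler
    }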
The compiler passes all tests in jni_compiler_test.
Also change the common_compiler_test instruction-set-features logic:
tests are allowed to run when the build-time features are a subset of
the runtime features.
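
The subset test amounts to a feature-mask comparison; a minimal sketch of
the idea (illustrative names, not the actual InstructionSetFeatures API):

    #include <cstdint>

    // A test may run if every feature assumed at build time is also present
    // at runtime, i.e. the build-time mask is a subset of the runtime mask.
    static bool BuildFeaturesAreSubset(uint32_t build_features, uint32_t runtime_features) {
      return (build_features & runtime_features) == build_features;
    }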
Dex2oat cross-compiling now works: a 32-bit build of dex2oat should
be able to compile correctly.
Change-Id: I51d1c24f2c75d4397a11c54724a8b277ff3b3df8
Signed-off-by: Serban Constantinescu <serban.constantinescu@arm.com>
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 2d1be9d..6212a23 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -21,14 +21,29 @@
namespace art {
namespace arm64 {
-// Calling convention
+static const Register kCoreArgumentRegisters[] = {
+ X0, X1, X2, X3, X4, X5, X6, X7
+};
+static const WRegister kWArgumentRegisters[] = {
+ W0, W1, W2, W3, W4, W5, W6, W7
+};
+
+static const DRegister kDArgumentRegisters[] = {
+ D0, D1, D2, D3, D4, D5, D6, D7
+};
+
+static const SRegister kSArgumentRegisters[] = {
+ S0, S1, S2, S3, S4, S5, S6, S7
+};
+
+// Calling convention
ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
- return Arm64ManagedRegister::FromCoreRegister(IP0); // X16
+ return Arm64ManagedRegister::FromCoreRegister(X20); // saved on entry, restored on exit
}
ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() {
- return Arm64ManagedRegister::FromCoreRegister(IP0); // X16
+ return Arm64ManagedRegister::FromCoreRegister(X20); // saved on entry, restored on exit
}
static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
@@ -79,64 +94,64 @@
FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
CHECK(IsCurrentParamOnStack());
FrameOffset result =
- FrameOffset(displacement_.Int32Value() + // displacement
+ FrameOffset(displacement_.Int32Value() + // displacement
kFramePointerSize + // Method*
- (itr_slots_ * kFramePointerSize)); // offset into in args
+ (itr_slots_ * sizeof(uint32_t))); // offset into in args
return result;
}
const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on ARM64 to free them up for scratch use, we then assume
// all arguments are on the stack.
- if (entry_spills_.size() == 0) {
- // TODO Need fp regs spilled too.
- //
- size_t num_spills = NumArgs();
+ if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
+ int gp_reg_index = 1; // we start from X1/W1, X0 holds ArtMethod*.
+ int fp_reg_index = 0; // D0/S0.
- // TODO Floating point need spilling too.
- if (num_spills > 0) {
- entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X1));
- if (num_spills > 1) {
- entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X2));
- if (num_spills > 2) {
- entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X3));
- if (num_spills > 3) {
- entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X5));
- if (num_spills > 4) {
- entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X6));
- if (num_spills > 5) {
- entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X7));
- }
+ // We need to choose the correct register (D/S or X/W) since the managed
+ // stack uses 32bit stack slots.
+ ResetIterator(FrameOffset(0));
+ while (HasNext()) {
+ if (IsCurrentParamAFloatOrDouble()) { // FP regs.
+ if (fp_reg_index < 8) {
+ if (!IsCurrentParamADouble()) {
+ entry_spills_.push_back(Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[fp_reg_index]));
+ } else {
+ entry_spills_.push_back(Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[fp_reg_index]));
}
+ fp_reg_index++;
+ } else { // just increase the stack offset.
+ if (!IsCurrentParamADouble()) {
+ entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
+ } else {
+ entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
+ }
+ }
+ } else { // GP regs.
+ if (gp_reg_index < 8) {
+ if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
+ entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(kCoreArgumentRegisters[gp_reg_index]));
+ } else {
+ entry_spills_.push_back(Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg_index]));
+ }
+ gp_reg_index++;
+ } else { // just increase the stack offset.
+ if (IsCurrentParamALong() && (!IsCurrentParamAReference())) {
+ entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
+ } else {
+ entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
}
}
}
+ Next();
}
}
-
return entry_spills_;
}
-// JNI calling convention
+// JNI calling convention
Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
: JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
- // TODO This needs to be converted to 64bit.
- // Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
- // or jclass for static methods and the JNIEnv. We start at the aligned register r2.
-// size_t padding = 0;
-// for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) {
-// if (IsParamALongOrDouble(cur_arg)) {
-// if ((cur_reg & 1) != 0) {
-// padding += 4;
-// cur_reg++; // additional bump to ensure alignment
-// }
-// cur_reg++; // additional bump to skip extra long word
-// }
-// cur_reg++; // bump the iterator for every argument
-// }
- padding_ =0;
-
callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X19));
callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X20));
callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X21));
@@ -162,18 +177,26 @@
uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
// Compute spill mask to agree with callee saves initialized in the constructor
uint32_t result = 0;
- result = 1 << X19 | 1 << X20 | 1 << X21 | 1 << X22 | 1 << X23 | 1 << X24 | 1 << X25
- | 1 << X26 | 1 << X27 | 1 << X28 | 1<< X29 | 1 << LR;
+ result = 1 << X19 | 1 << X20 | 1 << X21 | 1 << X22 | 1 << X23 | 1 << X24 |
+ 1 << X25 | 1 << X26 | 1 << X27 | 1 << X28 | 1 << X29 | 1 << LR;
+ return result;
+}
+
+uint32_t Arm64JniCallingConvention::FpSpillMask() const {
+ // Compute spill mask to agree with callee saves initialized in the constructor
+ uint32_t result = 0;
+ result = 1 << D8 | 1 << D9 | 1 << D10 | 1 << D11 | 1 << D12 | 1 << D13 |
+ 1 << D14 | 1 << D15;
return result;
}
ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
- return Arm64ManagedRegister::FromCoreRegister(X9);
+ return ManagedRegister::NoRegister();
}
size_t Arm64JniCallingConvention::FrameSize() {
- // Method*, LR and callee save area size, local reference segment state
- size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kFramePointerSize;
+ // Method*, callee save area size, local reference segment state
+ size_t frame_data_size = ((1 + CalleeSaveRegisters().size()) * kFramePointerSize) + sizeof(uint32_t);
// References plus 2 words for SIRT header
size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSizeTarget(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
@@ -181,64 +204,60 @@
}
size_t Arm64JniCallingConvention::OutArgSize() {
- return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize + padding_,
- kStackAlignment);
-}
-
-// JniCallingConvention ABI follows AAPCS where longs and doubles must occur
-// in even register numbers and stack slots
-void Arm64JniCallingConvention::Next() {
- JniCallingConvention::Next();
- size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
- if ((itr_args_ >= 2) &&
- (arg_pos < NumArgs()) &&
- IsParamALongOrDouble(arg_pos)) {
- // itr_slots_ needs to be an even number, according to AAPCS.
- if ((itr_slots_ & 0x1u) != 0) {
- itr_slots_++;
- }
- }
+ return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
}
bool Arm64JniCallingConvention::IsCurrentParamInRegister() {
- return itr_slots_ < 4;
+ if (IsCurrentParamAFloatOrDouble()) {
+ return (itr_float_and_doubles_ < 8);
+ } else {
+ return ((itr_args_ - itr_float_and_doubles_) < 8);
+ }
}
bool Arm64JniCallingConvention::IsCurrentParamOnStack() {
return !IsCurrentParamInRegister();
}
-// TODO and floating point?
-
-static const Register kJniArgumentRegisters[] = {
- X0, X1, X2, X3, X4, X5, X6, X7
-};
ManagedRegister Arm64JniCallingConvention::CurrentParamRegister() {
- CHECK_LT(itr_slots_, 4u);
- int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
- // TODO Floating point & 64bit registers.
- if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) {
- CHECK_EQ(itr_slots_, 2u);
- return Arm64ManagedRegister::FromCoreRegister(X1);
+ CHECK(IsCurrentParamInRegister());
+ if (IsCurrentParamAFloatOrDouble()) {
+ CHECK_LT(itr_float_and_doubles_, 8u);
+ if (IsCurrentParamADouble()) {
+ return Arm64ManagedRegister::FromDRegister(kDArgumentRegisters[itr_float_and_doubles_]);
+ } else {
+ return Arm64ManagedRegister::FromSRegister(kSArgumentRegisters[itr_float_and_doubles_]);
+ }
} else {
- return
- Arm64ManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
+ int gp_reg = itr_args_ - itr_float_and_doubles_;
+ CHECK_LT(static_cast<unsigned int>(gp_reg), 8u);
+ if (IsCurrentParamALong() || IsCurrentParamAReference() || IsCurrentParamJniEnv()) {
+ return Arm64ManagedRegister::FromCoreRegister(kCoreArgumentRegisters[gp_reg]);
+ } else {
+ return Arm64ManagedRegister::FromWRegister(kWArgumentRegisters[gp_reg]);
+ }
}
}
FrameOffset Arm64JniCallingConvention::CurrentParamStackOffset() {
- CHECK_GE(itr_slots_, 4u);
- size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kFramePointerSize);
+ CHECK(IsCurrentParamOnStack());
+ size_t args_on_stack = itr_args_
+ - std::min(8u, itr_float_and_doubles_)
+ - std::min(8u, (itr_args_ - itr_float_and_doubles_));
+ size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
CHECK_LT(offset, OutArgSize());
return FrameOffset(offset);
}
size_t Arm64JniCallingConvention::NumberOfOutgoingStackArgs() {
- size_t static_args = IsStatic() ? 1 : 0; // count jclass
- // regular argument parameters and this
- size_t param_args = NumArgs() + NumLongOrDoubleArgs();
- // count JNIEnv* less arguments in registers
- return static_args + param_args + 1 - 4;
+ // all arguments including JNI args
+ size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
+
+ size_t all_stack_args = all_args -
+ std::min(8u, static_cast<unsigned int>(NumFloatOrDoubleArgs())) -
+ std::min(8u, static_cast<unsigned int>((all_args - NumFloatOrDoubleArgs())));
+
+ return all_stack_args;
}
} // namespace arm64
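
Note on the new argument assignment above: it follows AAPCS64, with the
first eight non-FP arguments in X0-X7 (W0-W7 for 32-bit values), the first
eight FP arguments in D0-D7 (S0-S7 for floats), and the remainder on the
stack. A standalone sketch of the same counting, mirroring
NumberOfOutgoingStackArgs() (illustrative, detached from the ART iterator
types):

    #include <algorithm>
    #include <cstddef>

    // Given the total number of JNI call arguments (including JNIEnv* and
    // the jobject/jclass extra) and how many of them are float/double,
    // return how many spill to the stack under AAPCS64.
    static size_t OutgoingStackArgs(size_t all_args, size_t fp_args) {
      size_t fp_in_regs = std::min<size_t>(8, fp_args);            // D0-D7 / S0-S7
      size_t gp_in_regs = std::min<size_t>(8, all_args - fp_args); // X0-X7 / W0-W7
      return all_args - fp_in_regs - gp_in_regs;
    }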
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index c18cd2b..92f547c 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -55,7 +55,6 @@
ManagedRegister IntReturnRegister() OVERRIDE;
ManagedRegister InterproceduralScratchRegister() OVERRIDE;
// JNI calling convention
- void Next() OVERRIDE; // Override default behavior for AAPCS
size_t FrameSize() OVERRIDE;
size_t OutArgSize() OVERRIDE;
const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
@@ -63,9 +62,7 @@
}
ManagedRegister ReturnScratchRegister() const OVERRIDE;
uint32_t CoreSpillMask() const OVERRIDE;
- uint32_t FpSpillMask() const OVERRIDE {
- return 0; // Floats aren't spilled in JNI down call
- }
+ uint32_t FpSpillMask() const OVERRIDE;
bool IsCurrentParamInRegister() OVERRIDE;
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
@@ -78,9 +75,6 @@
// TODO: these values aren't unique and can be shared amongst instances
std::vector<ManagedRegister> callee_save_regs_;
- // Padding to ensure longs and doubles are not split in AAPCS
- size_t padding_;
-
DISALLOW_COPY_AND_ASSIGN(Arm64JniCallingConvention);
};
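
FpSpillMask() now reports the AAPCS64 callee-saved set D8-D15 alongside the
X19-X29/LR core set; per the comments in the .cc file, both masks are meant
to agree with the callee saves set up in the constructor. For reference, a
small helper showing how such a mask decodes back into register indices
(illustrative, not part of the patch):

    #include <cstdint>
    #include <vector>

    // Bit i set in the mask means register i is callee-saved by the stub;
    // the FP mask built above decodes to {8, ..., 15}, i.e. D8-D15.
    static std::vector<int> DecodeSpillMask(uint32_t mask) {
      std::vector<int> regs;
      for (int i = 0; i < 32; ++i) {
        if ((mask >> i) & 1u) {
          regs.push_back(i);
        }
      }
      return regs;
    }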
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 8efdcda..a99a4c2 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -90,6 +90,14 @@
return IsParamAFloatOrDouble(itr_args_);
}
+bool ManagedRuntimeCallingConvention::IsCurrentParamADouble() {
+ return IsParamADouble(itr_args_);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamALong() {
+ return IsParamALong(itr_args_);
+}
+
// JNI calling convention
JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synchronized,
@@ -168,6 +176,10 @@
}
}
+bool JniCallingConvention::IsCurrentParamJniEnv() {
+ return (itr_args_ == kJniEnv);
+}
+
bool JniCallingConvention::IsCurrentParamAFloatOrDouble() {
switch (itr_args_) {
case kJniEnv:
@@ -181,6 +193,32 @@
}
}
+bool JniCallingConvention::IsCurrentParamADouble() {
+ switch (itr_args_) {
+ case kJniEnv:
+ return false; // JNIEnv*
+ case kObjectOrClass:
+ return false; // jobject or jclass
+ default: {
+ int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+ return IsParamADouble(arg_pos);
+ }
+ }
+}
+
+bool JniCallingConvention::IsCurrentParamALong() {
+ switch (itr_args_) {
+ case kJniEnv:
+ return false; // JNIEnv*
+ case kObjectOrClass:
+ return false; // jobject or jclass
+ default: {
+ int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+ return IsParamALong(arg_pos);
+ }
+ }
+}
+
// Return position of SIRT entry holding reference at the current iterator
// position
FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
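
The new IsCurrentParam{JniEnv,ADouble,ALong} queries share one convention:
the first two iterator positions are JNIEnv* and the jobject/jclass, and
later positions are translated back to shorty indices via
NumberOfExtraArgumentsForJni(). A compact sketch of that index mapping for
the static case (illustrative, plain indices instead of the ART iterator):

    // Argument positions for a static native method:
    //   0 -> JNIEnv*, 1 -> jclass, 2.. -> the method's own parameters.
    static bool StaticJniArgIsDouble(const char* shorty, int pos) {
      const int kExtraJniArgs = 2;   // JNIEnv* + jclass
      if (pos < kExtraJniArgs) {
        return false;                // the JNI extras are references
      }
      return shorty[1 + (pos - kExtraJniArgs)] == 'D';  // shorty[0] is the return type
    }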
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 76d237e..4d25d1c 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -126,6 +126,24 @@
char ch = shorty_[param];
return (ch == 'F' || ch == 'D');
}
+ bool IsParamADouble(unsigned int param) const {
+ DCHECK_LT(param, NumArgs());
+ if (IsStatic()) {
+ param++; // 0th argument must skip return value at start of the shorty
+ } else if (param == 0) {
+ return false; // this argument
+ }
+ return shorty_[param] == 'D';
+ }
+ bool IsParamALong(unsigned int param) const {
+ DCHECK_LT(param, NumArgs());
+ if (IsStatic()) {
+ param++; // 0th argument must skip return value at start of the shorty
+ } else if (param == 0) {
+ return true; // this argument
+ }
+ return shorty_[param] == 'J';
+ }
bool IsParamAReference(unsigned int param) const {
DCHECK_LT(param, NumArgs());
if (IsStatic()) {
@@ -214,6 +232,8 @@
void Next();
bool IsCurrentParamAReference();
bool IsCurrentParamAFloatOrDouble();
+ bool IsCurrentParamADouble();
+ bool IsCurrentParamALong();
bool IsCurrentArgExplicit(); // ie a non-implict argument such as this
bool IsCurrentArgPossiblyNull();
size_t CurrentParamSize();
@@ -283,6 +303,9 @@
virtual void Next();
bool IsCurrentParamAReference();
bool IsCurrentParamAFloatOrDouble();
+ bool IsCurrentParamADouble();
+ bool IsCurrentParamALong();
+ bool IsCurrentParamJniEnv();
size_t CurrentParamSize();
virtual bool IsCurrentParamInRegister() = 0;
virtual bool IsCurrentParamOnStack() = 0;
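
The shorty used by IsParamALong()/IsParamADouble() encodes the return type
first, then one character per parameter ('J' = long, 'D' = double, 'L' =
reference, and so on); for a static method, parameter 0 therefore lives at
shorty[1]. A worked example (illustrative):

    #include <cassert>

    // static long f(int, double, Object) has the shorty "JIDL".
    static bool StaticParamIsDouble(const char* shorty, unsigned int param) {
      return shorty[param + 1] == 'D';  // +1 skips the return type at shorty[0]
    }

    static void ShortyExample() {
      const char* shorty = "JIDL";
      assert(!StaticParamIsDouble(shorty, 0));  // int
      assert(StaticParamIsDouble(shorty, 1));   // double
      assert(!StaticParamIsDouble(shorty, 2));  // Object (a reference, 'L')
    }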
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index dcdcdd1..64508d1 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -29,6 +29,7 @@
#include "utils/assembler.h"
#include "utils/managed_register.h"
#include "utils/arm/managed_register_arm.h"
+#include "utils/arm64/managed_register_arm64.h"
#include "utils/mips/managed_register_mips.h"
#include "utils/x86/managed_register_x86.h"
#include "thread.h"
@@ -73,11 +74,17 @@
// Calling conventions to call into JNI method "end" possibly passing a returned reference, the
// method and the current thread.
- size_t jni_end_arg_count = 0;
- if (reference_return) { jni_end_arg_count++; }
- if (is_synchronized) { jni_end_arg_count++; }
- const char* jni_end_shorty = jni_end_arg_count == 0 ? "I"
- : (jni_end_arg_count == 1 ? "II" : "III");
+ const char* jni_end_shorty;
+ if (reference_return && is_synchronized) {
+ jni_end_shorty = "ILL";
+ } else if (reference_return) {
+ jni_end_shorty = "IL";
+ } else if (is_synchronized) {
+ jni_end_shorty = "VL";
+ } else {
+ jni_end_shorty = "V";
+ }
+
UniquePtr<JniCallingConvention> end_jni_conv(
JniCallingConvention::Create(is_static, is_synchronized, jni_end_shorty, instruction_set));
@@ -101,12 +108,22 @@
__ StoreImmediateToFrame(main_jni_conv->SirtNumRefsOffset(),
main_jni_conv->ReferenceCount(),
mr_conv->InterproceduralScratchRegister());
- __ CopyRawPtrFromThread32(main_jni_conv->SirtLinkOffset(),
- Thread::TopSirtOffset<4>(),
- mr_conv->InterproceduralScratchRegister());
- __ StoreStackOffsetToThread32(Thread::TopSirtOffset<4>(),
- main_jni_conv->SirtOffset(),
- mr_conv->InterproceduralScratchRegister());
+
+ if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ __ CopyRawPtrFromThread64(main_jni_conv->SirtLinkOffset(),
+ Thread::TopSirtOffset<8>(),
+ mr_conv->InterproceduralScratchRegister());
+ __ StoreStackOffsetToThread64(Thread::TopSirtOffset<8>(),
+ main_jni_conv->SirtOffset(),
+ mr_conv->InterproceduralScratchRegister());
+ } else {
+ __ CopyRawPtrFromThread32(main_jni_conv->SirtLinkOffset(),
+ Thread::TopSirtOffset<4>(),
+ mr_conv->InterproceduralScratchRegister());
+ __ StoreStackOffsetToThread32(Thread::TopSirtOffset<4>(),
+ main_jni_conv->SirtOffset(),
+ mr_conv->InterproceduralScratchRegister());
+ }
// 3. Place incoming reference arguments into SIRT
main_jni_conv->Next(); // Skip JNIEnv*
@@ -154,9 +171,15 @@
}
// 4. Write out the end of the quick frames.
- __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<4>());
- __ StoreImmediateToThread32(Thread::TopOfManagedStackPcOffset<4>(), 0,
- mr_conv->InterproceduralScratchRegister());
+ if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ __ StoreStackPointerToThread64(Thread::TopOfManagedStackOffset<8>());
+ __ StoreImmediateToThread64(Thread::TopOfManagedStackPcOffset<8>(), 0,
+ mr_conv->InterproceduralScratchRegister());
+ } else {
+ __ StoreStackPointerToThread32(Thread::TopOfManagedStackOffset<4>());
+ __ StoreImmediateToThread32(Thread::TopOfManagedStackPcOffset<4>(), 0,
+ mr_conv->InterproceduralScratchRegister());
+ }
// 5. Move frame down to allow space for out going args.
const size_t main_out_arg_size = main_jni_conv->OutArgSize();
@@ -164,13 +187,14 @@
const size_t max_out_arg_size = std::max(main_out_arg_size, end_out_arg_size);
__ IncreaseFrameSize(max_out_arg_size);
-
// 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
// arguments.
- ThreadOffset<4> jni_start = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStartSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStart);
+ ThreadOffset<4> jni_start32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodStart);
+ ThreadOffset<8> jni_start64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
FrameOffset locked_object_sirt_offset(0);
if (is_synchronized) {
@@ -192,12 +216,21 @@
}
if (main_jni_conv->IsCurrentParamInRegister()) {
__ GetCurrentThread(main_jni_conv->CurrentParamRegister());
- __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start),
- main_jni_conv->InterproceduralScratchRegister());
+ if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start64),
+ main_jni_conv->InterproceduralScratchRegister());
+ } else {
+ __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start32),
+ main_jni_conv->InterproceduralScratchRegister());
+ }
} else {
__ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
main_jni_conv->InterproceduralScratchRegister());
- __ CallFromThread32(jni_start, main_jni_conv->InterproceduralScratchRegister());
+ if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ __ CallFromThread64(jni_start64, main_jni_conv->InterproceduralScratchRegister());
+ } else {
+ __ CallFromThread32(jni_start32, main_jni_conv->InterproceduralScratchRegister());
+ }
}
if (is_synchronized) { // Check for exceptions from monitor enter.
__ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size);
@@ -259,11 +292,20 @@
if (main_jni_conv->IsCurrentParamInRegister()) {
ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
- __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>());
+ if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ __ LoadRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>());
+ } else {
+ __ LoadRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>());
+ }
} else {
FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
- __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>(),
+ if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ __ CopyRawPtrFromThread64(jni_env, Thread::JniEnvOffset<8>(),
main_jni_conv->InterproceduralScratchRegister());
+ } else {
+ __ CopyRawPtrFromThread32(jni_env, Thread::JniEnvOffset<4>(),
+ main_jni_conv->InterproceduralScratchRegister());
+ }
}
// 9. Plant call to native code associated with method.
@@ -295,19 +337,23 @@
__ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
}
- // 12. Call into JNI method end possibly passing a returned reference, the method and the current
// thread.
end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
- ThreadOffset<4> jni_end(-1);
+ ThreadOffset<4> jni_end32(-1);
+ ThreadOffset<8> jni_end64(-1);
if (reference_return) {
// Pass result.
- jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReferenceSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReference);
+ jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndWithReference);
+ jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndWithReference);
SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
end_jni_conv->Next();
} else {
- jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndSynchronized)
- : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEnd);
+ jni_end32 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(4, pJniMethodEnd);
+ jni_end64 = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(8, pJniMethodEnd);
}
// Pass saved local reference state.
if (end_jni_conv->IsCurrentParamOnStack()) {
@@ -334,12 +380,21 @@
}
if (end_jni_conv->IsCurrentParamInRegister()) {
__ GetCurrentThread(end_jni_conv->CurrentParamRegister());
- __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end),
- end_jni_conv->InterproceduralScratchRegister());
+ if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end64),
+ end_jni_conv->InterproceduralScratchRegister());
+ } else {
+ __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end32),
+ end_jni_conv->InterproceduralScratchRegister());
+ }
} else {
__ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
end_jni_conv->InterproceduralScratchRegister());
- __ CallFromThread32(ThreadOffset<4>(jni_end), end_jni_conv->InterproceduralScratchRegister());
+ if (instruction_set == kArm64 || instruction_set == kX86_64) {
+ __ CallFromThread64(ThreadOffset<8>(jni_end64), end_jni_conv->InterproceduralScratchRegister());
+ } else {
+ __ CallFromThread32(ThreadOffset<4>(jni_end32), end_jni_conv->InterproceduralScratchRegister());
+ }
}
// 13. Reload return value
@@ -360,6 +415,10 @@
// 17. Finalize code generation
__ EmitSlowPaths();
size_t cs = __ CodeSize();
+ if (instruction_set == kArm64) {
+ // Test that we do not exceed the buffer size.
+ CHECK(cs < arm64::kBufferSizeArm64);
+ }
std::vector<uint8_t> managed_code(cs);
MemoryRegion code(&managed_code[0], managed_code.size());
__ FinalizeInstructions(code);
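
One general note on the jni_compiler.cc changes: every Thread-relative
access is now duplicated into a 32-bit and a 64-bit flavor
(ThreadOffset<4>/<8>, *Thread32/*Thread64, QUICK_ENTRYPOINT_OFFSET(4, ...)
vs. (8, ...)), selected by the same instruction-set test. A condensed
sketch of that predicate (illustrative; the enum stands in for ART's
InstructionSet):

    enum InstructionSet { kNone, kArm, kThumb2, kArm64, kX86, kX86_64, kMips };

    // True for targets that use 64-bit Thread offsets and the *Thread64
    // assembler entry points in the code above.
    static bool Is64BitTarget(InstructionSet isa) {
      return isa == kArm64 || isa == kX86_64;
    }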