x86_64: JNI compiler
Passed all tests from jni_compiler_test and art/test on host with jni_compiler.
Incoming argument spill is enabled, entry_spills refactored. Now each entry spill
contains data type size (4 or 8) and offset which should be used for spill.
Assembler REX support implemented in opcodes used in JNI compiler.
Please note, JNI compiler is not enabled by default yet (see compiler_driver.cc:1875).
Change-Id: I5fd19cca72122b197aec07c3708b1e80c324be44
Signed-off-by: Dmitry Petrochenko <dmitry.petrochenko@intel.com>
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 78f403c..28b438e 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -85,7 +85,7 @@
return result;
}
-const std::vector<ManagedRegister>& ArmManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& ArmManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on ARM to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index fc2d857..96bbb7e 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -36,10 +36,10 @@
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index c4d0d45..ff899b7 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -85,7 +85,7 @@
return result;
}
-const std::vector<ManagedRegister>& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on ARM64 to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 2dcf1af..7e33830 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -36,10 +36,10 @@
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 5856df4..043bcea 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -21,6 +21,7 @@
#include "jni/quick/arm64/calling_convention_arm64.h"
#include "jni/quick/mips/calling_convention_mips.h"
#include "jni/quick/x86/calling_convention_x86.h"
+#include "jni/quick/x86_64/calling_convention_x86_64.h"
#include "utils.h"
namespace art {
@@ -44,6 +45,8 @@
return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ case kX86_64:
+ return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
return NULL;
@@ -61,6 +64,9 @@
itr_longs_and_doubles_++;
itr_slots_++;
}
+ if (IsParamAFloatOrDouble(itr_args_)) {
+ itr_float_and_doubles_++;
+ }
if (IsCurrentParamAReference()) {
itr_refs_++;
}
@@ -85,6 +91,10 @@
return IsParamAReference(itr_args_);
}
+bool ManagedRuntimeCallingConvention::IsCurrentParamAFloatOrDouble() {
+ return IsParamAFloatOrDouble(itr_args_);
+}
+
// JNI calling convention
JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synchronized,
@@ -100,6 +110,8 @@
return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
+ case kX86_64:
+ return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
return NULL;
@@ -111,9 +123,8 @@
}
FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
- size_t start_of_sirt = SirtNumRefsOffset().Int32Value() + kPointerSize;
- size_t references_size = kPointerSize * ReferenceCount(); // size excluding header
- return FrameOffset(start_of_sirt + references_size);
+ size_t references_size = kSirtPointerSize * ReferenceCount(); // size excluding header
+ return FrameOffset(SirtReferencesOffset().Int32Value() + references_size);
}
FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
@@ -139,6 +150,9 @@
itr_slots_++;
}
}
+ if (IsCurrentParamAFloatOrDouble()) {
+ itr_float_and_doubles_++;
+ }
if (IsCurrentParamAReference()) {
itr_refs_++;
}
@@ -159,14 +173,25 @@
}
}
+bool JniCallingConvention::IsCurrentParamAFloatOrDouble() {
+ switch (itr_args_) {
+ case kJniEnv:
+ return false; // JNIEnv*
+ case kObjectOrClass:
+ return false; // jobject or jclass
+ default: {
+ int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+ return IsParamAFloatOrDouble(arg_pos);
+ }
+ }
+}
+
// Return position of SIRT entry holding reference at the current iterator
// position
FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
CHECK(IsCurrentParamAReference());
CHECK_LT(SirtLinkOffset(), SirtNumRefsOffset());
- // Address of 1st SIRT entry
- int result = SirtNumRefsOffset().Int32Value() + kPointerSize;
- result += itr_refs_ * kPointerSize;
+ int result = SirtReferencesOffset().Int32Value() + itr_refs_ * kSirtPointerSize;
CHECK_GT(result, SirtNumRefsOffset().Int32Value());
return FrameOffset(result);
}
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index f2b7fd9..fe3d1cd 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -60,23 +60,35 @@
itr_args_ = 0;
itr_refs_ = 0;
itr_longs_and_doubles_ = 0;
+ itr_float_and_doubles_ = 0;
}
virtual ~CallingConvention() {}
protected:
CallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : displacement_(0), is_static_(is_static), is_synchronized_(is_synchronized),
+ : displacement_(0), kSirtPointerSize(sizeof(StackReference<mirror::Object>)), is_static_(is_static), is_synchronized_(is_synchronized),
shorty_(shorty) {
num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
num_ref_args_ = is_static ? 0 : 1; // The implicit this pointer.
+ num_float_or_double_args_ = 0;
num_long_or_double_args_ = 0;
for (size_t i = 1; i < strlen(shorty); i++) {
char ch = shorty_[i];
- if (ch == 'L') {
+ switch (ch) {
+ case 'L':
num_ref_args_++;
- } else if ((ch == 'D') || (ch == 'J')) {
+ break;
+ case 'J':
num_long_or_double_args_++;
+ break;
+ case 'D':
+ num_long_or_double_args_++;
+ num_float_or_double_args_++;
+ break;
+ case 'F':
+ num_float_or_double_args_++;
+ break;
}
}
}
@@ -97,6 +109,16 @@
char ch = shorty_[param];
return (ch == 'J' || ch == 'D');
}
+ bool IsParamAFloatOrDouble(unsigned int param) const {
+ DCHECK_LT(param, NumArgs());
+ if (IsStatic()) {
+ param++; // 0th argument must skip return value at start of the shorty
+ } else if (param == 0) {
+ return false; // this argument
+ }
+ char ch = shorty_[param];
+ return (ch == 'F' || ch == 'D');
+ }
bool IsParamAReference(unsigned int param) const {
DCHECK_LT(param, NumArgs());
if (IsStatic()) {
@@ -112,6 +134,9 @@
size_t NumLongOrDoubleArgs() const {
return num_long_or_double_args_;
}
+ size_t NumFloatOrDoubleArgs() const {
+ return num_float_or_double_args_;
+ }
size_t NumReferenceArgs() const {
return num_ref_args_;
}
@@ -141,8 +166,11 @@
unsigned int itr_args_;
// Number of longs and doubles seen along argument list
unsigned int itr_longs_and_doubles_;
+  // Number of floats and doubles seen along argument list
+ unsigned int itr_float_and_doubles_;
// Space for frames below this on the stack
FrameOffset displacement_;
+ size_t kSirtPointerSize;
private:
const bool is_static_;
@@ -150,6 +178,7 @@
std::string shorty_;
size_t num_args_;
size_t num_ref_args_;
+ size_t num_float_or_double_args_;
size_t num_long_or_double_args_;
};
@@ -174,6 +203,7 @@
bool HasNext();
void Next();
bool IsCurrentParamAReference();
+ bool IsCurrentParamAFloatOrDouble();
bool IsCurrentArgExplicit(); // ie a non-implict argument such as this
bool IsCurrentArgPossiblyNull();
size_t CurrentParamSize();
@@ -185,7 +215,7 @@
virtual ~ManagedRuntimeCallingConvention() {}
// Registers to spill to caller's out registers on entry.
- virtual const std::vector<ManagedRegister>& EntrySpills() = 0;
+ virtual const ManagedRegisterEntrySpills& EntrySpills() = 0;
protected:
ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
@@ -241,6 +271,7 @@
bool HasNext();
virtual void Next();
bool IsCurrentParamAReference();
+ bool IsCurrentParamAFloatOrDouble();
size_t CurrentParamSize();
virtual bool IsCurrentParamInRegister() = 0;
virtual bool IsCurrentParamOnStack() = 0;
@@ -255,13 +286,21 @@
return FrameOffset(displacement_.Int32Value() +
kPointerSize); // above Method*
}
+
+ FrameOffset SirtLinkOffset() const {
+ return FrameOffset(SirtOffset().Int32Value() +
+ StackIndirectReferenceTable::LinkOffset());
+ }
+
FrameOffset SirtNumRefsOffset() const {
return FrameOffset(SirtOffset().Int32Value() +
StackIndirectReferenceTable::NumberOfReferencesOffset());
}
- FrameOffset SirtLinkOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::LinkOffset());
+
+ FrameOffset SirtReferencesOffset() const {
+ // The StackIndirectReferenceTable::number_of_references_ type is uint32_t
+ return FrameOffset(SirtNumRefsOffset().Int32Value() +
+ sizeof(uint32_t));
}
virtual ~JniCallingConvention() {}
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 1c9aed8..c89bc40 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -271,7 +271,7 @@
mr_conv->InterproceduralScratchRegister());
// 10. Fix differences in result widths.
- if (instruction_set == kX86) {
+ if (instruction_set == kX86 || instruction_set == kX86_64) {
if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
__ SignExtend(main_jni_conv->ReturnRegister(),
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 0a48500..ea39d60 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -85,7 +85,7 @@
return result;
}
-const std::vector<ManagedRegister>& MipsManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& MipsManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on MIPS to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index 445f453..1a9053a 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -35,10 +35,10 @@
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 8b5c86d..8d22fe6 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -90,7 +90,7 @@
(itr_slots_ * kPointerSize)); // offset into in args
}
-const std::vector<ManagedRegister>& X86ManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on X86 to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index e814c7e..2dab059 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -37,9 +37,9 @@
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
new file mode 100644
index 0000000..8ebea46
--- /dev/null
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_x86_64.h"
+
+#include "base/logging.h"
+#include "utils/x86_64/managed_register_x86_64.h"
+#include "utils.h"
+
+namespace art {
+namespace x86_64 {
+
+// Calling convention
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
+ return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+ if (shorty[0] == 'F' || shorty[0] == 'D') {
+ return X86_64ManagedRegister::FromXmmRegister(_XMM0);
+ } else if (shorty[0] == 'J') {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+ } else if (shorty[0] == 'V') {
+ return ManagedRegister::NoRegister();
+ } else {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+ }
+}
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty(), false);
+}
+
+ManagedRegister X86_64JniCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty(), true);
+}
+
+ManagedRegister X86_64JniCallingConvention::IntReturnRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::MethodRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RDI);
+}
+
+bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+ return !IsCurrentParamOnStack();
+}
+
+bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+ // We assume all parameters are on stack, args coming via registers are spilled as entry_spills
+ return true;
+}
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+ ManagedRegister res = ManagedRegister::NoRegister();
+ if (!IsCurrentParamAFloatOrDouble()) {
+ switch (itr_args_ - itr_float_and_doubles_) {
+ case 0: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
+ case 1: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
+ case 2: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
+ case 3: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
+ case 4: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
+ }
+ } else if (itr_float_and_doubles_ < 8) {
+ // First eight float parameters are passed via XMM0..XMM7
+ res = X86_64ManagedRegister::FromXmmRegister(
+ static_cast<XmmRegister>(_XMM0 + itr_float_and_doubles_));
+ }
+ return res;
+}
+
+FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+ return FrameOffset(displacement_.Int32Value() + // displacement
+ kPointerSize + // Method*
+ (itr_slots_ * sizeof(uint32_t))); // offset into in args
+}
+
+const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on X86_64 to free them up for scratch use, we then assume
+ // all arguments are on the stack.
+ if (entry_spills_.size() == 0) {
+ ResetIterator(FrameOffset(0));
+ while (HasNext()) {
+ ManagedRegister in_reg = CurrentParamRegister();
+ if (!in_reg.IsNoRegister()) {
+ int32_t size = IsParamALongOrDouble(itr_args_)? 8 : 4;
+ int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
+ ManagedRegisterSpill spill(in_reg, size, spill_offset);
+ entry_spills_.push_back(spill);
+ }
+ Next();
+ }
+ }
+ return entry_spills_;
+}
+
+// JNI calling convention
+
+X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, bool is_synchronized,
+ const char* shorty)
+ : JniCallingConvention(is_static, is_synchronized, shorty) {
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(RBX));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(RBP));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R12));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R13));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R14));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R15));
+}
+
+uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
+  return 1 << RBX | 1 << RBP | 1 << R12 | 1 << R13 | 1 << R14 | 1 << R15 | 1 << kNumberOfCpuRegisters;
+}
+
+size_t X86_64JniCallingConvention::FrameSize() {
+ // Method*, return address and callee save area size, local reference segment state
+ size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+ // References plus link_ (pointer) and number_of_references_ (uint32_t) for SIRT header
+ size_t sirt_size = kPointerSize + sizeof(uint32_t) + ReferenceCount()*kSirtPointerSize;
+ // Plus return value spill area size
+ return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t X86_64JniCallingConvention::OutArgSize() {
+ return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, kStackAlignment);
+}
+
+bool X86_64JniCallingConvention::IsCurrentParamInRegister() {
+ return !IsCurrentParamOnStack();
+}
+
+bool X86_64JniCallingConvention::IsCurrentParamOnStack() {
+ return CurrentParamRegister().IsNoRegister();
+}
+
+ManagedRegister X86_64JniCallingConvention::CurrentParamRegister() {
+ ManagedRegister res = ManagedRegister::NoRegister();
+ if (!IsCurrentParamAFloatOrDouble()) {
+ switch (itr_args_ - itr_float_and_doubles_) {
+ case 0: res = X86_64ManagedRegister::FromCpuRegister(RDI); break;
+ case 1: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
+ case 2: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
+ case 3: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
+ case 4: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
+ case 5: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
+ }
+ } else if (itr_float_and_doubles_ < 8) {
+ // First eight float parameters are passed via XMM0..XMM7
+ res = X86_64ManagedRegister::FromXmmRegister(
+ static_cast<XmmRegister>(_XMM0 + itr_float_and_doubles_));
+ }
+ return res;
+}
+
+FrameOffset X86_64JniCallingConvention::CurrentParamStackOffset() {
+ size_t offset = itr_args_
+ - std::min(8U, itr_float_and_doubles_) // Float arguments passed through Xmm0..Xmm7
+ - std::min(6U, itr_args_ - itr_float_and_doubles_); // Integer arguments passed through GPR
+ return FrameOffset(displacement_.Int32Value() - OutArgSize() + (offset * kPointerSize));
+}
+
+size_t X86_64JniCallingConvention::NumberOfOutgoingStackArgs() {
+ size_t static_args = IsStatic() ? 1 : 0; // count jclass
+ // regular argument parameters and this
+ size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+ // count JNIEnv* and return pc (pushed after Method*)
+ size_t total_args = static_args + param_args + 2;
+
+ // Float arguments passed through Xmm0..Xmm7
+ // Other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9)
+ size_t total_stack_args = total_args
+ - std::min(8U, static_cast<unsigned int>(NumFloatOrDoubleArgs()))
+ - std::min(6U, static_cast<unsigned int>(NumArgs() - NumFloatOrDoubleArgs()));
+
+ return total_stack_args;
+}
+
+} // namespace x86_64
+} // namespace art
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
new file mode 100644
index 0000000..d7f7762
--- /dev/null
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+#define ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace x86_64 {
+
+class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+ explicit X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized,
+ const char* shorty)
+ : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+ ~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // Managed runtime calling convention
+ ManagedRegister MethodRegister() OVERRIDE;
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ private:
+ ManagedRegisterEntrySpills entry_spills_;
+ DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention);
+};
+
+class X86_64JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+ explicit X86_64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+ ~X86_64JniCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister IntReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // JNI calling convention
+ size_t FrameSize() OVERRIDE;
+ size_t OutArgSize() OVERRIDE;
+ const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+ return callee_save_regs_;
+ }
+ ManagedRegister ReturnScratchRegister() const OVERRIDE;
+ uint32_t CoreSpillMask() const OVERRIDE;
+ uint32_t FpSpillMask() const OVERRIDE {
+ return 0;
+ }
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+ protected:
+ size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+ // TODO: these values aren't unique and can be shared amongst instances
+ std::vector<ManagedRegister> callee_save_regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention);
+};
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_