Diffstat (limited to 'compiler/jni/quick')
-rw-r--r--  compiler/jni/quick/arm/calling_convention_arm.cc        |   2
-rw-r--r--  compiler/jni/quick/arm/calling_convention_arm.h         |   4
-rw-r--r--  compiler/jni/quick/arm64/calling_convention_arm64.cc    |   2
-rw-r--r--  compiler/jni/quick/arm64/calling_convention_arm64.h     |   4
-rw-r--r--  compiler/jni/quick/calling_convention.cc                |  37
-rw-r--r--  compiler/jni/quick/calling_convention.h                 |  53
-rw-r--r--  compiler/jni/quick/jni_compiler.cc                      |   2
-rw-r--r--  compiler/jni/quick/mips/calling_convention_mips.cc      |   2
-rw-r--r--  compiler/jni/quick/mips/calling_convention_mips.h       |   4
-rw-r--r--  compiler/jni/quick/x86/calling_convention_x86.cc        |   2
-rw-r--r--  compiler/jni/quick/x86/calling_convention_x86.h         |   4
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.cc  | 203
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.h   |  83
13 files changed, 376 insertions(+), 26 deletions(-)
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 78f403c2e8..28b438e198 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -85,7 +85,7 @@ FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
return result;
}
-const std::vector<ManagedRegister>& ArmManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& ArmManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on ARM to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index fc2d8570fe..96bbb7e94f 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -36,10 +36,10 @@ class ArmManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCon
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index c4d0d451c0..ff899b75e9 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -85,7 +85,7 @@ FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
return result;
}
-const std::vector<ManagedRegister>& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on ARM64 to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 2dcf1af71c..7e3383065d 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -36,10 +36,10 @@ class Arm64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingC
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 5856df4bc1..043bceae17 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -21,6 +21,7 @@
#include "jni/quick/arm64/calling_convention_arm64.h"
#include "jni/quick/mips/calling_convention_mips.h"
#include "jni/quick/x86/calling_convention_x86.h"
+#include "jni/quick/x86_64/calling_convention_x86_64.h"
#include "utils.h"
namespace art {
@@ -44,6 +45,8 @@ ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ case kX86_64:
+ return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
return NULL;
@@ -61,6 +64,9 @@ void ManagedRuntimeCallingConvention::Next() {
itr_longs_and_doubles_++;
itr_slots_++;
}
+ if (IsParamAFloatOrDouble(itr_args_)) {
+ itr_float_and_doubles_++;
+ }
if (IsCurrentParamAReference()) {
itr_refs_++;
}
@@ -85,6 +91,10 @@ bool ManagedRuntimeCallingConvention::IsCurrentParamAReference() {
return IsParamAReference(itr_args_);
}
+bool ManagedRuntimeCallingConvention::IsCurrentParamAFloatOrDouble() {
+ return IsParamAFloatOrDouble(itr_args_);
+}
+
// JNI calling convention
JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synchronized,
@@ -100,6 +110,8 @@ JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synch
return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
+ case kX86_64:
+ return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
return NULL;
@@ -111,9 +123,8 @@ size_t JniCallingConvention::ReferenceCount() const {
}
FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
- size_t start_of_sirt = SirtNumRefsOffset().Int32Value() + kPointerSize;
- size_t references_size = kPointerSize * ReferenceCount(); // size excluding header
- return FrameOffset(start_of_sirt + references_size);
+ size_t references_size = kSirtPointerSize * ReferenceCount(); // size excluding header
+ return FrameOffset(SirtReferencesOffset().Int32Value() + references_size);
}
FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
@@ -139,6 +150,9 @@ void JniCallingConvention::Next() {
itr_slots_++;
}
}
+ if (IsCurrentParamAFloatOrDouble()) {
+ itr_float_and_doubles_++;
+ }
if (IsCurrentParamAReference()) {
itr_refs_++;
}
@@ -159,14 +173,25 @@ bool JniCallingConvention::IsCurrentParamAReference() {
}
}
+bool JniCallingConvention::IsCurrentParamAFloatOrDouble() {
+ switch (itr_args_) {
+ case kJniEnv:
+ return false; // JNIEnv*
+ case kObjectOrClass:
+ return false; // jobject or jclass
+ default: {
+ int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+ return IsParamAFloatOrDouble(arg_pos);
+ }
+ }
+}
+
// Return position of SIRT entry holding reference at the current iterator
// position
FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
CHECK(IsCurrentParamAReference());
CHECK_LT(SirtLinkOffset(), SirtNumRefsOffset());
- // Address of 1st SIRT entry
- int result = SirtNumRefsOffset().Int32Value() + kPointerSize;
- result += itr_refs_ * kPointerSize;
+ int result = SirtReferencesOffset().Int32Value() + itr_refs_ * kSirtPointerSize;
CHECK_GT(result, SirtNumRefsOffset().Int32Value());
return FrameOffset(result);
}
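
The two hunks above replace hand-rolled SIRT offset arithmetic with SirtReferencesOffset() and the new kSirtPointerSize. A minimal standalone sketch of the resulting layout math follows; this is not ART code, and the sizes are assumed for a 64-bit target whose stack references are 32-bit.

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t kPointerSize = 8;      // assumed: 64-bit target
  const size_t kSirtPointerSize = 4;  // assumed: sizeof(StackReference<mirror::Object>)
  const size_t sirt_offset = 8;       // assumed: SIRT sits just above Method* in the frame

  size_t link_offset = sirt_offset;                         // SirtLinkOffset()
  size_t num_refs_offset = sirt_offset + kPointerSize;      // SirtNumRefsOffset()
  size_t refs_offset = num_refs_offset + sizeof(uint32_t);  // SirtReferencesOffset()

  printf("link=%zu num_refs=%zu refs=%zu\n", link_offset, num_refs_offset, refs_offset);
  for (size_t itr_refs = 0; itr_refs < 3; ++itr_refs) {
    // CurrentParamSirtEntryOffset() for the reference at iterator position itr_refs
    printf("ref %zu at frame offset %zu\n", itr_refs, refs_offset + itr_refs * kSirtPointerSize);
  }
  return 0;
}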
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index f2b7fd9a4a..fe3d1cd551 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -60,23 +60,35 @@ class CallingConvention {
itr_args_ = 0;
itr_refs_ = 0;
itr_longs_and_doubles_ = 0;
+ itr_float_and_doubles_ = 0;
}
virtual ~CallingConvention() {}
protected:
CallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : displacement_(0), is_static_(is_static), is_synchronized_(is_synchronized),
+ : displacement_(0), kSirtPointerSize(sizeof(StackReference<mirror::Object>)), is_static_(is_static), is_synchronized_(is_synchronized),
shorty_(shorty) {
num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
num_ref_args_ = is_static ? 0 : 1; // The implicit this pointer.
+ num_float_or_double_args_ = 0;
num_long_or_double_args_ = 0;
for (size_t i = 1; i < strlen(shorty); i++) {
char ch = shorty_[i];
- if (ch == 'L') {
+ switch (ch) {
+ case 'L':
num_ref_args_++;
- } else if ((ch == 'D') || (ch == 'J')) {
+ break;
+ case 'J':
num_long_or_double_args_++;
+ break;
+ case 'D':
+ num_long_or_double_args_++;
+ num_float_or_double_args_++;
+ break;
+ case 'F':
+ num_float_or_double_args_++;
+ break;
}
}
}
@@ -97,6 +109,16 @@ class CallingConvention {
char ch = shorty_[param];
return (ch == 'J' || ch == 'D');
}
+ bool IsParamAFloatOrDouble(unsigned int param) const {
+ DCHECK_LT(param, NumArgs());
+ if (IsStatic()) {
+ param++; // 0th argument must skip return value at start of the shorty
+ } else if (param == 0) {
+ return false; // this argument
+ }
+ char ch = shorty_[param];
+ return (ch == 'F' || ch == 'D');
+ }
bool IsParamAReference(unsigned int param) const {
DCHECK_LT(param, NumArgs());
if (IsStatic()) {
@@ -112,6 +134,9 @@ class CallingConvention {
size_t NumLongOrDoubleArgs() const {
return num_long_or_double_args_;
}
+ size_t NumFloatOrDoubleArgs() const {
+ return num_float_or_double_args_;
+ }
size_t NumReferenceArgs() const {
return num_ref_args_;
}
@@ -141,8 +166,11 @@ class CallingConvention {
unsigned int itr_args_;
// Number of longs and doubles seen along argument list
unsigned int itr_longs_and_doubles_;
+ // Number of floats and doubles seen along the argument list
+ unsigned int itr_float_and_doubles_;
// Space for frames below this on the stack
FrameOffset displacement_;
+ size_t kSirtPointerSize;
private:
const bool is_static_;
@@ -150,6 +178,7 @@ class CallingConvention {
std::string shorty_;
size_t num_args_;
size_t num_ref_args_;
+ size_t num_float_or_double_args_;
size_t num_long_or_double_args_;
};
@@ -174,6 +203,7 @@ class ManagedRuntimeCallingConvention : public CallingConvention {
bool HasNext();
void Next();
bool IsCurrentParamAReference();
+ bool IsCurrentParamAFloatOrDouble();
bool IsCurrentArgExplicit(); // i.e. a non-implicit argument (unlike "this")
bool IsCurrentArgPossiblyNull();
size_t CurrentParamSize();
@@ -185,7 +215,7 @@ class ManagedRuntimeCallingConvention : public CallingConvention {
virtual ~ManagedRuntimeCallingConvention() {}
// Registers to spill to caller's out registers on entry.
- virtual const std::vector<ManagedRegister>& EntrySpills() = 0;
+ virtual const ManagedRegisterEntrySpills& EntrySpills() = 0;
protected:
ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
@@ -241,6 +271,7 @@ class JniCallingConvention : public CallingConvention {
bool HasNext();
virtual void Next();
bool IsCurrentParamAReference();
+ bool IsCurrentParamAFloatOrDouble();
size_t CurrentParamSize();
virtual bool IsCurrentParamInRegister() = 0;
virtual bool IsCurrentParamOnStack() = 0;
@@ -255,13 +286,21 @@ class JniCallingConvention : public CallingConvention {
return FrameOffset(displacement_.Int32Value() +
kPointerSize); // above Method*
}
+
+ FrameOffset SirtLinkOffset() const {
+ return FrameOffset(SirtOffset().Int32Value() +
+ StackIndirectReferenceTable::LinkOffset());
+ }
+
FrameOffset SirtNumRefsOffset() const {
return FrameOffset(SirtOffset().Int32Value() +
StackIndirectReferenceTable::NumberOfReferencesOffset());
}
- FrameOffset SirtLinkOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::LinkOffset());
+
+ FrameOffset SirtReferencesOffset() const {
+ // The StackIndirectReferenceTable::number_of_references_ type is uint32_t
+ return FrameOffset(SirtNumRefsOffset().Int32Value() +
+ sizeof(uint32_t));
}
virtual ~JniCallingConvention() {}
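
The constructor hunk above scans the shorty once, counting 'D' toward both the wide (long-or-double) and the floating-point tallies. Below is a standalone sketch of that scan, not ART code; the shorty is hypothetical, and the implicit "this" of non-static methods (added to num_ref_args_ before the loop) is omitted.

#include <cstdio>
#include <cstring>

int main() {
  const char* shorty = "VJDFL";  // hypothetical: void f(long, double, float, Object)
  size_t refs = 0, longs_or_doubles = 0, floats_or_doubles = 0;
  // Index 0 is the return type, so the scan starts at 1.
  for (size_t i = 1; i < strlen(shorty); ++i) {
    switch (shorty[i]) {
      case 'L': refs++; break;
      case 'J': longs_or_doubles++; break;
      case 'D': longs_or_doubles++; floats_or_doubles++; break;  // wide AND floating-point
      case 'F': floats_or_doubles++; break;
    }
  }
  printf("refs=%zu wide=%zu fp=%zu\n", refs, longs_or_doubles, floats_or_doubles);
  // Prints: refs=1 wide=2 fp=2
  return 0;
}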
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 1c9aed83c3..c89bc40fda 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -271,7 +271,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
mr_conv->InterproceduralScratchRegister());
// 10. Fix differences in result widths.
- if (instruction_set == kX86) {
+ if (instruction_set == kX86 || instruction_set == kX86_64) {
if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
__ SignExtend(main_jni_conv->ReturnRegister(),
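
The hunk above extends the x86 result-width fix-up to x86-64: a native callee may leave the upper bits of the return register undefined for sub-word return types, so the stub widens the value before managed code reads the full register. A rough C++ equivalent of the widening is sketched below; this is not ART code, and the zero-extend case for char is assumed from the surrounding code rather than shown in this hunk.

#include <cstdint>
#include <cstdio>

// movsx-style widening for kPrimByte (the SignExtend(reg, 1) case above).
int32_t WidenByte(uint32_t raw_reg_bits) {
  return static_cast<int8_t>(raw_reg_bits & 0xFF);
}

// movzx-style widening, assumed for kPrimChar (Java char is unsigned 16-bit).
uint32_t WidenChar(uint32_t raw_reg_bits) {
  return static_cast<uint16_t>(raw_reg_bits & 0xFFFF);
}

int main() {
  uint32_t raw = 0xDEADBEEFu;  // upper bits: garbage the callee left behind
  printf("byte: %d\n", WidenByte(raw));  // low byte 0xEF -> -17
  printf("char: %u\n", WidenChar(raw));  // low half 0xBEEF -> 48879
  return 0;
}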
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 0a48500380..ea39d6009c 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -85,7 +85,7 @@ FrameOffset MipsManagedRuntimeCallingConvention::CurrentParamStackOffset() {
return result;
}
-const std::vector<ManagedRegister>& MipsManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& MipsManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on MIPS to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index 445f453943..1a9053a9dd 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -35,10 +35,10 @@ class MipsManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCo
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 8b5c86d683..8d22fe6632 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -90,7 +90,7 @@ FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
(itr_slots_ * kPointerSize)); // offset into in args
}
-const std::vector<ManagedRegister>& X86ManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on X86 to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index e814c7e531..2dab059919 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -37,9 +37,9 @@ class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingCon
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
new file mode 100644
index 0000000000..8ebea4630e
--- /dev/null
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_x86_64.h"
+
+#include "base/logging.h"
+#include "utils/x86_64/managed_register_x86_64.h"
+#include "utils.h"
+
+namespace art {
+namespace x86_64 {
+
+// Calling convention
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
+ return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+ if (shorty[0] == 'F' || shorty[0] == 'D') {
+ return X86_64ManagedRegister::FromXmmRegister(_XMM0);
+ } else if (shorty[0] == 'J') {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+ } else if (shorty[0] == 'V') {
+ return ManagedRegister::NoRegister();
+ } else {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+ }
+}
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty(), false);
+}
+
+ManagedRegister X86_64JniCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty(), true);
+}
+
+ManagedRegister X86_64JniCallingConvention::IntReturnRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::MethodRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RDI);
+}
+
+bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+ return !IsCurrentParamOnStack();
+}
+
+bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+ // We assume all parameters are on stack, args coming via registers are spilled as entry_spills
+ return true;
+}
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+ ManagedRegister res = ManagedRegister::NoRegister();
+ if (!IsCurrentParamAFloatOrDouble()) {
+ switch (itr_args_ - itr_float_and_doubles_) {
+ case 0: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
+ case 1: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
+ case 2: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
+ case 3: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
+ case 4: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
+ }
+ } else if (itr_float_and_doubles_ < 8) {
+ // First eight float parameters are passed via XMM0..XMM7
+ res = X86_64ManagedRegister::FromXmmRegister(
+ static_cast<XmmRegister>(_XMM0 + itr_float_and_doubles_));
+ }
+ return res;
+}
+
+FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+ return FrameOffset(displacement_.Int32Value() + // displacement
+ kPointerSize + // Method*
+ (itr_slots_ * sizeof(uint32_t))); // offset into in args
+}
+
+const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() {
+ // We spill the argument registers on x86-64 to free them up for scratch use; we then assume
+ // all arguments are on the stack.
+ if (entry_spills_.size() == 0) {
+ ResetIterator(FrameOffset(0));
+ while (HasNext()) {
+ ManagedRegister in_reg = CurrentParamRegister();
+ if (!in_reg.IsNoRegister()) {
+ int32_t size = IsParamALongOrDouble(itr_args_) ? 8 : 4;
+ int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
+ ManagedRegisterSpill spill(in_reg, size, spill_offset);
+ entry_spills_.push_back(spill);
+ }
+ Next();
+ }
+ }
+ return entry_spills_;
+}
+
+// JNI calling convention
+
+X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, bool is_synchronized,
+ const char* shorty)
+ : JniCallingConvention(is_static, is_synchronized, shorty) {
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(RBX));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(RBP));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R12));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R13));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R14));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R15));
+}
+
+uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
+ return 1 << RBX | 1 << RBP | 1 << R12 | 1 << R13 | 1 << R14 | 1 << R15 | 1 << kNumberOfCpuRegisters;
+}
+
+size_t X86_64JniCallingConvention::FrameSize() {
+ // Method*, return address and callee save area size, local reference segment state
+ size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+ // References plus link_ (pointer) and number_of_references_ (uint32_t) for SIRT header
+ size_t sirt_size = kPointerSize + sizeof(uint32_t) + ReferenceCount() * kSirtPointerSize;
+ // Plus return value spill area size
+ return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t X86_64JniCallingConvention::OutArgSize() {
+ return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, kStackAlignment);
+}
+
+bool X86_64JniCallingConvention::IsCurrentParamInRegister() {
+ return !IsCurrentParamOnStack();
+}
+
+bool X86_64JniCallingConvention::IsCurrentParamOnStack() {
+ return CurrentParamRegister().IsNoRegister();
+}
+
+ManagedRegister X86_64JniCallingConvention::CurrentParamRegister() {
+ ManagedRegister res = ManagedRegister::NoRegister();
+ if (!IsCurrentParamAFloatOrDouble()) {
+ switch (itr_args_ - itr_float_and_doubles_) {
+ case 0: res = X86_64ManagedRegister::FromCpuRegister(RDI); break;
+ case 1: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
+ case 2: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
+ case 3: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
+ case 4: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
+ case 5: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
+ }
+ } else if (itr_float_and_doubles_ < 8) {
+ // First eight float parameters are passed via XMM0..XMM7
+ res = X86_64ManagedRegister::FromXmmRegister(
+ static_cast<XmmRegister>(_XMM0 + itr_float_and_doubles_));
+ }
+ return res;
+}
+
+FrameOffset X86_64JniCallingConvention::CurrentParamStackOffset() {
+ size_t offset = itr_args_
+ - std::min(8U, itr_float_and_doubles_) // Float arguments passed through Xmm0..Xmm7
+ - std::min(6U, itr_args_ - itr_float_and_doubles_); // Integer arguments passed through GPR
+ return FrameOffset(displacement_.Int32Value() - OutArgSize() + (offset * kPointerSize));
+}
+
+size_t X86_64JniCallingConvention::NumberOfOutgoingStackArgs() {
+ size_t static_args = IsStatic() ? 1 : 0; // count jclass
+ // regular argument parameters and this
+ size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+ // count JNIEnv* and return pc (pushed after Method*)
+ size_t total_args = static_args + param_args + 2;
+
+ // Float arguments passed through Xmm0..Xmm7
+ // Other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9)
+ size_t total_stack_args = total_args
+ - std::min(8U, static_cast<unsigned int>(NumFloatOrDoubleArgs()))
+ - std::min(6U, static_cast<unsigned int>(NumArgs() - NumFloatOrDoubleArgs()));
+
+ return total_stack_args;
+}
+
+} // namespace x86_64
+} // namespace art
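
CurrentParamRegister() above follows the System V AMD64 convention: integer arguments take RDI, RSI, RDX, RCX, R8, R9 in order (the JNI side starts at RDI because JNIEnv* is argument 0, while the managed side starts at RSI since RDI holds Method*), float/double arguments take XMM0..XMM7, and each class is indexed only by previously seen arguments of its own class. A standalone sketch of that assignment for a hypothetical JNI signature, not ART code:

#include <cstdio>

int main() {
  const char* gprs[] = {"RDI", "RSI", "RDX", "RCX", "R8", "R9"};
  // Hypothetical JNI argument list: (JNIEnv*, jclass, jint, jfloat, jlong, jdouble)
  bool is_fp[] = {false, false, false, true, false, true};
  unsigned itr_float_and_doubles = 0;
  for (unsigned itr_args = 0; itr_args < 6; ++itr_args) {
    if (is_fp[itr_args]) {
      // Floating-point class: indexed by floats/doubles seen so far.
      printf("arg %u -> XMM%u\n", itr_args, itr_float_and_doubles);
      ++itr_float_and_doubles;
    } else {
      // Integer class: skip over the floating-point arguments already assigned.
      unsigned gpr_index = itr_args - itr_float_and_doubles;
      printf("arg %u -> %s\n", itr_args, gpr_index < 6 ? gprs[gpr_index] : "stack");
    }
  }
  return 0;
}

For the signature above this prints RDI, RSI, RDX, XMM0, RCX, XMM1: the jlong lands in RCX even though it is the fifth argument overall, because only four integer-class arguments precede it.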
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
new file mode 100644
index 0000000000..d7f77626c3
--- /dev/null
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+#define ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace x86_64 {
+
+class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+ explicit X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized,
+ const char* shorty)
+ : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+ ~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // Managed runtime calling convention
+ ManagedRegister MethodRegister() OVERRIDE;
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ private:
+ ManagedRegisterEntrySpills entry_spills_;
+ DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention);
+};
+
+class X86_64JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+ explicit X86_64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+ ~X86_64JniCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister IntReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // JNI calling convention
+ size_t FrameSize() OVERRIDE;
+ size_t OutArgSize() OVERRIDE;
+ const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+ return callee_save_regs_;
+ }
+ ManagedRegister ReturnScratchRegister() const OVERRIDE;
+ uint32_t CoreSpillMask() const OVERRIDE;
+ uint32_t FpSpillMask() const OVERRIDE {
+ return 0;
+ }
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+ protected:
+ size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+ // TODO: these values aren't unique and can be shared amongst instances
+ std::vector<ManagedRegister> callee_save_regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention);
+};
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
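
For reference, the FrameSize() declared above composes the frame from Method*, the return address, the local-reference cookie, the callee saves, the SIRT, and the return-value spill area. A worked sketch of that arithmetic under assumed sizes, not ART code; the reference count and return size are hypothetical:

#include <cstddef>
#include <cstdint>
#include <cstdio>

size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

int main() {
  const size_t kPointerSize = 8, kStackAlignment = 16, kSirtPointerSize = 4;
  size_t callee_saves = 6;          // RBX, RBP, R12..R15, per the constructor
  size_t reference_count = 2;       // hypothetical: jclass + one jobject argument
  size_t size_of_return_value = 8;  // hypothetical: jlong return

  // Method*, return address, cookie, plus the callee-save area.
  size_t frame_data = (3 + callee_saves) * kPointerSize;
  // SIRT: link pointer + uint32_t count + the references themselves.
  size_t sirt = kPointerSize + sizeof(uint32_t) + reference_count * kSirtPointerSize;
  printf("frame size = %zu\n",
         RoundUp(frame_data + sirt + size_of_return_value, kStackAlignment));
  // 72 + 20 + 8 = 100, rounded up to 112.
  return 0;
}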