x86_64: JNI compiler
Passes all tests from jni_compiler_test and art/test on host with the JNI compiler.
Incoming argument spilling is enabled and entry_spills has been refactored: each entry spill
now carries its data type size (4 or 8 bytes) and the frame offset at which it should be stored.
Assembler REX prefix support is implemented for the opcodes used by the JNI compiler.
Please note that the JNI compiler is not enabled by default yet (see compiler_driver.cc:1875).
Change-Id: I5fd19cca72122b197aec07c3708b1e80c324be44
Signed-off-by: Dmitry Petrochenko <dmitry.petrochenko@intel.com>
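
For illustration, a minimal sketch (not part of this change) of how an
assembler's BuildFrame could consume the refactored entry spills. Only the
ManagedRegisterSpill accessors (getSize/getSpillOffset) come from this patch;
the StoreSpill32/StoreSpill64 helpers and the entry_spills variable are
hypothetical:

  // Each spill knows its own width and destination offset, so the assembler
  // no longer has to recompute them.
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    ManagedRegisterSpill spill = entry_spills.at(i);
    int32_t offset = spill.getSpillOffset();  // frame offset chosen by the calling convention
    if (spill.getSize() == 8) {
      StoreSpill64(spill, offset);  // hypothetical helper, e.g. movq/movsd
    } else {  // getSize() == 4
      StoreSpill32(spill, offset);  // hypothetical helper, e.g. movl/movss
    }
  }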
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 4eb9ff5..b17cd52 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -69,6 +69,7 @@
jni/quick/arm64/calling_convention_arm64.cc \
jni/quick/mips/calling_convention_mips.cc \
jni/quick/x86/calling_convention_x86.cc \
+ jni/quick/x86_64/calling_convention_x86_64.cc \
jni/quick/calling_convention.cc \
jni/quick/jni_compiler.cc \
optimizing/builder.cc \
@@ -89,6 +90,8 @@
utils/mips/managed_register_mips.cc \
utils/x86/assembler_x86.cc \
utils/x86/managed_register_x86.cc \
+ utils/x86_64/assembler_x86_64.cc \
+ utils/x86_64/managed_register_x86_64.cc \
utils/scoped_arena_allocator.cc \
buffered_output_stream.cc \
compilers.cc \
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 344f3ef..8e013c1 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -105,6 +105,7 @@
case kArm64:
case kMips:
case kX86:
+ case kX86_64:
return 0;
case kThumb2: {
// +1 to set the low-order bit so a BLX will switch to Thumb mode
@@ -123,6 +124,7 @@
case kArm64:
case kMips:
case kX86:
+ case kX86_64:
return code_pointer;
case kThumb2: {
uintptr_t address = reinterpret_cast<uintptr_t>(code_pointer);
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 78f403c..28b438e 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -85,7 +85,7 @@
return result;
}
-const std::vector<ManagedRegister>& ArmManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& ArmManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on ARM to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index fc2d857..96bbb7e 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -36,10 +36,10 @@
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index c4d0d45..ff899b7 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -85,7 +85,7 @@
return result;
}
-const std::vector<ManagedRegister>& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on ARM64 to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index 2dcf1af..7e33830 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -36,10 +36,10 @@
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 5856df4..043bcea 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -21,6 +21,7 @@
#include "jni/quick/arm64/calling_convention_arm64.h"
#include "jni/quick/mips/calling_convention_mips.h"
#include "jni/quick/x86/calling_convention_x86.h"
+#include "jni/quick/x86_64/calling_convention_x86_64.h"
#include "utils.h"
namespace art {
@@ -44,6 +45,8 @@
return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ case kX86_64:
+ return new x86_64::X86_64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
return NULL;
@@ -61,6 +64,9 @@
itr_longs_and_doubles_++;
itr_slots_++;
}
+ if (IsParamAFloatOrDouble(itr_args_)) {
+ itr_float_and_doubles_++;
+ }
if (IsCurrentParamAReference()) {
itr_refs_++;
}
@@ -85,6 +91,10 @@
return IsParamAReference(itr_args_);
}
+bool ManagedRuntimeCallingConvention::IsCurrentParamAFloatOrDouble() {
+ return IsParamAFloatOrDouble(itr_args_);
+}
+
// JNI calling convention
JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synchronized,
@@ -100,6 +110,8 @@
return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
+ case kX86_64:
+ return new x86_64::X86_64JniCallingConvention(is_static, is_synchronized, shorty);
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
return NULL;
@@ -111,9 +123,8 @@
}
FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
- size_t start_of_sirt = SirtNumRefsOffset().Int32Value() + kPointerSize;
- size_t references_size = kPointerSize * ReferenceCount(); // size excluding header
- return FrameOffset(start_of_sirt + references_size);
+ size_t references_size = kSirtPointerSize * ReferenceCount(); // size excluding header
+ return FrameOffset(SirtReferencesOffset().Int32Value() + references_size);
}
FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
@@ -139,6 +150,9 @@
itr_slots_++;
}
}
+ if (IsCurrentParamAFloatOrDouble()) {
+ itr_float_and_doubles_++;
+ }
if (IsCurrentParamAReference()) {
itr_refs_++;
}
@@ -159,14 +173,25 @@
}
}
+bool JniCallingConvention::IsCurrentParamAFloatOrDouble() {
+ switch (itr_args_) {
+ case kJniEnv:
+ return false; // JNIEnv*
+ case kObjectOrClass:
+ return false; // jobject or jclass
+ default: {
+ int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+ return IsParamAFloatOrDouble(arg_pos);
+ }
+ }
+}
+
// Return position of SIRT entry holding reference at the current iterator
// position
FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
CHECK(IsCurrentParamAReference());
CHECK_LT(SirtLinkOffset(), SirtNumRefsOffset());
- // Address of 1st SIRT entry
- int result = SirtNumRefsOffset().Int32Value() + kPointerSize;
- result += itr_refs_ * kPointerSize;
+ int result = SirtReferencesOffset().Int32Value() + itr_refs_ * kSirtPointerSize;
CHECK_GT(result, SirtNumRefsOffset().Int32Value());
return FrameOffset(result);
}
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index f2b7fd9..fe3d1cd 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -60,23 +60,35 @@
itr_args_ = 0;
itr_refs_ = 0;
itr_longs_and_doubles_ = 0;
+ itr_float_and_doubles_ = 0;
}
virtual ~CallingConvention() {}
protected:
CallingConvention(bool is_static, bool is_synchronized, const char* shorty)
- : displacement_(0), is_static_(is_static), is_synchronized_(is_synchronized),
+ : displacement_(0), kSirtPointerSize(sizeof(StackReference<mirror::Object>)),
+ is_static_(is_static), is_synchronized_(is_synchronized),
shorty_(shorty) {
num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
num_ref_args_ = is_static ? 0 : 1; // The implicit this pointer.
+ num_float_or_double_args_ = 0;
num_long_or_double_args_ = 0;
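+ // Example: for shorty "VIJFD" (void f(int, long, float, double)) this loop
+ // leaves num_long_or_double_args_ == 2 and num_float_or_double_args_ == 2.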
for (size_t i = 1; i < strlen(shorty); i++) {
char ch = shorty_[i];
- if (ch == 'L') {
+ switch (ch) {
+ case 'L':
num_ref_args_++;
- } else if ((ch == 'D') || (ch == 'J')) {
+ break;
+ case 'J':
num_long_or_double_args_++;
+ break;
+ case 'D':
+ num_long_or_double_args_++;
+ num_float_or_double_args_++;
+ break;
+ case 'F':
+ num_float_or_double_args_++;
+ break;
}
}
}
@@ -97,6 +109,16 @@
char ch = shorty_[param];
return (ch == 'J' || ch == 'D');
}
+ bool IsParamAFloatOrDouble(unsigned int param) const {
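+ // Example: for a static method with shorty "DIF" (double f(int, float)),
+ // param 0 reads shorty_[1] == 'I' -> false, param 1 reads shorty_[2] == 'F' -> true.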
+ DCHECK_LT(param, NumArgs());
+ if (IsStatic()) {
+ param++; // 0th argument must skip return value at start of the shorty
+ } else if (param == 0) {
+ return false; // this argument
+ }
+ char ch = shorty_[param];
+ return (ch == 'F' || ch == 'D');
+ }
bool IsParamAReference(unsigned int param) const {
DCHECK_LT(param, NumArgs());
if (IsStatic()) {
@@ -112,6 +134,9 @@
size_t NumLongOrDoubleArgs() const {
return num_long_or_double_args_;
}
+ size_t NumFloatOrDoubleArgs() const {
+ return num_float_or_double_args_;
+ }
size_t NumReferenceArgs() const {
return num_ref_args_;
}
@@ -141,8 +166,11 @@
unsigned int itr_args_;
// Number of longs and doubles seen along argument list
unsigned int itr_longs_and_doubles_;
+ // Number of floats and doubles seen along the argument list
+ unsigned int itr_float_and_doubles_;
// Space for frames below this on the stack
FrameOffset displacement_;
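+ // Size of a single SIRT entry; initialized to sizeof(StackReference<mirror::Object>).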
+ size_t kSirtPointerSize;
private:
const bool is_static_;
@@ -150,6 +178,7 @@
std::string shorty_;
size_t num_args_;
size_t num_ref_args_;
+ size_t num_float_or_double_args_;
size_t num_long_or_double_args_;
};
@@ -174,6 +203,7 @@
bool HasNext();
void Next();
bool IsCurrentParamAReference();
+ bool IsCurrentParamAFloatOrDouble();
bool IsCurrentArgExplicit(); // ie a non-implict argument such as this
bool IsCurrentArgPossiblyNull();
size_t CurrentParamSize();
@@ -185,7 +215,7 @@
virtual ~ManagedRuntimeCallingConvention() {}
// Registers to spill to caller's out registers on entry.
- virtual const std::vector<ManagedRegister>& EntrySpills() = 0;
+ virtual const ManagedRegisterEntrySpills& EntrySpills() = 0;
protected:
ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
@@ -241,6 +271,7 @@
bool HasNext();
virtual void Next();
bool IsCurrentParamAReference();
+ bool IsCurrentParamAFloatOrDouble();
size_t CurrentParamSize();
virtual bool IsCurrentParamInRegister() = 0;
virtual bool IsCurrentParamOnStack() = 0;
@@ -255,13 +286,21 @@
return FrameOffset(displacement_.Int32Value() +
kPointerSize); // above Method*
}
+
+ FrameOffset SirtLinkOffset() const {
+ return FrameOffset(SirtOffset().Int32Value() +
+ StackIndirectReferenceTable::LinkOffset());
+ }
+
FrameOffset SirtNumRefsOffset() const {
return FrameOffset(SirtOffset().Int32Value() +
StackIndirectReferenceTable::NumberOfReferencesOffset());
}
- FrameOffset SirtLinkOffset() const {
- return FrameOffset(SirtOffset().Int32Value() +
- StackIndirectReferenceTable::LinkOffset());
+
+ FrameOffset SirtReferencesOffset() const {
+ // The StackIndirectReferenceTable::number_of_references_ type is uint32_t
+ return FrameOffset(SirtNumRefsOffset().Int32Value() +
+ sizeof(uint32_t));
}
virtual ~JniCallingConvention() {}
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 1c9aed8..c89bc40 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -271,7 +271,7 @@
mr_conv->InterproceduralScratchRegister());
// 10. Fix differences in result widths.
- if (instruction_set == kX86) {
+ if (instruction_set == kX86 || instruction_set == kX86_64) {
if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
__ SignExtend(main_jni_conv->ReturnRegister(),
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 0a48500..ea39d60 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -85,7 +85,7 @@
return result;
}
-const std::vector<ManagedRegister>& MipsManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& MipsManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on MIPS to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
index 445f453..1a9053a 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ b/compiler/jni/quick/mips/calling_convention_mips.h
@@ -35,10 +35,10 @@
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 8b5c86d..8d22fe6 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -90,7 +90,7 @@
(itr_slots_ * kPointerSize)); // offset into in args
}
-const std::vector<ManagedRegister>& X86ManagedRuntimeCallingConvention::EntrySpills() {
+const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on X86 to free them up for scratch use, we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index e814c7e..2dab059 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -37,9 +37,9 @@
bool IsCurrentParamOnStack() OVERRIDE;
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
- const std::vector<ManagedRegister>& EntrySpills() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
private:
- std::vector<ManagedRegister> entry_spills_;
+ ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention);
};
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
new file mode 100644
index 0000000..8ebea46
--- /dev/null
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_x86_64.h"
+
+#include "base/logging.h"
+#include "utils/x86_64/managed_register_x86_64.h"
+#include "utils.h"
+
+namespace art {
+namespace x86_64 {
+
+// Calling convention
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
+ return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+ if (shorty[0] == 'F' || shorty[0] == 'D') {
+ return X86_64ManagedRegister::FromXmmRegister(_XMM0);
+ } else if (shorty[0] == 'J') {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+ } else if (shorty[0] == 'V') {
+ return ManagedRegister::NoRegister();
+ } else {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+ }
+}
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty(), false);
+}
+
+ManagedRegister X86_64JniCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty(), true);
+}
+
+ManagedRegister X86_64JniCallingConvention::IntReturnRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RAX);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::MethodRegister() {
+ return X86_64ManagedRegister::FromCpuRegister(RDI);
+}
+
+bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+ return !IsCurrentParamOnStack();
+}
+
+bool X86_64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+ // We assume all parameters are on stack, args coming via registers are spilled as entry_spills
+ return true;
+}
+
+ManagedRegister X86_64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+ ManagedRegister res = ManagedRegister::NoRegister();
+ if (!IsCurrentParamAFloatOrDouble()) {
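+ // Integer arguments start at RSI because RDI already carries Method* (see MethodRegister()).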
+ switch (itr_args_ - itr_float_and_doubles_) {
+ case 0: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
+ case 1: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
+ case 2: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
+ case 3: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
+ case 4: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
+ }
+ } else if (itr_float_and_doubles_ < 8) {
+ // First eight float parameters are passed via XMM0..XMM7
+ res = X86_64ManagedRegister::FromXmmRegister(
+ static_cast<XmmRegister>(_XMM0 + itr_float_and_doubles_));
+ }
+ return res;
+}
+
+FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+ return FrameOffset(displacement_.Int32Value() + // displacement
+ kPointerSize + // Method*
+ (itr_slots_ * sizeof(uint32_t))); // offset into in args
+}
+
+const ManagedRegisterEntrySpills& X86_64ManagedRuntimeCallingConvention::EntrySpills() {
+ // We spill the argument registers on X86_64 to free them up for scratch use, we then assume
+ // all arguments are on the stack.
+ if (entry_spills_.size() == 0) {
+ ResetIterator(FrameOffset(0));
+ while (HasNext()) {
+ ManagedRegister in_reg = CurrentParamRegister();
+ if (!in_reg.IsNoRegister()) {
+ int32_t size = IsParamALongOrDouble(itr_args_) ? 8 : 4;
+ int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
+ ManagedRegisterSpill spill(in_reg, size, spill_offset);
+ entry_spills_.push_back(spill);
+ }
+ Next();
+ }
+ }
+ return entry_spills_;
+}
+
+// JNI calling convention
+
+X86_64JniCallingConvention::X86_64JniCallingConvention(bool is_static, bool is_synchronized,
+ const char* shorty)
+ : JniCallingConvention(is_static, is_synchronized, shorty) {
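+ // RBX, RBP and R12-R15 are the callee-saved GPRs of the x86-64 SysV ABI (RSP is managed explicitly).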
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(RBX));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(RBP));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R12));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R13));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R14));
+ callee_save_regs_.push_back(X86_64ManagedRegister::FromCpuRegister(R15));
+}
+
+uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
+ return 1 << RBX | 1 << RBP | 1 << R12 | 1 << R13 | 1 << R14 | 1 << R15 |
+ 1 << kNumberOfCpuRegisters;
+}
+
+size_t X86_64JniCallingConvention::FrameSize() {
+ // Method*, return address and callee save area size, local reference segment state
+ size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+ // References plus link_ (pointer) and number_of_references_ (uint32_t) for SIRT header
+ size_t sirt_size = kPointerSize + sizeof(uint32_t) + ReferenceCount()*kSirtPointerSize;
+ // Plus return value spill area size
+ return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t X86_64JniCallingConvention::OutArgSize() {
+ return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, kStackAlignment);
+}
+
+bool X86_64JniCallingConvention::IsCurrentParamInRegister() {
+ return !IsCurrentParamOnStack();
+}
+
+bool X86_64JniCallingConvention::IsCurrentParamOnStack() {
+ return CurrentParamRegister().IsNoRegister();
+}
+
+ManagedRegister X86_64JniCallingConvention::CurrentParamRegister() {
+ ManagedRegister res = ManagedRegister::NoRegister();
+ if (!IsCurrentParamAFloatOrDouble()) {
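+ // SysV AMD64 integer argument registers, in order: RDI, RSI, RDX, RCX, R8, R9.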
+ switch (itr_args_ - itr_float_and_doubles_) {
+ case 0: res = X86_64ManagedRegister::FromCpuRegister(RDI); break;
+ case 1: res = X86_64ManagedRegister::FromCpuRegister(RSI); break;
+ case 2: res = X86_64ManagedRegister::FromCpuRegister(RDX); break;
+ case 3: res = X86_64ManagedRegister::FromCpuRegister(RCX); break;
+ case 4: res = X86_64ManagedRegister::FromCpuRegister(R8); break;
+ case 5: res = X86_64ManagedRegister::FromCpuRegister(R9); break;
+ }
+ } else if (itr_float_and_doubles_ < 8) {
+ // First eight float parameters are passed via XMM0..XMM7
+ res = X86_64ManagedRegister::FromXmmRegister(
+ static_cast<XmmRegister>(_XMM0 + itr_float_and_doubles_));
+ }
+ return res;
+}
+
+FrameOffset X86_64JniCallingConvention::CurrentParamStackOffset() {
+ size_t offset = itr_args_
+ - std::min(8U, itr_float_and_doubles_) // Float arguments passed through Xmm0..Xmm7
+ - std::min(6U, itr_args_ - itr_float_and_doubles_); // Integer arguments passed through GPR
+ return FrameOffset(displacement_.Int32Value() - OutArgSize() + (offset * kPointerSize));
+}
+
+size_t X86_64JniCallingConvention::NumberOfOutgoingStackArgs() {
+ size_t static_args = IsStatic() ? 1 : 0; // count jclass
+ // regular argument parameters and this
+ size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+ // count JNIEnv* and return pc (pushed after Method*)
+ size_t total_args = static_args + param_args + 2;
+
+ // Float arguments passed through Xmm0..Xmm7
+ // Other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9)
+ size_t total_stack_args = total_args
+ - std::min(8U, static_cast<unsigned int>(NumFloatOrDoubleArgs()))
+ - std::min(6U, static_cast<unsigned int>(NumArgs() - NumFloatOrDoubleArgs()));
+
+ return total_stack_args;
+}
+
+} // namespace x86_64
+} // namespace art
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
new file mode 100644
index 0000000..d7f7762
--- /dev/null
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+#define ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace x86_64 {
+
+class X86_64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+ explicit X86_64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized,
+ const char* shorty)
+ : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+ ~X86_64ManagedRuntimeCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // Managed runtime calling convention
+ ManagedRegister MethodRegister() OVERRIDE;
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+ private:
+ ManagedRegisterEntrySpills entry_spills_;
+ DISALLOW_COPY_AND_ASSIGN(X86_64ManagedRuntimeCallingConvention);
+};
+
+class X86_64JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+ explicit X86_64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+ ~X86_64JniCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister IntReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // JNI calling convention
+ size_t FrameSize() OVERRIDE;
+ size_t OutArgSize() OVERRIDE;
+ const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+ return callee_save_regs_;
+ }
+ ManagedRegister ReturnScratchRegister() const OVERRIDE;
+ uint32_t CoreSpillMask() const OVERRIDE;
+ uint32_t FpSpillMask() const OVERRIDE {
+ return 0;
+ }
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+ protected:
+ size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+ // TODO: these values aren't unique and can be shared amongst instances
+ std::vector<ManagedRegister> callee_save_regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention);
+};
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_COMPILER_JNI_QUICK_X86_64_CALLING_CONVENTION_X86_64_H_
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index dbd078a..872a557 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1438,7 +1438,7 @@
void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
- const std::vector<ManagedRegister>& entry_spills) {
+ const ManagedRegisterEntrySpills& entry_spills) {
CHECK_ALIGNED(frame_size, kStackAlignment);
CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index 757a8a2..bb9207c 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -440,7 +440,7 @@
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
- const std::vector<ManagedRegister>& entry_spills);
+ const ManagedRegisterEntrySpills& entry_spills);
// Emit code that will remove an activation from the stack
virtual void RemoveFrame(size_t frame_size,
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 00ce923..f8b91d7 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -577,7 +577,7 @@
void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
- const std::vector<ManagedRegister>& entry_spills) {
+ const ManagedRegisterEntrySpills& entry_spills) {
CHECK_ALIGNED(frame_size, kStackAlignment);
CHECK(X0 == method_reg.AsArm64().AsCoreRegister());
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 1c47e77..44eb6ff 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -111,7 +111,7 @@
// Emit code that will create an activation on the stack.
void BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
- const std::vector<ManagedRegister>& entry_spills);
+ const ManagedRegisterEntrySpills& entry_spills);
// Emit code that will remove an activation from the stack.
void RemoveFrame(size_t frame_size,
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index a7cb278..1921b28 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -23,6 +23,7 @@
#include "arm64/assembler_arm64.h"
#include "mips/assembler_mips.h"
#include "x86/assembler_x86.h"
+#include "x86_64/assembler_x86_64.h"
#include "globals.h"
#include "memory_region.h"
@@ -111,9 +112,10 @@
return new arm64::Arm64Assembler();
case kMips:
return new mips::MipsAssembler();
- case kX86: // Fall-through.
- case kX86_64:
+ case kX86:
return new x86::X86Assembler();
+ case kX86_64:
+ return new x86_64::X86_64Assembler();
default:
LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
return NULL;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index cd4fc12..72ebdd3 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -24,6 +24,7 @@
#include "arm/constants_arm.h"
#include "mips/constants_mips.h"
#include "x86/constants_x86.h"
+#include "x86_64/constants_x86_64.h"
#include "instruction_set.h"
#include "managed_register.h"
#include "memory_region.h"
@@ -47,6 +48,9 @@
namespace x86 {
class X86Assembler;
}
+namespace x86_64 {
+ class X86_64Assembler;
+}
class Label {
public:
@@ -95,6 +99,7 @@
friend class arm::ArmAssembler;
friend class mips::MipsAssembler;
friend class x86::X86Assembler;
+ friend class x86_64::X86_64Assembler;
DISALLOW_COPY_AND_ASSIGN(Label);
};
@@ -335,7 +340,7 @@
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
- const std::vector<ManagedRegister>& entry_spills) = 0;
+ const ManagedRegisterEntrySpills& entry_spills) = 0;
// Emit code that will remove an activation from the stack
virtual void RemoveFrame(size_t frame_size,
diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h
index 04c9723..f007d28 100644
--- a/compiler/utils/managed_register.h
+++ b/compiler/utils/managed_register.h
@@ -17,6 +17,8 @@
#ifndef ART_COMPILER_UTILS_MANAGED_REGISTER_H_
#define ART_COMPILER_UTILS_MANAGED_REGISTER_H_
+#include <vector>
+
namespace art {
namespace arm {
@@ -28,10 +30,15 @@
namespace mips {
class MipsManagedRegister;
}
+
namespace x86 {
class X86ManagedRegister;
}
+namespace x86_64 {
+class X86_64ManagedRegister;
+}
+
class ManagedRegister {
public:
// ManagedRegister is a value class. There exists no method to change the
@@ -48,6 +55,7 @@
arm64::Arm64ManagedRegister AsArm64() const;
mips::MipsManagedRegister AsMips() const;
x86::X86ManagedRegister AsX86() const;
+ x86_64::X86_64ManagedRegister AsX86_64() const;
// It is valid to invoke Equals on and with a NoRegister.
bool Equals(const ManagedRegister& other) const {
@@ -71,6 +79,44 @@
int id_;
};
+class ManagedRegisterSpill : public ManagedRegister {
+ public:
+ // ManagedRegisterSpill contains the data type size and the location in the caller frame;
+ // these additional attributes are supplied by the calling convention (see EntrySpills()).
+ ManagedRegisterSpill(const ManagedRegister& other, uint32_t size, uint32_t spill_offset)
+ : ManagedRegister(other), size_(size), spill_offset_(spill_offset) { }
+
+ explicit ManagedRegisterSpill(const ManagedRegister& other)
+ : ManagedRegister(other), size_(-1), spill_offset_(-1) { }
+
+ int32_t getSpillOffset() {
+ return spill_offset_;
+ }
+
+ int32_t getSize() {
+ return size_;
+ }
+
+ private:
+ int32_t size_;
+ int32_t spill_offset_;
+};
+
+class ManagedRegisterEntrySpills : public std::vector<ManagedRegisterSpill> {
+ public:
+ // A plain ManagedRegister carries no size or offset information;
+ // in that case its size and offset are determined by BuildFrame (assembler).
+ void push_back(ManagedRegister x) {
+ ManagedRegisterSpill spill(x);
+ std::vector<ManagedRegisterSpill>::push_back(spill);
+ }
+
+ void push_back(ManagedRegisterSpill x) {
+ std::vector<ManagedRegisterSpill>::push_back(x);
+ }
+};
+
} // namespace art
#endif // ART_COMPILER_UTILS_MANAGED_REGISTER_H_
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index ce21b84..dfd3306 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -538,7 +538,7 @@
void MipsAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
- const std::vector<ManagedRegister>& entry_spills) {
+ const ManagedRegisterEntrySpills& entry_spills) {
CHECK_ALIGNED(frame_size, kStackAlignment);
// Increase frame to required size.
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 0f5f2fe..0d1a94c 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -357,7 +357,7 @@
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
- const std::vector<ManagedRegister>& entry_spills);
+ const ManagedRegisterEntrySpills& entry_spills);
// Emit code that will remove an activation from the stack
virtual void RemoveFrame(size_t frame_size,
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index db8956d..d242c17 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1388,7 +1388,7 @@
void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& spill_regs,
- const std::vector<ManagedRegister>& entry_spills) {
+ const ManagedRegisterEntrySpills& entry_spills) {
CHECK_ALIGNED(frame_size, kStackAlignment);
for (int i = spill_regs.size() - 1; i >= 0; --i) {
pushl(spill_regs.at(i).AsX86().AsCpuRegister());
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index e284d8c..879f4ec 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -466,7 +466,7 @@
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
- const std::vector<ManagedRegister>& entry_spills);
+ const ManagedRegisterEntrySpills& entry_spills);
// Emit code that will remove an activation from the stack
virtual void RemoveFrame(size_t frame_size,
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
new file mode 100644
index 0000000..fa302c9
--- /dev/null
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -0,0 +1,1951 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_x86_64.h"
+
+#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "memory_region.h"
+#include "thread.h"
+
+namespace art {
+namespace x86_64 {
+
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg) {
+ return os << "XMM" << static_cast<int>(reg);
+}
+
+std::ostream& operator<<(std::ostream& os, const X87Register& reg) {
+ return os << "ST" << static_cast<int>(reg);
+}
+
+void X86_64Assembler::call(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitRegisterOperand(2, reg);
+}
+
+
+void X86_64Assembler::call(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitOperand(2, address);
+}
+
+
+void X86_64Assembler::call(Label* label) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xE8);
+ static const int kSize = 5;
+ EmitLabel(label, kSize);
+}
+
+
+void X86_64Assembler::pushq(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ rex_rm(reg);
+ EmitUint8(0x50 + reg);
+}
+
+
+void X86_64Assembler::pushq(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitOperand(6, address);
+}
+
+
+void X86_64Assembler::pushq(const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ if (imm.is_int8()) {
+ EmitUint8(0x6A);
+ EmitUint8(imm.value() & 0xFF);
+ } else {
+ EmitUint8(0x68);
+ EmitImmediate(imm);
+ }
+}
+
+
+void X86_64Assembler::popq(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ rex_rm(reg);
+ EmitUint8(0x58 + reg);
+}
+
+
+void X86_64Assembler::popq(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x8F);
+ EmitOperand(0, address);
+}
+
+
+void X86_64Assembler::movq(Register dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x48); // REX.W
+ EmitUint8(0xB8 + dst);
+ EmitImmediate(imm);
+}
+
+
+void X86_64Assembler::movl(Register dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xB8 + dst);
+ EmitImmediate(imm);
+}
+
+
+void X86_64Assembler::movq(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x48); // REX.W
+ EmitUint8(0x89);
+ EmitRegisterOperand(src, dst);
+}
+
+
+void X86_64Assembler::movl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x89);
+ EmitRegisterOperand(src, dst);
+}
+
+
+void X86_64Assembler::movq(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ rex_reg(dst, 8);
+ EmitUint8(0x8B);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::movl(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ rex_reg(dst, 4);
+ EmitUint8(0x8B);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::movq(const Address& dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ rex_reg(src, 8);
+ EmitUint8(0x89);
+ EmitOperand(src, dst);
+}
+
+
+void X86_64Assembler::movl(const Address& dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ rex_reg(src, 4);
+ EmitUint8(0x89);
+ EmitOperand(src, dst);
+}
+
+
+void X86_64Assembler::movl(const Address& dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC7);
+ EmitOperand(0, dst);
+ EmitImmediate(imm);
+}
+
+void X86_64Assembler::movl(const Address& dst, Label* lbl) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC7);
+ EmitOperand(0, dst);
+ EmitLabel(lbl, dst.length_ + 5);
+}
+
+void X86_64Assembler::movzxb(Register dst, ByteRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xB6);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::movzxb(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xB6);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::movsxb(Register dst, ByteRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xBE);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::movsxb(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xBE);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::movb(Register /*dst*/, const Address& /*src*/) {
+ LOG(FATAL) << "Use movzxb or movsxb instead.";
+}
+
+
+void X86_64Assembler::movb(const Address& dst, ByteRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x88);
+ EmitOperand(src, dst);
+}
+
+
+void X86_64Assembler::movb(const Address& dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC6);
+ EmitOperand(RAX, dst);
+ CHECK(imm.is_int8());
+ EmitUint8(imm.value() & 0xFF);
+}
+
+
+void X86_64Assembler::movzxw(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xB7);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::movzxw(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xB7);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::movsxw(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xBF);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::movsxw(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xBF);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::movw(Register /*dst*/, const Address& /*src*/) {
+ LOG(FATAL) << "Use movzxw or movsxw instead.";
+}
+
+
+void X86_64Assembler::movw(const Address& dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitOperandSizeOverride();
+ EmitUint8(0x89);
+ EmitOperand(src, dst);
+}
+
+
+void X86_64Assembler::leaq(Register dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ rex_reg(dst, 8);
+ EmitUint8(0x8D);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::cmovl(Condition condition, Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x40 + condition);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::setb(Condition condition, Register dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x90 + condition);
+ EmitOperand(0, Operand(dst));
+}
+
+
+void X86_64Assembler::movss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x10);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::movss(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitOperand(src, dst);
+}
+
+
+void X86_64Assembler::movss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitXmmRegisterOperand(src, dst);
+}
+
+
+void X86_64Assembler::movd(XmmRegister dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x6E);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86_64Assembler::movd(Register dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x7E);
+ EmitOperand(src, Operand(dst));
+}
+
+
+void X86_64Assembler::addss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::addss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::subss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::subss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::mulss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::mulss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::divss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::divss(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::flds(const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitOperand(0, src);
+}
+
+
+void X86_64Assembler::fstps(const Address& dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitOperand(3, dst);
+}
+
+
+void X86_64Assembler::movsd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x10);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::movsd(const Address& dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitOperand(src, dst);
+}
+
+
+void X86_64Assembler::movsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x11);
+ EmitXmmRegisterOperand(src, dst);
+}
+
+
+void X86_64Assembler::addsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::addsd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x58);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::subsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::subsd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x5C);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::mulsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::mulsd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x59);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::divsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::divsd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x5E);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::cvtsi2ss(XmmRegister dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x2A);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86_64Assembler::cvtsi2sd(XmmRegister dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x2A);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86_64Assembler::cvtss2si(Register dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x2D);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::cvtss2sd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x5A);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::cvtsd2si(Register dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x2D);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::cvttss2si(Register dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x2C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::cvttsd2si(Register dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x2C);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x5A);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0xE6);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::comiss(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitXmmRegisterOperand(a, b);
+}
+
+
+void X86_64Assembler::comisd(XmmRegister a, XmmRegister b) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x2F);
+ EmitXmmRegisterOperand(a, b);
+}
+
+
+void X86_64Assembler::sqrtsd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF2);
+ EmitUint8(0x0F);
+ EmitUint8(0x51);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::sqrtss(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF3);
+ EmitUint8(0x0F);
+ EmitUint8(0x51);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::xorpd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x57);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::xorpd(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x57);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::xorps(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x57);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::xorps(XmmRegister dst, XmmRegister src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0x57);
+ EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::andpd(XmmRegister dst, const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x66);
+ EmitUint8(0x0F);
+ EmitUint8(0x54);
+ EmitOperand(dst, src);
+}
+
+
+void X86_64Assembler::fldl(const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDD);
+ EmitOperand(0, src);
+}
+
+
+void X86_64Assembler::fstpl(const Address& dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDD);
+ EmitOperand(3, dst);
+}
+
+
+void X86_64Assembler::fnstcw(const Address& dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitOperand(7, dst);
+}
+
+
+void X86_64Assembler::fldcw(const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitOperand(5, src);
+}
+
+
+void X86_64Assembler::fistpl(const Address& dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDF);
+ EmitOperand(7, dst);
+}
+
+
+void X86_64Assembler::fistps(const Address& dst) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDB);
+ EmitOperand(3, dst);
+}
+
+
+void X86_64Assembler::fildl(const Address& src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDF);
+ EmitOperand(5, src);
+}
+
+
+void X86_64Assembler::fincstp() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitUint8(0xF7);
+}
+
+
+void X86_64Assembler::ffree(const Immediate& index) {
+ CHECK_LT(index.value(), 7);
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xDD);
+ EmitUint8(0xC0 + index.value());
+}
+
+
+void X86_64Assembler::fsin() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitUint8(0xFE);
+}
+
+
+void X86_64Assembler::fcos() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitUint8(0xFF);
+}
+
+
+void X86_64Assembler::fptan() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xD9);
+ EmitUint8(0xF2);
+}
+
+
+void X86_64Assembler::xchgl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x87);
+ EmitRegisterOperand(dst, src);
+}
+
+void X86_64Assembler::xchgl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x87);
+ EmitOperand(reg, address);
+}
+
+
+void X86_64Assembler::cmpl(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(7, Operand(reg), imm);
+}
+
+
+void X86_64Assembler::cmpl(Register reg0, Register reg1) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x3B);
+ EmitOperand(reg0, Operand(reg1));
+}
+
+
+void X86_64Assembler::cmpl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x3B);
+ EmitOperand(reg, address);
+}
+
+
+void X86_64Assembler::addl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x03);
+ EmitRegisterOperand(dst, src);
+}
+
+
+void X86_64Assembler::addl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x03);
+ EmitOperand(reg, address);
+}
+
+
+void X86_64Assembler::cmpl(const Address& address, Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x39);
+ EmitOperand(reg, address);
+}
+
+
+void X86_64Assembler::cmpl(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(7, address, imm);
+}
+
+
+void X86_64Assembler::testl(Register reg1, Register reg2) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ rex(reg1, reg2, 4);
+ EmitUint8(0x85);
+ EmitRegisterOperand(reg1, reg2);
+}
+
+
+void X86_64Assembler::testl(Register reg, const Immediate& immediate) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ // For registers that have a byte variant (RAX, RBX, RCX, and RDX)
+ // we only test the byte register to keep the encoding short.
+ if (immediate.is_uint8() && reg < 4) {
+ // Use zero-extended 8-bit immediate.
+ if (reg == RAX) {
+ EmitUint8(0xA8);
+ } else {
+ EmitUint8(0xF6);
+ EmitUint8(0xC0 + reg);
+ }
+ EmitUint8(immediate.value() & 0xFF);
+ } else if (reg == RAX) {
+ // Use short form if the destination is RAX.
+ EmitUint8(0xA9);
+ EmitImmediate(immediate);
+ } else {
+ EmitUint8(0xF7);
+ EmitOperand(0, Operand(reg));
+ EmitImmediate(immediate);
+ }
+}
+
+
+void X86_64Assembler::andl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x23);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86_64Assembler::andl(Register dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(4, Operand(dst), imm);
+}
+
+
+void X86_64Assembler::orl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0B);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86_64Assembler::orl(Register dst, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(1, Operand(dst), imm);
+}
+
+
+void X86_64Assembler::xorl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ rex(dst, src, 4);
+ EmitUint8(0x33);
+ EmitOperand(dst, Operand(src));
+}
+
+void X86_64Assembler::rex_reg(Register &dst, size_t size) {
+ Register src = kNoRegister;
+ rex(dst, src, size);
+}
+
+void X86_64Assembler::rex_rm(Register &src, size_t size) {
+ Register dst = kNoRegister;
+ rex(dst, src, size);
+}
+
+void X86_64Assembler::rex(Register &dst, Register &src, size_t size) {
+ uint8_t rex = 0;
+ // REX.WRXB
+ // W - 64-bit operand
+ // R - MODRM.reg
+ // X - SIB.index
+ // B - MODRM.rm/SIB.base
+ if (size == 8) {
+ rex |= 0x48; // REX.W000
+ }
+ if (dst >= Register::R8 && dst < Register::kNumberOfCpuRegisters) {
+ rex |= 0x44; // REX.0R00
+ dst = static_cast<Register>(dst - 8);
+ }
+ if (src >= Register::R8 && src < Register::kNumberOfCpuRegisters) {
+ rex |= 0x41; // REX.000B
+ src = static_cast<Register>(src - 8);
+ }
+ if (rex != 0) {
+ EmitUint8(rex);
+ }
+}
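+
+// Example of the prefix composition above (illustrative): xorl(R9, R8)
+// calls rex(R9, R8, 4); size 4 leaves REX.W clear, R9 sets REX.R and R8
+// sets REX.B, so the prefix 0x45 is emitted and both registers are
+// remapped to their low-eight encodings. The unchanged 32-bit ModRM logic
+// then produces 45 33 C8, i.e. xor r9d, r8d.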
+
+void X86_64Assembler::addl(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(0, Operand(reg), imm);
+}
+
+
+void X86_64Assembler::addq(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x48); // REX.W
+ EmitComplex(0, Operand(reg), imm);
+}
+
+
+void X86_64Assembler::addl(const Address& address, Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x01);
+ EmitOperand(reg, address);
+}
+
+
+void X86_64Assembler::addl(const Address& address, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(0, address, imm);
+}
+
+
+void X86_64Assembler::adcl(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(2, Operand(reg), imm);
+}
+
+
+void X86_64Assembler::adcl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x13);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86_64Assembler::adcl(Register dst, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x13);
+ EmitOperand(dst, address);
+}
+
+
+void X86_64Assembler::subl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x2B);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86_64Assembler::subl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  // Note: the REX.W prefix below makes this a 64-bit subtract despite the
+  // "l" suffix; callers such as Move() rely on that for RSP adjustments.
+  // TODO(64): split into separate subl/subq forms.
+  EmitUint8(0x48);  // REX.W
+  EmitComplex(5, Operand(reg), imm);
+}
+
+
+void X86_64Assembler::subl(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x2B);
+ EmitOperand(reg, address);
+}
+
+
+void X86_64Assembler::cdq() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x99);
+}
+
+
+void X86_64Assembler::idivl(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitUint8(0xF8 | reg);
+}
+
+
+void X86_64Assembler::imull(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xAF);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86_64Assembler::imull(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x69);
+ EmitOperand(reg, Operand(reg));
+ EmitImmediate(imm);
+}
+
+
+void X86_64Assembler::imull(Register reg, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xAF);
+ EmitOperand(reg, address);
+}
+
+
+void X86_64Assembler::imull(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(5, Operand(reg));
+}
+
+
+void X86_64Assembler::imull(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(5, address);
+}
+
+
+void X86_64Assembler::mull(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(4, Operand(reg));
+}
+
+
+void X86_64Assembler::mull(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(4, address);
+}
+
+
+void X86_64Assembler::sbbl(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x1B);
+ EmitOperand(dst, Operand(src));
+}
+
+
+void X86_64Assembler::sbbl(Register reg, const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitComplex(3, Operand(reg), imm);
+}
+
+
+void X86_64Assembler::sbbl(Register dst, const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x1B);
+ EmitOperand(dst, address);
+}
+
+
+void X86_64Assembler::incl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  // 0x40 + reg is a REX prefix in 64-bit mode, not inc; use the FF /0 form.
+  EmitUint8(0xFF);
+  EmitOperand(0, Operand(reg));
+}
+
+
+void X86_64Assembler::incl(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitOperand(0, address);
+}
+
+
+void X86_64Assembler::decl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  // 0x48 + reg is a REX prefix in 64-bit mode, not dec; use the FF /1 form.
+  EmitUint8(0xFF);
+  EmitOperand(1, Operand(reg));
+}
+
+
+void X86_64Assembler::decl(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitOperand(1, address);
+}
+
+
+void X86_64Assembler::shll(Register reg, const Immediate& imm) {
+ EmitGenericShift(4, reg, imm);
+}
+
+
+void X86_64Assembler::shll(Register operand, Register shifter) {
+ EmitGenericShift(4, operand, shifter);
+}
+
+
+void X86_64Assembler::shrl(Register reg, const Immediate& imm) {
+ EmitGenericShift(5, reg, imm);
+}
+
+
+void X86_64Assembler::shrl(Register operand, Register shifter) {
+ EmitGenericShift(5, operand, shifter);
+}
+
+
+void X86_64Assembler::sarl(Register reg, const Immediate& imm) {
+ EmitGenericShift(7, reg, imm);
+}
+
+
+void X86_64Assembler::sarl(Register operand, Register shifter) {
+ EmitGenericShift(7, operand, shifter);
+}
+
+
+void X86_64Assembler::shld(Register dst, Register src) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xA5);
+ EmitRegisterOperand(src, dst);
+}
+
+
+void X86_64Assembler::negl(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitOperand(3, Operand(reg));
+}
+
+
+void X86_64Assembler::notl(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF7);
+ EmitUint8(0xD0 | reg);
+}
+
+
+void X86_64Assembler::enter(const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC8);
+ CHECK(imm.is_uint16());
+ EmitUint8(imm.value() & 0xFF);
+ EmitUint8((imm.value() >> 8) & 0xFF);
+ EmitUint8(0x00);
+}
+
+
+void X86_64Assembler::leave() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC9);
+}
+
+
+void X86_64Assembler::ret() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC3);
+}
+
+
+void X86_64Assembler::ret(const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xC2);
+ CHECK(imm.is_uint16());
+ EmitUint8(imm.value() & 0xFF);
+ EmitUint8((imm.value() >> 8) & 0xFF);
+}
+
+
+void X86_64Assembler::nop() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x90);
+}
+
+
+void X86_64Assembler::int3() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xCC);
+}
+
+
+void X86_64Assembler::hlt() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF4);
+}
+
+
+void X86_64Assembler::j(Condition condition, Label* label) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ if (label->IsBound()) {
+ static const int kShortSize = 2;
+ static const int kLongSize = 6;
+ int offset = label->Position() - buffer_.Size();
+ CHECK_LE(offset, 0);
+ if (IsInt(8, offset - kShortSize)) {
+ EmitUint8(0x70 + condition);
+ EmitUint8((offset - kShortSize) & 0xFF);
+ } else {
+ EmitUint8(0x0F);
+ EmitUint8(0x80 + condition);
+ EmitInt32(offset - kLongSize);
+ }
+ } else {
+ EmitUint8(0x0F);
+ EmitUint8(0x80 + condition);
+ EmitLabelLink(label);
+ }
+}
+
+
+void X86_64Assembler::jmp(Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitRegisterOperand(4, reg);
+}
+
+void X86_64Assembler::jmp(const Address& address) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xFF);
+ EmitOperand(4, address);
+}
+
+void X86_64Assembler::jmp(Label* label) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ if (label->IsBound()) {
+ static const int kShortSize = 2;
+ static const int kLongSize = 5;
+ int offset = label->Position() - buffer_.Size();
+ CHECK_LE(offset, 0);
+ if (IsInt(8, offset - kShortSize)) {
+ EmitUint8(0xEB);
+ EmitUint8((offset - kShortSize) & 0xFF);
+ } else {
+ EmitUint8(0xE9);
+ EmitInt32(offset - kLongSize);
+ }
+ } else {
+ EmitUint8(0xE9);
+ EmitLabelLink(label);
+ }
+}
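+
+// Branch displacements are relative to the end of the branch instruction,
+// so the bound-label paths above subtract the instruction's own size from
+// the (non-positive) label offset: 2 bytes for the short forms, 6 for
+// jcc rel32, 5 for jmp rel32. Forward branches are always emitted in the
+// rel32 form and linked into the label's chain for patching in Bind().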
+
+
+X86_64Assembler* X86_64Assembler::lock() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0xF0);
+ return this;
+}
+
+
+void X86_64Assembler::cmpxchgl(const Address& address, Register reg) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xB1);
+ EmitOperand(reg, address);
+}
+
+void X86_64Assembler::mfence() {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x0F);
+ EmitUint8(0xAE);
+ EmitUint8(0xF0);
+}
+
+X86_64Assembler* X86_64Assembler::gs() {
+  // TODO: gs is a segment-override prefix, not an instruction; modelling
+  // it as one here is a convenience.
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ EmitUint8(0x65);
+ return this;
+}
+
+void X86_64Assembler::AddImmediate(Register reg, const Immediate& imm) {
+ int value = imm.value();
+ if (value > 0) {
+ if (value == 1) {
+ incl(reg);
+ } else if (value != 0) {
+ addl(reg, imm);
+ }
+ } else if (value < 0) {
+ value = -value;
+ if (value == 1) {
+ decl(reg);
+ } else if (value != 0) {
+ subl(reg, Immediate(value));
+ }
+ }
+}
+
+
+void X86_64Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
+  // TODO: Need to have a code constants table.
+  int64_t constant = bit_cast<int64_t, double>(value);
+  // In 64-bit mode each pushq of a 32-bit immediate occupies eight bytes
+  // of stack, so the two halves would not land adjacently; write them into
+  // a single 8-byte slot instead.
+  addq(RSP, Immediate(-8));
+  movl(Address(RSP, 0), Immediate(Low32Bits(constant)));
+  movl(Address(RSP, 4), Immediate(High32Bits(constant)));
+  movsd(dst, Address(RSP, 0));
+  addq(RSP, Immediate(8));
+}
+
+
+void X86_64Assembler::FloatNegate(XmmRegister f) {
+ static const struct {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+ } float_negate_constant __attribute__((aligned(16))) =
+ { 0x80000000, 0x00000000, 0x80000000, 0x00000000 };
+ xorps(f, Address::Absolute(reinterpret_cast<uword>(&float_negate_constant)));
+}
+
+
+void X86_64Assembler::DoubleNegate(XmmRegister d) {
+ static const struct {
+ uint64_t a;
+ uint64_t b;
+ } double_negate_constant __attribute__((aligned(16))) =
+ {0x8000000000000000LL, 0x8000000000000000LL};
+ xorpd(d, Address::Absolute(reinterpret_cast<uword>(&double_negate_constant)));
+}
+
+
+void X86_64Assembler::DoubleAbs(XmmRegister reg) {
+ static const struct {
+ uint64_t a;
+ uint64_t b;
+ } double_abs_constant __attribute__((aligned(16))) =
+ {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL};
+ andpd(reg, Address::Absolute(reinterpret_cast<uword>(&double_abs_constant)));
+}
+
+
+void X86_64Assembler::Align(int alignment, int offset) {
+ CHECK(IsPowerOfTwo(alignment));
+ // Emit nop instruction until the real position is aligned.
+ while (((offset + buffer_.GetPosition()) & (alignment-1)) != 0) {
+ nop();
+ }
+}
+
+
+void X86_64Assembler::Bind(Label* label) {
+ int bound = buffer_.Size();
+ CHECK(!label->IsBound()); // Labels can only be bound once.
+ while (label->IsLinked()) {
+ int position = label->LinkPosition();
+ int next = buffer_.Load<int32_t>(position);
+ buffer_.Store<int32_t>(position, bound - (position + 4));
+ label->position_ = next;
+ }
+ label->BindTo(bound);
+}
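+
+// While a label is linked, label->position_ holds the buffer offset of the
+// most recent unresolved rel32 slot, and each slot in turn stores the offset
+// of the previous one (written by EmitLabelLink below). Bind() walks this
+// chain, patching every slot with the displacement from the end of the
+// rel32 field (position + 4) to the bound position.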
+
+
+void X86_64Assembler::EmitOperand(int reg_or_opcode, const Operand& operand) {
+ CHECK_GE(reg_or_opcode, 0);
+ CHECK_LT(reg_or_opcode, 8);
+ const int length = operand.length_;
+ CHECK_GT(length, 0);
+ // Emit the ModRM byte updated with the given reg value.
+ CHECK_EQ(operand.encoding_[0] & 0x38, 0);
+ EmitUint8(operand.encoding_[0] + (reg_or_opcode << 3));
+ // Emit the rest of the encoded operand.
+ for (int i = 1; i < length; i++) {
+ EmitUint8(operand.encoding_[i]);
+ }
+}
+
+
+void X86_64Assembler::EmitImmediate(const Immediate& imm) {
+ EmitInt32(imm.value());
+}
+
+
+void X86_64Assembler::EmitComplex(int reg_or_opcode,
+ const Operand& operand,
+ const Immediate& immediate) {
+ CHECK_GE(reg_or_opcode, 0);
+ CHECK_LT(reg_or_opcode, 8);
+ if (immediate.is_int8()) {
+ // Use sign-extended 8-bit immediate.
+ EmitUint8(0x83);
+ EmitOperand(reg_or_opcode, operand);
+ EmitUint8(immediate.value() & 0xFF);
+ } else if (operand.IsRegister(RAX)) {
+    // Use short form if the destination is RAX.
+ EmitUint8(0x05 + (reg_or_opcode << 3));
+ EmitImmediate(immediate);
+ } else {
+ EmitUint8(0x81);
+ EmitOperand(reg_or_opcode, operand);
+ EmitImmediate(immediate);
+ }
+}
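+
+// Encoding choices made by EmitComplex on concrete inputs (illustrative):
+//   addl(RCX, Immediate(4))      -> 83 C1 04            (sign-extended imm8)
+//   addl(RAX, Immediate(0x1000)) -> 05 00 10 00 00      (RAX short form)
+//   cmpl(RBX, Immediate(0x1234)) -> 81 FB 34 12 00 00   (general imm32 form)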
+
+
+void X86_64Assembler::EmitLabel(Label* label, int instruction_size) {
+ if (label->IsBound()) {
+ int offset = label->Position() - buffer_.Size();
+ CHECK_LE(offset, 0);
+ EmitInt32(offset - instruction_size);
+ } else {
+ EmitLabelLink(label);
+ }
+}
+
+
+void X86_64Assembler::EmitLabelLink(Label* label) {
+ CHECK(!label->IsBound());
+ int position = buffer_.Size();
+ EmitInt32(label->position_);
+ label->LinkTo(position);
+}
+
+
+void X86_64Assembler::EmitGenericShift(int reg_or_opcode,
+ Register reg,
+ const Immediate& imm) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK(imm.is_int8());
+ if (imm.value() == 1) {
+ EmitUint8(0xD1);
+ EmitOperand(reg_or_opcode, Operand(reg));
+ } else {
+ EmitUint8(0xC1);
+ EmitOperand(reg_or_opcode, Operand(reg));
+ EmitUint8(imm.value() & 0xFF);
+ }
+}
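+
+// Examples (illustrative): shll(RCX, Immediate(1)) -> D1 E1, while
+// shll(RCX, Immediate(5)) -> C1 E1 05; the by-CL form below encodes
+// sarl(RDX, RCX) as D3 FA.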
+
+
+void X86_64Assembler::EmitGenericShift(int reg_or_opcode,
+ Register operand,
+ Register shifter) {
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ CHECK_EQ(shifter, RCX);
+ EmitUint8(0xD3);
+ EmitOperand(reg_or_opcode, Operand(operand));
+}
+
+void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& spill_regs,
+ const ManagedRegisterEntrySpills& entry_spills) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ for (int i = spill_regs.size() - 1; i >= 0; --i) {
+ pushq(spill_regs.at(i).AsX86_64().AsCpuRegister());
+ }
+ // return address then method on stack
+ addq(RSP, Immediate(-frame_size + (spill_regs.size() * kPointerSize) +
+ kPointerSize /*method*/ + kPointerSize /*return address*/));
+ pushq(method_reg.AsX86_64().AsCpuRegister());
+
+ for (size_t i = 0; i < entry_spills.size(); ++i) {
+ ManagedRegisterSpill spill = entry_spills.at(i);
+ if (spill.AsX86_64().IsCpuRegister()) {
+ if (spill.getSize() == 8) {
+ movq(Address(RSP, frame_size + spill.getSpillOffset()), spill.AsX86_64().AsCpuRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ movl(Address(RSP, frame_size + spill.getSpillOffset()), spill.AsX86_64().AsCpuRegister());
+ }
+ } else {
+ if (spill.getSize() == 8) {
+ movsd(Address(RSP, frame_size + spill.getSpillOffset()), spill.AsX86_64().AsXmmRegister());
+ } else {
+ CHECK_EQ(spill.getSize(), 4);
+ movss(Address(RSP, frame_size + spill.getSpillOffset()), spill.AsX86_64().AsXmmRegister());
+ }
+ }
+ }
+}
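+
+// Sketch of the frame built above (illustrative; assumes kPointerSize
+// matches the 8-byte width of pushq):
+//   incoming stack arguments          <- RSP + frame_size
+//   return address
+//   callee-save spill_regs            (highest index pushed first)
+//   gap reserved by the addq          (locals and out-args)
+//   method*                           <- RSP
+// Entry spills are then stored at RSP + frame_size + offset, i.e. back into
+// the incoming argument slots recorded by the managed calling convention.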
+
+void X86_64Assembler::RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& spill_regs) {
+ CHECK_ALIGNED(frame_size, kStackAlignment);
+ addq(RSP, Immediate(frame_size - (spill_regs.size() * kPointerSize) - kPointerSize));
+ for (size_t i = 0; i < spill_regs.size(); ++i) {
+ popq(spill_regs.at(i).AsX86_64().AsCpuRegister());
+ }
+ ret();
+}
+
+void X86_64Assembler::IncreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ addq(RSP, Immediate(-adjust));
+}
+
+void X86_64Assembler::DecreaseFrameSize(size_t adjust) {
+ CHECK_ALIGNED(adjust, kStackAlignment);
+ addq(RSP, Immediate(adjust));
+}
+
+void X86_64Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ if (src.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (src.IsCpuRegister()) {
+ if (size == 4) {
+ CHECK_EQ(4u, size);
+ movl(Address(RSP, offs), src.AsCpuRegister());
+ } else {
+ CHECK_EQ(8u, size);
+ movq(Address(RSP, offs), src.AsCpuRegister());
+ }
+  } else if (src.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    // Register-pair halves are 32-bit values; store them into adjacent
+    // 4-byte slots as on x86 rather than as overlapping 8-byte stores.
+    movl(Address(RSP, offs), src.AsRegisterPairLow());
+    movl(Address(RSP, FrameOffset(offs.Int32Value()+4)),
+         src.AsRegisterPairHigh());
+ } else if (src.IsX87Register()) {
+ if (size == 4) {
+ fstps(Address(RSP, offs));
+ } else {
+ fstpl(Address(RSP, offs));
+ }
+ } else {
+ CHECK(src.IsXmmRegister());
+ if (size == 4) {
+ movss(Address(RSP, offs), src.AsXmmRegister());
+ } else {
+ movsd(Address(RSP, offs), src.AsXmmRegister());
+ }
+ }
+}
+
+void X86_64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ CHECK(src.IsCpuRegister());
+ movq(Address(RSP, dest), src.AsCpuRegister());
+}
+
+void X86_64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ CHECK(src.IsCpuRegister());
+ movq(Address(RSP, dest), src.AsCpuRegister());
+}
+
+void X86_64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister) {
+ movl(Address(RSP, dest), Immediate(imm)); // TODO(64) movq?
+}
+
+void X86_64Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister) {
+ gs()->movl(Address::Absolute(dest, true), Immediate(imm)); // TODO(64) movq?
+}
+
+void X86_64Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ leaq(scratch.AsCpuRegister(), Address(RSP, fr_offs));
+ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
+}
+
+void X86_64Assembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+ gs()->movq(Address::Absolute(thr_offs, true), RSP);
+}
+
+void X86_64Assembler::StoreLabelToThread(ThreadOffset thr_offs, Label* lbl) {
+ gs()->movl(Address::Absolute(thr_offs, true), lbl); // TODO(64) movq?
+}
+
+void X86_64Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
+ FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
+ UNIMPLEMENTED(FATAL); // this case only currently exists for ARM
+}
+
+void X86_64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ if (size == 4) {
+ CHECK_EQ(4u, size);
+ movl(dest.AsCpuRegister(), Address(RSP, src));
+ } else {
+ CHECK_EQ(8u, size);
+ movq(dest.AsCpuRegister(), Address(RSP, src));
+ }
+  } else if (dest.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    // Register-pair halves are 32-bit values; load them from adjacent
+    // 4-byte slots as on x86 rather than as overlapping 8-byte loads.
+    movl(dest.AsRegisterPairLow(), Address(RSP, src));
+    movl(dest.AsRegisterPairHigh(), Address(RSP, FrameOffset(src.Int32Value()+4)));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ flds(Address(RSP, src));
+ } else {
+ fldl(Address(RSP, src));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ movss(dest.AsXmmRegister(), Address(RSP, src));
+ } else {
+ movsd(dest.AsXmmRegister(), Address(RSP, src));
+ }
+ }
+}
+
+void X86_64Assembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ if (dest.IsNoRegister()) {
+ CHECK_EQ(0u, size);
+ } else if (dest.IsCpuRegister()) {
+ CHECK_EQ(4u, size);
+ gs()->movq(dest.AsCpuRegister(), Address::Absolute(src, true));
+  } else if (dest.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    // Register-pair halves are 32-bit values; avoid overlapping 8-byte loads.
+    gs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src, true));
+    gs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset(src.Int32Value()+4), true));
+ } else if (dest.IsX87Register()) {
+ if (size == 4) {
+ gs()->flds(Address::Absolute(src, true));
+ } else {
+ gs()->fldl(Address::Absolute(src, true));
+ }
+ } else {
+ CHECK(dest.IsXmmRegister());
+ if (size == 4) {
+ gs()->movss(dest.AsXmmRegister(), Address::Absolute(src, true));
+ } else {
+ gs()->movsd(dest.AsXmmRegister(), Address::Absolute(src, true));
+ }
+ }
+}
+
+void X86_64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ CHECK(dest.IsCpuRegister());
+ movq(dest.AsCpuRegister(), Address(RSP, src));
+}
+
+void X86_64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+ MemberOffset offs) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister() && base.AsX86_64().IsCpuRegister());
+ movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+}
+
+void X86_64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+ Offset offs) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+  CHECK(dest.IsCpuRegister() && base.AsX86_64().IsCpuRegister());
+ movq(dest.AsCpuRegister(), Address(base.AsX86_64().AsCpuRegister(), offs));
+}
+
+void X86_64Assembler::LoadRawPtrFromThread(ManagedRegister mdest,
+ ThreadOffset offs) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ CHECK(dest.IsCpuRegister());
+ gs()->movq(dest.AsCpuRegister(), Address::Absolute(offs, true));
+}
+
+void X86_64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
+ X86_64ManagedRegister reg = mreg.AsX86_64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ movsxb(reg.AsCpuRegister(), reg.AsByteRegister());
+ } else {
+ movsxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86_64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
+ X86_64ManagedRegister reg = mreg.AsX86_64();
+ CHECK(size == 1 || size == 2) << size;
+ CHECK(reg.IsCpuRegister()) << reg;
+ if (size == 1) {
+ movzxb(reg.AsCpuRegister(), reg.AsByteRegister());
+ } else {
+ movzxw(reg.AsCpuRegister(), reg.AsCpuRegister());
+ }
+}
+
+void X86_64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+ X86_64ManagedRegister dest = mdest.AsX86_64();
+ X86_64ManagedRegister src = msrc.AsX86_64();
+ if (!dest.Equals(src)) {
+ if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+ movq(dest.AsCpuRegister(), src.AsCpuRegister());
+ } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+ // Pass via stack and pop X87 register
+ subl(RSP, Immediate(16));
+ if (size == 4) {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ fstps(Address(RSP, 0));
+ movss(dest.AsXmmRegister(), Address(RSP, 0));
+ } else {
+ CHECK_EQ(src.AsX87Register(), ST0);
+ fstpl(Address(RSP, 0));
+ movsd(dest.AsXmmRegister(), Address(RSP, 0));
+ }
+ addq(RSP, Immediate(16));
+ } else {
+ // TODO: x87, SSE
+ UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+ }
+ }
+}
+
+void X86_64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ movl(scratch.AsCpuRegister(), Address(RSP, src));
+ movl(Address(RSP, dest), scratch.AsCpuRegister());
+}
+
+void X86_64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset thr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ gs()->movq(scratch.AsCpuRegister(), Address::Absolute(thr_offs, true));
+ Store(fr_offs, scratch, 8);
+}
+
+void X86_64Assembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ Load(scratch, fr_offs, 8);
+ gs()->movq(Address::Absolute(thr_offs, true), scratch.AsCpuRegister());
+}
+
+void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src,
+ ManagedRegister mscratch,
+ size_t size) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ if (scratch.IsCpuRegister() && size == 8) {
+ Load(scratch, src, 4);
+ Store(dest, scratch, 4);
+ Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+ Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+ } else {
+ Load(scratch, src, size);
+ Store(dest, scratch, size);
+ }
+}
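+
+// The 8-byte case above deliberately copies in two 32-bit halves, which
+// mirrors the x86 implementation and keeps Store()/Load() on their 4-byte
+// paths for the scratch register.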
+
+void X86_64Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
+ ManagedRegister /*scratch*/, size_t /*size*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
+void X86_64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister scratch, size_t size) {
+ CHECK(scratch.IsNoRegister());
+ CHECK_EQ(size, 4u);
+ pushq(Address(RSP, src));
+ popq(Address(dest_base.AsX86_64().AsCpuRegister(), dest_offset));
+}
+
+void X86_64Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ Register scratch = mscratch.AsX86_64().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ movq(scratch, Address(RSP, src_base));
+ movq(scratch, Address(scratch, src_offset));
+ movq(Address(RSP, dest), scratch);
+}
+
+void X86_64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size) {
+ CHECK_EQ(size, 4u);
+ CHECK(scratch.IsNoRegister());
+ pushq(Address(src.AsX86_64().AsCpuRegister(), src_offset));
+ popq(Address(dest.AsX86_64().AsCpuRegister(), dest_offset));
+}
+
+void X86_64Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister mscratch, size_t size) {
+ Register scratch = mscratch.AsX86_64().AsCpuRegister();
+ CHECK_EQ(size, 4u);
+ CHECK_EQ(dest.Int32Value(), src.Int32Value());
+ movq(scratch, Address(RSP, src));
+ pushq(Address(scratch, src_offset));
+ popq(Address(scratch, dest_offset));
+}
+
+void X86_64Assembler::MemoryBarrier(ManagedRegister) {
+#if ANDROID_SMP != 0
+ mfence();
+#endif
+}
+
+void X86_64Assembler::CreateSirtEntry(ManagedRegister mout_reg,
+ FrameOffset sirt_offset,
+ ManagedRegister min_reg, bool null_allowed) {
+ X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
+ X86_64ManagedRegister in_reg = min_reg.AsX86_64();
+ if (in_reg.IsNoRegister()) { // TODO(64): && null_allowed
+ // Use out_reg as indicator of NULL
+ in_reg = out_reg;
+ // TODO: movzwl
+ movl(in_reg.AsCpuRegister(), Address(RSP, sirt_offset));
+ }
+ CHECK(in_reg.IsCpuRegister());
+ CHECK(out_reg.IsCpuRegister());
+ VerifyObject(in_reg, null_allowed);
+ if (null_allowed) {
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ j(kZero, &null_arg);
+ leaq(out_reg.AsCpuRegister(), Address(RSP, sirt_offset));
+ Bind(&null_arg);
+ } else {
+ leaq(out_reg.AsCpuRegister(), Address(RSP, sirt_offset));
+ }
+}
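+
+// Sequence emitted above for the null_allowed case (sketch):
+//     xorl  out, out                 ; only when out != in
+//     testl in, in
+//     jz    null_arg
+//     leaq  out, [RSP + sirt_offset]
+//   null_arg:
+// so out receives NULL for a null reference and the address of the SIRT
+// slot otherwise.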
+
+void X86_64Assembler::CreateSirtEntry(FrameOffset out_off,
+ FrameOffset sirt_offset,
+ ManagedRegister mscratch,
+ bool null_allowed) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ CHECK(scratch.IsCpuRegister());
+ if (null_allowed) {
+ Label null_arg;
+ movl(scratch.AsCpuRegister(), Address(RSP, sirt_offset));
+ testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+ j(kZero, &null_arg);
+ leaq(scratch.AsCpuRegister(), Address(RSP, sirt_offset));
+ Bind(&null_arg);
+ } else {
+ leaq(scratch.AsCpuRegister(), Address(RSP, sirt_offset));
+ }
+ Store(out_off, scratch, 8);
+}
+
+// Given a SIRT entry, load the associated reference.
+void X86_64Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+ ManagedRegister min_reg) {
+ X86_64ManagedRegister out_reg = mout_reg.AsX86_64();
+ X86_64ManagedRegister in_reg = min_reg.AsX86_64();
+ CHECK(out_reg.IsCpuRegister());
+ CHECK(in_reg.IsCpuRegister());
+ Label null_arg;
+ if (!out_reg.Equals(in_reg)) {
+ xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+ }
+ testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+ j(kZero, &null_arg);
+ movq(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+ Bind(&null_arg);
+}
+
+void X86_64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86_64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+ // TODO: not validating references
+}
+
+void X86_64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+ X86_64ManagedRegister base = mbase.AsX86_64();
+ CHECK(base.IsCpuRegister());
+ call(Address(base.AsCpuRegister(), offset.Int32Value()));
+ // TODO: place reference map on call
+}
+
+void X86_64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+ Register scratch = mscratch.AsX86_64().AsCpuRegister();
+ movq(scratch, Address(RSP, base));
+ call(Address(scratch, offset));
+}
+
+void X86_64Assembler::Call(ThreadOffset offset, ManagedRegister /*mscratch*/) {
+ gs()->call(Address::Absolute(offset, true));
+}
+
+void X86_64Assembler::GetCurrentThread(ManagedRegister tr) {
+ gs()->movq(tr.AsX86_64().AsCpuRegister(),
+ Address::Absolute(Thread::SelfOffset(), true));
+}
+
+void X86_64Assembler::GetCurrentThread(FrameOffset offset,
+ ManagedRegister mscratch) {
+ X86_64ManagedRegister scratch = mscratch.AsX86_64();
+ gs()->movq(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset(), true));
+ movq(Address(RSP, offset), scratch.AsCpuRegister());
+}
+
+void X86_64Assembler::ExceptionPoll(ManagedRegister /*scratch*/, size_t stack_adjust) {
+ X86ExceptionSlowPath* slow = new X86ExceptionSlowPath(stack_adjust);
+ buffer_.EnqueueSlowPath(slow);
+ gs()->cmpl(Address::Absolute(Thread::ExceptionOffset(), true), Immediate(0));
+ j(kNotEqual, slow->Entry());
+}
+
+void X86ExceptionSlowPath::Emit(Assembler *sasm) {
+ X86_64Assembler* sp_asm = down_cast<X86_64Assembler*>(sasm);
+#define __ sp_asm->
+ __ Bind(&entry_);
+ // Note: the return value is dead
+ if (stack_adjust_ != 0) { // Fix up the frame.
+ __ DecreaseFrameSize(stack_adjust_);
+ }
+ // Pass exception as argument in RAX
+ __ gs()->movq(RAX, Address::Absolute(Thread::ExceptionOffset(), true)); // TODO(64): Pass argument via RDI
+ __ gs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(pDeliverException), true));
+ // this call should never return
+ __ int3();
+#undef __
+}
+
+static const char* kRegisterNames[] = {
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+};
+
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+ if (rhs >= RAX && rhs <= R15) {
+ os << kRegisterNames[rhs];
+ } else {
+ os << "Register[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+} // namespace x86_64
+} // namespace art
+
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
new file mode 100644
index 0000000..d48ba72
--- /dev/null
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_64_ASSEMBLER_X86_64_H_
+#define ART_COMPILER_UTILS_X86_64_ASSEMBLER_X86_64_H_
+
+#include <vector>
+#include "base/macros.h"
+#include "constants_x86_64.h"
+#include "globals.h"
+#include "managed_register_x86_64.h"
+#include "offsets.h"
+#include "utils/assembler.h"
+#include "utils.h"
+
+namespace art {
+namespace x86_64 {
+
+class Immediate {
+ public:
+ explicit Immediate(int32_t value) : value_(value) {}
+
+ int32_t value() const { return value_; }
+
+ bool is_int8() const { return IsInt(8, value_); }
+ bool is_uint8() const { return IsUint(8, value_); }
+ bool is_uint16() const { return IsUint(16, value_); }
+
+ private:
+ const int32_t value_;
+
+ DISALLOW_COPY_AND_ASSIGN(Immediate);
+};
+
+
+class Operand {
+ public:
+ uint8_t mod() const {
+ return (encoding_at(0) >> 6) & 3;
+ }
+
+ Register rm() const {
+ return static_cast<Register>(encoding_at(0) & 7);
+ }
+
+ ScaleFactor scale() const {
+ return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
+ }
+
+ Register index() const {
+ return static_cast<Register>((encoding_at(1) >> 3) & 7);
+ }
+
+ Register base() const {
+ return static_cast<Register>(encoding_at(1) & 7);
+ }
+
+ int8_t disp8() const {
+ CHECK_GE(length_, 2);
+ return static_cast<int8_t>(encoding_[length_ - 1]);
+ }
+
+ int32_t disp32() const {
+ CHECK_GE(length_, 5);
+ int32_t value;
+ memcpy(&value, &encoding_[length_ - 4], sizeof(value));
+ return value;
+ }
+
+ bool IsRegister(Register reg) const {
+ return ((encoding_[0] & 0xF8) == 0xC0) // Addressing mode is register only.
+ && ((encoding_[0] & 0x07) == reg); // Register codes match.
+ }
+
+ protected:
+ // Operand can be sub classed (e.g: Address).
+ Operand() : length_(0) { }
+
+ void SetModRM(int mod, Register rm) {
+ CHECK_EQ(mod & ~3, 0);
+ encoding_[0] = (mod << 6) | rm;
+ length_ = 1;
+ }
+
+ void SetSIB(ScaleFactor scale, Register index, Register base) {
+ CHECK_EQ(length_, 1);
+ CHECK_EQ(scale & ~3, 0);
+ encoding_[1] = (scale << 6) | (index << 3) | base;
+ length_ = 2;
+ }
+
+ void SetDisp8(int8_t disp) {
+ CHECK(length_ == 1 || length_ == 2);
+ encoding_[length_++] = static_cast<uint8_t>(disp);
+ }
+
+ void SetDisp32(int32_t disp) {
+ CHECK(length_ == 1 || length_ == 2);
+ int disp_size = sizeof(disp);
+ memmove(&encoding_[length_], &disp, disp_size);
+ length_ += disp_size;
+ }
+
+ private:
+ byte length_;
+ byte encoding_[6];
+ byte padding_;
+
+ explicit Operand(Register reg) { SetModRM(3, reg); }
+
+ // Get the operand encoding byte at the given index.
+ uint8_t encoding_at(int index) const {
+ CHECK_GE(index, 0);
+ CHECK_LT(index, length_);
+ return encoding_[index];
+ }
+
+ friend class X86_64Assembler;
+
+ DISALLOW_COPY_AND_ASSIGN(Operand);
+};
+
+
+class Address : public Operand {
+ public:
+ Address(Register base, int32_t disp) {
+ Init(base, disp);
+ }
+
+ Address(Register base, Offset disp) {
+ Init(base, disp.Int32Value());
+ }
+
+ Address(Register base, FrameOffset disp) {
+ CHECK_EQ(base, RSP);
+ Init(RSP, disp.Int32Value());
+ }
+
+ Address(Register base, MemberOffset disp) {
+ Init(base, disp.Int32Value());
+ }
+
+ void Init(Register base, int32_t disp) {
+ if (disp == 0 && base != RBP) {
+ SetModRM(0, base);
+ if (base == RSP) SetSIB(TIMES_1, RSP, base);
+ } else if (disp >= -128 && disp <= 127) {
+ SetModRM(1, base);
+ if (base == RSP) SetSIB(TIMES_1, RSP, base);
+ SetDisp8(disp);
+ } else {
+ SetModRM(2, base);
+ if (base == RSP) SetSIB(TIMES_1, RSP, base);
+ SetDisp32(disp);
+ }
+ }
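+
+  // ModRM/SIB bytes produced by Init before EmitOperand ors in the reg
+  // field (illustrative):
+  //   Address(RBX, 0)  -> 03         (mod=0, rm=RBX)
+  //   Address(RSP, 16) -> 44 24 10   (mod=1, rm=100 with SIB, disp8)
+  //   Address(RBP, 0)  -> 45 00      (RBP always needs a displacement)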
+
+
+ Address(Register index, ScaleFactor scale, int32_t disp) {
+ CHECK_NE(index, RSP); // Illegal addressing mode.
+ SetModRM(0, RSP);
+ SetSIB(scale, index, RBP);
+ SetDisp32(disp);
+ }
+
+ Address(Register base, Register index, ScaleFactor scale, int32_t disp) {
+ CHECK_NE(index, RSP); // Illegal addressing mode.
+ if (disp == 0 && base != RBP) {
+ SetModRM(0, RSP);
+ SetSIB(scale, index, base);
+ } else if (disp >= -128 && disp <= 127) {
+ SetModRM(1, RSP);
+ SetSIB(scale, index, base);
+ SetDisp8(disp);
+ } else {
+ SetModRM(2, RSP);
+ SetSIB(scale, index, base);
+ SetDisp32(disp);
+ }
+ }
+
+ static Address Absolute(uword addr, bool has_rip = false) {
+ Address result;
+ if (has_rip) {
+ result.SetModRM(0, RSP);
+ result.SetSIB(TIMES_1, RSP, RBP);
+ result.SetDisp32(addr);
+ } else {
+ result.SetModRM(0, RBP);
+ result.SetDisp32(addr);
+ }
+ return result;
+ }
+
+ static Address Absolute(ThreadOffset addr, bool has_rip = false) {
+ return Absolute(addr.Int32Value(), has_rip);
+ }
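+
+  // Note on the encodings above: with has_rip == true the operand is a SIB
+  // form with no base and no index plus a disp32, i.e. a plain 32-bit
+  // absolute address, which the gs prefix turns into a Thread-relative
+  // access. With has_rip == false, mod=00 with rm=RBP is RIP-relative in
+  // 64-bit mode, so that path only reaches RIP-addressable data.
+  // TODO(64): revisit the flag's name.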
+
+ private:
+ Address() {}
+
+ DISALLOW_COPY_AND_ASSIGN(Address);
+};
+
+
+class X86_64Assembler : public Assembler {
+ public:
+ X86_64Assembler() {}
+ virtual ~X86_64Assembler() {}
+
+ /*
+ * Emit Machine Instructions.
+ */
+ void call(Register reg);
+ void call(const Address& address);
+ void call(Label* label);
+
+ void pushq(Register reg);
+ void pushq(const Address& address);
+ void pushq(const Immediate& imm);
+
+ void popq(Register reg);
+ void popq(const Address& address);
+
+ void movq(Register dst, const Immediate& src);
+ void movl(Register dst, const Immediate& src);
+ void movq(Register dst, Register src);
+ void movl(Register dst, Register src);
+
+ void movq(Register dst, const Address& src);
+ void movl(Register dst, const Address& src);
+ void movq(const Address& dst, Register src);
+ void movl(const Address& dst, Register src);
+ void movl(const Address& dst, const Immediate& imm);
+ void movl(const Address& dst, Label* lbl);
+
+ void movzxb(Register dst, ByteRegister src);
+ void movzxb(Register dst, const Address& src);
+ void movsxb(Register dst, ByteRegister src);
+ void movsxb(Register dst, const Address& src);
+ void movb(Register dst, const Address& src);
+ void movb(const Address& dst, ByteRegister src);
+ void movb(const Address& dst, const Immediate& imm);
+
+ void movzxw(Register dst, Register src);
+ void movzxw(Register dst, const Address& src);
+ void movsxw(Register dst, Register src);
+ void movsxw(Register dst, const Address& src);
+ void movw(Register dst, const Address& src);
+ void movw(const Address& dst, Register src);
+
+ void leaq(Register dst, const Address& src);
+
+ void cmovl(Condition condition, Register dst, Register src);
+
+ void setb(Condition condition, Register dst);
+
+ void movss(XmmRegister dst, const Address& src);
+ void movss(const Address& dst, XmmRegister src);
+ void movss(XmmRegister dst, XmmRegister src);
+
+ void movd(XmmRegister dst, Register src);
+ void movd(Register dst, XmmRegister src);
+
+ void addss(XmmRegister dst, XmmRegister src);
+ void addss(XmmRegister dst, const Address& src);
+ void subss(XmmRegister dst, XmmRegister src);
+ void subss(XmmRegister dst, const Address& src);
+ void mulss(XmmRegister dst, XmmRegister src);
+ void mulss(XmmRegister dst, const Address& src);
+ void divss(XmmRegister dst, XmmRegister src);
+ void divss(XmmRegister dst, const Address& src);
+
+ void movsd(XmmRegister dst, const Address& src);
+ void movsd(const Address& dst, XmmRegister src);
+ void movsd(XmmRegister dst, XmmRegister src);
+
+ void addsd(XmmRegister dst, XmmRegister src);
+ void addsd(XmmRegister dst, const Address& src);
+ void subsd(XmmRegister dst, XmmRegister src);
+ void subsd(XmmRegister dst, const Address& src);
+ void mulsd(XmmRegister dst, XmmRegister src);
+ void mulsd(XmmRegister dst, const Address& src);
+ void divsd(XmmRegister dst, XmmRegister src);
+ void divsd(XmmRegister dst, const Address& src);
+
+ void cvtsi2ss(XmmRegister dst, Register src);
+ void cvtsi2sd(XmmRegister dst, Register src);
+
+ void cvtss2si(Register dst, XmmRegister src);
+ void cvtss2sd(XmmRegister dst, XmmRegister src);
+
+ void cvtsd2si(Register dst, XmmRegister src);
+ void cvtsd2ss(XmmRegister dst, XmmRegister src);
+
+ void cvttss2si(Register dst, XmmRegister src);
+ void cvttsd2si(Register dst, XmmRegister src);
+
+ void cvtdq2pd(XmmRegister dst, XmmRegister src);
+
+ void comiss(XmmRegister a, XmmRegister b);
+ void comisd(XmmRegister a, XmmRegister b);
+
+ void sqrtsd(XmmRegister dst, XmmRegister src);
+ void sqrtss(XmmRegister dst, XmmRegister src);
+
+ void xorpd(XmmRegister dst, const Address& src);
+ void xorpd(XmmRegister dst, XmmRegister src);
+ void xorps(XmmRegister dst, const Address& src);
+ void xorps(XmmRegister dst, XmmRegister src);
+
+ void andpd(XmmRegister dst, const Address& src);
+
+ void flds(const Address& src);
+ void fstps(const Address& dst);
+
+ void fldl(const Address& src);
+ void fstpl(const Address& dst);
+
+ void fnstcw(const Address& dst);
+ void fldcw(const Address& src);
+
+ void fistpl(const Address& dst);
+ void fistps(const Address& dst);
+ void fildl(const Address& src);
+
+ void fincstp();
+ void ffree(const Immediate& index);
+
+ void fsin();
+ void fcos();
+ void fptan();
+
+ void xchgl(Register dst, Register src);
+ void xchgl(Register reg, const Address& address);
+
+ void cmpl(Register reg, const Immediate& imm);
+ void cmpl(Register reg0, Register reg1);
+ void cmpl(Register reg, const Address& address);
+
+ void cmpl(const Address& address, Register reg);
+ void cmpl(const Address& address, const Immediate& imm);
+
+ void testl(Register reg1, Register reg2);
+ void testl(Register reg, const Immediate& imm);
+
+ void andl(Register dst, const Immediate& imm);
+ void andl(Register dst, Register src);
+
+ void orl(Register dst, const Immediate& imm);
+ void orl(Register dst, Register src);
+
+ void xorl(Register dst, Register src);
+
+ void addl(Register dst, Register src);
+ void addq(Register reg, const Immediate& imm);
+ void addl(Register reg, const Immediate& imm);
+ void addl(Register reg, const Address& address);
+
+ void addl(const Address& address, Register reg);
+ void addl(const Address& address, const Immediate& imm);
+
+ void adcl(Register dst, Register src);
+ void adcl(Register reg, const Immediate& imm);
+ void adcl(Register dst, const Address& address);
+
+ void subl(Register dst, Register src);
+ void subl(Register reg, const Immediate& imm);
+ void subl(Register reg, const Address& address);
+
+ void cdq();
+
+ void idivl(Register reg);
+
+ void imull(Register dst, Register src);
+ void imull(Register reg, const Immediate& imm);
+ void imull(Register reg, const Address& address);
+
+ void imull(Register reg);
+ void imull(const Address& address);
+
+ void mull(Register reg);
+ void mull(const Address& address);
+
+ void sbbl(Register dst, Register src);
+ void sbbl(Register reg, const Immediate& imm);
+ void sbbl(Register reg, const Address& address);
+
+ void incl(Register reg);
+ void incl(const Address& address);
+
+ void decl(Register reg);
+ void decl(const Address& address);
+
+ void shll(Register reg, const Immediate& imm);
+ void shll(Register operand, Register shifter);
+ void shrl(Register reg, const Immediate& imm);
+ void shrl(Register operand, Register shifter);
+ void sarl(Register reg, const Immediate& imm);
+ void sarl(Register operand, Register shifter);
+ void shld(Register dst, Register src);
+
+ void negl(Register reg);
+ void notl(Register reg);
+
+ void enter(const Immediate& imm);
+ void leave();
+
+ void ret();
+ void ret(const Immediate& imm);
+
+ void nop();
+ void int3();
+ void hlt();
+
+ void j(Condition condition, Label* label);
+
+ void jmp(Register reg);
+ void jmp(const Address& address);
+ void jmp(Label* label);
+
+ X86_64Assembler* lock();
+ void cmpxchgl(const Address& address, Register reg);
+
+ void mfence();
+
+ X86_64Assembler* gs();
+
+ //
+ // Macros for High-level operations.
+ //
+
+ void AddImmediate(Register reg, const Immediate& imm);
+
+ void LoadDoubleConstant(XmmRegister dst, double value);
+
+ void DoubleNegate(XmmRegister d);
+ void FloatNegate(XmmRegister f);
+
+ void DoubleAbs(XmmRegister reg);
+
+ void LockCmpxchgl(const Address& address, Register reg) {
+ lock()->cmpxchgl(address, reg);
+ }
+
+ //
+ // Misc. functionality
+ //
+ int PreferredLoopAlignment() { return 16; }
+ void Align(int alignment, int offset);
+ void Bind(Label* label);
+
+ //
+ // Overridden common assembler high-level functionality
+ //
+
+ // Emit code that will create an activation on the stack
+ virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+ const std::vector<ManagedRegister>& callee_save_regs,
+ const ManagedRegisterEntrySpills& entry_spills);
+
+ // Emit code that will remove an activation from the stack
+ virtual void RemoveFrame(size_t frame_size,
+ const std::vector<ManagedRegister>& callee_save_regs);
+
+ virtual void IncreaseFrameSize(size_t adjust);
+ virtual void DecreaseFrameSize(size_t adjust);
+
+ // Store routines
+ virtual void Store(FrameOffset offs, ManagedRegister src, size_t size);
+ virtual void StoreRef(FrameOffset dest, ManagedRegister src);
+ virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src);
+
+ virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+ ManagedRegister scratch);
+
+ virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+
+ void StoreLabelToThread(ThreadOffset thr_offs, Label* lbl);
+
+ virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+ FrameOffset in_off, ManagedRegister scratch);
+
+ // Load routines
+ virtual void Load(ManagedRegister dest, FrameOffset src, size_t size);
+
+ virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size);
+
+ virtual void LoadRef(ManagedRegister dest, FrameOffset src);
+
+ virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+ MemberOffset offs);
+
+ virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+ Offset offs);
+
+ virtual void LoadRawPtrFromThread(ManagedRegister dest,
+ ThreadOffset offs);
+
+ // Copying routines
+ virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+
+ virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+ ManagedRegister scratch);
+
+ virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch);
+
+ virtual void CopyRef(FrameOffset dest, FrameOffset src,
+ ManagedRegister scratch);
+
+ virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(ManagedRegister dest, Offset dest_offset,
+ ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+ ManagedRegister scratch, size_t size);
+
+ virtual void MemoryBarrier(ManagedRegister);
+
+ // Sign extension
+ virtual void SignExtend(ManagedRegister mreg, size_t size);
+
+ // Zero extension
+ virtual void ZeroExtend(ManagedRegister mreg, size_t size);
+
+ // Exploit fast access in managed code to Thread::Current()
+ virtual void GetCurrentThread(ManagedRegister tr);
+ virtual void GetCurrentThread(FrameOffset dest_offset,
+ ManagedRegister scratch);
+
+ // Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
+ // value is null and null_allowed. in_reg holds a possibly stale reference
+ // that can be used to avoid loading the SIRT entry to see if the value is
+ // NULL.
+ virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+ ManagedRegister in_reg, bool null_allowed);
+
+ // Set up out_off to hold a Object** into the SIRT, or to be NULL if the
+ // value is null and null_allowed.
+ virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+ ManagedRegister scratch, bool null_allowed);
+
+ // src holds a SIRT entry (Object**) load this into dst
+ virtual void LoadReferenceFromSirt(ManagedRegister dst,
+ ManagedRegister src);
+
+ // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+ // know that src may not be null.
+ virtual void VerifyObject(ManagedRegister src, bool could_be_null);
+ virtual void VerifyObject(FrameOffset src, bool could_be_null);
+
+ // Call to address held at [base+offset]
+ virtual void Call(ManagedRegister base, Offset offset,
+ ManagedRegister scratch);
+ virtual void Call(FrameOffset base, Offset offset,
+ ManagedRegister scratch);
+ virtual void Call(ThreadOffset offset, ManagedRegister scratch);
+
+ // Generate code to check if Thread::Current()->exception_ is non-null
+ // and branch to a ExceptionSlowPath if it is.
+ virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+
+ private:
+ inline void EmitUint8(uint8_t value);
+ inline void EmitInt32(int32_t value);
+ inline void EmitRegisterOperand(int rm, int reg);
+ inline void EmitXmmRegisterOperand(int rm, XmmRegister reg);
+ inline void EmitFixup(AssemblerFixup* fixup);
+ inline void EmitOperandSizeOverride();
+
+ void EmitOperand(int rm, const Operand& operand);
+ void EmitImmediate(const Immediate& imm);
+ void EmitComplex(int rm, const Operand& operand, const Immediate& immediate);
+ void EmitLabel(Label* label, int instruction_size);
+ void EmitLabelLink(Label* label);
+ void EmitNearLabelLink(Label* label);
+
+ void EmitGenericShift(int rm, Register reg, const Immediate& imm);
+ void EmitGenericShift(int rm, Register operand, Register shifter);
+ void rex(Register &dst, Register &src, size_t size = 4);
+ void rex_reg(Register &dst, size_t size = 4);
+ void rex_rm(Register &src, size_t size = 4);
+
+ DISALLOW_COPY_AND_ASSIGN(X86_64Assembler);
+};
+
+inline void X86_64Assembler::EmitUint8(uint8_t value) {
+ buffer_.Emit<uint8_t>(value);
+}
+
+inline void X86_64Assembler::EmitInt32(int32_t value) {
+ buffer_.Emit<int32_t>(value);
+}
+
+inline void X86_64Assembler::EmitRegisterOperand(int rm, int reg) {
+ CHECK_GE(rm, 0);
+ CHECK_LT(rm, 8);
+ buffer_.Emit<uint8_t>(0xC0 + (rm << 3) + reg);
+}
+
+inline void X86_64Assembler::EmitXmmRegisterOperand(int rm, XmmRegister reg) {
+ EmitRegisterOperand(rm, static_cast<Register>(reg));
+}
+
+inline void X86_64Assembler::EmitFixup(AssemblerFixup* fixup) {
+ buffer_.EmitFixup(fixup);
+}
+
+inline void X86_64Assembler::EmitOperandSizeOverride() {
+ EmitUint8(0x66);
+}
+
+// Slowpath entered when Thread::Current()->exception_ is non-null.
+class X86ExceptionSlowPath : public SlowPath {
+ public:
+ explicit X86ExceptionSlowPath(size_t stack_adjust) : stack_adjust_(stack_adjust) {}
+ virtual void Emit(Assembler *sp_asm);
+ private:
+ const size_t stack_adjust_;
+};
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_X86_64_ASSEMBLER_X86_64_H_
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
new file mode 100644
index 0000000..df0d14e
--- /dev/null
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_x86_64.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(AssemblerX86_64, CreateBuffer) {
+ AssemblerBuffer buffer;
+ AssemblerBuffer::EnsureCapacity ensured(&buffer);
+ buffer.Emit<uint8_t>(0x42);
+ ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
+ buffer.Emit<int32_t>(42);
+ ASSERT_EQ(static_cast<size_t>(5), buffer.Size());
+}
+
+} // namespace art
diff --git a/compiler/utils/x86_64/constants_x86_64.h b/compiler/utils/x86_64/constants_x86_64.h
new file mode 100644
index 0000000..3340802
--- /dev/null
+++ b/compiler/utils/x86_64/constants_x86_64.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_64_CONSTANTS_X86_64_H_
+#define ART_COMPILER_UTILS_X86_64_CONSTANTS_X86_64_H_
+
+#include <iosfwd>
+
+#include "arch/x86_64/registers_x86_64.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "globals.h"
+
+namespace art {
+namespace x86_64 {
+
+enum ByteRegister {
+ AL = 0,
+ CL = 1,
+ DL = 2,
+ BL = 3,
+ AH = 4,
+ CH = 5,
+ DH = 6,
+ BH = 7,
+ kNoByteRegister = -1 // Signals an illegal register.
+};
+
+
+enum XmmRegister {
+ _XMM0 = 0,
+ _XMM1 = 1,
+ _XMM2 = 2,
+ _XMM3 = 3,
+ _XMM4 = 4,
+ _XMM5 = 5,
+ _XMM6 = 6,
+ _XMM7 = 7,
+ kNumberOfXmmRegisters = 8,
+ kNoXmmRegister = -1 // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg);
+
+enum X87Register {
+ ST0 = 0,
+ ST1 = 1,
+ ST2 = 2,
+ ST3 = 3,
+ ST4 = 4,
+ ST5 = 5,
+ ST6 = 6,
+ ST7 = 7,
+ kNumberOfX87Registers = 8,
+ kNoX87Register = -1 // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const X87Register& reg);
+
+enum ScaleFactor {
+ TIMES_1 = 0,
+ TIMES_2 = 1,
+ TIMES_4 = 2,
+ TIMES_8 = 3
+};
+
+enum Condition {
+ kOverflow = 0,
+ kNoOverflow = 1,
+ kBelow = 2,
+ kAboveEqual = 3,
+ kEqual = 4,
+ kNotEqual = 5,
+ kBelowEqual = 6,
+ kAbove = 7,
+ kSign = 8,
+ kNotSign = 9,
+ kParityEven = 10,
+ kParityOdd = 11,
+ kLess = 12,
+ kGreaterEqual = 13,
+ kLessEqual = 14,
+ kGreater = 15,
+
+ kZero = kEqual,
+ kNotZero = kNotEqual,
+ kNegative = kSign,
+ kPositive = kNotSign
+};
+
+
+class Instr {
+ public:
+ static const uint8_t kHltInstruction = 0xF4;
+ // We prefer not to use the int3 instruction since it conflicts with gdb.
+ static const uint8_t kBreakPointInstruction = kHltInstruction;
+
+ bool IsBreakPoint() {
+ return (*reinterpret_cast<const uint8_t*>(this)) == kBreakPointInstruction;
+ }
+
+ // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instr.
+ // Use the At(pc) function to create references to Instr.
+ static Instr* At(uintptr_t pc) { return reinterpret_cast<Instr*>(pc); }
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+};
+
+} // namespace x86_64
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_X86_64_CONSTANTS_X86_64_H_
diff --git a/compiler/utils/x86_64/managed_register_x86_64.cc b/compiler/utils/x86_64/managed_register_x86_64.cc
new file mode 100644
index 0000000..057a894
--- /dev/null
+++ b/compiler/utils/x86_64/managed_register_x86_64.cc
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_x86_64.h"
+
+#include "globals.h"
+
+namespace art {
+namespace x86_64 {
+
+// Define register pairs.
+// This list must be kept in sync with the RegisterPair enum.
+#define REGISTER_PAIR_LIST(P) \
+ P(RAX, RDX) \
+ P(RAX, RCX) \
+ P(RAX, RBX) \
+ P(RAX, RDI) \
+ P(RDX, RCX) \
+ P(RDX, RBX) \
+ P(RDX, RDI) \
+ P(RCX, RBX) \
+ P(RCX, RDI) \
+ P(RBX, RDI)
+
+
+struct RegisterPairDescriptor {
+ RegisterPair reg; // Used to verify that the enum is in sync.
+ Register low;
+ Register high;
+};
+
+
+static const RegisterPairDescriptor kRegisterPairs[] = {
+#define REGISTER_PAIR_ENUMERATION(low, high) { low##_##high, low, high },
+ REGISTER_PAIR_LIST(REGISTER_PAIR_ENUMERATION)
+#undef REGISTER_PAIR_ENUMERATION
+};
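+
+// REGISTER_PAIR_ENUMERATION expands each P(low, high) entry into an
+// initializer such as { RAX_RDX, RAX, RDX }, so kRegisterPairs[i].reg == i
+// exactly when this table and the RegisterPair enum stay in sync (checked
+// by AllocIdLow/AllocIdHigh).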
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) {
+ os << X86_64ManagedRegister::FromRegisterPair(reg);
+ return os;
+}
+
+bool X86_64ManagedRegister::Overlaps(const X86_64ManagedRegister& other) const {
+ if (IsNoRegister() || other.IsNoRegister()) return false;
+ CHECK(IsValidManagedRegister());
+ CHECK(other.IsValidManagedRegister());
+ if (Equals(other)) return true;
+ if (IsRegisterPair()) {
+ Register low = AsRegisterPairLow();
+ Register high = AsRegisterPairHigh();
+ return X86_64ManagedRegister::FromCpuRegister(low).Overlaps(other) ||
+ X86_64ManagedRegister::FromCpuRegister(high).Overlaps(other);
+ }
+ if (other.IsRegisterPair()) {
+ return other.Overlaps(*this);
+ }
+ return false;
+}
+
+
+int X86_64ManagedRegister::AllocIdLow() const {
+ CHECK(IsRegisterPair());
+ const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+ kNumberOfX87RegIds);
+ CHECK_EQ(r, kRegisterPairs[r].reg);
+ return kRegisterPairs[r].low;
+}
+
+
+int X86_64ManagedRegister::AllocIdHigh() const {
+ CHECK(IsRegisterPair());
+ const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+ kNumberOfX87RegIds);
+ CHECK_EQ(r, kRegisterPairs[r].reg);
+ return kRegisterPairs[r].high;
+}
+
+
+void X86_64ManagedRegister::Print(std::ostream& os) const {
+ if (!IsValidManagedRegister()) {
+ os << "No Register";
+ } else if (IsXmmRegister()) {
+ os << "XMM: " << static_cast<int>(AsXmmRegister());
+ } else if (IsX87Register()) {
+ os << "X87: " << static_cast<int>(AsX87Register());
+ } else if (IsCpuRegister()) {
+ os << "CPU: " << static_cast<int>(AsCpuRegister());
+ } else if (IsRegisterPair()) {
+ os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh();
+ } else {
+ os << "??: " << RegId();
+ }
+}
+
+std::ostream& operator<<(std::ostream& os, const X86_64ManagedRegister& reg) {
+ reg.Print(os);
+ return os;
+}
+
+} // namespace x86_64
+} // namespace art
diff --git a/compiler/utils/x86_64/managed_register_x86_64.h b/compiler/utils/x86_64/managed_register_x86_64.h
new file mode 100644
index 0000000..d68c59d
--- /dev/null
+++ b/compiler/utils/x86_64/managed_register_x86_64.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_X86_64_MANAGED_REGISTER_X86_64_H_
+#define ART_COMPILER_UTILS_X86_64_MANAGED_REGISTER_X86_64_H_
+
+#include "constants_x86_64.h"
+#include "utils/managed_register.h"
+
+namespace art {
+namespace x86_64 {
+
+// Values for register pairs.
+// The registers reserved by the runtime (kReservedCpuRegistersArray) are not used in pairs.
+// The table kRegisterPairs in managed_register_x86_64.cc must be kept in sync with this enum.
+enum RegisterPair {
+ RAX_RDX = 0,
+ RAX_RCX = 1,
+ RAX_RBX = 2,
+ RAX_RDI = 3,
+ RDX_RCX = 4,
+ RDX_RBX = 5,
+ RDX_RDI = 6,
+ RCX_RBX = 7,
+ RCX_RDI = 8,
+ RBX_RDI = 9,
+ kNumberOfRegisterPairs = 10,
+ kNoRegisterPair = -1,
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCpuRegIds = kNumberOfCpuRegisters;
+const int kNumberOfCpuAllocIds = kNumberOfCpuRegisters;
+
+const int kNumberOfXmmRegIds = kNumberOfXmmRegisters;
+const int kNumberOfXmmAllocIds = kNumberOfXmmRegisters;
+
+const int kNumberOfX87RegIds = kNumberOfX87Registers;
+const int kNumberOfX87AllocIds = kNumberOfX87Registers;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+ kNumberOfX87RegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds +
+ kNumberOfX87AllocIds;
+
+// Register ids map:
+// [0..R[ cpu registers (enum Register)
+// [R..X[ xmm registers (enum XmmRegister)
+// [X..S[ x87 registers (enum X87Register)
+// [S..P[ register pairs (enum RegisterPair)
+// where
+// R = kNumberOfCpuRegIds
+// X = R + kNumberOfXmmRegIds
+// S = X + kNumberOfX87RegIds
+// P = S + kNumberOfRegisterPairs
+
+// Allocation ids map:
+// [0..R[ cpu registers (enum Register)
+// [R..X[ xmm registers (enum XmmRegister)
+// [X..S[ x87 registers (enum X87Register)
+// where
+// R = kNumberOfCpuRegIds
+// X = R + kNumberOfXmmRegIds
+// S = X + kNumberOfX87RegIds
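+// For example, FromX87Register(ST1) yields register id X + 1, and
+// FromRegisterPair(RAX_RDX) yields register id S + 0. Register pairs have
+// register ids but no allocation ids of their own, which is why
+// kNumberOfAllocIds excludes kNumberOfPairRegIds.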
+
+
+// An instance of class 'ManagedRegister' represents a single cpu register (enum
+// Register), an xmm register (enum XmmRegister), an x87 register (enum
+// X87Register), or a pair of cpu registers (enum RegisterPair).
+// 'ManagedRegister::NoRegister()' provides an invalid register.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class X86_64ManagedRegister : public ManagedRegister {
+ public:
+ ByteRegister AsByteRegister() const {
+ CHECK(IsCpuRegister());
+ CHECK_LT(AsCpuRegister(), RSP);  // RSP, RBP, RSI and RDI cannot be encoded as byte registers without a REX prefix.
+ return static_cast<ByteRegister>(id_);
+ }
+
+ Register AsCpuRegister() const {
+ CHECK(IsCpuRegister());
+ return static_cast<Register>(id_);
+ }
+
+ XmmRegister AsXmmRegister() const {
+ CHECK(IsXmmRegister());
+ return static_cast<XmmRegister>(id_ - kNumberOfCpuRegIds);
+ }
+
+ X87Register AsX87Register() const {
+ CHECK(IsX87Register());
+ return static_cast<X87Register>(id_ -
+ (kNumberOfCpuRegIds + kNumberOfXmmRegIds));
+ }
+
+ Register AsRegisterPairLow() const {
+ CHECK(IsRegisterPair());
+ // The appropriate mapping of register ids allows the use of AllocIdLow().
+ return FromRegId(AllocIdLow()).AsCpuRegister();
+ }
+
+ Register AsRegisterPairHigh() const {
+ CHECK(IsRegisterPair());
+ // The appropriate mapping of register ids allows the use of AllocIdHigh().
+ return FromRegId(AllocIdHigh()).AsCpuRegister();
+ }
+
+ bool IsCpuRegister() const {
+ CHECK(IsValidManagedRegister());
+ return (0 <= id_) && (id_ < kNumberOfCpuRegIds);
+ }
+
+ bool IsXmmRegister() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - kNumberOfCpuRegIds;
+ return (0 <= test) && (test < kNumberOfXmmRegIds);
+ }
+
+ bool IsX87Register() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ - (kNumberOfCpuRegIds + kNumberOfXmmRegIds);
+ return (0 <= test) && (test < kNumberOfX87RegIds);
+ }
+
+ bool IsRegisterPair() const {
+ CHECK(IsValidManagedRegister());
+ const int test = id_ -
+ (kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds);
+ return (0 <= test) && (test < kNumberOfPairRegIds);
+ }
+
+ void Print(std::ostream& os) const;
+
+ // Returns true if the two managed-registers ('this' and 'other') overlap.
+ // Either managed-register may be the NoRegister. If both are the NoRegister
+ // then false is returned.
+ bool Overlaps(const X86_64ManagedRegister& other) const;
+
+ static X86_64ManagedRegister FromCpuRegister(Register r) {
+ CHECK_NE(r, kNoRegister);
+ return FromRegId(r);
+ }
+
+ static X86_64ManagedRegister FromXmmRegister(XmmRegister r) {
+ CHECK_NE(r, kNoXmmRegister);
+ return FromRegId(r + kNumberOfCpuRegIds);
+ }
+
+ static X86_64ManagedRegister FromX87Register(X87Register r) {
+ CHECK_NE(r, kNoX87Register);
+ return FromRegId(r + kNumberOfCpuRegIds + kNumberOfXmmRegIds);
+ }
+
+ static X86_64ManagedRegister FromRegisterPair(RegisterPair r) {
+ CHECK_NE(r, kNoRegisterPair);
+ return FromRegId(r + (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+ kNumberOfX87RegIds));
+ }
+
+ private:
+ bool IsValidManagedRegister() const {
+ return (0 <= id_) && (id_ < kNumberOfRegIds);
+ }
+
+ int RegId() const {
+ CHECK(!IsNoRegister());
+ return id_;
+ }
+
+ int AllocId() const {
+ CHECK(IsValidManagedRegister() && !IsRegisterPair());
+ CHECK_LT(id_, kNumberOfAllocIds);
+ return id_;
+ }
+
+ int AllocIdLow() const;
+ int AllocIdHigh() const;
+
+ friend class ManagedRegister;
+
+ explicit X86_64ManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+ static X86_64ManagedRegister FromRegId(int reg_id) {
+ X86_64ManagedRegister reg(reg_id);
+ CHECK(reg.IsValidManagedRegister());
+ return reg;
+ }
+};
+
+std::ostream& operator<<(std::ostream& os, const X86_64ManagedRegister& reg);
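+// Usage sketch (illustrative):
+//   X86_64ManagedRegister pair = X86_64ManagedRegister::FromRegisterPair(RAX_RDX);
+//   pair.AsRegisterPairLow();                                    // RAX
+//   pair.Overlaps(X86_64ManagedRegister::FromCpuRegister(RDX));  // true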
+
+} // namespace x86_64
+
+inline x86_64::X86_64ManagedRegister ManagedRegister::AsX86_64() const {
+ x86_64::X86_64ManagedRegister reg(id_);
+ CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+ return reg;
+}
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_X86_64_MANAGED_REGISTER_X86_64_H_
diff --git a/compiler/utils/x86_64/managed_register_x86_64_test.cc b/compiler/utils/x86_64/managed_register_x86_64_test.cc
new file mode 100644
index 0000000..2dc7581
--- /dev/null
+++ b/compiler/utils/x86_64/managed_register_x86_64_test.cc
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "globals.h"
+#include "managed_register_x86_64.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace x86_64 {
+
+TEST(X86_64ManagedRegister, NoRegister) {
+ X86_64ManagedRegister reg = ManagedRegister::NoRegister().AsX86_64();
+ EXPECT_TRUE(reg.IsNoRegister());
+ EXPECT_TRUE(!reg.Overlaps(reg));
+}
+
+TEST(X86_64ManagedRegister, CpuRegister) {
+ X86_64ManagedRegister reg = X86_64ManagedRegister::FromCpuRegister(RAX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(RAX, reg.AsCpuRegister());
+
+ reg = X86_64ManagedRegister::FromCpuRegister(RBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(RBX, reg.AsCpuRegister());
+
+ reg = X86_64ManagedRegister::FromCpuRegister(RCX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(RCX, reg.AsCpuRegister());
+
+ reg = X86_64ManagedRegister::FromCpuRegister(RDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(RDI, reg.AsCpuRegister());
+}
+
+TEST(X86_64ManagedRegister, XmmRegister) {
+ X86_64ManagedRegister reg = X86_64ManagedRegister::FromXmmRegister(XMM0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(XMM0, reg.AsXmmRegister());
+
+ reg = X86_64ManagedRegister::FromXmmRegister(XMM1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(XMM1, reg.AsXmmRegister());
+
+ reg = X86_64ManagedRegister::FromXmmRegister(XMM7);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(XMM7, reg.AsXmmRegister());
+}
+
+TEST(X86_64ManagedRegister, X87Register) {
+ X86_64ManagedRegister reg = X86_64ManagedRegister::FromX87Register(ST0);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ST0, reg.AsX87Register());
+
+ reg = X86_64ManagedRegister::FromX87Register(ST1);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ST1, reg.AsX87Register());
+
+ reg = X86_64ManagedRegister::FromX87Register(ST7);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(reg.IsX87Register());
+ EXPECT_TRUE(!reg.IsRegisterPair());
+ EXPECT_EQ(ST7, reg.AsX87Register());
+}
+
+TEST(X86_64ManagedRegister, RegisterPair) {
+ X86_64ManagedRegister reg = X86_64ManagedRegister::FromRegisterPair(RAX_RDX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(RAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(RDX, reg.AsRegisterPairHigh());
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RAX_RCX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(RAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(RCX, reg.AsRegisterPairHigh());
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RAX_RBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(RAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(RBX, reg.AsRegisterPairHigh());
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RAX_RDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(RAX, reg.AsRegisterPairLow());
+ EXPECT_EQ(RDI, reg.AsRegisterPairHigh());
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RDX_RCX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(RDX, reg.AsRegisterPairLow());
+ EXPECT_EQ(RCX, reg.AsRegisterPairHigh());
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RDX_RBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(RDX, reg.AsRegisterPairLow());
+ EXPECT_EQ(RBX, reg.AsRegisterPairHigh());
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RDX_RDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(RDX, reg.AsRegisterPairLow());
+ EXPECT_EQ(RDI, reg.AsRegisterPairHigh());
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RCX_RBX);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(RCX, reg.AsRegisterPairLow());
+ EXPECT_EQ(RBX, reg.AsRegisterPairHigh());
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RCX_RDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(RCX, reg.AsRegisterPairLow());
+ EXPECT_EQ(RDI, reg.AsRegisterPairHigh());
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RBX_RDI);
+ EXPECT_TRUE(!reg.IsNoRegister());
+ EXPECT_TRUE(!reg.IsCpuRegister());
+ EXPECT_TRUE(!reg.IsXmmRegister());
+ EXPECT_TRUE(!reg.IsX87Register());
+ EXPECT_TRUE(reg.IsRegisterPair());
+ EXPECT_EQ(RBX, reg.AsRegisterPairLow());
+ EXPECT_EQ(RDI, reg.AsRegisterPairHigh());
+}
+
+TEST(X86_64ManagedRegister, Equals) {
+ X86_64ManagedRegister reg_rax = X86_64ManagedRegister::FromCpuRegister(RAX);
+ EXPECT_TRUE(reg_rax.Equals(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg_rax.Equals(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg_rax.Equals(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg_rax.Equals(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_rax.Equals(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg_rax.Equals(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_rax.Equals(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg_rax.Equals(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(!reg_rax.Equals(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+
+ X86_64ManagedRegister reg_xmm0 = X86_64ManagedRegister::FromXmmRegister(XMM0);
+ EXPECT_TRUE(!reg_xmm0.Equals(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(reg_xmm0.Equals(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(!reg_xmm0.Equals(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+
+ X86_64ManagedRegister reg_st0 = X86_64ManagedRegister::FromX87Register(ST0);
+ EXPECT_TRUE(!reg_st0.Equals(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg_st0.Equals(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg_st0.Equals(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg_st0.Equals(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_st0.Equals(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(reg_st0.Equals(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_st0.Equals(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg_st0.Equals(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(!reg_st0.Equals(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+
+ X86_64ManagedRegister reg_pair = X86_64ManagedRegister::FromRegisterPair(RAX_RDX);
+ EXPECT_TRUE(!reg_pair.Equals(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg_pair.Equals(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg_pair.Equals(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg_pair.Equals(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg_pair.Equals(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg_pair.Equals(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg_pair.Equals(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg_pair.Equals(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(!reg_pair.Equals(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+}
+
+TEST(X86_64ManagedRegister, Overlaps) {
+ X86_64ManagedRegister reg = X86_64ManagedRegister::FromCpuRegister(RAX);
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+
+ reg = X86_64ManagedRegister::FromCpuRegister(RDX);
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+
+ reg = X86_64ManagedRegister::FromCpuRegister(RDI);
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+
+ reg = X86_64ManagedRegister::FromCpuRegister(RBX);
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+
+ reg = X86_64ManagedRegister::FromXmmRegister(XMM0);
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+
+ reg = X86_64ManagedRegister::FromX87Register(ST0);
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RAX_RDX);
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RDX_RCX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RBX_RDI);
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RDX_RBX)));
+
+ reg = X86_64ManagedRegister::FromRegisterPair(RDX_RCX);
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RAX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RBX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromCpuRegister(RDI)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromXmmRegister(XMM7)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST0)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromX87Register(ST7)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RAX_RDX)));
+ EXPECT_TRUE(!reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RBX_RDI)));
+ EXPECT_TRUE(reg.Overlaps(X86_64ManagedRegister::FromRegisterPair(RDX_RBX)));
+}
+
+} // namespace x86_64
+} // namespace art
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index ca2489c..d03a474 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -29,7 +29,7 @@
// Clang's as(1) uses $0, $1, and so on for macro arguments.
#define VAR(name,index) SYMBOL($index)
- #define PLT_VAR(name, index) SYMBOL($index)
+ #define PLT_VAR(name, index) SYMBOL($index)@PLT
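+ // @PLT makes the call go through the procedure linkage table, as required
+ // for position-independent code when the callee may be defined in another
+ // shared object.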
#define REG_VAR(name,index) %$index
#define CALL_MACRO(name,index) $index
#define FUNCTION_TYPE(name,index) .type $index, @function
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 5fbf8cb..0d75a89 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -142,8 +142,13 @@
MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name, 0)
- int3
- int3
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ // Outgoing argument set up
+ mov %rsp, %rdx // pass SP
+ mov %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ mov %rax, %rdi // pass arg1
+ call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ int3 // unreached
END_FUNCTION VAR(c_name, 0)
END_MACRO
diff --git a/runtime/arch/x86_64/registers_x86_64.h b/runtime/arch/x86_64/registers_x86_64.h
index 8b0dc07..b9d06b5 100644
--- a/runtime/arch/x86_64/registers_x86_64.h
+++ b/runtime/arch/x86_64/registers_x86_64.h
@@ -67,7 +67,7 @@
XMM15 = 15,
kNumberOfFloatRegisters = 16
};
-std::ostream& operator<<(std::ostream& os, const FloatRegister& rhs);
+std::ostream& operator<<(std::ostream& os, const Register& rhs);
} // namespace x86_64
} // namespace art