author Andreas Gampe <agampe@google.com> 2014-03-20 00:20:21 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com> 2014-03-20 00:20:21 +0000
commit ba970e18e2e7cd19bb5c3672bff2eae0c11b1795 (patch)
tree d9c76db71eb8dd3a668643781eefe6152888fd1d /compiler
parent 66e4c3e96dccdec7423d673ad6bbf7821a776651 (diff)
parent b95a5345ae4217b70ca36f0cced92f68dda7caf5 (diff)
Merge "AArch64: Add arm64 runtime support."
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk                                    |    1
-rw-r--r--  compiler/common_compiler_test.h                        |    4
-rw-r--r--  compiler/compiled_method.cc                            |    4
-rw-r--r--  compiler/dex/frontend.cc                               |    4
-rw-r--r--  compiler/driver/compiler_driver.cc                     |    2
-rw-r--r--  compiler/elf_writer_quick.cc                           |    5
-rw-r--r--  compiler/jni/quick/arm64/calling_convention_arm64.cc   |  245
-rw-r--r--  compiler/jni/quick/arm64/calling_convention_arm64.h    |   88
-rw-r--r--  compiler/jni/quick/calling_convention.cc               |    5
-rw-r--r--  compiler/trampolines/trampoline_compiler.cc            |   43
-rw-r--r--  compiler/utils/arm64/assembler_arm64.h                 |    1
-rw-r--r--  compiler/utils/arm64/managed_register_arm64.h          |    2
-rw-r--r--  compiler/utils/assembler.h                             |    3
13 files changed, 403 insertions, 4 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index bcd120b413..4eb9ff58f3 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -66,6 +66,7 @@ LIBART_COMPILER_SRC_FILES := \
driver/compiler_driver.cc \
driver/dex_compilation_unit.cc \
jni/quick/arm/calling_convention_arm.cc \
+ jni/quick/arm64/calling_convention_arm64.cc \
jni/quick/mips/calling_convention_mips.cc \
jni/quick/x86/calling_convention_x86.cc \
jni/quick/calling_convention.cc \
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 49c1283809..6aa85d40de 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -300,6 +300,10 @@ class CommonCompilerTest : public CommonRuntimeTest {
// for ARM, do a runtime check to make sure that the features we are passed from
// the build match the features we actually determine at runtime.
ASSERT_EQ(instruction_set_features, runtime_features);
+#elif defined(__aarch64__)
+ instruction_set = kArm64;
+ // TODO: arm64 compilation support.
+ compiler_options_->SetCompilerFilter(CompilerOptions::kInterpretOnly);
#elif defined(__mips__)
instruction_set = kMips;
#elif defined(__i386__)
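
For reference, the #elif chain above selects the target InstructionSet at build time; on aarch64 this patch forces the interpret-only filter because the arm64 Quick backend does not exist yet. A minimal self-contained sketch of the same dispatch (the enum values and the DetectIsa name are illustrative, not part of the patch):

#include <cstdio>

enum InstructionSet { kNone, kArm, kThumb2, kArm64, kX86, kX86_64, kMips };

// Compile-time ISA selection, mirroring the #elif chain in the test harness.
static InstructionSet DetectIsa() {
#if defined(__arm__)
  return kThumb2;
#elif defined(__aarch64__)
  return kArm64;  // New in this patch; paired with kInterpretOnly for now.
#elif defined(__mips__)
  return kMips;
#elif defined(__i386__)
  return kX86;
#elif defined(__x86_64__)
  return kX86_64;
#else
  return kNone;
#endif
}

int main() {
  std::printf("isa enum value: %d\n", static_cast<int>(DetectIsa()));
  return 0;
}
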
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 17c2e94652..344f3ef745 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -86,6 +86,8 @@ uint32_t CompiledCode::AlignCode(uint32_t offset, InstructionSet instruction_set
case kArm:
case kThumb2:
return RoundUp(offset, kArmAlignment);
+ case kArm64:
+ return RoundUp(offset, kArm64Alignment);
case kMips:
return RoundUp(offset, kMipsAlignment);
case kX86: // Fall-through.
@@ -100,6 +102,7 @@ uint32_t CompiledCode::AlignCode(uint32_t offset, InstructionSet instruction_set
size_t CompiledCode::CodeDelta() const {
switch (instruction_set_) {
case kArm:
+ case kArm64:
case kMips:
case kX86:
return 0;
@@ -117,6 +120,7 @@ const void* CompiledCode::CodePointer(const void* code_pointer,
InstructionSet instruction_set) {
switch (instruction_set) {
case kArm:
+ case kArm64:
case kMips:
case kX86:
return code_pointer;
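
AlignCode() above pads an offset to the ISA's code alignment with RoundUp. A sketch of that bit trick, assuming a kArm64Alignment of 16 (the real constant lives in ART's headers):

#include <cassert>
#include <cstdint>

// Round x up to the next multiple of n, where n is a power of two.
static uint32_t RoundUp(uint32_t x, uint32_t n) {
  return (x + n - 1) & ~(n - 1);
}

int main() {
  const uint32_t kArm64Alignment = 16;  // assumed value for illustration
  assert(RoundUp(100, kArm64Alignment) == 112);
  assert(RoundUp(112, kArm64Alignment) == 112);  // already aligned: unchanged
  return 0;
}
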
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 5a26064414..7890d81236 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -157,9 +157,9 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
cu.compiler_driver = &driver;
cu.class_linker = class_linker;
cu.instruction_set = driver.GetInstructionSet();
- cu.target64 = cu.instruction_set == kX86_64;
+ cu.target64 = (cu.instruction_set == kX86_64) || (cu.instruction_set == kArm64);
cu.compiler = compiler;
- // TODO: x86_64 is not yet implemented.
+ // TODO: x86_64 & arm64 are not yet implemented.
DCHECK((cu.instruction_set == kThumb2) ||
(cu.instruction_set == kX86) ||
(cu.instruction_set == kMips));
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e601a1b6b6..59754d5a50 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1871,7 +1871,7 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t
if ((access_flags & kAccNative) != 0) {
// Are we interpreting only and have support for generic JNI down calls?
if ((compiler_options_->GetCompilerFilter() == CompilerOptions::kInterpretOnly) &&
- (instruction_set_ == kX86_64)) {
+ (instruction_set_ == kX86_64 || instruction_set_ == kArm64)) {
// Leaving this empty will trigger the generic JNI version
} else {
compiled_method = compiler_->JniCompile(*this, access_flags, method_idx, dex_file);
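
The branch above decides when a native method gets no compiled stub, so the runtime falls back to generic JNI. Distilled into a standalone predicate (the enums are pared-down stand-ins for ART's types):

#include <cassert>

enum InstructionSet { kArm, kThumb2, kArm64, kX86, kX86_64, kMips };
enum CompilerFilter { kInterpretOnly, kSpeed };

// True when a native method should use the generic JNI trampoline instead
// of a compiled JNI stub, per the condition in CompileMethod() above.
static bool UseGenericJni(CompilerFilter filter, InstructionSet isa) {
  return filter == kInterpretOnly && (isa == kX86_64 || isa == kArm64);
}

int main() {
  assert(UseGenericJni(kInterpretOnly, kArm64));
  assert(!UseGenericJni(kSpeed, kArm64));        // compiled JNI stub instead
  assert(!UseGenericJni(kInterpretOnly, kArm));  // arm still uses the stub
  return 0;
}
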
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index a6daa5d00d..f6a324f8e8 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -372,6 +372,11 @@ bool ElfWriterQuick::Write(OatWriter* oat_writer,
elf_header.e_flags = EF_ARM_EABI_VER5;
break;
}
+ case kArm64: {
+ elf_header.e_machine = EM_AARCH64;
+ elf_header.e_flags = 0;
+ break;
+ }
case kX86: {
elf_header.e_machine = EM_386;
elf_header.e_flags = 0;
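
The new kArm64 case tags the ELF header with EM_AARCH64 (183 in the ELF specification) and no flag bits, since AArch64 defines no EABI variants in e_flags. A compilable sketch using glibc's <elf.h> (whether this writer emits an Elf32 or Elf64 header is outside this hunk):

#include <elf.h>
#include <cassert>

int main() {
  Elf64_Ehdr elf_header = {};
  elf_header.e_machine = EM_AARCH64;
  elf_header.e_flags = 0;
  assert(elf_header.e_machine == 183);  // EM_AARCH64 per the ELF spec
  return 0;
}
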
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
new file mode 100644
index 0000000000..c4d0d451c0
--- /dev/null
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "calling_convention_arm64.h"
+#include "utils/arm64/managed_register_arm64.h"
+
+namespace art {
+namespace arm64 {
+
+// Calling convention
+
+ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+ return Arm64ManagedRegister::FromCoreRegister(IP0); // X16
+}
+
+ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() {
+ return Arm64ManagedRegister::FromCoreRegister(IP0); // X16
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+ if (shorty[0] == 'F') {
+ return Arm64ManagedRegister::FromSRegister(S0);
+ } else if (shorty[0] == 'D') {
+ return Arm64ManagedRegister::FromDRegister(D0);
+ } else if (shorty[0] == 'J') {
+ return Arm64ManagedRegister::FromCoreRegister(X0);
+ } else if (shorty[0] == 'V') {
+ return Arm64ManagedRegister::NoRegister();
+ } else {
+ return Arm64ManagedRegister::FromWRegister(W0);
+ }
+}
+
+ManagedRegister Arm64ManagedRuntimeCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Arm64JniCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Arm64JniCallingConvention::IntReturnRegister() {
+ return Arm64ManagedRegister::FromWRegister(W0);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister Arm64ManagedRuntimeCallingConvention::MethodRegister() {
+ return Arm64ManagedRegister::FromCoreRegister(X0);
+}
+
+bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+ return false; // Everything moved to stack on entry.
+}
+
+bool Arm64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+ return true;
+}
+
+ManagedRegister Arm64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+ LOG(FATAL) << "Should not reach here";
+ return ManagedRegister::NoRegister();
+}
+
+FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+ CHECK(IsCurrentParamOnStack());
+ FrameOffset result =
+ FrameOffset(displacement_.Int32Value() + // displacement
+ kPointerSize + // Method*
+ (itr_slots_ * kPointerSize)); // offset into in args
+ return result;
+}
+
+const std::vector<ManagedRegister>& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
+ // We spill the argument registers on ARM64 to free them up for scratch use, we then assume
+ // all arguments are on the stack.
+ if (entry_spills_.size() == 0) {
+ // TODO Need fp regs spilled too.
+ //
+ size_t num_spills = NumArgs();
+
+ // TODO Floating point need spilling too.
+ if (num_spills > 0) {
+ entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X1));
+ if (num_spills > 1) {
+ entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X2));
+ if (num_spills > 2) {
+ entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X3));
+ if (num_spills > 3) {
+ entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X5));
+ if (num_spills > 4) {
+ entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X6));
+ if (num_spills > 5) {
+ entry_spills_.push_back(Arm64ManagedRegister::FromCoreRegister(X7));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return entry_spills_;
+}
+// JNI calling convention
+
+Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static, bool is_synchronized,
+ const char* shorty)
+ : JniCallingConvention(is_static, is_synchronized, shorty) {
+ // TODO This needs to be converted to 64bit.
+ // Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
+ // or jclass for static methods and the JNIEnv. We start at the aligned register r2.
+// size_t padding = 0;
+// for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) {
+// if (IsParamALongOrDouble(cur_arg)) {
+// if ((cur_reg & 1) != 0) {
+// padding += 4;
+// cur_reg++; // additional bump to ensure alignment
+// }
+// cur_reg++; // additional bump to skip extra long word
+// }
+// cur_reg++; // bump the iterator for every argument
+// }
+// padding_ =0;
+
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X19));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X20));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X21));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X22));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X23));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X24));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X25));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X26));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X27));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X28));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X29));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromCoreRegister(X30));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D8));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D9));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D10));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D11));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D12));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D13));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D14));
+ callee_save_regs_.push_back(Arm64ManagedRegister::FromDRegister(D15));
+}
+
+uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
+ // Compute spill mask to agree with callee saves initialized in the constructor
+ uint32_t result = 0;
+ result = 1 << X19 | 1 << X20 | 1 << X21 | 1 << X22 | 1 << X23 | 1 << X24 | 1 << X25
+ | 1 << X26 | 1 << X27 | 1 << X28 | 1<< X29 | 1 << LR;
+ return result;
+}
+
+ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
+ return Arm64ManagedRegister::FromCoreRegister(X9);
+}
+
+size_t Arm64JniCallingConvention::FrameSize() {
+ // Method*, LR and callee save area size, local reference segment state
+ size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+ // References plus 2 words for SIRT header
+ size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
+ // Plus return value spill area size
+ return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t Arm64JniCallingConvention::OutArgSize() {
+ return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize + padding_,
+ kStackAlignment);
+}
+
+// JniCallingConvention ABI follows AAPCS where longs and doubles must occur
+// in even register numbers and stack slots
+void Arm64JniCallingConvention::Next() {
+ JniCallingConvention::Next();
+ size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+ if ((itr_args_ >= 2) &&
+ (arg_pos < NumArgs()) &&
+ IsParamALongOrDouble(arg_pos)) {
+ // itr_slots_ needs to be an even number, according to AAPCS.
+ if ((itr_slots_ & 0x1u) != 0) {
+ itr_slots_++;
+ }
+ }
+}
+
+bool Arm64JniCallingConvention::IsCurrentParamInRegister() {
+ return itr_slots_ < 4;
+}
+
+bool Arm64JniCallingConvention::IsCurrentParamOnStack() {
+ return !IsCurrentParamInRegister();
+}
+
+// TODO and floating point?
+
+static const Register kJniArgumentRegisters[] = {
+ X0, X1, X2, X3, X4, X5, X6, X7
+};
+ManagedRegister Arm64JniCallingConvention::CurrentParamRegister() {
+ CHECK_LT(itr_slots_, 4u);
+ int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+ // TODO Floating point & 64bit registers.
+ if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) {
+ CHECK_EQ(itr_slots_, 2u);
+ return Arm64ManagedRegister::FromCoreRegister(X1);
+ } else {
+ return
+ Arm64ManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
+ }
+}
+
+FrameOffset Arm64JniCallingConvention::CurrentParamStackOffset() {
+ CHECK_GE(itr_slots_, 4u);
+ size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kPointerSize);
+ CHECK_LT(offset, OutArgSize());
+ return FrameOffset(offset);
+}
+
+size_t Arm64JniCallingConvention::NumberOfOutgoingStackArgs() {
+ size_t static_args = IsStatic() ? 1 : 0; // count jclass
+ // regular argument parameters and this
+ size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+ // count JNIEnv* less arguments in registers
+ return static_args + param_args + 1 - 4;
+}
+
+} // namespace arm64
+} // namespace art
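
Several conventions in this new file reduce to small arithmetic facts. The sketch below re-derives them outside ART: the EntrySpills() nested ifs collapse to a bounded loop (note the register list skips X4, exactly as the hand-unrolled version does), CoreSpillMask() sets twelve bits, Next() bumps an odd slot to even before a long/double, and FrameSize() is plain addition before stack alignment. Every name and constant here is a stand-in for illustration; in particular kPointerSize = 8 and 16-byte stack alignment are assumptions, not values taken from the patch:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

enum Register { X1 = 1, X2, X3, X4, X5, X6, X7 };  // stand-in enum

// EntrySpills(): the hand-unrolled nested ifs, rewritten as a bounded loop.
static std::vector<Register> EntrySpillRegs(size_t num_args) {
  static const Register kRegs[] = { X1, X2, X3, X5, X6, X7 };  // X4 skipped
  size_t n = std::min(num_args, sizeof(kRegs) / sizeof(kRegs[0]));
  return std::vector<Register>(kRegs, kRegs + n);
}

// Next(): a long/double argument must start in an even slot.
static size_t AlignSlotForWide(size_t itr_slots) {
  return (itr_slots & 1u) != 0 ? itr_slots + 1 : itr_slots;
}

int main() {
  assert(EntrySpillRegs(2).size() == 2);  // X1, X2
  assert(EntrySpillRegs(9).size() == 6);  // capped at the six spill registers

  // CoreSpillMask(): one bit per saved core register, X19..X29 plus LR (x30),
  // matching the twelve core registers pushed in the constructor.
  uint32_t mask = 0;
  for (int reg = 19; reg <= 30; ++reg) mask |= 1u << reg;
  assert(__builtin_popcount(mask) == 12);

  assert(AlignSlotForWide(3) == 4);
  assert(AlignSlotForWide(4) == 4);

  // FrameSize(), worked under the assumed values: kPointerSize = 8, twenty
  // callee saves (12 core + 8 fp), 2 SIRT references, 8-byte return value.
  const size_t kPointerSize = 8;
  size_t frame_data = (3 + 20) * kPointerSize;  // Method*, LR, ref state + saves
  size_t sirt = (2 + 2) * kPointerSize;         // references + 2-word SIRT header
  size_t raw = frame_data + sirt + 8;           // + return value spill = 224
  assert(raw % 16 == 0);                        // already 16-byte stack aligned

  // NumberOfOutgoingStackArgs() for a static native `void f(int, long)`:
  // 1 (jclass) + (2 args + 1 wide counted twice) + 1 (JNIEnv*) - 4 in regs.
  assert(1 + 3 + 1 - 4 == 1);
  return 0;
}
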
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
new file mode 100644
index 0000000000..b4d050275d
--- /dev/null
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
+#define ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace arm64 {
+
+class Arm64ManagedRuntimeCallingConvention : public ManagedRuntimeCallingConvention {
+ public:
+ Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+ : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+ virtual ~Arm64ManagedRuntimeCallingConvention() {}
+ // Calling convention
+ virtual ManagedRegister ReturnRegister();
+ virtual ManagedRegister InterproceduralScratchRegister();
+ // Managed runtime calling convention
+ virtual ManagedRegister MethodRegister();
+ virtual bool IsCurrentParamInRegister();
+ virtual bool IsCurrentParamOnStack();
+ virtual ManagedRegister CurrentParamRegister();
+ virtual FrameOffset CurrentParamStackOffset();
+ virtual const std::vector<ManagedRegister>& EntrySpills();
+
+ private:
+ std::vector<ManagedRegister> entry_spills_;
+
+ DISALLOW_COPY_AND_ASSIGN(Arm64ManagedRuntimeCallingConvention);
+};
+
+class Arm64JniCallingConvention : public JniCallingConvention {
+ public:
+ explicit Arm64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+ virtual ~Arm64JniCallingConvention() {}
+ // Calling convention
+ virtual ManagedRegister ReturnRegister();
+ virtual ManagedRegister IntReturnRegister();
+ virtual ManagedRegister InterproceduralScratchRegister();
+ // JNI calling convention
+ virtual void Next(); // Override default behavior for AAPCS
+ virtual size_t FrameSize();
+ virtual size_t OutArgSize();
+ virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const {
+ return callee_save_regs_;
+ }
+ virtual ManagedRegister ReturnScratchRegister() const;
+ virtual uint32_t CoreSpillMask() const;
+ virtual uint32_t FpSpillMask() const {
+ return 0; // Floats aren't spilled in JNI down call
+ }
+ virtual bool IsCurrentParamInRegister();
+ virtual bool IsCurrentParamOnStack();
+ virtual ManagedRegister CurrentParamRegister();
+ virtual FrameOffset CurrentParamStackOffset();
+
+ protected:
+ virtual size_t NumberOfOutgoingStackArgs();
+
+ private:
+ // TODO: these values aren't unique and can be shared amongst instances
+ std::vector<ManagedRegister> callee_save_regs_;
+
+ // Padding to ensure longs and doubles are not split in AAPCS
+ size_t padding_;
+
+ DISALLOW_COPY_AND_ASSIGN(Arm64JniCallingConvention);
+};
+
+} // namespace arm64
+} // namespace art
+
+#endif // ART_COMPILER_JNI_QUICK_ARM64_CALLING_CONVENTION_ARM64_H_
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index ac962af9e6..5856df4bc1 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -18,6 +18,7 @@
#include "base/logging.h"
#include "jni/quick/arm/calling_convention_arm.h"
+#include "jni/quick/arm64/calling_convention_arm64.h"
#include "jni/quick/mips/calling_convention_mips.h"
#include "jni/quick/x86/calling_convention_x86.h"
#include "utils.h"
@@ -37,6 +38,8 @@ ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
case kArm:
case kThumb2:
return new arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ case kArm64:
+ return new arm64::Arm64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kMips:
return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kX86:
@@ -91,6 +94,8 @@ JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synch
case kArm:
case kThumb2:
return new arm::ArmJniCallingConvention(is_static, is_synchronized, shorty);
+ case kArm64:
+ return new arm64::Arm64JniCallingConvention(is_static, is_synchronized, shorty);
case kMips:
return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
case kX86:
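
Both factories follow a plain switch-on-ISA dispatch, and each backend contributes one case; the two hunks above register the arm64 conventions. A pared-down standalone version of the pattern (all types here are stand-ins, not ART's):

#include <memory>

enum InstructionSet { kArm, kThumb2, kArm64, kMips, kX86 };

struct CallingConvention { virtual ~CallingConvention() {} };
struct ArmConvention : CallingConvention {};
struct Arm64Convention : CallingConvention {};

// Switch-on-ISA factory, as in JniCallingConvention::Create() above.
static std::unique_ptr<CallingConvention> Create(InstructionSet isa) {
  switch (isa) {
    case kArm:
    case kThumb2:
      return std::unique_ptr<CallingConvention>(new ArmConvention);
    case kArm64:
      return std::unique_ptr<CallingConvention>(new Arm64Convention);
    default:
      return nullptr;
  }
}

int main() {
  return Create(kArm64) != nullptr ? 0 : 1;
}
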
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 3e13e44397..4dffef9f05 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -18,6 +18,7 @@
#include "jni_internal.h"
#include "utils/arm/assembler_arm.h"
+#include "utils/arm64/assembler_arm64.h"
#include "utils/mips/assembler_mips.h"
#include "utils/x86/assembler_x86.h"
@@ -53,6 +54,46 @@ static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention
}
} // namespace arm
+namespace arm64 {
+static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
+ ThreadOffset offset) {
+ UniquePtr<Arm64Assembler> assembler(static_cast<Arm64Assembler*>(Assembler::Create(kArm64)));
+
+ switch (abi) {
+ case kInterpreterAbi: // Thread* is first argument (X0) in interpreter ABI.
+ // FIXME IPx used by VIXL - this is unsafe.
+ __ Call(Arm64ManagedRegister::FromCoreRegister(X0), Offset(offset.Int32Value()),
+ Arm64ManagedRegister::FromCoreRegister(IP1));
+
+ break;
+ case kJniAbi: // Load via Thread* held in JNIEnv* in first argument (X0).
+
+ __ LoadRawPtr(Arm64ManagedRegister::FromCoreRegister(IP1),
+ Arm64ManagedRegister::FromCoreRegister(X0),
+ Offset(JNIEnvExt::SelfOffset().Int32Value()));
+
+ // FIXME IPx used by VIXL - this is unsafe.
+ __ Call(Arm64ManagedRegister::FromCoreRegister(IP1), Offset(offset.Int32Value()),
+ Arm64ManagedRegister::FromCoreRegister(IP0));
+
+ break;
+ case kPortableAbi: // X18 holds Thread*.
+ case kQuickAbi: // Fall-through.
+ __ Call(Arm64ManagedRegister::FromCoreRegister(TR), Offset(offset.Int32Value()),
+ Arm64ManagedRegister::FromCoreRegister(IP0));
+
+ break;
+ }
+
+ size_t cs = assembler->CodeSize();
+ UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+ MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+ assembler->FinalizeInstructions(code);
+
+ return entry_stub.release();
+}
+} // namespace arm64
+
namespace mips {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
ThreadOffset offset) {
@@ -123,6 +164,8 @@ const std::vector<uint8_t>* CreateTrampoline(InstructionSet isa, EntryPointCalli
case kArm:
case kThumb2:
return arm::CreateTrampoline(abi, offset);
+ case kArm64:
+ return arm64::CreateTrampoline(abi, offset);
case kMips:
return mips::CreateTrampoline(abi, offset);
case kX86:
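
Each arm64 trampoline above is "load an entrypoint pointer from a known base at a fixed offset, then branch", with the base differing per ABI: Thread* arrives in X0 for the interpreter, is reached through JNIEnv* for JNI, and sits in the dedicated thread register (X18/TR) for portable and quick; the patch's own FIXMEs note that routing the call through IP0/IP1 clashes with VIXL's use of those registers. A purely illustrative C++ model of that shape (the real thing is arm64 machine code emitted through Arm64Assembler):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

using Entrypoint = void (*)();

static void HelloEntrypoint() { std::puts("entrypoint reached"); }

// Load a function pointer at a fixed offset from a base, then call it --
// the shape every trampoline above emits as machine code.
static Entrypoint LoadEntrypoint(const void* thread_base, size_t offset) {
  const uint8_t* base = static_cast<const uint8_t*>(thread_base);
  return *reinterpret_cast<const Entrypoint*>(base + offset);
}

int main() {
  // Fake "Thread" with one entrypoint slot at offset 0.
  struct FakeThread { Entrypoint slot; } thread = { HelloEntrypoint };
  LoadEntrypoint(&thread, offsetof(FakeThread, slot))();
  return 0;
}
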
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 70df252114..2bada3fc9e 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_
#include <vector>
+#include <stdint.h>
#include "base/logging.h"
#include "constants_arm64.h"
diff --git a/compiler/utils/arm64/managed_register_arm64.h b/compiler/utils/arm64/managed_register_arm64.h
index 5df37cc12e..80f17f5eb1 100644
--- a/compiler/utils/arm64/managed_register_arm64.h
+++ b/compiler/utils/arm64/managed_register_arm64.h
@@ -24,7 +24,7 @@
namespace art {
namespace arm64 {
-const int kNumberOfCoreRegIds = kNumberOfCoreRegisters;
+const int kNumberOfCoreRegIds = 32;
const int kNumberOfWRegIds = kNumberOfWRegisters;
const int kNumberOfDRegIds = kNumberOfDRegisters;
const int kNumberOfSRegIds = kNumberOfSRegisters;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index f02c20f208..cd4fc12e33 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -38,6 +38,9 @@ class AssemblerFixup;
namespace arm {
class ArmAssembler;
}
+namespace arm64 {
+ class Arm64Assembler;
+}
namespace mips {
class MipsAssembler;
}