Trampoline and assembly fixes for ARM64
Trampolines need a jump, not a call. Expose br in the ARM64
assembler to allow this.
The resolution trampoline is called with the Quick ABI and will
continue on to a Quick ABI function, so the method pointer must be
in x0.
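
For illustration only, the plain-C++ analogy below (not ART code; the
type and function names are invented for this sketch) shows why the
transfer has to behave like a jump rather than a call: the resolved
code must see the same first argument (the method pointer, x0 under
the Quick ABI) and must return to the trampoline's caller, not into
the trampoline itself.

  // Illustrative C++ analogy only -- not ART code. All names here are
  // invented for the example.
  using QuickEntry = int (*)(void* method, int arg);

  static int ResolvedMethod(void* /*method*/, int arg) { return arg + 1; }

  // Stand-in for the lookup the real trampoline performs through Thread*.
  static QuickEntry Resolve(void* /*method*/) { return ResolvedMethod; }

  static int ResolutionTrampoline(void* method, int arg) {
    QuickEntry entry = Resolve(method);
    // In the real trampoline this transfer is a `br`: the resolved code
    // sees the same method pointer in x0 and x30 still names the original
    // caller, so it returns there directly.  A `blr` would overwrite x30
    // and make the resolved code return into the trampoline instead.
    return entry(method, arg);
  }

  int main() {
    int dummy_method = 0;
    return ResolutionTrampoline(&dummy_method, 41) == 42 ? 0 : 1;
  }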
Change-Id: I4e383b59d6c40a659d324a7faef3fadf0c890178
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 4dffef9..32980cb 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -62,7 +62,7 @@
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (X0) in interpreter ABI.
// FIXME IPx used by VIXL - this is unsafe.
- __ Call(Arm64ManagedRegister::FromCoreRegister(X0), Offset(offset.Int32Value()),
+ __ JumpTo(Arm64ManagedRegister::FromCoreRegister(X0), Offset(offset.Int32Value()),
Arm64ManagedRegister::FromCoreRegister(IP1));
break;
@@ -73,13 +73,13 @@
Offset(JNIEnvExt::SelfOffset().Int32Value()));
// FIXME IPx used by VIXL - this is unsafe.
- __ Call(Arm64ManagedRegister::FromCoreRegister(IP1), Offset(offset.Int32Value()),
+ __ JumpTo(Arm64ManagedRegister::FromCoreRegister(IP1), Offset(offset.Int32Value()),
Arm64ManagedRegister::FromCoreRegister(IP0));
break;
case kPortableAbi: // X18 holds Thread*.
case kQuickAbi: // Fall-through.
- __ Call(Arm64ManagedRegister::FromCoreRegister(TR), Offset(offset.Int32Value()),
+ __ JumpTo(Arm64ManagedRegister::FromCoreRegister(TR), Offset(offset.Int32Value()),
Arm64ManagedRegister::FromCoreRegister(IP0));
break;
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index b364ba0..00ce923 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -468,6 +468,15 @@
___ Blr(reg_x(scratch.AsCoreRegister()));
}
+void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
+ Arm64ManagedRegister base = m_base.AsArm64();
+ Arm64ManagedRegister scratch = m_scratch.AsArm64();
+ CHECK(base.IsCoreRegister()) << base;
+ CHECK(scratch.IsCoreRegister()) << scratch;
+ LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
+ ___ Br(reg_x(scratch.AsCoreRegister()));
+}
+
void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 2bada3f..1c47e77 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -204,6 +204,9 @@
void Call(FrameOffset base, Offset offset, ManagedRegister scratch);
void Call(ThreadOffset offset, ManagedRegister scratch);
+ // Jump to address (not setting link register)
+ void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
+
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);