author Chris Jones <christopher.jones@arm.com> 2024-01-05 16:30:56 +0000
committer Vladimír Marko <vmarko@google.com> 2024-08-01 14:13:33 +0000
commit 3ea38e1d3e11417bb7f86a5dcd24187c299dcd73 (patch)
tree 8cc043db0fde48cb70f19b065a2fbb0edb46606f
parent 0a8d0cd9a4648619fd910964cfe1d6101d881968 (diff)
Rework exception delivery and deoptimization
Both exception delivery (various methods calling Thread::QuickDeliverException()) and deoptimization (via artDeoptimizeImpl) use QuickExceptionHandler to find the target context and do a long jump to it via QuickExceptionHandler::DoLongJump. The long jump is done directly from the C++ code, so the frames of the related C++ method are still on the stack before the change of the pc. Note that all those methods are marked as NO_RETURN to reflect that. This patch changes the approach; instead of having the long jump directly from the C++ methods related to exceptions and deoptimization, those methods now only prepare the long jump context and return. So their callers (mainly .S quick entry points and stubs) now need to do a long jump explicitly; thus there will be no C++ frames on the stack before the jump. This approach makes it possible to support exceptions and deoptimization in simulator mode; so we don't need to unwind native (C++ methods' frames) and simulated stacks at the same. Authors: Artem Serov <artem.serov@linaro.org>, Chris Jones <christopher.jones@arm.com> Test: test.py --host --target Change-Id: I5f90e6b5ba152fc2205728f1e814bbe3d609af9d
-rw-r--r--  compiler/utils/assembler_thumb_test_expected.cc.inc | 2
-rw-r--r--  runtime/arch/arm/asm_support_arm.S | 2
-rw-r--r--  runtime/arch/arm/asm_support_arm.h | 7
-rw-r--r--  runtime/arch/arm/context_arm.cc | 9
-rw-r--r--  runtime/arch/arm/context_arm.h | 2
-rw-r--r--  runtime/arch/arm/jni_entrypoints_arm.S | 8
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 100
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.S | 1
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.h | 7
-rw-r--r--  runtime/arch/arm64/context_arm64.cc | 10
-rw-r--r--  runtime/arch/arm64/context_arm64.h | 2
-rw-r--r--  runtime/arch/arm64/jni_entrypoints_arm64.S | 8
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 171
-rw-r--r--  runtime/arch/context.cc | 6
-rw-r--r--  runtime/arch/context.h | 8
-rw-r--r--  runtime/arch/riscv64/asm_support_riscv64.S | 5
-rw-r--r--  runtime/arch/riscv64/asm_support_riscv64.h | 7
-rw-r--r--  runtime/arch/riscv64/context_riscv64.cc | 8
-rw-r--r--  runtime/arch/riscv64/context_riscv64.h | 2
-rw-r--r--  runtime/arch/riscv64/jni_entrypoints_riscv64.S | 6
-rw-r--r--  runtime/arch/riscv64/quick_entrypoints_riscv64.S | 219
-rw-r--r--  runtime/arch/x86/asm_support_x86.S | 2
-rw-r--r--  runtime/arch/x86/asm_support_x86.h | 7
-rw-r--r--  runtime/arch/x86/context_x86.cc | 25
-rw-r--r--  runtime/arch/x86/context_x86.h | 2
-rw-r--r--  runtime/arch/x86/jni_entrypoints_x86.S | 2
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 136
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.S | 2
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.h | 7
-rw-r--r--  runtime/arch/x86_64/context_x86_64.cc | 9
-rw-r--r--  runtime/arch/x86_64/context_x86_64.h | 2
-rw-r--r--  runtime/arch/x86_64/jni_entrypoints_x86_64.S | 10
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 136
-rw-r--r--  runtime/art_method.cc | 3
-rw-r--r--  runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc | 20
-rw-r--r--  runtime/entrypoints/quick/quick_thread_entrypoints.cc | 12
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc | 59
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 35
-rw-r--r--  runtime/entrypoints_order_test.cc | 10
-rw-r--r--  runtime/instrumentation.cc | 18
-rw-r--r--  runtime/instrumentation.h | 14
-rw-r--r--  runtime/quick_exception_handler.cc | 16
-rw-r--r--  runtime/quick_exception_handler.h | 11
-rw-r--r--  runtime/thread.cc | 41
-rw-r--r--  runtime/thread.h | 32
45 files changed, 809 insertions(+), 392 deletions(-)
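The entrypoint changes below all follow one pattern. Condensed from the ARM64 hunks (the other architectures are analogous, using bkpt/unimp/UNREACHABLE in place of brk 0):

    Before:
        bl   \cxx_name                  // NO_RETURN: the C++ code long-jumps itself
        brk  0

    After:
        bl   \cxx_name                  // now returns the Context* in x0
        bl   art_quick_do_long_jump     // (Context*) performs the jump from .S code
        brk  0                          // Unreached

Correspondingly, art_quick_do_long_jump now takes the Context* instead of raw gprs_/fprs_ buffers: it reserves stack space, calls artContextCopyForLongJump to fill it (which also frees the context), and then restores the registers from its own stack.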
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 5184b2c897..f96a9bc154 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -154,7 +154,7 @@ const char* const VixlJniHelpersResults = {
" 210: f8d9 8020 ldr.w r8, [r9, #32]\n"
" 214: 4770 bx lr\n"
" 216: f8d9 0094 ldr.w r0, [r9, #148]\n"
- " 21a: f8d9 e2c0 ldr.w lr, [r9, #704]\n"
+ " 21a: f8d9 e2bc ldr.w lr, [r9, #700]\n"
" 21e: 47f0 blx lr\n"
};
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index 2ab9918f58..557f53acd0 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -287,6 +287,8 @@
.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
mov r0, rSELF @ pass Thread::Current
bl artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*)
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
.endm
/*
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index 063270fab5..09c1609152 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -64,4 +64,11 @@
// The offset of the MOV from the return address in LR for intrinsic CAS.
#define BAKER_MARK_INTROSPECTION_INTRINSIC_CAS_MOV_OFFSET (-8)
+// Size of Context::gprs_.
+#define ARM_LONG_JUMP_GPRS_SIZE 64
+// Size of Context::fprs_.
+#define ARM_LONG_JUMP_FPRS_SIZE 128
+// Size of Context::gprs_ + Context::fprs_.
+#define ARM_LONG_JUMP_CONTEXT_SIZE (ARM_LONG_JUMP_GPRS_SIZE + ARM_LONG_JUMP_FPRS_SIZE)
+
#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
diff --git a/runtime/arch/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
index 4e880e4bf6..50aec3331b 100644
--- a/runtime/arch/arm/context_arm.cc
+++ b/runtime/arch/arm/context_arm.cc
@@ -97,11 +97,7 @@ void ArmContext::SmashCallerSaves() {
fprs_[S15] = nullptr;
}
-extern "C" NO_RETURN void art_quick_do_long_jump(uint32_t*, uint32_t*);
-
-void ArmContext::DoLongJump() {
- uintptr_t gprs[kNumberOfCoreRegisters];
- uint32_t fprs[kNumberOfSRegisters];
+void ArmContext::CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) {
for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) {
gprs[i] = gprs_[i] != nullptr ? *gprs_[i] : kBadGprBase + i;
}
@@ -110,8 +106,7 @@ void ArmContext::DoLongJump() {
}
// Ensure the Thread Register contains the address of the current thread.
DCHECK_EQ(reinterpret_cast<uintptr_t>(Thread::Current()), gprs[TR]);
- // The Marking Register will be updated by art_quick_do_long_jump.
- art_quick_do_long_jump(gprs, fprs);
+ // The Marking Register will be updated after return by art_quick_do_long_jump.
}
} // namespace arm
diff --git a/runtime/arch/arm/context_arm.h b/runtime/arch/arm/context_arm.h
index 592d01b380..62ebc33f5d 100644
--- a/runtime/arch/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -86,7 +86,7 @@ class ArmContext final : public Context {
void SetFPR(uint32_t reg, uintptr_t value) override;
void SmashCallerSaves() override;
- NO_RETURN void DoLongJump() override;
+ void CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) override;
private:
// Pointers to register locations, initialized to null or the specific registers below.
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
index 95b5eb5c62..8c80c338dd 100644
--- a/runtime/arch/arm/jni_entrypoints_arm.S
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -389,10 +389,12 @@ ENTRY art_jni_lock_object_no_inline
ldr lr, [sp, #(MANAGED_ARGS_R4_LR_SAVE_SIZE - 4)]
.cfi_restore lr
DECREASE_FRAME MANAGED_ARGS_R4_LR_SAVE_SIZE
- // Make a tail call to `artDeliverPendingExceptionFromCode()`.
+ // Make a call to `artDeliverPendingExceptionFromCode()`.
// Rely on the JNI transition frame constructed in the JNI stub.
- mov r0, rSELF @ Pass Thread::Current().
- b artDeliverPendingExceptionFromCode @ (Thread*)
+ mov r0, rSELF @ Pass Thread::Current().
+ bl artDeliverPendingExceptionFromCode @ (Thread*)
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END art_jni_lock_object_no_inline
/*
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 6f4bf8ce2b..6d5b7b8a68 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -138,9 +138,11 @@
.macro RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION is_ref = 0
// Use R2 to allow returning 64-bit values in R0-R1.
ldr r2, [rSELF, # THREAD_EXCEPTION_OFFSET] // Get exception field.
+ CFI_REMEMBER_STATE
cbnz r2, 1f
DEOPT_OR_RETURN r2, \is_ref // Check if deopt is required
1:
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
DELIVER_PENDING_EXCEPTION
.endm
@@ -158,9 +160,19 @@
mov r1, r0 // pass the result
mov r0, rSELF // Thread::Current
bl artDeoptimizeIfNeeded
+
+ CFI_REMEMBER_STATE
+ cbnz r0, 3f
+
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
bx lr
+
+3:
+ // Deoptimize
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump // (Context*)
+ bkpt // Unreached
.endm
.macro DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_R0 temp, is_ref
@@ -177,11 +189,19 @@
mov r1, r0 // pass the result
mov r0, rSELF // Thread::Current
bl artDeoptimizeIfNeeded
+
CFI_REMEMBER_STATE
+ cbnz r0, 3f
+
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
bx lr
+
+3:
+ // Deoptimize
CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump // (Context*)
+ bkpt // Unreached
.endm
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
@@ -190,6 +210,8 @@ ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0 @ save all registers as basis for long jump context
mov r0, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END \c_name
.endm
@@ -199,6 +221,8 @@ ENTRY \c_name
SETUP_SAVE_EVERYTHING_FRAME r0 @ save all registers as basis for long jump context
mov r0, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END \c_name
.endm
@@ -208,6 +232,8 @@ ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r1 @ save all registers as basis for long jump context
mov r1, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END \c_name
.endm
@@ -217,20 +243,26 @@ ENTRY \c_name
SETUP_SAVE_EVERYTHING_FRAME r2 @ save all registers as basis for long jump context
mov r2, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END \c_name
.endm
.macro RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER
+ CFI_REMEMBER_STATE
cbnz r0, 1f @ result non-zero branch over
DEOPT_OR_RETURN r1
1:
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
DELIVER_PENDING_EXCEPTION
.endm
.macro RETURN_OR_DEOPT_IF_RESULT_IS_NON_NULL_OR_DELIVER
+ CFI_REMEMBER_STATE
cbz r0, 1f @ result zero branch over
DEOPT_OR_RETURN r1, /* is_ref= */ 1
1:
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
DELIVER_PENDING_EXCEPTION
.endm
@@ -316,6 +348,8 @@ ENTRY art_quick_throw_null_pointer_exception_from_signal
mov r0, lr @ pass the fault address stored in LR by the fault handler.
mov r1, rSELF @ pass Thread::Current
bl artThrowNullPointerExceptionFromSignal @ (Thread*)
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END art_quick_throw_null_pointer_exception_from_signal
/*
@@ -515,13 +549,22 @@ ENTRY art_quick_osr_stub
END art_quick_osr_stub
/*
- * On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_.
- * Both must reside on the stack, between current SP and target SP.
+ * On entry r0 is the long jump context. This is expected to be returned from a previous
+ * entrypoint call which threw an exception or deoptimized.
* The r12 (IP) shall be clobbered rather than retrieved from gprs_.
*/
ARM_ENTRY art_quick_do_long_jump
- vldm r1, {s0-s31} @ Load all fprs from argument fprs_.
- mov sp, r0 @ Make SP point to gprs_.
+ // Reserve space for the gprs + fprs;
+ INCREASE_FRAME ARM_LONG_JUMP_CONTEXT_SIZE
+
+ mov r1, sp
+ add r2, sp, #ARM_LONG_JUMP_GPRS_SIZE
+
+ bl artContextCopyForLongJump // Context* context, uintptr_t* gprs, uintptr_t* fprs
+
+ add r0, sp, #ARM_LONG_JUMP_GPRS_SIZE
+
+ vldm r0, {s0-s31} @ Load all fprs from argument fprs_.
@ Do not access fprs_ from now, they may be below SP.
ldm sp, {r0-r11} @ load r0-r11 from gprs_.
ldr r12, [sp, #60] @ Load the value of PC (r15) from gprs_ (60 = 4 * 15) into IP (r12).
@@ -630,7 +673,8 @@ ENTRY art_quick_check_instance_of
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context
mov r2, rSELF @ pass Thread::Current
bl artThrowClassCastExceptionForObject @ (Object*, Class*, Thread*)
- bkpt
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END art_quick_check_instance_of
// Restore rReg's value from [sp, #offset] if rReg is not the same as rExclude.
@@ -781,7 +825,8 @@ ENTRY art_quick_aput_obj
mov r1, r2
mov r2, rSELF @ Pass Thread::Current.
bl artThrowArrayStoreException @ (Class*, Class*, Thread*)
- bkpt @ Unreachable.
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
@@ -1252,9 +1297,19 @@ ENTRY art_quick_test_suspend
SETUP_SAVE_EVERYTHING_FRAME r0, RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET @ save everything for GC stack crawl
mov r0, rSELF
bl artTestSuspendFromCode @ (Thread*)
+
+ CFI_REMEMBER_STATE
+ cbnz r0, .Ltest_suspend_deoptimize
+
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
bx lr
+
+.Ltest_suspend_deoptimize:
+ // Deoptimize
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END art_quick_test_suspend
.extern artImplicitSuspendFromCode
@@ -1262,9 +1317,19 @@ ENTRY art_quick_implicit_suspend
mov r0, rSELF
SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves for stack crawl
bl artImplicitSuspendFromCode @ (Thread*)
+
+ CFI_REMEMBER_STATE
+ cbnz r0, .Limplicit_suspend_deopt
+
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
bx lr
+
+.Limplicit_suspend_deopt:
+ // Deoptimize
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_REFS_ONLY
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END art_quick_implicit_suspend
/*
@@ -1488,6 +1553,8 @@ ENTRY art_quick_deoptimize_from_compiled_code
SETUP_SAVE_EVERYTHING_FRAME r1
mov r1, rSELF @ pass Thread::Current
blx artDeoptimizeFromCompiledCode @ (DeoptimizationKind, Thread*)
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END art_quick_deoptimize_from_compiled_code
/*
@@ -2389,9 +2456,19 @@ ENTRY art_quick_method_entry_hook
mov r1, rSELF @ pass Thread::Current
mov r2, sp @ pass SP
bl artMethodEntryHook @ (ArtMethod*, Thread*, SP)
+
+ CFI_REMEMBER_STATE
+ cbnz r0, .Lentryhook_deopt
+
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
blx lr
+
+.Lentryhook_deopt:
+ // Deoptimize
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END art_quick_method_entry_hook
ENTRY art_quick_method_exit_hook
@@ -2406,9 +2483,18 @@ ENTRY art_quick_method_exit_hook
mov r0, rSELF @ pass Thread::Current
blx artMethodExitHook @ (Thread*, ArtMethod**, gpr_res*, fpr_res*,
@ frame_size)
-
DECREASE_FRAME 8 @ pop arguments on stack
+
+ CFI_REMEMBER_STATE
+ cbnz r0, .Lexithook_deopt_or_exception
+
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
blx lr
+
+.Lexithook_deopt_or_exception:
+ // Deoptimize or exception thrown.
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump @ (Context*)
+ bkpt // Unreached
END art_quick_method_exit_hook
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index 25f84e7c61..56ce037b1a 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -369,6 +369,7 @@
// Point of no return.
bl artDeliverPendingExceptionFromCode // artDeliverPendingExceptionFromCode(Thread*)
+ bl art_quick_do_long_jump // (Context*)
brk 0 // Unreached
.endm
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index 39a82965cd..00c7b5776c 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -54,4 +54,11 @@
// The offset of the reference load LDR from the return address in LR for GC root loads.
#define BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET (-8)
+// Size of Context::gprs_.
+#define ARM64_LONG_JUMP_GPRS_SIZE 272
+// Size of Context::fprs_.
+#define ARM64_LONG_JUMP_FPRS_SIZE 256
+// Size of Context::gprs_ + Context::fprs_.
+#define ARM64_LONG_JUMP_CONTEXT_SIZE (ARM64_LONG_JUMP_GPRS_SIZE + ARM64_LONG_JUMP_FPRS_SIZE)
+
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
diff --git a/runtime/arch/arm64/context_arm64.cc b/runtime/arch/arm64/context_arm64.cc
index 96cd101fe8..77009e4932 100644
--- a/runtime/arch/arm64/context_arm64.cc
+++ b/runtime/arch/arm64/context_arm64.cc
@@ -129,8 +129,6 @@ void Arm64Context::SmashCallerSaves() {
fprs_[D31] = nullptr;
}
-extern "C" NO_RETURN void art_quick_do_long_jump(uint64_t*, uint64_t*);
-
#if defined(__aarch64__) && defined(__BIONIC__) && defined(M_MEMTAG_STACK_IS_ON)
static inline __attribute__((no_sanitize("memtag"))) void untag_memory(void* from, void* to) {
__asm__ __volatile__(
@@ -145,10 +143,7 @@ static inline __attribute__((no_sanitize("memtag"))) void untag_memory(void* from, void* to)
}
#endif
-__attribute__((no_sanitize("memtag"))) void Arm64Context::DoLongJump() {
- uint64_t gprs[arraysize(gprs_)];
- uint64_t fprs[kNumberOfDRegisters];
-
+void Arm64Context::CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) {
// The long jump routine called below expects to find the value for SP at index 31.
DCHECK_EQ(SP, 31);
@@ -170,8 +165,7 @@ __attribute__((no_sanitize("memtag"))) void Arm64Context::DoLongJump() {
// Tell HWASan about the new stack top.
if (__hwasan_handle_longjmp != nullptr)
__hwasan_handle_longjmp(reinterpret_cast<void*>(gprs[SP]));
- // The Marking Register will be updated by art_quick_do_long_jump.
- art_quick_do_long_jump(gprs, fprs);
+ // The Marking Register will be updated after return by art_quick_do_long_jump.
}
} // namespace arm64
diff --git a/runtime/arch/arm64/context_arm64.h b/runtime/arch/arm64/context_arm64.h
index 0a284c93d8..c537365cd6 100644
--- a/runtime/arch/arm64/context_arm64.h
+++ b/runtime/arch/arm64/context_arm64.h
@@ -87,7 +87,7 @@ class Arm64Context final : public Context {
void SetFPR(uint32_t reg, uintptr_t value) override;
void SmashCallerSaves() override;
- NO_RETURN void DoLongJump() override;
+ void CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) override;
static constexpr size_t kPC = kNumberOfXRegisters;
diff --git a/runtime/arch/arm64/jni_entrypoints_arm64.S b/runtime/arch/arm64/jni_entrypoints_arm64.S
index c39dd0a8a7..2bfb02d062 100644
--- a/runtime/arch/arm64/jni_entrypoints_arm64.S
+++ b/runtime/arch/arm64/jni_entrypoints_arm64.S
@@ -428,10 +428,12 @@ ENTRY art_jni_lock_object_no_inline
1:
// All args are irrelevant when throwing an exception. Remove the spill area.
DECREASE_FRAME (ALL_ARGS_SIZE + /*padding*/ 8 + /*LR*/ 8)
- // Make a tail call to `artDeliverPendingExceptionFromCode()`.
+ // Make a call to `artDeliverPendingExceptionFromCode()`.
// Rely on the JNI transition frame constructed in the JNI stub.
- mov x0, xSELF // Pass Thread::Current().
- b artDeliverPendingExceptionFromCode // (Thread*)
+ mov x0, xSELF // Pass Thread::Current().
+ bl artDeliverPendingExceptionFromCode // (Thread*)
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END art_jni_lock_object_no_inline
/*
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 8b1aef8494..2600211649 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -198,9 +198,11 @@
.macro RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION is_ref = 0
ldr x1, [xSELF, # THREAD_EXCEPTION_OFFSET] // Get exception field.
+ CFI_REMEMBER_STATE
cbnz x1, 1f
DEOPT_OR_RETURN x1, \is_ref // Check if deopt is required
1: // deliver exception on current thread
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
DELIVER_PENDING_EXCEPTION
.endm
@@ -218,9 +220,19 @@
mov x1, x0 // pass the result
mov x0, xSELF // Thread::Current
bl artDeoptimizeIfNeeded
+
+ CFI_REMEMBER_STATE
+ cbnz x0, 3f
+
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
ret
+
+3:
+ // Deoptimize.
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
.endm
.macro DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_X0 temp, is_ref
@@ -237,18 +249,28 @@
mov x1, x0 // pass the result
mov x0, xSELF // Thread::Current
bl artDeoptimizeIfNeeded
+
CFI_REMEMBER_STATE
+ cbnz x0, 3f
+
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
ret
+
+3:
+ // Deoptimize.
CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
.endm
.macro RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER
+ CFI_REMEMBER_STATE
cbnz w0, 1f // result non-zero branch over
DEOPT_OR_RETURN x1
1:
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
DELIVER_PENDING_EXCEPTION
.endm
@@ -258,7 +280,8 @@ ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov x0, xSELF // pass Thread::Current
bl \cxx_name // \cxx_name(Thread*)
- brk 0
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END \c_name
.endm
@@ -268,7 +291,8 @@ ENTRY \c_name
SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context
mov x0, xSELF // pass Thread::Current
bl \cxx_name // \cxx_name(Thread*)
- brk 0
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END \c_name
.endm
@@ -278,7 +302,8 @@ ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context.
mov x1, xSELF // pass Thread::Current.
bl \cxx_name // \cxx_name(arg, Thread*).
- brk 0
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END \c_name
.endm
@@ -288,7 +313,8 @@ ENTRY \c_name
SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
bl \cxx_name // \cxx_name(arg1, arg2, Thread*)
- brk 0
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END \c_name
.endm
@@ -320,7 +346,8 @@ ENTRY art_quick_throw_null_pointer_exception_from_signal
mov x0, lr // pass the fault address stored in LR by the fault handler.
mov x1, xSELF // pass Thread::Current.
bl artThrowNullPointerExceptionFromSignal // (arg, Thread*).
- brk 0
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END art_quick_throw_null_pointer_exception_from_signal
/*
@@ -776,53 +803,60 @@ ENTRY art_quick_osr_stub
END art_quick_osr_stub
/*
- * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_.
- * Both must reside on the stack, between current SP and target SP.
+ * On entry x0 is the long jump context. This is expected to be returned from a previous
+ * entrypoint call which threw an exception or deoptimized.
* IP0 and IP1 shall be clobbered rather than retrieved from gprs_.
*/
ENTRY art_quick_do_long_jump
+ // Reserve space for the gprs + fprs;
+ INCREASE_FRAME ARM64_LONG_JUMP_CONTEXT_SIZE
+
+ mov x1, sp
+ add x2, sp, #ARM64_LONG_JUMP_GPRS_SIZE
+
+ bl artContextCopyForLongJump // Context* context, uintptr_t* gprs, uintptr_t* fprs
+
+ add x0, sp, #ARM64_LONG_JUMP_GPRS_SIZE
+
// Load FPRs
- ldp d0, d1, [x1, #0]
- ldp d2, d3, [x1, #16]
- ldp d4, d5, [x1, #32]
- ldp d6, d7, [x1, #48]
- ldp d8, d9, [x1, #64]
- ldp d10, d11, [x1, #80]
- ldp d12, d13, [x1, #96]
- ldp d14, d15, [x1, #112]
- ldp d16, d17, [x1, #128]
- ldp d18, d19, [x1, #144]
- ldp d20, d21, [x1, #160]
- ldp d22, d23, [x1, #176]
- ldp d24, d25, [x1, #192]
- ldp d26, d27, [x1, #208]
- ldp d28, d29, [x1, #224]
- ldp d30, d31, [x1, #240]
-
- // Load GPRs. Delay loading x0, x1 because x0 is used as gprs_.
- ldp x2, x3, [x0, #16]
- ldp x4, x5, [x0, #32]
- ldp x6, x7, [x0, #48]
- ldp x8, x9, [x0, #64]
- ldp x10, x11, [x0, #80]
- ldp x12, x13, [x0, #96]
- ldp x14, x15, [x0, #112]
+ ldp d0, d1, [x0, #0]
+ ldp d2, d3, [x0, #16]
+ ldp d4, d5, [x0, #32]
+ ldp d6, d7, [x0, #48]
+ ldp d8, d9, [x0, #64]
+ ldp d10, d11, [x0, #80]
+ ldp d12, d13, [x0, #96]
+ ldp d14, d15, [x0, #112]
+ ldp d16, d17, [x0, #128]
+ ldp d18, d19, [x0, #144]
+ ldp d20, d21, [x0, #160]
+ ldp d22, d23, [x0, #176]
+ ldp d24, d25, [x0, #192]
+ ldp d26, d27, [x0, #208]
+ ldp d28, d29, [x0, #224]
+ ldp d30, d31, [x0, #240]
+
+ ldp x0, x1, [sp, #0]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #48]
+ ldp x8, x9, [sp, #64]
+ ldp x10, x11, [sp, #80]
+ ldp x12, x13, [sp, #96]
+ ldp x14, x15, [sp, #112]
// Do not load IP0 (x16) and IP1 (x17), these shall be clobbered below.
// Don't load the platform register (x18) either.
- ldr x19, [x0, #152] // xSELF.
- ldp x20, x21, [x0, #160] // For Baker RB, wMR (w20) is reloaded below.
- ldp x22, x23, [x0, #176]
- ldp x24, x25, [x0, #192]
- ldp x26, x27, [x0, #208]
- ldp x28, x29, [x0, #224]
- ldp x30, xIP0, [x0, #240] // LR and SP, load SP to IP0.
+ ldr x19, [sp, #152] // xSELF.
+ ldp x20, x21, [sp, #160] // For Baker RB, wMR (w20) is reloaded below.
+ ldp x22, x23, [sp, #176]
+ ldp x24, x25, [sp, #192]
+ ldp x26, x27, [sp, #208]
+ ldp x28, x29, [sp, #224]
+ ldp x30, xIP0, [sp, #240] // LR and SP, load SP to IP0.
// Load PC to IP1, it's at the end (after the space for the unused XZR).
- ldr xIP1, [x0, #33*8]
-
- // Load x0, x1.
- ldp x0, x1, [x0, #0]
+ ldr xIP1, [sp, #33*8]
// Set SP. Do not access fprs_ and gprs_ from now, they are below SP.
mov sp, xIP0
@@ -922,7 +956,8 @@ ENTRY art_quick_check_instance_of
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov x2, xSELF // pass Thread::Current
bl artThrowClassCastExceptionForObject // (Object*, Class*, Thread*)
- brk 0 // We should not return here...
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END art_quick_check_instance_of
// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
@@ -1072,7 +1107,8 @@ ENTRY art_quick_aput_obj
mov x1, x2 // Pass value.
mov x2, xSELF // Pass Thread::Current.
bl artThrowArrayStoreException // (Object*, Object*, Thread*).
- brk 0 // Unreachable.
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
@@ -1181,9 +1217,11 @@ END \name
.endm
.macro RETURN_OR_DEOPT_IF_RESULT_IS_NON_NULL_OR_DELIVER
+ CFI_REMEMBER_STATE
cbz w0, 1f // result zero branch over
DEOPT_OR_RETURN x1, /*is_ref=*/1 // check for deopt or return
1:
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
DELIVER_PENDING_EXCEPTION
.endm
@@ -1565,10 +1603,21 @@ ENTRY art_quick_test_suspend
SETUP_SAVE_EVERYTHING_FRAME RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET
mov x0, xSELF
bl artTestSuspendFromCode // (Thread*)
+
+ CFI_REMEMBER_STATE
+ cbnz x0, .Ltest_suspend_deoptimize
+
+ // Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
REFRESH_SUSPEND_CHECK_REGISTER
ret
+
+.Ltest_suspend_deoptimize:
+ // Deoptimize.
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END art_quick_test_suspend
/*
@@ -1580,10 +1629,20 @@ ENTRY art_quick_implicit_suspend
SETUP_SAVE_EVERYTHING_FRAME RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET
mov x0, xSELF
bl artImplicitSuspendFromCode // (Thread*)
+
+ CFI_REMEMBER_STATE
+ cbnz x0, .Limplicit_suspend_deopt
+
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
REFRESH_SUSPEND_CHECK_REGISTER
br lr // Do not use RET as we do not enter the entrypoint with "BL".
+
+.Limplicit_suspend_deopt:
+ // Deoptimize.
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END art_quick_implicit_suspend
/*
@@ -1873,7 +1932,8 @@ ENTRY art_quick_deoptimize_from_compiled_code
SETUP_SAVE_EVERYTHING_FRAME
mov x1, xSELF // Pass thread.
bl artDeoptimizeFromCompiledCode // (DeoptimizationKind, Thread*)
- brk 0
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END art_quick_deoptimize_from_compiled_code
@@ -2531,9 +2591,19 @@ ENTRY art_quick_method_entry_hook
mov x2, sp // pass SP
bl artMethodEntryHook // (ArtMethod*, Thread*, SP)
+ CFI_REMEMBER_STATE
+ cbnz x0, .Lentryhook_deopt
+
+ // Normal return.
RESTORE_SAVE_EVERYTHING_FRAME // Note: will restore xSELF
REFRESH_MARKING_REGISTER
ret
+
+.Lentryhook_deopt:
+ // Deoptimize.
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END art_quick_method_entry_hook
.extern artMethodExitHook
@@ -2548,8 +2618,17 @@ ENTRY art_quick_method_exit_hook
bl artMethodExitHook // (Thread*, ArtMethod**, gpr_res*, fpr_res*,
// frame_size)
+ CFI_REMEMBER_STATE
+ cbnz x0, .Lexithook_deopt_or_exception
+
// Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
ret
+
+.Lexithook_deopt_or_exception:
+ // Deoptimize or exception thrown.
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ bl art_quick_do_long_jump // (Context*)
+ brk 0 // Unreached
END art_quick_method_exit_hook
diff --git a/runtime/arch/context.cc b/runtime/arch/context.cc
index b9931c44f8..f5ec06d269 100644
--- a/runtime/arch/context.cc
+++ b/runtime/arch/context.cc
@@ -22,4 +22,10 @@ Context* Context::Create() {
return new RuntimeContextType;
}
+extern "C" void artContextCopyForLongJump(Context* context, uintptr_t* gprs, uintptr_t* fprs) {
+ context->CopyContextTo(gprs, fprs);
+ // Once the context has been copied, it is no longer needed.
+ delete context;
+}
+
} // namespace art
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index bfe322ec33..1a82aa8553 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -92,8 +92,9 @@ class Context {
// an nterp frame.
virtual void SetNterpDexPC([[maybe_unused]] uintptr_t new_value) { abort(); }
- // Switches execution of the executing context to this context
- NO_RETURN virtual void DoLongJump() = 0;
+ // Copies the values of GPRs and FPRs registers from this context to external buffers;
+ // the use case is to do a long jump afterwards.
+ virtual void CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) = 0;
enum {
kBadGprBase = 0xebad6070,
@@ -101,6 +102,9 @@ class Context {
};
};
+// Copy the GPRs and FPRs from the context to the given buffers.
+extern "C" void artContextCopyForLongJump(Context* context, uintptr_t* gprs, uintptr_t* fprs);
+
} // namespace art
#endif // ART_RUNTIME_ARCH_CONTEXT_H_
diff --git a/runtime/arch/riscv64/asm_support_riscv64.S b/runtime/arch/riscv64/asm_support_riscv64.S
index 22acbfb778..f8e3b488b5 100644
--- a/runtime/arch/riscv64/asm_support_riscv64.S
+++ b/runtime/arch/riscv64/asm_support_riscv64.S
@@ -714,8 +714,9 @@
// Thread::Current()->exception_ when the runtime method frame is ready.
.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
mv a0, xSELF
- call artDeliverPendingExceptionFromCode // Point of no return.
- unimp // Unreachable.
+ call artDeliverPendingExceptionFromCode
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
.endm
diff --git a/runtime/arch/riscv64/asm_support_riscv64.h b/runtime/arch/riscv64/asm_support_riscv64.h
index 6d6ae62afb..f233eee36a 100644
--- a/runtime/arch/riscv64/asm_support_riscv64.h
+++ b/runtime/arch/riscv64/asm_support_riscv64.h
@@ -49,4 +49,11 @@
#define NTERP_SIZE_SAVE_CALLEE_SAVES 192
// clang-format on
+// Size of Context::gprs_.
+#define RISCV64_LONG_JUMP_GPRS_SIZE 264
+// Size of Context::fprs_.
+#define RISCV64_LONG_JUMP_FPRS_SIZE 256
+// Size of Context::gprs_ + Context::fprs_.
+#define RISCV64_LONG_JUMP_CONTEXT_SIZE (RISCV64_LONG_JUMP_GPRS_SIZE + RISCV64_LONG_JUMP_FPRS_SIZE)
+
#endif // ART_RUNTIME_ARCH_RISCV64_ASM_SUPPORT_RISCV64_H_
diff --git a/runtime/arch/riscv64/context_riscv64.cc b/runtime/arch/riscv64/context_riscv64.cc
index c9bfa16156..dceebe1e1f 100644
--- a/runtime/arch/riscv64/context_riscv64.cc
+++ b/runtime/arch/riscv64/context_riscv64.cc
@@ -120,12 +120,7 @@ void Riscv64Context::SmashCallerSaves() {
fprs_[FA7] = nullptr;
}
-extern "C" NO_RETURN void art_quick_do_long_jump(uint64_t*, uint64_t*);
-
-void Riscv64Context::DoLongJump() {
- uint64_t gprs[arraysize(gprs_)];
- uint64_t fprs[kNumberOfFRegisters];
-
+void Riscv64Context::CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) {
// The long jump routine called below expects to find the value for SP at index 2.
DCHECK_EQ(SP, 2);
@@ -143,7 +138,6 @@ void Riscv64Context::DoLongJump() {
if (__hwasan_handle_longjmp != nullptr) {
__hwasan_handle_longjmp(reinterpret_cast<void*>(gprs[SP]));
}
- art_quick_do_long_jump(gprs, fprs);
}
} // namespace riscv64
diff --git a/runtime/arch/riscv64/context_riscv64.h b/runtime/arch/riscv64/context_riscv64.h
index 839317d2e0..278297d68c 100644
--- a/runtime/arch/riscv64/context_riscv64.h
+++ b/runtime/arch/riscv64/context_riscv64.h
@@ -79,7 +79,7 @@ class Riscv64Context final : public Context {
void SetFPR(uint32_t reg, uintptr_t value) override;
void SmashCallerSaves() override;
- NO_RETURN void DoLongJump() override;
+ void CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) override;
static constexpr size_t kPC = kNumberOfXRegisters;
diff --git a/runtime/arch/riscv64/jni_entrypoints_riscv64.S b/runtime/arch/riscv64/jni_entrypoints_riscv64.S
index c9d034b6cc..c1f855b4bf 100644
--- a/runtime/arch/riscv64/jni_entrypoints_riscv64.S
+++ b/runtime/arch/riscv64/jni_entrypoints_riscv64.S
@@ -522,10 +522,12 @@ ENTRY art_jni_lock_object_no_inline
1:
// All args are irrelevant when throwing an exception. Remove the spill area.
DECREASE_FRAME (ALL_ARGS_SIZE + /*padding*/ 8 + /*RA*/ 8)
- // Make a tail call to `artDeliverPendingExceptionFromCode()`.
+ // Make a call to `artDeliverPendingExceptionFromCode()`.
// Rely on the JNI transition frame constructed in the JNI stub.
mv a0, xSELF // Pass Thread::Current().
- tail artDeliverPendingExceptionFromCode // (Thread*)
+ call artDeliverPendingExceptionFromCode // (Thread*)
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
END art_jni_lock_object_no_inline
/*
diff --git a/runtime/arch/riscv64/quick_entrypoints_riscv64.S b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
index 141974e14a..9c7108e8e9 100644
--- a/runtime/arch/riscv64/quick_entrypoints_riscv64.S
+++ b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
@@ -429,8 +429,17 @@ ENTRY art_quick_method_entry_hook
mv a2, sp // pass SP
call artMethodEntryHook // (ArtMethod*, Thread*, SP)
+ CFI_REMEMBER_STATE
+ bnez a0, .Lentryhook_deopt
+
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+.Lentryhook_deopt:
+ // Deoptimize.
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
END art_quick_method_entry_hook
@@ -446,91 +455,106 @@ ENTRY art_quick_method_exit_hook
call artMethodExitHook // (Thread*, ArtMethod**, gpr_res*, fpr_res*,
// frame_size)
+ CFI_REMEMBER_STATE
+ bnez a0, .Lexithook_deopt_or_exception
+
// Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
ret
-END art_quick_method_exit_hook
+.Lexithook_deopt_or_exception:
+ // Deoptimize or exception thrown.
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
+END art_quick_method_exit_hook
-// On entry a0 is uintptr_t* gprs_ and a1 is uint64_t* fprs_.
-// Both must reside on the stack, between current sp and target sp.
+// On entry a0 is the long jump context. This is expected to be returned from a previous entrypoint
+// call which threw an exception or deoptimized.
ENTRY art_quick_do_long_jump
+ // Reserve space for the gprs + fprs;
+ INCREASE_FRAME RISCV64_LONG_JUMP_CONTEXT_SIZE
+
+ mv a1, sp
+ add a2, sp, RISCV64_LONG_JUMP_GPRS_SIZE
+
+ call artContextCopyForLongJump // Context* context, uintptr_t* gprs, uintptr_t* fprs
+
+ add a0, sp, RISCV64_LONG_JUMP_GPRS_SIZE
+
// Load FPRs
- fld ft0, 8*0(a1) // f0
- fld ft1, 8*1(a1) // f1
- fld ft2, 8*2(a1) // f2
- fld ft3, 8*3(a1) // f3
- fld ft4, 8*4(a1) // f4
- fld ft5, 8*5(a1) // f5
- fld ft6, 8*6(a1) // f6
- fld ft7, 8*7(a1) // f7
- fld fs0, 8*8(a1) // f8
- fld fs1, 8*9(a1) // f9
- fld fa0, 8*10(a1) // f10
- fld fa1, 8*11(a1) // f11
- fld fa2, 8*12(a1) // f12
- fld fa3, 8*13(a1) // f13
- fld fa4, 8*14(a1) // f14
- fld fa5, 8*15(a1) // f15
- fld fa6, 8*16(a1) // f16
- fld fa7, 8*17(a1) // f17
- fld fs2, 8*18(a1) // f18
- fld fs3, 8*19(a1) // f19
- fld fs4, 8*20(a1) // f20
- fld fs5, 8*21(a1) // f21
- fld fs6, 8*22(a1) // f22
- fld fs7, 8*23(a1) // f23
- fld fs8, 8*24(a1) // f24
- fld fs9, 8*25(a1) // f25
- fld fs10, 8*26(a1) // f26
- fld fs11, 8*27(a1) // f27
- fld ft8, 8*28(a1) // f28
- fld ft9, 8*29(a1) // f29
- fld ft10, 8*30(a1) // f30
- fld ft11, 8*31(a1) // f31
+ fld ft0, 8*0(a0) // f0
+ fld ft1, 8*1(a0) // f1
+ fld ft2, 8*2(a0) // f2
+ fld ft3, 8*3(a0) // f3
+ fld ft4, 8*4(a0) // f4
+ fld ft5, 8*5(a0) // f5
+ fld ft6, 8*6(a0) // f6
+ fld ft7, 8*7(a0) // f7
+ fld fs0, 8*8(a0) // f8
+ fld fs1, 8*9(a0) // f9
+ fld fa0, 8*10(a0) // f10
+ fld fa1, 8*11(a0) // f11
+ fld fa2, 8*12(a0) // f12
+ fld fa3, 8*13(a0) // f13
+ fld fa4, 8*14(a0) // f14
+ fld fa5, 8*15(a0) // f15
+ fld fa6, 8*16(a0) // f16
+ fld fa7, 8*17(a0) // f17
+ fld fs2, 8*18(a0) // f18
+ fld fs3, 8*19(a0) // f19
+ fld fs4, 8*20(a0) // f20
+ fld fs5, 8*21(a0) // f21
+ fld fs6, 8*22(a0) // f22
+ fld fs7, 8*23(a0) // f23
+ fld fs8, 8*24(a0) // f24
+ fld fs9, 8*25(a0) // f25
+ fld fs10, 8*26(a0) // f26
+ fld fs11, 8*27(a0) // f27
+ fld ft8, 8*28(a0) // f28
+ fld ft9, 8*29(a0) // f29
+ fld ft10, 8*30(a0) // f30
+ fld ft11, 8*31(a0) // f31
// Load GPRs.
- // Skip slot 8*0(a0) for zero/x0 as it is hard-wired zero.
- ld ra, 8*1(a0) // x1
- // Skip slot 8*2(a0) for sp/x2 as it is set below.
- // Skip slot 8*3(a0) for platform-specific thread pointer gp/x3.
- // Skip slot 8*4(a0) for platform-specific global pointer tp/x4.
- // Skip slot 8*5(a0) for t0/x5 as it is clobbered below.
- // Skip slot 8*6(a0) for t1/x6 as it is clobbered below.
- ld t2, 8*7(a0) // x7
- ld s0, 8*8(a0) // x8
- ld s1, 8*9(a0) // x9
- // Delay loading a0 as the base is in a0.
- ld a1, 8*11(a0) // x11
- ld a2, 8*12(a0) // x12
- ld a3, 8*13(a0) // x13
- ld a4, 8*14(a0) // x14
- ld a5, 8*15(a0) // x15
- ld a6, 8*16(a0) // x16
- ld a7, 8*17(a0) // x17
- ld s2, 8*18(a0) // x18
- ld s3, 8*19(a0) // x19
- ld s4, 8*20(a0) // x20
- ld s5, 8*21(a0) // x21
- ld s6, 8*22(a0) // x22
- ld s7, 8*23(a0) // x23
- ld s8, 8*24(a0) // x24
- ld s9, 8*25(a0) // x25
- ld s10, 8*26(a0) // x26
- ld s11, 8*27(a0) // x27
- ld t3, 8*28(a0) // x28
- ld t4, 8*29(a0) // x29
- ld t5, 8*30(a0) // x30
- ld t6, 8*31(a0) // x31
+ // Skip slot 8*0(sp) for zero/x0 as it is hard-wired zero.
+ ld ra, 8*1(sp) // x1
+ // Skip slot 8*2(sp) for sp/x2 as it is set below.
+ // Skip slot 8*3(sp) for platform-specific thread pointer gp/x3.
+ // Skip slot 8*4(sp) for platform-specific global pointer tp/x4.
+ // Skip slot 8*5(sp) for t0/x5 as it is clobbered below.
+ // Skip slot 8*6(sp) for t1/x6 as it is clobbered below.
+ ld t2, 8*7(sp) // x7
+ ld s0, 8*8(sp) // x8
+ ld s1, 8*9(sp) // x9
+ ld a0, 8*10(sp) // x10
+ ld a1, 8*11(sp) // x11
+ ld a2, 8*12(sp) // x12
+ ld a3, 8*13(sp) // x13
+ ld a4, 8*14(sp) // x14
+ ld a5, 8*15(sp) // x15
+ ld a6, 8*16(sp) // x16
+ ld a7, 8*17(sp) // x17
+ ld s2, 8*18(sp) // x18
+ ld s3, 8*19(sp) // x19
+ ld s4, 8*20(sp) // x20
+ ld s5, 8*21(sp) // x21
+ ld s6, 8*22(sp) // x22
+ ld s7, 8*23(sp) // x23
+ ld s8, 8*24(sp) // x24
+ ld s9, 8*25(sp) // x25
+ ld s10, 8*26(sp) // x26
+ ld s11, 8*27(sp) // x27
+ ld t3, 8*28(sp) // x28
+ ld t4, 8*29(sp) // x29
+ ld t5, 8*30(sp) // x30
+ ld t6, 8*31(sp) // x31
// Load sp to t0.
- ld t0, 8*2(a0)
+ ld t0, 8*2(sp)
// Load PC to t1, it is in the last stack slot.
- ld t1, 8*32(a0)
-
- // Now load a0.
- ld a0, 8*10(a0) // x10
+ ld t1, 8*32(sp)
// Set sp. Do not access fprs_ and gprs_ from now, they are below sp.
mv sp, t0
@@ -549,32 +573,47 @@ END art_quick_do_long_jump
mv a1, a0 // pass the result
mv a0, xSELF // pass Thread::Current
call artDeoptimizeIfNeeded // (Thread*, uintptr_t, bool)
+
+ CFI_REMEMBER_STATE
+ bnez a0, 3f
+
RESTORE_SAVE_EVERYTHING_FRAME
ret
+3:
+ // Deoptimize
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
.endm
.macro RETURN_OR_DEOPT_IF_RESULT_IS_NON_NULL_OR_DELIVER
+ CFI_REMEMBER_STATE
beqz a0, 1f
DEOPT_OR_RETURN a1, /*is_ref=*/ 1
1:
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
DELIVER_PENDING_EXCEPTION
.endm
.macro RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER
+ CFI_REMEMBER_STATE
bnez a0, 1f
DEOPT_OR_RETURN a1
1:
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
DELIVER_PENDING_EXCEPTION
.endm
.macro RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION is_ref = 0
lwu a1, THREAD_EXCEPTION_OFFSET(xSELF) // Get exception field.
+ CFI_REMEMBER_STATE
bnez a1, 1f
DEOPT_OR_RETURN a1, \is_ref // Check if deopt is required.
1:
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
DELIVER_PENDING_EXCEPTION // Deliver exception on current thread.
.endm
@@ -597,10 +636,18 @@ END art_quick_do_long_jump
mv a1, a0 // pass the result
mv a0, xSELF // Thread::Current
call artDeoptimizeIfNeeded
+
CFI_REMEMBER_STATE
+ bnez a0, 3f
+
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+3:
+ // Deoptimize
CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
.endm
@@ -683,7 +730,8 @@ ENTRY art_quick_deoptimize_from_compiled_code
SETUP_SAVE_EVERYTHING_FRAME
mv a1, xSELF // Pass Thread::Current().
call artDeoptimizeFromCompiledCode // (DeoptimizationKind, Thread*)
- unimp
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
END art_quick_deoptimize_from_compiled_code
@@ -738,7 +786,8 @@ ENTRY art_quick_check_instance_of
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // Save all registers as basis for long jump context.
mv a2, xSELF // Pass Thread::Current().
call artThrowClassCastExceptionForObject // (Object*, Class*, Thread*)
- unimp // We should not return here...
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
END art_quick_check_instance_of
@@ -748,7 +797,8 @@ ENTRY \c_name
SETUP_SAVE_EVERYTHING_FRAME // save all registers as basis for long jump context.
mv a\n, xSELF // pass Thread::Current.
call \cxx_name // \cxx_name(args..., Thread*).
- unimp
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
END \c_name
.endm
@@ -769,7 +819,8 @@ ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context.
mv a\n, xSELF // pass Thread::Current.
call \cxx_name // \cxx_name(args..., Thread*).
- unimp
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
END \c_name
.endm
@@ -801,7 +852,8 @@ ENTRY art_quick_throw_null_pointer_exception_from_signal
mv a0, ra // pass the fault address stored in RA by the fault handler.
mv a1, xSELF // pass Thread::Current.
call artThrowNullPointerExceptionFromSignal // (arg, Thread*).
- unimp
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
END art_quick_throw_null_pointer_exception_from_signal
@@ -859,8 +911,18 @@ ENTRY art_quick_test_suspend
RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET
mv a0, xSELF
call artTestSuspendFromCode
+
+ CFI_REMEMBER_STATE
+ bnez a0, .Ltest_suspend_deoptimize
+
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+.Ltest_suspend_deoptimize:
+ // Deoptimize
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
END art_quick_test_suspend
@@ -1331,7 +1393,8 @@ ENTRY art_quick_aput_obj
mv a1, a2 // Pass value.
mv a2, xSELF // Pass Thread::Current().
call artThrowArrayStoreException // (Object*, Object*, Thread*).
- unimp // Unreachable.
+ call art_quick_do_long_jump // (Context*)
+ unimp // Unreached
#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 7ec2e2ea9b..4298c65c5f 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -384,6 +384,8 @@ MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*)
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
CFI_ADJUST_CFA_OFFSET(-16) // Reset CFA in case there is more code afterwards.
END_MACRO
diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h
index 8c19e07ec3..a752a53f3e 100644
--- a/runtime/arch/x86/asm_support_x86.h
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -29,4 +29,11 @@
#define SAVE_EVERYTHING_FRAME_EAX_OFFSET \
(FRAME_SIZE_SAVE_EVERYTHING - CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS * POINTER_SIZE)
+// Size of Context::gprs_ + 1 so the stack pointer can be stored without being popped by pop-all.
+#define X86_LONG_JUMP_GPRS_SIZE 36
+// Size of Context::fprs_.
+#define X86_LONG_JUMP_FPRS_SIZE 64
+// Size of Context::gprs_ + Context::fprs_.
+#define X86_LONG_JUMP_CONTEXT_SIZE (X86_LONG_JUMP_GPRS_SIZE + X86_LONG_JUMP_FPRS_SIZE)
+
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index f4b5e5bd52..40f1a4aa8d 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -88,15 +88,13 @@ void X86Context::SetFPR(uint32_t reg, uintptr_t value) {
*fprs_[reg] = value;
}
-void X86Context::DoLongJump() {
+void X86Context::CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) {
#if defined(__i386__)
// Array of GPR values, filled from the context backward for the long jump pop. We add a slot at
// the top for the stack pointer that doesn't get popped in a pop-all.
- volatile uintptr_t gprs[kNumberOfCpuRegisters + 1];
for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) {
- gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != nullptr ? *gprs_[i] : kBadGprBase + i;
+ gprs[kNumberOfCpuRegisters - i - 1] = (gprs_[i] != nullptr) ? *gprs_[i] : (kBadGprBase + i);
}
- uint32_t fprs[kNumberOfFloatRegisters];
for (size_t i = 0; i < kNumberOfFloatRegisters; ++i) {
fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : kBadFprBase + i;
}
@@ -104,28 +102,9 @@ void X86Context::DoLongJump() {
uintptr_t esp = gprs[kNumberOfCpuRegisters - ESP - 1] - sizeof(intptr_t);
gprs[kNumberOfCpuRegisters] = esp;
*(reinterpret_cast<uintptr_t*>(esp)) = eip_;
- MEMORY_TOOL_HANDLE_NO_RETURN;
- __asm__ __volatile__(
- "movl %1, %%ebx\n\t" // Address base of FPRs.
- "movsd 0(%%ebx), %%xmm0\n\t" // Load up XMM0-XMM7.
- "movsd 8(%%ebx), %%xmm1\n\t"
- "movsd 16(%%ebx), %%xmm2\n\t"
- "movsd 24(%%ebx), %%xmm3\n\t"
- "movsd 32(%%ebx), %%xmm4\n\t"
- "movsd 40(%%ebx), %%xmm5\n\t"
- "movsd 48(%%ebx), %%xmm6\n\t"
- "movsd 56(%%ebx), %%xmm7\n\t"
- "movl %0, %%esp\n\t" // ESP points to gprs.
- "popal\n\t" // Load all registers except ESP and EIP with values in gprs.
- "popl %%esp\n\t" // Load stack pointer.
- "ret\n\t" // From higher in the stack pop eip.
- : // output.
- : "g"(&gprs[0]), "g"(&fprs[0]) // input.
- :); // clobber.
#else
UNIMPLEMENTED(FATAL);
#endif
- UNREACHABLE();
}
} // namespace x86
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index ef2b27196b..455f731723 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -85,7 +85,7 @@ class X86Context final : public Context {
void SetFPR(uint32_t reg, uintptr_t value) override;
void SmashCallerSaves() override;
- NO_RETURN void DoLongJump() override;
+ void CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) override;
private:
// Pretend XMM registers are made of uin32_t pieces, because they are manipulated
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
index 448774a003..cc845dae02 100644
--- a/runtime/arch/x86/jni_entrypoints_x86.S
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -358,6 +358,8 @@ DEFINE_FUNCTION art_jni_lock_object_no_inline
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artDeliverPendingExceptionFromCode) // (Thread*)
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION art_jni_lock_object_no_inline
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 5a32464290..b4293cd94b 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -211,6 +211,8 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
call CALLVAR(cxx_name) // cxx_name(Thread*)
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -223,6 +225,8 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
call CALLVAR(cxx_name) // cxx_name(Thread*)
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -236,6 +240,8 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -250,6 +256,8 @@ MACRO2(TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name)
PUSH ecx // pass arg2
PUSH eax // pass arg1
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*)
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -276,6 +284,8 @@ DEFINE_FUNCTION_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, 2
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
call SYMBOL(artThrowNullPointerExceptionFromSignal) // (addr, self)
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION art_quick_throw_null_pointer_exception_from_signal
@@ -644,6 +654,50 @@ DEFINE_FUNCTION art_quick_invoke_static_stub
ret
END_FUNCTION art_quick_invoke_static_stub
+ /*
+ * Long jump stub.
+ * On entry edi is the long jump context. This is expected to be returned from a previous
+ * entrypoint call which threw an exception or deoptimized.
+ */
+DEFINE_FUNCTION art_quick_do_long_jump
+#if defined(__APPLE__)
+ int3
+ int3
+#else
+ // Reserve space for the gprs + fprs;
+ INCREASE_FRAME X86_LONG_JUMP_CONTEXT_SIZE
+
+ lea 0(%esp), %esi // GPRS
+ lea X86_LONG_JUMP_GPRS_SIZE(%esp), %edx // FPRS
+
+ PUSH_ARG edx
+ PUSH_ARG esi
+ PUSH_ARG edi
+
+ call SYMBOL(artContextCopyForLongJump) // Context* context,
+ // uintptr_t* gprs,
+ // uintptr_t* fprs
+
+ POP_ARG edi
+ POP_ARG esi // GPRS
+ POP_ARG edx // FPRS
+
+ // Address base of FPRs.
+ movsd 0(%edx), %xmm0 // Load up XMM0-XMM7.
+ movsd 8(%edx), %xmm1
+ movsd 16(%edx), %xmm2
+ movsd 24(%edx), %xmm3
+ movsd 32(%edx), %xmm4
+ movsd 40(%edx), %xmm5
+ movsd 48(%edx), %xmm6
+ movsd 56(%edx), %xmm7
+ movl %esi, %esp // ESP points to gprs.
+ popal // Load all registers except ESP and EIP with values in gprs.
+ POP esp // Load stack pointer.
+ ret // From higher in the stack pop eip.
+#endif
+END_FUNCTION art_quick_do_long_jump
+
MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_REFS_ONLY_FRAME ebx // save ref containing registers for GC
@@ -737,18 +791,22 @@ MACRO2(ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT, c_name, cxx_name)
END_MACRO
MACRO0(RETURN_OR_DEOPT_IF_RESULT_IS_NON_NULL_OR_DELIVER)
+ CFI_REMEMBER_STATE
testl %eax, %eax // eax == 0 ?
jz 1f // if eax == 0 goto 1
DEOPT_OR_RETURN ebx, /*is_ref=*/1 // check if deopt is required
1: // deliver exception on current thread
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, 4
DELIVER_PENDING_EXCEPTION
END_MACRO
MACRO1(RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION, is_ref = 0)
+ CFI_REMEMBER_STATE
cmpl MACRO_LITERAL(0),%fs:THREAD_EXCEPTION_OFFSET // exception field == 0 ?
jne 1f // if exception field != 0 goto 1
DEOPT_OR_RETURN ebx, \is_ref // check if deopt is required
1: // deliver exception on current thread
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, 4
DELIVER_PENDING_EXCEPTION
END_MACRO
@@ -770,8 +828,20 @@ MACRO2(DEOPT_OR_RETURN, temp, is_ref = 0)
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artDeoptimizeIfNeeded)
DECREASE_FRAME(16) // pop arguments
+
+ CFI_REMEMBER_STATE
+ cmp LITERAL(0), %eax
+ jne 3f
+
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+3:
+ // Deoptimize
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_MACRO
MACRO2(DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_EAX, temp, is_ref = 0)
@@ -791,18 +861,30 @@ MACRO2(DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_EAX, temp, is_ref = 0)
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artDeoptimizeIfNeeded)
DECREASE_FRAME(16) // pop arguments
+
CFI_REMEMBER_STATE
+ cmp LITERAL(0), %eax
+ jne 3f
+
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+3:
+ // Deoptimize
CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_MACRO
MACRO0(RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER)
+ CFI_REMEMBER_STATE
testl %eax, %eax // eax == 0 ?
jnz 1f // if eax != 0 goto 1
DEOPT_OR_RETURN ebx // check if deopt is needed
1: // deliver exception on current thread
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, 4
DELIVER_PENDING_EXCEPTION
END_MACRO
@@ -1226,6 +1308,8 @@ DEFINE_FUNCTION art_quick_check_instance_of
PUSH ecx // pass arg2
PUSH eax // pass arg1
call SYMBOL(artThrowClassCastExceptionForObject) // (Object* src, Class* dest, Thread*)
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION art_quick_check_instance_of
@@ -1302,6 +1386,8 @@ DEFINE_FUNCTION art_quick_aput_obj
PUSH_ARG edx // Pass arg2 - value.
PUSH_ARG eax // Pass arg1 - array.
call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
#ifdef USE_READ_BARRIER
@@ -1358,8 +1444,20 @@ DEFINE_FUNCTION art_quick_test_suspend
CFI_ADJUST_CFA_OFFSET(4)
call SYMBOL(artTestSuspendFromCode) // (Thread*)
DECREASE_FRAME 16 // pop arguments
+
+ CFI_REMEMBER_STATE
+ cmp LITERAL(0), %eax
+ jne .Ltest_suspend_deoptimize
+
RESTORE_SAVE_EVERYTHING_FRAME // restore frame up to return address
ret // return
+
+.Ltest_suspend_deoptimize:
+ // Deoptimize
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_FUNCTION art_quick_test_suspend
DEFINE_FUNCTION art_quick_d2l
@@ -1703,12 +1801,14 @@ ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMeth
*/
DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
SETUP_SAVE_EVERYTHING_FRAME ebx
- subl LITERAL(8), %esp // Align stack.
+ subl LITERAL(8), %esp // Align stack.
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax
call SYMBOL(artDeoptimizeFromCompiledCode) // (DeoptimizationKind, Thread*)
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION art_quick_deoptimize_from_compiled_code
@@ -2161,8 +2261,20 @@ DEFINE_FUNCTION art_quick_method_entry_hook
addl LITERAL(16), %esp // Pop arguments.
CFI_ADJUST_CFA_OFFSET(-16)
+ CFI_REMEMBER_STATE
+ cmp LITERAL(0), %eax
+ jne .Lentryhook_deopt
+
+ // Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+.Lentryhook_deopt:
+ // Deoptimize.
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_FUNCTION art_quick_method_entry_hook
DEFINE_FUNCTION art_quick_method_exit_hook
@@ -2170,7 +2282,7 @@ DEFINE_FUNCTION art_quick_method_exit_hook
SETUP_SAVE_EVERYTHING_FRAME_EDI_SAVED edi
leal FRAME_SIZE_SAVE_EVERYTHING(%esp), %edi // Remember ArtMethod**
- subl LITERAL(4), %esp // Align stack.
+ subl LITERAL(4), %esp // Align stack.
CFI_ADJUST_CFA_OFFSET(4)
PUSH_ARG edx // Save gpr return value. edx and eax need to be together
@@ -2187,13 +2299,27 @@ DEFINE_FUNCTION art_quick_method_exit_hook
call SYMBOL(artMethodExitHook) // (Thread*, ArtMethod**, gpr_result*, fpr_result*,
// frame_size)
- // Return result could have been changed if it's a reference.
+ // Keep gpr_result in case the return result was changed.
movl 20(%esp), %ecx
- movl %ecx, (80+32)(%esp)
- addl LITERAL(32), %esp // Pop arguments and grp_result.
+
+ addl LITERAL(32), %esp // Pop arguments and gpr_result.
CFI_ADJUST_CFA_OFFSET(-32)
+ CFI_REMEMBER_STATE
+ cmp LITERAL(0), %eax
+ jne .Lexithook_deopt_or_exception
+
+ // Return result could have been changed if it's a reference.
+ movl %ecx, (80)(%esp)
+
// Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+.Lexithook_deopt_or_exception:
+ // Deoptimize or exception thrown.
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+ mov %eax, %edi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_FUNCTION art_quick_method_exit_hook
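The exit-hook offset change above is the reference point moving, not the slot: the saved EAX lives 80 bytes into the SAVE_EVERYTHING frame, so it is addressed as (80+32)(%esp) while the 32 bytes of outgoing arguments and gpr_result are still pushed, and as (80)(%esp) once `addl LITERAL(32), %esp` retires them. Writing the (possibly updated) reference back only on the normal-return path is safe because the deopt path abandons this frame via the long jump anyway.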
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 7efa77b77e..bf0b958749 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -469,6 +469,8 @@ MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
// (Thread*) setup
movq %gs:THREAD_SELF_OFFSET, %rdi
call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*)
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_MACRO
/*
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index e4d7aa9457..8acce20aad 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -29,4 +29,11 @@
#define SAVE_EVERYTHING_FRAME_RAX_OFFSET \
(FRAME_SIZE_SAVE_EVERYTHING - CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS * POINTER_SIZE)
+// Size of Context::gprs_.
+#define X86_64_LONG_JUMP_GPRS_SIZE 136
+// Size of Context::fprs_.
+#define X86_64_LONG_JUMP_FPRS_SIZE 128
+// Size of Context::gprs_ + Context::fprs_.
+#define X86_64_LONG_JUMP_CONTEXT_SIZE (X86_64_LONG_JUMP_GPRS_SIZE + X86_64_LONG_JUMP_FPRS_SIZE)
+
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
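These constants must stay in lock step with the register arrays that X86_64Context copies out (see context_x86_64.cc below). A hedged consistency check one could write on the C++ side; the kNumberOf... enumerators are ART's existing names, while the header path and the asserts themselves are illustrative, not part of the patch:

    #include "arch/x86_64/registers_x86_64.h"  // Assumed location of the register enums.

    // gprs_ carries one extra slot: CopyContextTo() appends the target RSP (see below).
    static_assert(X86_64_LONG_JUMP_GPRS_SIZE ==
                  sizeof(uintptr_t) * (x86_64::kNumberOfCpuRegisters + 1));  // 8 * 17 == 136
    static_assert(X86_64_LONG_JUMP_FPRS_SIZE ==
                  sizeof(uintptr_t) * x86_64::kNumberOfFloatRegisters);      // 8 * 16 == 128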
diff --git a/runtime/arch/x86_64/context_x86_64.cc b/runtime/arch/x86_64/context_x86_64.cc
index 196f29f31d..d40395bc8b 100644
--- a/runtime/arch/x86_64/context_x86_64.cc
+++ b/runtime/arch/x86_64/context_x86_64.cc
@@ -100,13 +100,8 @@ void X86_64Context::SetFPR(uint32_t reg, uintptr_t value) {
*fprs_[reg] = value;
}
-extern "C" NO_RETURN void art_quick_do_long_jump(uintptr_t*, uintptr_t*);
-
-void X86_64Context::DoLongJump() {
+void X86_64Context::CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) {
#if defined(__x86_64__)
- uintptr_t gprs[kNumberOfCpuRegisters + 1];
- uintptr_t fprs[kNumberOfFloatRegisters];
-
for (size_t i = 0; i < kNumberOfCpuRegisters; ++i) {
gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != nullptr ? *gprs_[i] : kBadGprBase + i;
}
@@ -118,8 +113,6 @@ void X86_64Context::DoLongJump() {
uintptr_t rsp = gprs[kNumberOfCpuRegisters - RSP - 1] - sizeof(intptr_t);
gprs[kNumberOfCpuRegisters] = rsp;
*(reinterpret_cast<uintptr_t*>(rsp)) = rip_;
-
- art_quick_do_long_jump(gprs, fprs);
#else
UNIMPLEMENTED(FATAL);
UNREACHABLE();
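The stub calls artContextCopyForLongJump, which this hunk does not show (runtime/arch/context.cc changes by a handful of lines in the file list above). A plausible shape, assuming the helper also frees the context whose ownership PrepareLongJump() released to the assembly caller:

    extern "C" void artContextCopyForLongJump(Context* context, uintptr_t* gprs, uintptr_t* fprs) {
      context->CopyContextTo(gprs, fprs);  // Fill the stack buffers reserved by the stub.
      delete context;                      // Assumed: the stub is the context's final owner.
    }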
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index 19e5f57345..cd2e027656 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -85,7 +85,7 @@ class X86_64Context final : public Context {
void SetFPR(uint32_t reg, uintptr_t value) override;
void SmashCallerSaves() override;
- NO_RETURN void DoLongJump() override;
+ void CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) override;
private:
// Pointers to register locations. Values are initialized to null or the special registers below.
diff --git a/runtime/arch/x86_64/jni_entrypoints_x86_64.S b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
index a7966db928..a635de8c28 100644
--- a/runtime/arch/x86_64/jni_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
@@ -458,11 +458,15 @@ DEFINE_FUNCTION art_jni_lock_object_no_inline
ret
.cfi_adjust_cfa_offset MANAGED_ARGS_SAVE_SIZE
1:
- // All args are irrelevant when throwing an exception. Remove the spill area.
- DECREASE_FRAME MANAGED_ARGS_SAVE_SIZE
+ // All args are irrelevant when throwing an exception. Remove the spill area except for new
+ // padding to align stack.
+ DECREASE_FRAME MANAGED_ARGS_SAVE_SIZE - /*new padding*/ 8
// Rely on the JNI transition frame constructed in the JNI stub.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread::Current().
- jmp SYMBOL(artDeliverPendingExceptionFromCode) // (Thread*); tail call.
+ call SYMBOL(artDeliverPendingExceptionFromCode) // (Thread*)
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_FUNCTION art_jni_lock_object_no_inline
/*
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 10e098d7db..a538aa769e 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -201,6 +201,8 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(Thread*)
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -211,6 +213,8 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name)
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(Thread*)
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -221,6 +225,8 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -231,6 +237,8 @@ MACRO2(TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name)
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg1, arg2, Thread*)
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION VAR(c_name)
END_MACRO
@@ -254,6 +262,8 @@ DEFINE_FUNCTION_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, 2
// Outgoing argument set up; RDI already contains the fault address.
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call SYMBOL(artThrowNullPointerExceptionFromSignal) // (addr, self)
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION art_quick_throw_null_pointer_exception_from_signal
@@ -591,34 +601,42 @@ END_FUNCTION art_quick_invoke_static_stub
/*
* Long jump stub.
- * On entry:
- * rdi = gprs
- * rsi = fprs
+ * On entry, RDI holds the long jump context, i.e. the Context* returned by a previous
+ * entrypoint call that threw an exception or deoptimized.
*/
DEFINE_FUNCTION art_quick_do_long_jump
#if defined(__APPLE__)
int3
int3
#else
+ // Reserve space for the gprs + fprs.
+ INCREASE_FRAME X86_64_LONG_JUMP_CONTEXT_SIZE
+
+ leaq 0(%rsp), %rsi // GPRS
+ leaq X86_64_LONG_JUMP_GPRS_SIZE(%rsp), %rdx // FPRS
+
+ call SYMBOL(artContextCopyForLongJump) // Context* context,
+ // uintptr_t* gprs,
+ // uintptr_t* fprs
+
// Restore FPRs.
- movq 0(%rsi), %xmm0
- movq 8(%rsi), %xmm1
- movq 16(%rsi), %xmm2
- movq 24(%rsi), %xmm3
- movq 32(%rsi), %xmm4
- movq 40(%rsi), %xmm5
- movq 48(%rsi), %xmm6
- movq 56(%rsi), %xmm7
- movq 64(%rsi), %xmm8
- movq 72(%rsi), %xmm9
- movq 80(%rsi), %xmm10
- movq 88(%rsi), %xmm11
- movq 96(%rsi), %xmm12
- movq 104(%rsi), %xmm13
- movq 112(%rsi), %xmm14
- movq 120(%rsi), %xmm15
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+0)(%rsp), %xmm0
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+8)(%rsp), %xmm1
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+16)(%rsp), %xmm2
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+24)(%rsp), %xmm3
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+32)(%rsp), %xmm4
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+40)(%rsp), %xmm5
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+48)(%rsp), %xmm6
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+56)(%rsp), %xmm7
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+64)(%rsp), %xmm8
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+72)(%rsp), %xmm9
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+80)(%rsp), %xmm10
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+88)(%rsp), %xmm11
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+96)(%rsp), %xmm12
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+104)(%rsp), %xmm13
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+112)(%rsp), %xmm14
+ movq (X86_64_LONG_JUMP_GPRS_SIZE+120)(%rsp), %xmm15
// Restore GPRs.
- movq %rdi, %rsp // RSP points to gprs.
// Load all registers except RSP and RIP with values in gprs.
popq %r15
popq %r14
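The stub can finish with plain pops and a ret because of the layout CopyContextTo() produces; restating the key lines from context_x86_64.cc above, with comments added:

    // gprs[0..15] hold the target register values in *reverse* register order, so a
    // run of popq instructions restores them from low addresses upwards.
    gprs[kNumberOfCpuRegisters - i - 1] = gprs_[i] != nullptr ? *gprs_[i] : kBadGprBase + i;
    // The extra slot becomes the stub's final stack pointer: one word below the target
    // SP, pre-loaded with the target PC, so the closing ret both switches stacks and jumps.
    uintptr_t rsp = gprs[kNumberOfCpuRegisters - RSP - 1] - sizeof(intptr_t);
    gprs[kNumberOfCpuRegisters] = rsp;
    *(reinterpret_cast<uintptr_t*>(rsp)) = rip_;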
@@ -709,20 +727,24 @@ MACRO2(ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT, c_name, cxx_name)
END_MACRO
MACRO0(RETURN_OR_DEOPT_IF_RESULT_IS_NON_NULL_OR_DELIVER)
+ CFI_REMEMBER_STATE
testq %rax, %rax // rax == 0 ?
jz 1f // if rax == 0 goto 1
DEOPT_OR_RETURN /*is_ref=*/1 // Check if deopt is required
1: // deliver exception on current thread
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, 8
DELIVER_PENDING_EXCEPTION
END_MACRO
MACRO1(RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION, is_ref = 0)
movq %gs:THREAD_EXCEPTION_OFFSET, %rcx // get exception field
+ CFI_REMEMBER_STATE
testq %rcx, %rcx // rcx == 0 ?
jnz 1f // if rcx != 0 goto 1
DEOPT_OR_RETURN \is_ref // Check if deopt is required
1: // deliver exception on current thread
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, 8
DELIVER_PENDING_EXCEPTION
END_MACRO
@@ -740,8 +762,20 @@ MACRO1(DEOPT_OR_RETURN, is_ref = 0)
movq %rax, %rsi // pass the result
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current
call SYMBOL(artDeoptimizeIfNeeded)
+
+ CFI_REMEMBER_STATE
+ cmpq LITERAL(0), %rax
+ jne 3f
+
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+3:
+ // Deoptimize.
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_MACRO
MACRO1(DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_RAX, is_ref = 0)
@@ -757,19 +791,31 @@ MACRO1(DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_RAX, is_ref = 0)
movq %rax, %rsi // pass the result
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current
call SYMBOL(artDeoptimizeIfNeeded)
+
CFI_REMEMBER_STATE
+ cmpq LITERAL(0), %rax
+ jne 3f
+
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+3:
+ // Deoptimize.
CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_MACRO
MACRO0(RETURN_OR_DEOPT_IF_INT_RESULT_IS_ZERO_OR_DELIVER)
+ CFI_REMEMBER_STATE
testl %eax, %eax // eax == 0 ?
jnz 1f // if eax != 0 goto 1
DEOPT_OR_RETURN // Check if we need a deopt
1: // deliver exception on current thread
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, 8
DELIVER_PENDING_EXCEPTION
END_MACRO
@@ -1162,6 +1208,8 @@ DEFINE_FUNCTION art_quick_check_instance_of
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call SYMBOL(artThrowClassCastExceptionForObject) // (Object* src, Class* dest, Thread*)
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION art_quick_check_instance_of
@@ -1227,10 +1275,12 @@ DEFINE_FUNCTION art_quick_aput_obj
.Laput_obj_throw_array_store_exception:
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // Save all registers as basis for long jump context.
// Outgoing argument set up.
- movq %rdx, %rsi // Pass arg 2 = value.
- movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current().
- // Pass arg 1 = array.
+ movq %rdx, %rsi // Pass arg 2 = value.
+ movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current().
+ // Pass arg 1 = array.
call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
#ifdef USE_READ_BARRIER
@@ -1269,8 +1319,21 @@ DEFINE_FUNCTION art_quick_test_suspend
// Outgoing argument set up
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
call SYMBOL(artTestSuspendFromCode) // (Thread*)
+
+ CFI_REMEMBER_STATE
+ cmpq LITERAL(0), %rax
+ jne .Ltest_suspend_deoptimize
+
+ // Normal return.
RESTORE_SAVE_EVERYTHING_FRAME // restore frame up to return address
ret
+
+.Ltest_suspend_deoptimize:
+ // Deoptimize.
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_FUNCTION art_quick_test_suspend
UNIMPLEMENTED art_quick_ldiv
@@ -1569,6 +1632,8 @@ DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
// Stack should be aligned now.
movq %gs:THREAD_SELF_OFFSET, %rsi // Pass Thread.
call SYMBOL(artDeoptimizeFromCompiledCode) // (DeoptimizationKind, Thread*)
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
UNREACHABLE
END_FUNCTION art_quick_deoptimize_from_compiled_code
@@ -1999,10 +2064,22 @@ DEFINE_FUNCTION art_quick_method_entry_hook
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
movq %rsp, %rdx // SP
- call SYMBOL(artMethodEntryHook) // (ArtMethod*, Thread*, sp)
+ CFI_REMEMBER_STATE
+ call SYMBOL(artMethodEntryHook) // (ArtMethod*, Thread*, sp)
+
+ cmpq LITERAL(0), %rax
+ jne .Lentryhook_deopt
+ // Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+.Lentryhook_deopt:
+ // Deoptimize.
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_FUNCTION art_quick_method_entry_hook
// On entry, method is at the bottom of the stack.
@@ -2018,7 +2095,18 @@ DEFINE_FUNCTION art_quick_method_exit_hook
call SYMBOL(artMethodExitHook) // (Thread*, ArtMethod**, gpr_res*, fpr_res*,
// frame_size)
+ CFI_REMEMBER_STATE
+ cmpq LITERAL(0), %rax
+ jne .Lexithook_deopt_or_exception
+
// Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
ret
+
+.Lexithook_deopt_or_exception:
+ // Deoptimize or exception thrown.
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+ movq %rax, %rdi // pass Context*
+ call SYMBOL(art_quick_do_long_jump)
+ UNREACHABLE
END_FUNCTION art_quick_method_exit_hook
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 854d2737a4..cc1b5ed6df 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -342,9 +342,6 @@ uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type,
// removed by a ProGuard-like tool.
// Note: this is not RI behavior. RI would have failed when loading the class.
self->ClearException();
- // Delete any long jump context as this routine is called during a stack walk which will
- // release its in use context at the end.
- delete self->GetLongJumpContext();
LOG(WARNING) << "Unresolved exception class when finding catch block: "
<< DescriptorToDot(GetTypeDescriptorFromTypeIdx(iter_type_idx));
} else if (iter_exception_type->IsAssignableFrom(exception_type.Get())) {
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 29060457fd..3ed3718788 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -25,10 +25,10 @@
namespace art HIDDEN {
-NO_RETURN static void artDeoptimizeImpl(Thread* self,
- DeoptimizationKind kind,
- bool single_frame,
- bool skip_method_exit_callbacks)
+static Context* artDeoptimizeImpl(Thread* self,
+ DeoptimizationKind kind,
+ bool single_frame,
+ bool skip_method_exit_callbacks)
REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime::Current()->IncrementDeoptimizationCount(kind);
if (VLOG_IS_ON(deopt)) {
@@ -49,24 +49,24 @@ NO_RETURN static void artDeoptimizeImpl(Thread* self,
exception_handler.DeoptimizeStack(skip_method_exit_callbacks);
}
if (exception_handler.IsFullFragmentDone()) {
- exception_handler.DoLongJump(true);
+ return exception_handler.PrepareLongJump(true);
} else {
exception_handler.DeoptimizePartialFragmentFixup();
// We cannot smash the caller-saves, as we need the ArtMethod in a parameter register that would
// be caller-saved. This has the downside that we cannot track incorrect register usage down the
// line.
- exception_handler.DoLongJump(false);
+ return exception_handler.PrepareLongJump(false);
}
}
-extern "C" NO_RETURN void artDeoptimize(Thread* self, bool skip_method_exit_callbacks)
+extern "C" Context* artDeoptimize(Thread* self, bool skip_method_exit_callbacks)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- artDeoptimizeImpl(self, DeoptimizationKind::kFullFrame, false, skip_method_exit_callbacks);
+ return artDeoptimizeImpl(self, DeoptimizationKind::kFullFrame, false, skip_method_exit_callbacks);
}
// This is called directly from compiled code by an HDeoptimize.
-extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(DeoptimizationKind kind, Thread* self)
+extern "C" Context* artDeoptimizeFromCompiledCode(DeoptimizationKind kind, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
// Before deoptimizing to interpreter, we must push the deoptimization context.
@@ -79,7 +79,7 @@ extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(DeoptimizationKind kind,
DeoptimizationMethodType::kDefault);
// Deopting from compiled code, so method exit callbacks haven't run yet. Don't skip the
// method exit callbacks if they are required.
- artDeoptimizeImpl(self, kind, true, /* skip_method_exit_callbacks= */ false);
+ return artDeoptimizeImpl(self, kind, true, /* skip_method_exit_callbacks= */ false);
}
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 53e14d0d7a..f7d6de2c3c 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -21,7 +21,7 @@
namespace art HIDDEN {
-extern "C" void artDeoptimizeIfNeeded(Thread* self, uintptr_t result, bool is_ref)
+extern "C" Context* artDeoptimizeIfNeeded(Thread* self, uintptr_t result, bool is_ref)
REQUIRES_SHARED(Locks::mutator_lock_) {
instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
DCHECK(!self->IsExceptionPending());
@@ -32,10 +32,10 @@ extern "C" void artDeoptimizeIfNeeded(Thread* self, uintptr_t result, bool is_re
DeoptimizationMethodType type = instr->GetDeoptimizationMethodType(*sp);
JValue jvalue;
jvalue.SetJ(result);
- instr->DeoptimizeIfNeeded(self, sp, type, jvalue, is_ref);
+ return instr->DeoptimizeIfNeeded(self, sp, type, jvalue, is_ref);
}
-extern "C" void artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+extern "C" Context* artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when there is a pending checkpoint or suspend request.
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend();
@@ -45,11 +45,11 @@ extern "C" void artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::muta
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
JValue result;
result.SetJ(0);
- Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
+ return Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
self, sp, DeoptimizationMethodType::kKeepDexPc, result, /* is_ref= */ false);
}
-extern "C" void artImplicitSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
+extern "C" Context* artImplicitSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when there is a pending checkpoint or suspend request.
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend(/*implicit=*/ true);
@@ -59,7 +59,7 @@ extern "C" void artImplicitSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
JValue result;
result.SetJ(0);
- Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
+ return Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
self, sp, DeoptimizationMethodType::kKeepDexPc, result, /* is_ref= */ false);
}
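All four entrypoints in this file now share one return convention: nullptr means "no deoptimization, return normally", anything else is the prepared long jump context. A hedged caller-side sketch (ExampleSuspendCaller is illustrative and not in the patch; the real callers are the .S stubs above):

    void ExampleSuspendCaller(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
      Context* ctx = artTestSuspendFromCode(self);
      if (ctx != nullptr) {
        // The .S callers hand |ctx| to art_quick_do_long_jump here; it never returns.
      }
      // ctx == nullptr: no deoptimization was requested; restore the frame and return.
    }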
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 781f3e549d..3cd109fd31 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -27,23 +27,23 @@
namespace art HIDDEN {
// Deliver an exception pending on the thread, setting up a callee-save frame on the way.
-extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
+extern "C" Context* artDeliverPendingExceptionFromCode(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
-extern "C" NO_RETURN uint64_t artInvokeObsoleteMethod(ArtMethod* method, Thread* self)
+extern "C" Context* artInvokeObsoleteMethod(ArtMethod* method, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method->IsObsolete());
ScopedQuickEntrypointChecks sqec(self);
ThrowInternalError("Attempting to invoke obsolete version of '%s'.",
method->PrettyMethod().c_str());
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
// Called by generated code to throw an exception.
-extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
+extern "C" Context* artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
/*
* exception may be null, in which case this routine should
@@ -58,61 +58,61 @@ extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* excepti
} else {
self->SetException(exception);
}
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
// Called by generated code to throw a NPE exception.
-extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
+extern "C" Context* artThrowNullPointerExceptionFromCode(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
// We come from an explicit check in the generated code. This path is triggered
// only if the object is indeed null.
ThrowNullPointerExceptionFromDexPC(/* check_address= */ false, 0U);
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
// Installed by a signal handler to throw a NPE exception.
-extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
+extern "C" Context* artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowNullPointerExceptionFromDexPC(/* check_address= */ true, addr);
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
// Called by generated code to throw an arithmetic divide by zero exception.
-extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self)
+extern "C" Context* artThrowDivZeroFromCode(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArithmeticExceptionDivideByZero();
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
// Called by generated code to throw an array index out of bounds exception.
-extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
+extern "C" Context* artThrowArrayBoundsFromCode(int index, int length, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayIndexOutOfBoundsException(index, length);
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
// Called by generated code to throw a string index out of bounds exception.
-extern "C" NO_RETURN void artThrowStringBoundsFromCode(int index, int length, Thread* self)
+extern "C" Context* artThrowStringBoundsFromCode(int index, int length, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowStringIndexOutOfBoundsException(index, length);
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
-extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self)
+extern "C" Context* artThrowStackOverflowFromCode(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowStackOverflowError(self);
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
-extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
- mirror::Class* src_type,
- Thread* self)
+extern "C" Context* artThrowClassCastException(mirror::Class* dest_type,
+ mirror::Class* src_type,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
if (dest_type == nullptr) {
@@ -137,23 +137,24 @@ extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
}
DCHECK(!dest_type->IsAssignableFrom(src_type));
ThrowClassCastException(dest_type, src_type);
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
-extern "C" NO_RETURN void artThrowClassCastExceptionForObject(mirror::Object* obj,
- mirror::Class* dest_type,
- Thread* self)
+extern "C" Context* artThrowClassCastExceptionForObject(mirror::Object* obj,
+ mirror::Class* dest_type,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
- artThrowClassCastException(dest_type, obj->GetClass(), self);
+ return artThrowClassCastException(dest_type, obj->GetClass(), self);
}
-extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
- Thread* self)
+extern "C" Context* artThrowArrayStoreException(mirror::Object* array,
+ mirror::Object* value,
+ Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayStoreException(value->GetClass(), array->GetClass());
- self->QuickDeliverException();
+ return self->QuickDeliverException();
}
} // namespace art
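Every throw entrypoint in this file now follows the same shape; a hedged template, where artThrowFooFromCode and ThrowFoo are placeholder names rather than symbols in the patch:

    extern "C" Context* artThrowFooFromCode(Thread* self)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      ScopedQuickEntrypointChecks sqec(self);
      ThrowFoo();                            // Record the pending exception on |self|.
      return self->QuickDeliverException();  // Prepare, but do not take, the long jump.
    }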
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index b53339acd9..a4bd92da57 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -60,8 +60,8 @@
namespace art HIDDEN {
-extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(DeoptimizationKind kind, Thread* self);
-extern "C" NO_RETURN void artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
+extern "C" Context* artDeoptimizeFromCompiledCode(DeoptimizationKind kind, Thread* self);
+extern "C" Context* artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
// Visits the arguments as saved to the stack by a CalleeSaveType::kRefAndArgs callee save frame.
class QuickArgumentVisitor {
@@ -2517,12 +2517,13 @@ extern "C" void artJniMethodEntryHook(Thread* self)
instr->MethodEnterEvent(self, method);
}
-extern "C" void artMethodEntryHook(ArtMethod* method, Thread* self, ArtMethod** sp)
+extern "C" Context* artMethodEntryHook(ArtMethod* method, Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
if (instr->HasFastMethodEntryListenersOnly()) {
instr->MethodEnterEvent(self, method);
- return;
+ // No exception or deoptimization.
+ return nullptr;
}
if (instr->HasMethodEntryListeners()) {
@@ -2533,18 +2534,20 @@ extern "C" void artMethodEntryHook(ArtMethod* method, Thread* self, ArtMethod**
// Instrumentation can request deoptimizing only a particular method (e.g. when there are
// breakpoints on the method). In such cases deoptimize only this method.
// FullFrame deoptimizations are handled on method exits.
- artDeoptimizeFromCompiledCode(DeoptimizationKind::kDebugging, self);
+ return artDeoptimizeFromCompiledCode(DeoptimizationKind::kDebugging, self);
}
} else {
DCHECK(!instr->IsDeoptimized(method));
}
+ // No exception or deoptimization.
+ return nullptr;
}
-extern "C" void artMethodExitHook(Thread* self,
- ArtMethod** sp,
- uint64_t* gpr_result,
- uint64_t* fpr_result,
- uint32_t frame_size)
+extern "C" Context* artMethodExitHook(Thread* self,
+ ArtMethod** sp,
+ uint64_t* gpr_result,
+ uint64_t* fpr_result,
+ uint32_t frame_size)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
// Instrumentation exit stub must not be entered with a pending exception.
@@ -2560,7 +2563,8 @@ extern "C" void artMethodExitHook(Thread* self,
// or a return value.
JValue return_value;
instr->MethodExitEvent(self, method, /* frame= */ {}, return_value);
- return;
+ // No exception or deoptimization.
+ return nullptr;
}
bool is_ref = false;
@@ -2593,8 +2597,7 @@ extern "C" void artMethodExitHook(Thread* self,
if (self->IsExceptionPending() || self->ObserveAsyncException()) {
// The exception was thrown from the method exit callback. We should not call method unwind
// callbacks for this case.
- self->QuickDeliverException(/* is_method_exit_exception= */ true);
- UNREACHABLE();
+ return self->QuickDeliverException(/* is_method_exit_exception= */ true);
}
// We should deoptimize here if the caller requires a deoptimization or if the current method
@@ -2609,9 +2612,11 @@ extern "C" void artMethodExitHook(Thread* self,
ret_val, is_ref, self->GetException(), false, deopt_method_type);
// Method exit callback has already been run for this method. So tell the deoptimizer to skip
// callbacks for this frame.
- artDeoptimize(self, /*skip_method_exit_callbacks = */ true);
- UNREACHABLE();
+ return artDeoptimize(self, /*skip_method_exit_callbacks = */ true);
}
+
+ // No exception or deoptimization.
+ return nullptr;
}
} // namespace art
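artMethodExitHook thus ends up with exactly three exits, all reported through the one return value; a hedged summary, with the calls taken from the hunks above:

    // 1. An exit callback threw:
    //      return self->QuickDeliverException(/* is_method_exit_exception= */ true);
    // 2. A deoptimization is required:
    //      return artDeoptimize(self, /*skip_method_exit_callbacks = */ true);
    // 3. Neither:
    //      return nullptr;  // The .S caller restores the frame and returns normally.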
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index d4014162c4..9556672d05 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -96,9 +96,7 @@ class EntrypointsOrderTest : public CommonArtTest {
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, wait_next, monitor_enter_object, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, monitor_enter_object, top_handle_scope, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, top_handle_scope, class_loader_override, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, class_loader_override, long_jump_context, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, long_jump_context,
- stacked_shadow_frame_record, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, class_loader_override, stacked_shadow_frame_record, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stacked_shadow_frame_record,
deoptimization_context_stack, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, deoptimization_context_stack,
@@ -108,11 +106,11 @@ class EntrypointsOrderTest : public CommonArtTest {
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, pthread_self, active_suspendall_barrier, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, active_suspendall_barrier,
active_suspend1_barriers, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, active_suspend1_barriers, thread_local_start,
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, active_suspend1_barriers, thread_local_pos,
sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_limit, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_start, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_limit, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_limit, thread_local_objects, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, checkpoint_function, sizeof(size_t));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_function, jni_entrypoints,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 8977635cd8..24f543ba84 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -55,8 +55,8 @@
#include "thread_list.h"
namespace art HIDDEN {
-extern "C" NO_RETURN void artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
-extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self);
+extern "C" Context* artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
+extern "C" void artDeliverPendingExceptionFromCode(Thread* self);
namespace instrumentation {
@@ -1724,11 +1724,11 @@ bool Instrumentation::PushDeoptContextIfNeeded(Thread* self,
return true;
}
-void Instrumentation::DeoptimizeIfNeeded(Thread* self,
- ArtMethod** sp,
- DeoptimizationMethodType type,
- JValue return_value,
- bool is_reference) {
+Context* Instrumentation::DeoptimizeIfNeeded(Thread* self,
+ ArtMethod** sp,
+ DeoptimizationMethodType type,
+ JValue return_value,
+ bool is_reference) {
if (self->IsAsyncExceptionPending() || ShouldDeoptimizeCaller(self, sp)) {
self->PushDeoptimizationContext(return_value,
is_reference,
@@ -1737,8 +1737,10 @@ void Instrumentation::DeoptimizeIfNeeded(Thread* self,
type);
// This is requested from suspend points or when returning from runtime methods so exit
// callbacks wouldn't be run yet. So don't skip method callbacks.
- artDeoptimize(self, /* skip_method_exit_callbacks= */ false);
+ return artDeoptimize(self, /* skip_method_exit_callbacks= */ false);
}
+ // No exception or deoptimization.
+ return nullptr;
}
bool Instrumentation::NeedsSlowInterpreterForMethod(Thread* self, ArtMethod* method) {
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 35e7f13c08..3ef24d1340 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -26,6 +26,7 @@
#include <queue>
#include <unordered_set>
+#include "arch/context.h"
#include "arch/instruction_set.h"
#include "base/locks.h"
#include "base/macros.h"
@@ -533,11 +534,14 @@ class Instrumentation {
DeoptimizationMethodType deopt_type,
bool is_ref,
const JValue& result) REQUIRES_SHARED(Locks::mutator_lock_);
- void DeoptimizeIfNeeded(Thread* self,
- ArtMethod** sp,
- DeoptimizationMethodType type,
- JValue result,
- bool is_ref) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Deoptimize upon pending exception or if the caller requires it. Returns the long jump
+ // context if a deoptimization is needed and taken, or nullptr otherwise.
+ Context* DeoptimizeIfNeeded(Thread* self,
+ ArtMethod** sp,
+ DeoptimizationMethodType type,
+ JValue result,
+ bool is_ref) REQUIRES_SHARED(Locks::mutator_lock_);
// This returns if the caller of runtime method requires a deoptimization. This checks both if the
// method requires a deopt or if this particular frame needs a deopt because of a class
// redefinition.
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 37734f7d74..8924108f74 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -52,7 +52,7 @@ static constexpr size_t kInvalidFrameDepth = 0xffffffff;
QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimization)
: self_(self),
- context_(self->GetLongJumpContext()),
+ context_(Context::Create()),
is_deoptimization_(is_deoptimization),
handler_quick_frame_(nullptr),
handler_quick_frame_pc_(0),
@@ -200,7 +200,7 @@ void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception,
// Walk the stack to find catch handler.
CatchBlockStackVisitor visitor(self_,
- context_,
+ context_.get(),
&exception_ref,
this,
/*skip_frames=*/already_popped,
@@ -709,7 +709,7 @@ void QuickExceptionHandler::DeoptimizeStack(bool skip_method_exit_callbacks) {
self_->DumpStack(LOG_STREAM(INFO) << "Deoptimizing: ");
}
- DeoptimizeStackVisitor visitor(self_, context_, this, false, skip_method_exit_callbacks);
+ DeoptimizeStackVisitor visitor(self_, context_.get(), this, false, skip_method_exit_callbacks);
visitor.WalkStack(true);
PrepareForLongJumpToInvokeStubOrInterpreterBridge();
}
@@ -720,7 +720,7 @@ void QuickExceptionHandler::DeoptimizeSingleFrame(DeoptimizationKind kind) {
// This deopt is requested while still executing the method. We haven't run method exit callbacks
// yet, so don't skip them.
DeoptimizeStackVisitor visitor(
- self_, context_, this, true, /* skip_method_exit_callbacks= */ false);
+ self_, context_.get(), this, true, /* skip_method_exit_callbacks= */ false);
visitor.WalkStack(true);
// Compiled code made an explicit deoptimization.
@@ -806,9 +806,8 @@ void QuickExceptionHandler::DeoptimizePartialFragmentFixup() {
}
}
-void QuickExceptionHandler::DoLongJump(bool smash_caller_saves) {
- // Place context back on thread so it will be available when we continue.
- self_->ReleaseLongJumpContext(context_);
+Context* QuickExceptionHandler::PrepareLongJump(bool smash_caller_saves) {
+ // Prepare and return the context.
context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
CHECK_NE(handler_quick_frame_pc_, 0u);
context_->SetPC(handler_quick_frame_pc_);
@@ -827,8 +826,7 @@ void QuickExceptionHandler::DoLongJump(bool smash_caller_saves) {
}
// Clear the dex_pc list so as not to leak memory.
handler_dex_pc_list_.reset();
- context_->DoLongJump();
- UNREACHABLE();
+ return context_.release();
}
void QuickExceptionHandler::DumpFramesWithType(Thread* self, bool details) {
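The subtle part here is ownership: context_ is now a std::unique_ptr, and PrepareLongJump() release()s it into the raw Context* that travels through the entrypoint's return value into RDI/EDI. A hedged lifecycle sketch; the final delete is assumed to live in artContextCopyForLongJump, as sketched earlier:

    Context* ctx = exception_handler.PrepareLongJump();  // unique_ptr released; ownership moves out.
    // .S stub: movq %rax, %rdi; call art_quick_do_long_jump   (never returns)
    // art_quick_do_long_jump: artContextCopyForLongJump(ctx, ...) fills the stack
    // buffers, frees ctx (assumed), then the stub restores registers and jumps.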
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index d39c82bea3..733f9e037f 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -46,11 +46,6 @@ class QuickExceptionHandler {
QuickExceptionHandler(Thread* self, bool is_deoptimization)
REQUIRES_SHARED(Locks::mutator_lock_);
- NO_RETURN ~QuickExceptionHandler() {
- LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump.
- UNREACHABLE();
- }
-
// Find the catch handler for the given exception and call all required Instrumentation methods.
// Note this might result in the caught exception being different from 'exception'.
void FindCatch(ObjPtr<mirror::Throwable> exception, bool is_method_exit_exception)
@@ -81,8 +76,8 @@ class QuickExceptionHandler {
void SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Long jump either to a catch handler or to the upcall.
- NO_RETURN void DoLongJump(bool smash_caller_saves = true) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Prepares a long jump context for a jump either to a catch handler or to the upcall.
+ Context* PrepareLongJump(bool smash_caller_saves = true) REQUIRES_SHARED(Locks::mutator_lock_);
void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) {
handler_quick_frame_ = handler_quick_frame;
@@ -148,7 +143,7 @@ class QuickExceptionHandler {
private:
Thread* const self_;
- Context* const context_;
+ std::unique_ptr<Context> context_;
// Should we deoptimize the stack?
const bool is_deoptimization_;
// Quick frame with found handler or last frame if no handler found.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index ee1dc15413..0ce018ab76 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -133,7 +133,7 @@ namespace art HIDDEN {
using android::base::StringAppendV;
using android::base::StringPrintf;
-extern "C" NO_RETURN void artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
+extern "C" Context* artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
@@ -2697,10 +2697,6 @@ Thread::~Thread() {
delete wait_cond_;
delete wait_mutex_;
- if (tlsPtr_.long_jump_context != nullptr) {
- delete tlsPtr_.long_jump_context;
- }
-
if (initialized) {
CleanupCpu();
}
@@ -3938,7 +3934,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
os << offset;
}
-void Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
+Context* Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
// Get exception from thread.
ObjPtr<mirror::Throwable> exception = GetException();
CHECK(exception != nullptr);
@@ -3946,8 +3942,7 @@ void Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
// This wasn't a real exception, so just clear it here. If there was an actual exception it
// will be recorded in the DeoptimizationContext and it will be restored later.
ClearException();
- artDeoptimize(this, skip_method_exit_callbacks);
- UNREACHABLE();
+ return artDeoptimize(this, skip_method_exit_callbacks);
}
ReadBarrier::MaybeAssertToSpaceInvariant(exception.Ptr());
@@ -3989,8 +3984,7 @@ void Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
exception,
/* from_code= */ false,
method_type);
- artDeoptimize(this, skip_method_exit_callbacks);
- UNREACHABLE();
+ return artDeoptimize(this, skip_method_exit_callbacks);
} else {
LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
<< visitor.caller->PrettyMethod();
@@ -4015,18 +4009,7 @@ void Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
// Check the to-space invariant on the re-installed exception (if applicable).
ReadBarrier::MaybeAssertToSpaceInvariant(GetException());
}
- exception_handler.DoLongJump();
-}
-
-Context* Thread::GetLongJumpContext() {
- Context* result = tlsPtr_.long_jump_context;
- if (result == nullptr) {
- result = Context::Create();
- } else {
- tlsPtr_.long_jump_context = nullptr; // Avoid context being shared.
- result->Reset();
- }
- return result;
+ return exception_handler.PrepareLongJump();
}
ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc_out,
@@ -4831,20 +4814,6 @@ void Thread::ClearAllInterpreterCaches() {
Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
}
-
-void Thread::ReleaseLongJumpContextInternal() {
- // Each QuickExceptionHandler gets a long jump context and uses
- // it for doing the long jump, after finding catch blocks/doing deoptimization.
- // Both finding catch blocks and deoptimization can trigger another
- // exception such as a result of class loading. So there can be nested
- // cases of exception handling and multiple contexts being used.
- // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
- // for reuse so there is no need to always allocate a new one each time when
- // getting a context. Since we only keep one context for reuse, delete the
- // existing one since the passed in context is yet to be used for longjump.
- delete tlsPtr_.long_jump_context;
-}
-
void Thread::SetNativePriority(int new_priority) {
palette_status_t status = PaletteSchedSetPriority(GetTid(), new_priority);
CHECK(status == PALETTE_STATUS_OK || status == PALETTE_STATUS_CHECK_ERRNO);
diff --git a/runtime/thread.h b/runtime/thread.h
index fda086adf0..1969aa1fde 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -684,21 +684,13 @@ class EXPORT Thread {
// that needs to be dealt with, false otherwise.
bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
- // Find catch block and perform long jump to appropriate exception handle. When
- // is_method_exit_exception is true, the exception was thrown by the method exit callback and we
- // should not send method unwind for the method on top of the stack since method exit callback was
- // already called.
- NO_RETURN void QuickDeliverException(bool is_method_exit_exception = false)
+ // Find the catch block, then prepare and return the long jump context for the appropriate
+ // exception handler. When is_method_exit_exception is true, the exception was thrown by the
+ // method exit callback and we should not send a method unwind event for the method on top of
+ // the stack, since the method exit callback was already called.
+ Context* QuickDeliverException(bool is_method_exit_exception = false)
REQUIRES_SHARED(Locks::mutator_lock_);
- Context* GetLongJumpContext();
- void ReleaseLongJumpContext(Context* context) {
- if (tlsPtr_.long_jump_context != nullptr) {
- ReleaseLongJumpContextInternal();
- }
- tlsPtr_.long_jump_context = context;
- }
-
// Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
// abort the runtime iff abort_on_error is true.
ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
@@ -1835,8 +1827,6 @@ class EXPORT Thread {
static bool IsAotCompiler();
- void ReleaseLongJumpContextInternal();
-
void SetCachedThreadName(const char* name);
// Helper class for manipulating the 32 bits of atomically changed state and flags.
@@ -2141,7 +2131,6 @@ class EXPORT Thread {
monitor_enter_object(nullptr),
top_handle_scope(nullptr),
class_loader_override(nullptr),
- long_jump_context(nullptr),
stacked_shadow_frame_record(nullptr),
deoptimization_context_stack(nullptr),
frame_id_to_shadow_frame(nullptr),
@@ -2149,9 +2138,9 @@ class EXPORT Thread {
pthread_self(0),
active_suspendall_barrier(nullptr),
active_suspend1_barriers(nullptr),
- thread_local_start(nullptr),
thread_local_pos(nullptr),
thread_local_end(nullptr),
+ thread_local_start(nullptr),
thread_local_limit(nullptr),
thread_local_objects(0),
checkpoint_function(nullptr),
@@ -2240,9 +2229,6 @@ class EXPORT Thread {
// useful for testing.
jobject class_loader_override;
- // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
- Context* long_jump_context;
-
// For gc purpose, a shadow frame record stack that keeps track of:
// 1) shadow frames under construction.
// 2) deoptimization shadow frames.
@@ -2281,14 +2267,14 @@ class EXPORT Thread {
// The struct as a whole is still stored on the requesting thread's stack.
WrappedSuspend1Barrier* active_suspend1_barriers GUARDED_BY(Locks::thread_suspend_count_lock_);
- // Thread-local allocation pointer. Can be moved below the following two to correct alignment.
- uint8_t* thread_local_start;
-
// thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
// potentially better performance.
uint8_t* thread_local_pos;
uint8_t* thread_local_end;
+ // Thread-local allocation pointer. Can be moved above the preceding two to correct alignment.
+ uint8_t* thread_local_start;
+
// Thread local limit is how much we can expand the thread local buffer to, it is greater or
// equal to thread_local_end.
uint8_t* thread_local_limit;
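With the cached tlsPtr_.long_jump_context gone, every QuickExceptionHandler owns a fresh context for its lifetime; a hedged before/after sketch of the allocation path:

    // Before: reuse the lazily cached per-thread context, then long jump from C++.
    Context* reused = self->GetLongJumpContext();   // Removed by this patch.
    // After: allocate per handler; ownership leaves via PrepareLongJump()/release(),
    // and the jump itself happens in the .S stub.
    std::unique_ptr<Context> owned(Context::Create());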