author      2024-08-01 11:56:37 +0000
committer   2024-08-05 10:18:42 +0000
commit      e90e6f26ed47130e5f8ecb19c74e63d4742b54bc (patch)
tree        a78aa243ed88bcfd2fad453b2ebc341f6ccfd589
parent      010414cb2be41b31c8e25e157d350c0f827acab1 (diff)
Clean up after exception delivery rewrite.
Clean up after
https://android-review.googlesource.com/2680017
and fix CFI for `art_quick_do_long_jump`.
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Change-Id: Ief6cda2c223c4225dc5b5145452d926674430caa
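
At its core, the cleanup makes the ownership contract for the long-jump `Context` explicit: the C++ entrypoints pass it around as `std::unique_ptr<Context>` and `release()` the raw pointer only at the boundary to the hand-written stubs, which hand it to `artContextCopyForLongJump` to be copied out and deleted. A minimal sketch of that handover, with a hypothetical `SomeThrowEntrypoint` standing in for the real throw/deoptimize entrypoints (which additionally take `Thread*` and hold the mutator lock):

    #include <cstdint>
    #include <memory>

    // Stand-in for art::Context: saved GPRs/FPRs plus the target SP and PC.
    struct Context {
      void CopyContextTo(uintptr_t* gprs, uintptr_t* fprs) { /* fill the arrays */ }
    };

    // Entrypoint side: ownership travels as std::unique_ptr<Context> inside C++ and
    // is released to a raw pointer only because the caller is an assembly stub.
    extern "C" Context* SomeThrowEntrypoint() {
      std::unique_ptr<Context> context = std::make_unique<Context>();  // e.g. QuickDeliverException()
      return context.release();  // art_quick_do_long_jump receives this (in EAX on x86).
    }

    // Stub side: art_quick_do_long_jump calls this to copy the context into
    // stack-reserved GPR/FPR areas and then free it, completing the handover.
    extern "C" void artContextCopyForLongJump(Context* context, uintptr_t* gprs, uintptr_t* fprs) {
      context->CopyContextTo(gprs, fprs);
      delete context;  // Ownership was transferred by release() above.
    }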
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S                        1
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S                    1
-rw-r--r--  runtime/arch/context.cc                                         2
-rw-r--r--  runtime/arch/riscv64/quick_entrypoints_riscv64.S                1
-rw-r--r--  runtime/arch/x86/asm_support_x86.S                              1
-rw-r--r--  runtime/arch/x86/jni_entrypoints_x86.S                          1
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S                       78
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S                 56
-rw-r--r--  runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc  47
-rw-r--r--  runtime/entrypoints/quick/quick_thread_entrypoints.cc          11
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc           57
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc      22
-rw-r--r--  runtime/instrumentation.cc                                      15
-rw-r--r--  runtime/instrumentation.h                                       12
-rw-r--r--  runtime/quick_exception_handler.cc                               4
-rw-r--r--  runtime/quick_exception_handler.h                                4
-rw-r--r--  runtime/thread.cc                                               44
-rw-r--r--  runtime/thread.h                                                 8
-rwxr-xr-x  tools/check_cfi.py                                               4
19 files changed, 208 insertions, 161 deletions
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 6d5b7b8a68..2621918d09 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -571,6 +571,7 @@ ARM_ENTRY art_quick_do_long_jump
     ldr lr, [sp, #56]   @ Load LR from gprs_, 56 = 4 * 14.
     ldr sp, [sp, #52]   @ Load SP from gprs_ 52 = 4 * 13.
                         @ Do not access gprs_ from now, they are below SP.
+    .cfi_def_cfa_offset 0
     REFRESH_MARKING_REGISTER
     bx r12              @ Do long jump.
 END art_quick_do_long_jump
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 2600211649..6189320847 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -860,6 +860,7 @@ ENTRY art_quick_do_long_jump
     // Set SP. Do not access fprs_ and gprs_ from now, they are below SP.
     mov sp, xIP0
+    .cfi_def_cfa_offset 0

     REFRESH_MARKING_REGISTER
     REFRESH_SUSPEND_CHECK_REGISTER
diff --git a/runtime/arch/context.cc b/runtime/arch/context.cc
index f5ec06d269..1d69691619 100644
--- a/runtime/arch/context.cc
+++ b/runtime/arch/context.cc
@@ -25,6 +25,8 @@ Context* Context::Create() {
 extern "C" void artContextCopyForLongJump(Context* context, uintptr_t* gprs, uintptr_t* fprs) {
   context->CopyContextTo(gprs, fprs);
   // Once the context has been copied, it is no longer needed.
+  // The context pointer is passed via hand-written assembly stubs, otherwise we'd take the
+  // context argument as a `std::unique_ptr<>` to indicate the ownership handover.
   delete context;
 }
diff --git a/runtime/arch/riscv64/quick_entrypoints_riscv64.S b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
index 9c7108e8e9..06b9e9b7bb 100644
--- a/runtime/arch/riscv64/quick_entrypoints_riscv64.S
+++ b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
@@ -558,6 +558,7 @@ ENTRY art_quick_do_long_jump
     // Set sp. Do not access fprs_ and gprs_ from now, they are below sp.
     mv sp, t0
+    .cfi_def_cfa_offset 0

     jr t1
 END art_quick_do_long_jump
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 4298c65c5f..49b0eb5f0d 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -384,7 +384,6 @@ MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
     call SYMBOL(artDeliverPendingExceptionFromCode)  // artDeliverPendingExceptionFromCode(Thread*)
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
     CFI_ADJUST_CFA_OFFSET(-16)  // Reset CFA in case there is more code afterwards.
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
index cc845dae02..09b4d5a0f5 100644
--- a/runtime/arch/x86/jni_entrypoints_x86.S
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -358,7 +358,6 @@ DEFINE_FUNCTION art_jni_lock_object_no_inline
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
     call SYMBOL(artDeliverPendingExceptionFromCode)  // (Thread*)
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION art_jni_lock_object_no_inline
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index b4293cd94b..d5c186e1ad 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -211,7 +211,6 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
     call CALLVAR(cxx_name)  // cxx_name(Thread*)
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION VAR(c_name)
@@ -225,7 +224,6 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name)
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
     call CALLVAR(cxx_name)  // cxx_name(Thread*)
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION VAR(c_name)
@@ -240,7 +238,6 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH eax  // pass arg1
     call CALLVAR(cxx_name)  // cxx_name(arg1, Thread*)
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION VAR(c_name)
@@ -256,7 +253,6 @@ MACRO2(TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING, c_name, cxx_name)
     PUSH ecx  // pass arg2
     PUSH eax  // pass arg1
     call CALLVAR(cxx_name)  // cxx_name(arg1, arg2, Thread*)
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION VAR(c_name)
@@ -284,7 +280,6 @@ DEFINE_FUNCTION_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, 2
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH eax  // pass arg1
     call SYMBOL(artThrowNullPointerExceptionFromSignal)  // (addr, self)
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION art_quick_throw_null_pointer_exception_from_signal
@@ -656,31 +651,30 @@ END_FUNCTION art_quick_invoke_static_stub
     /*
      * Long jump stub.
-     * On entry edi is the long jump context. This is expected to be returned from a previous
-     * entrypoint call which threw an exception or deoptimized.
+     * Custom calling convention: On entry EAX is the long jump context. This is expected to
+     * be returned from a previous entrypoint call which threw an exception or deoptimized.
      */
 DEFINE_FUNCTION art_quick_do_long_jump
 #if defined(__APPLE__)
     int3
     int3
 #else
-    // Reserve space for the gprs + fprs;
-    INCREASE_FRAME X86_LONG_JUMP_CONTEXT_SIZE
+    // Reserve space for the gprs + fprs; add 16-byte stack alignment padding for call.
+    // (Note that the return address plus 3 args below shall take exactly 16 bytes.)
+    INCREASE_FRAME (X86_LONG_JUMP_CONTEXT_SIZE + 15) & ~15

-    lea 0(%esp), %esi  // GPRS
-    lea X86_LONG_JUMP_GPRS_SIZE(%esp), %edx  // FPRS
+    lea 0(%esp), %esi                        // GPRS
+    lea X86_LONG_JUMP_GPRS_SIZE(%esp), %edx  // FPRS
     PUSH_ARG edx
     PUSH_ARG esi
-    PUSH_ARG edi
-
+    PUSH_ARG eax
     call SYMBOL(artContextCopyForLongJump)  // Context* context,
                                             // uintptr_t* gprs,
                                             // uintptr_t* fprs
-    POP_ARG edi
-    POP_ARG esi  // GPRS
-    POP_ARG edx  // FPRS
+    DECREASE_FRAME 8  // Remove the context and GPRS arguments.
+    POP_ARG edx       // Restore FPRS, make ESP point to GPRS.

     // Address base of FPRs.
     movsd 0(%edx), %xmm0  // Load up XMM0-XMM7.
@@ -691,10 +685,12 @@ DEFINE_FUNCTION art_quick_do_long_jump
     movsd 40(%edx), %xmm5
     movsd 48(%edx), %xmm6
     movsd 56(%edx), %xmm7
-    movl %esi, %esp  // ESP points to gprs.
     popal            // Load all registers except ESP and EIP with values in gprs.
-    POP esp          // Load stack pointer.
-    ret              // From higher in the stack pop eip.
+    CFI_ADJUST_CFA_OFFSET(-(X86_LONG_JUMP_GPRS_SIZE - /*ESP*/ 4))
+
+    POP_ARG esp      // Load stack pointer.
+    CFI_DEF_CFA(esp, 4)
+    ret              // From higher in the stack pop EIP.
 #endif
 END_FUNCTION art_quick_do_long_jump
@@ -830,8 +826,8 @@ MACRO2(DEOPT_OR_RETURN, temp, is_ref = 0)
     DECREASE_FRAME(16)  // pop arguments

     CFI_REMEMBER_STATE
-    cmp LITERAL(0), %eax
-    jne 3f
+    testl %eax, %eax
+    jnz 3f
     RESTORE_SAVE_EVERYTHING_FRAME
     ret

@@ -839,7 +835,6 @@
 3:
     // Deoptimize
     CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_MACRO
@@ -863,8 +858,8 @@ MACRO2(DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_EAX, temp, is_ref = 0)
     DECREASE_FRAME(16)  // pop arguments

     CFI_REMEMBER_STATE
-    cmp LITERAL(0), %eax
-    jne 3f
+    testl %eax, %eax
+    jnz 3f
     RESTORE_SAVE_EVERYTHING_FRAME
     ret

@@ -872,7 +867,6 @@
 3:
     // Deoptimize
     CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_MACRO
@@ -1308,7 +1302,6 @@ DEFINE_FUNCTION art_quick_check_instance_of
     PUSH ecx  // pass arg2
     PUSH eax  // pass arg1
     call SYMBOL(artThrowClassCastExceptionForObject)  // (Object* src, Class* dest, Thread*)
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION art_quick_check_instance_of
@@ -1386,7 +1379,6 @@ DEFINE_FUNCTION art_quick_aput_obj
     PUSH_ARG edx  // Pass arg2 - value.
     PUSH_ARG eax  // Pass arg1 - array.
     call SYMBOL(artThrowArrayStoreException)  // (array, value, Thread*)
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
@@ -1446,8 +1438,8 @@ DEFINE_FUNCTION art_quick_test_suspend
     DECREASE_FRAME 16  // pop arguments

     CFI_REMEMBER_STATE
-    cmp LITERAL(0), %eax
-    jne .Ltest_suspend_deoptimize
+    testl %eax, %eax
+    jnz .Ltest_suspend_deoptimize
     RESTORE_SAVE_EVERYTHING_FRAME  // restore frame up to return address
     ret                            // return
@@ -1455,7 +1447,6 @@ DEFINE_FUNCTION art_quick_test_suspend
 .Ltest_suspend_deoptimize:
     // Deoptimize
     CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION art_quick_test_suspend
@@ -1807,7 +1798,6 @@ DEFINE_FUNCTION art_quick_deoptimize_from_compiled_code
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH eax
     call SYMBOL(artDeoptimizeFromCompiledCode)  // (DeoptimizationKind, Thread*)
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION art_quick_deoptimize_from_compiled_code
@@ -2179,8 +2169,8 @@ DEFINE_FUNCTION art_quick_update_inline_cache
     movl INLINE_CACHE_CLASSES_OFFSET(%ebp), %eax
     cmpl %ecx, %eax
     je .Ldone
-    cmpl LITERAL(0), %eax
-    jne .Lentry2
+    testl %eax, %eax
+    jnz .Lentry2
     lock cmpxchg %ecx, INLINE_CACHE_CLASSES_OFFSET(%ebp)
     jz .Ldone
     jmp .Lentry1
@@ -2188,8 +2178,8 @@ DEFINE_FUNCTION art_quick_update_inline_cache
     movl (INLINE_CACHE_CLASSES_OFFSET+4)(%ebp), %eax
     cmpl %ecx, %eax
     je .Ldone
-    cmpl LITERAL(0), %eax
-    jne .Lentry3
+    testl %eax, %eax
+    jnz .Lentry3
     lock cmpxchg %ecx, (INLINE_CACHE_CLASSES_OFFSET+4)(%ebp)
     jz .Ldone
     jmp .Lentry2
@@ -2197,8 +2187,8 @@ DEFINE_FUNCTION art_quick_update_inline_cache
     movl (INLINE_CACHE_CLASSES_OFFSET+8)(%ebp), %eax
     cmpl %ecx, %eax
     je .Ldone
-    cmpl LITERAL(0), %eax
-    jne .Lentry4
+    testl %eax, %eax
+    jnz .Lentry4
     lock cmpxchg %ecx, (INLINE_CACHE_CLASSES_OFFSET+8)(%ebp)
     jz .Ldone
     jmp .Lentry3
@@ -2206,8 +2196,8 @@ DEFINE_FUNCTION art_quick_update_inline_cache
     movl (INLINE_CACHE_CLASSES_OFFSET+12)(%ebp), %eax
     cmpl %ecx, %eax
     je .Ldone
-    cmpl LITERAL(0), %eax
-    jne .Lentry5
+    testl %eax, %eax
+    jnz .Lentry5
     lock cmpxchg %ecx, (INLINE_CACHE_CLASSES_OFFSET+12)(%ebp)
     jz .Ldone
     jmp .Lentry4
@@ -2262,8 +2252,8 @@ DEFINE_FUNCTION art_quick_method_entry_hook
     CFI_ADJUST_CFA_OFFSET(-16)

     CFI_REMEMBER_STATE
-    cmp LITERAL(0), %eax
-    jne .Lentryhook_deopt
+    testl %eax, %eax
+    jnz .Lentryhook_deopt

     // Normal return.
     RESTORE_SAVE_EVERYTHING_FRAME
@@ -2272,7 +2262,6 @@ DEFINE_FUNCTION art_quick_method_entry_hook
 .Lentryhook_deopt:
     // Deoptimize.
     CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION art_quick_method_entry_hook
@@ -2306,8 +2295,8 @@ DEFINE_FUNCTION art_quick_method_exit_hook
     CFI_ADJUST_CFA_OFFSET(-32)

     CFI_REMEMBER_STATE
-    cmp LITERAL(0), %eax
-    jne .Lexithook_deopt_or_exception
+    testl %eax, %eax
+    jnz .Lexithook_deopt_or_exception

     // Return result could have been changed if it's a reference.
     movl %ecx, (80)(%esp)
@@ -2319,7 +2308,6 @@ DEFINE_FUNCTION art_quick_method_exit_hook
 .Lexithook_deopt_or_exception:
     // Deoptimize or exception thrown.
     CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
-    mov %eax, %edi  // pass Context*
     call SYMBOL(art_quick_do_long_jump)
     UNREACHABLE
 END_FUNCTION art_quick_method_exit_hook
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index a538aa769e..52024ca040 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -638,23 +638,24 @@ DEFINE_FUNCTION art_quick_do_long_jump
     movq (X86_64_LONG_JUMP_GPRS_SIZE+120)(%rsp), %xmm15  // Restore FPRs.

     // Load all registers except RSP and RIP with values in gprs.
-    popq %r15
-    popq %r14
-    popq %r13
-    popq %r12
-    popq %r11
-    popq %r10
-    popq %r9
-    popq %r8
-    popq %rdi
-    popq %rsi
-    popq %rbp
+    POP_ARG r15
+    POP_ARG r14
+    POP_ARG r13
+    POP_ARG r12
+    POP_ARG r11
+    POP_ARG r10
+    POP_ARG r9
+    POP_ARG r8
+    POP_ARG rdi
+    POP_ARG rsi
+    POP_ARG rbp
     addq LITERAL(8), %rsp  // Skip rsp
-    popq %rbx
-    popq %rdx
-    popq %rcx
-    popq %rax
+    POP_ARG rbx
+    POP_ARG rdx
+    POP_ARG rcx
+    POP_ARG rax
     popq %rsp  // Load stack pointer.
+    CFI_DEF_CFA(rsp, 8)
     ret  // From higher in the stack pop rip.
 #endif  // __APPLE__
 END_FUNCTION art_quick_do_long_jump
@@ -764,8 +765,8 @@ MACRO1(DEOPT_OR_RETURN, is_ref = 0)
     call SYMBOL(artDeoptimizeIfNeeded)

     CFI_REMEMBER_STATE
-    cmpq LITERAL(0), %rax
-    jne 3f
+    testq %rax, %rax
+    jnz 3f

     RESTORE_SAVE_EVERYTHING_FRAME
     ret
@@ -793,8 +794,8 @@ MACRO1(DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_RAX, is_ref = 0)
     call SYMBOL(artDeoptimizeIfNeeded)

     CFI_REMEMBER_STATE
-    cmpq LITERAL(0), %rax
-    jne 3f
+    testq %rax, %rax
+    jnz 3f

     RESTORE_SAVE_EVERYTHING_FRAME
     ret
@@ -1188,8 +1189,8 @@ DEFINE_FUNCTION art_quick_check_instance_of
     CFI_ADJUST_CFA_OFFSET(8)
     SETUP_FP_CALLEE_SAVE_FRAME
     call SYMBOL(artInstanceOfFromCode)  // (Object* obj, Class* ref_klass)
-    testq %rax, %rax
     CFI_REMEMBER_STATE
+    testq %rax, %rax
     jz .Lthrow_class_cast_exception  // jump forward if not assignable
     RESTORE_FP_CALLEE_SAVE_FRAME
     addq LITERAL(24), %rsp  // pop arguments
@@ -1321,8 +1322,8 @@ DEFINE_FUNCTION art_quick_test_suspend
     call SYMBOL(artTestSuspendFromCode)  // (Thread*)

     CFI_REMEMBER_STATE
-    cmpq LITERAL(0), %rax
-    jne .Ltest_suspend_deoptimize
+    testq %rax, %rax
+    jnz .Ltest_suspend_deoptimize

     // Normal return.
     RESTORE_SAVE_EVERYTHING_FRAME  // restore frame up to return address
@@ -2063,12 +2064,11 @@ DEFINE_FUNCTION art_quick_method_entry_hook
     movq FRAME_SIZE_SAVE_EVERYTHING(%rsp), %rdi  // pass ArtMethod
     movq %gs:THREAD_SELF_OFFSET, %rsi            // pass Thread::Current()
     movq %rsp, %rdx                              // SP
-
-    CFI_REMEMBER_STATE
     call SYMBOL(artMethodEntryHook)  // (ArtMethod*, Thread*, sp)

-    cmpq LITERAL(0), %rax
-    jne .Lentryhook_deopt
+    CFI_REMEMBER_STATE
+    testq %rax, %rax
+    jnz .Lentryhook_deopt

     // Normal return.
     RESTORE_SAVE_EVERYTHING_FRAME
@@ -2096,8 +2096,8 @@ DEFINE_FUNCTION art_quick_method_exit_hook
                                      //  frame_size)

     CFI_REMEMBER_STATE
-    cmpq LITERAL(0), %rax
-    jne .Lexithook_deopt_or_exception
+    testq %rax, %rax
+    jnz .Lexithook_deopt_or_exception

     // Normal return.
     RESTORE_SAVE_EVERYTHING_FRAME
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 3ed3718788..3dfeacc7ba 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */

+#include "arch/context.h"
 #include "base/logging.h"  // For VLOG_IS_ON.
#include "base/mutex.h" #include "callee_save_frame.h" @@ -25,44 +26,14 @@ namespace art HIDDEN { -static Context* artDeoptimizeImpl(Thread* self, - DeoptimizationKind kind, - bool single_frame, - bool skip_method_exit_callbacks) - REQUIRES_SHARED(Locks::mutator_lock_) { - Runtime::Current()->IncrementDeoptimizationCount(kind); - if (VLOG_IS_ON(deopt)) { - if (single_frame) { - // Deopt logging will be in DeoptimizeSingleFrame. It is there to take advantage of the - // specialized visitor that will show whether a method is Quick or Shadow. - } else { - LOG(INFO) << "Deopting:"; - self->Dump(LOG_STREAM(INFO)); - } - } - - self->AssertHasDeoptimizationContext(); - QuickExceptionHandler exception_handler(self, true); - if (single_frame) { - exception_handler.DeoptimizeSingleFrame(kind); - } else { - exception_handler.DeoptimizeStack(skip_method_exit_callbacks); - } - if (exception_handler.IsFullFragmentDone()) { - return exception_handler.PrepareLongJump(true); - } else { - exception_handler.DeoptimizePartialFragmentFixup(); - // We cannot smash the caller-saves, as we need the ArtMethod in a parameter register that would - // be caller-saved. This has the downside that we cannot track incorrect register usage down the - // line. - return exception_handler.PrepareLongJump(false); - } -} - extern "C" Context* artDeoptimize(Thread* self, bool skip_method_exit_callbacks) REQUIRES_SHARED(Locks::mutator_lock_) { ScopedQuickEntrypointChecks sqec(self); - return artDeoptimizeImpl(self, DeoptimizationKind::kFullFrame, false, skip_method_exit_callbacks); + std::unique_ptr<Context> context = self->Deoptimize(DeoptimizationKind::kFullFrame, + /*single_frame=*/ false, + skip_method_exit_callbacks); + DCHECK(context != nullptr); + return context.release(); } // This is called directly from compiled code by an HDeoptimize. @@ -79,7 +50,11 @@ extern "C" Context* artDeoptimizeFromCompiledCode(DeoptimizationKind kind, Threa DeoptimizationMethodType::kDefault); // Deopting from compiled code, so method exit haven't run yet. Don't skip method exit callbacks // if required. - return artDeoptimizeImpl(self, kind, true, /* skip_method_exit_callbacks= */ false); + std::unique_ptr<Context> context = self->Deoptimize(kind, + /*single_frame=*/ true, + /* skip_method_exit_callbacks= */ false); + DCHECK(context != nullptr); + return context.release(); } } // namespace art diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc index f7d6de2c3c..e3511c80d5 100644 --- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc @@ -14,6 +14,7 @@ * limitations under the License. 
  */

+#include "arch/context.h"
 #include "callee_save_frame.h"
 #include "jit/jit.h"
 #include "runtime.h"
@@ -23,6 +24,7 @@ namespace art HIDDEN {

 extern "C" Context* artDeoptimizeIfNeeded(Thread* self, uintptr_t result, bool is_ref)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedQuickEntrypointChecks sqec(self);
   instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
   DCHECK(!self->IsExceptionPending());
@@ -32,7 +34,8 @@ extern "C" Context* artDeoptimizeIfNeeded(Thread* self, uintptr_t result, bool i
   DeoptimizationMethodType type = instr->GetDeoptimizationMethodType(*sp);
   JValue jvalue;
   jvalue.SetJ(result);
-  return instr->DeoptimizeIfNeeded(self, sp, type, jvalue, is_ref);
+  std::unique_ptr<Context> context = instr->DeoptimizeIfNeeded(self, sp, type, jvalue, is_ref);
+  return context.release();
 }

 extern "C" Context* artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -45,8 +48,9 @@ extern "C" Context* artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::
   ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
   JValue result;
   result.SetJ(0);
-  return Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
+  std::unique_ptr<Context> context = Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
       self, sp, DeoptimizationMethodType::kKeepDexPc, result, /* is_ref= */ false);
+  return context.release();
 }

 extern "C" Context* artImplicitSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -59,8 +63,9 @@ extern "C" Context* artImplicitSuspendFromCode(Thread* self) REQUIRES_SHARED(Loc
   ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
   JValue result;
   result.SetJ(0);
-  return Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
+  std::unique_ptr<Context> context = Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
       self, sp, DeoptimizationMethodType::kKeepDexPc, result, /* is_ref= */ false);
+  return context.release();
 }

 extern "C" void artCompileOptimized(ArtMethod* method, Thread* self)
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 3cd109fd31..3ca95eabe4 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */

+#include "arch/context.h"
 #include "art_method-inl.h"
 #include "callee_save_frame.h"
 #include "dex/code_item_accessors-inl.h"
@@ -30,7 +31,9 @@ namespace art HIDDEN {
 extern "C" Context* artDeliverPendingExceptionFromCode(Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 extern "C" Context* artInvokeObsoleteMethod(ArtMethod* method, Thread* self)
@@ -39,7 +42,9 @@ extern "C" Context* artInvokeObsoleteMethod(ArtMethod* method, Thread* self)
   ScopedQuickEntrypointChecks sqec(self);
   ThrowInternalError("Attempting to invoke obsolete version of '%s'.",
                      method->PrettyMethod().c_str());
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 // Called by generated code to throw an exception.
@@ -58,7 +63,9 @@ extern "C" Context* artDeliverExceptionFromCode(mirror::Throwable* exception, Th
   } else {
     self->SetException(exception);
   }
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 // Called by generated code to throw a NPE exception.
@@ -68,7 +75,9 @@ extern "C" Context* artThrowNullPointerExceptionFromCode(Thread* self)
   // We come from an explicit check in the generated code. This path is triggered
   // only if the object is indeed null.
   ThrowNullPointerExceptionFromDexPC(/* check_address= */ false, 0U);
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 // Installed by a signal handler to throw a NPE exception.
@@ -76,7 +85,9 @@ extern "C" Context* artThrowNullPointerExceptionFromSignal(uintptr_t addr, Threa
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ThrowNullPointerExceptionFromDexPC(/* check_address= */ true, addr);
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 // Called by generated code to throw an arithmetic divide by zero exception.
@@ -84,7 +95,9 @@ extern "C" Context* artThrowDivZeroFromCode(Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ThrowArithmeticExceptionDivideByZero();
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 // Called by generated code to throw an array index out of bounds exception.
@@ -92,7 +105,9 @@ extern "C" Context* artThrowArrayBoundsFromCode(int index, int length, Thread* s
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ThrowArrayIndexOutOfBoundsException(index, length);
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 // Called by generated code to throw a string index out of bounds exception.
@@ -100,19 +115,23 @@ extern "C" Context* artThrowStringBoundsFromCode(int index, int length, Thread*
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ThrowStringIndexOutOfBoundsException(index, length);
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 extern "C" Context* artThrowStackOverflowFromCode(Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ThrowStackOverflowError(self);
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 extern "C" Context* artThrowClassCastException(mirror::Class* dest_type,
-                                       mirror::Class* src_type,
-                                       Thread* self)
+                                               mirror::Class* src_type,
+                                               Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   if (dest_type == nullptr) {
@@ -137,24 +156,28 @@ extern "C" Context* artThrowClassCastException(mirror::Class* dest_type,
   }
   DCHECK(!dest_type->IsAssignableFrom(src_type));
   ThrowClassCastException(dest_type, src_type);
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 extern "C" Context* artThrowClassCastExceptionForObject(mirror::Object* obj,
-                                                mirror::Class* dest_type,
-                                                Thread* self)
+                                                        mirror::Class* dest_type,
+                                                        Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(obj != nullptr);
   return artThrowClassCastException(dest_type, obj->GetClass(), self);
 }

 extern "C" Context* artThrowArrayStoreException(mirror::Object* array,
-                                        mirror::Object* value,
-                                        Thread* self)
+                                                mirror::Object* value,
+                                                Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
   ThrowArrayStoreException(value->GetClass(), array->GetClass());
-  return self->QuickDeliverException();
+  std::unique_ptr<Context> context = self->QuickDeliverException();
+  DCHECK(context != nullptr);
+  return context.release();
 }

 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index a4bd92da57..eb20ed2051 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */

+#include "arch/context.h"
 #include "art_method-inl.h"
 #include "base/callee_save_type.h"
 #include "base/pointer_size.h"
@@ -2519,6 +2520,7 @@ extern "C" void artJniMethodEntryHook(Thread* self)
 extern "C" Context* artMethodEntryHook(ArtMethod* method, Thread* self, ArtMethod** sp)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedQuickEntrypointChecks sqec(self);
   instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
   if (instr->HasFastMethodEntryListenersOnly()) {
     instr->MethodEnterEvent(self, method);
@@ -2544,11 +2546,12 @@ extern "C" Context* artMethodEntryHook(ArtMethod* method, Thread* self, ArtMetho
 }

 extern "C" Context* artMethodExitHook(Thread* self,
-                              ArtMethod** sp,
-                              uint64_t* gpr_result,
-                              uint64_t* fpr_result,
-                              uint32_t frame_size)
+                                      ArtMethod** sp,
+                                      uint64_t* gpr_result,
+                                      uint64_t* fpr_result,
+                                      uint32_t frame_size)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedQuickEntrypointChecks sqec(self);
   DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
   // Instrumentation exit stub must not be entered with a pending exception.
   CHECK(!self->IsExceptionPending())
@@ -2597,7 +2600,10 @@ extern "C" Context* artMethodExitHook(Thread* self,
   if (self->IsExceptionPending() || self->ObserveAsyncException()) {
     // The exception was thrown from the method exit callback. We should not call method unwind
     // callbacks for this case.
-    return self->QuickDeliverException(/* is_method_exit_exception= */ true);
+    std::unique_ptr<Context> context =
+        self->QuickDeliverException(/* is_method_exit_exception= */ true);
+    DCHECK(context != nullptr);
+    return context.release();
   }

   // We should deoptimize here if the caller requires a deoptimization or if the current method
@@ -2612,7 +2618,11 @@ extern "C" Context* artMethodExitHook(Thread* self,
         ret_val, is_ref, self->GetException(), false, deopt_method_type);
     // Method exit callback has already been run for this method. So tell the deoptimizer to skip
     // callbacks for this frame.
-    return artDeoptimize(self, /*skip_method_exit_callbacks = */ true);
+    std::unique_ptr<Context> context = self->Deoptimize(DeoptimizationKind::kFullFrame,
+                                                        /* single_frame= */ false,
+                                                        /* skip_method_exit_callbacks= */ true);
+    DCHECK(context != nullptr);
+    return context.release();
   }

   // No exception or deoptimization.
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 24f543ba84..ce8c35bfac 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -55,7 +55,6 @@
 #include "thread_list.h"

 namespace art HIDDEN {
-extern "C" Context* artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
 extern "C" void artDeliverPendingExceptionFromCode(Thread* self);

 namespace instrumentation {
@@ -1724,11 +1723,11 @@ bool Instrumentation::PushDeoptContextIfNeeded(Thread* self,
   return true;
 }

-Context* Instrumentation::DeoptimizeIfNeeded(Thread* self,
-                                             ArtMethod** sp,
-                                             DeoptimizationMethodType type,
-                                             JValue return_value,
-                                             bool is_reference) {
+std::unique_ptr<Context> Instrumentation::DeoptimizeIfNeeded(Thread* self,
+                                                             ArtMethod** sp,
+                                                             DeoptimizationMethodType type,
+                                                             JValue return_value,
+                                                             bool is_reference) {
   if (self->IsAsyncExceptionPending() || ShouldDeoptimizeCaller(self, sp)) {
     self->PushDeoptimizationContext(return_value,
                                     is_reference,
@@ -1737,7 +1736,9 @@ Context* Instrumentation::DeoptimizeIfNeeded(Thread* self,
                                     type);
     // This is requested from suspend points or when returning from runtime methods so exit
     // callbacks wouldn't be run yet. So don't skip method callbacks.
-    return artDeoptimize(self, /* skip_method_exit_callbacks= */ false);
+    return self->Deoptimize(DeoptimizationKind::kFullFrame,
+                            /* single_frame= */ false,
+                            /* skip_method_exit_callbacks= */ false);
   }
   // No exception or deoptimization.
   return nullptr;
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 3ef24d1340..59e6b29b90 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -26,7 +26,6 @@
 #include <queue>
 #include <unordered_set>

-#include "arch/context.h"
 #include "arch/instruction_set.h"
 #include "base/locks.h"
 #include "base/macros.h"
@@ -44,6 +43,7 @@ class Throwable;
 }  // namespace mirror
 class ArtField;
 class ArtMethod;
+class Context;
 template <typename T> class Handle;
 template <typename T> class MutableHandle;
 struct NthCallerVisitor;
@@ -537,11 +537,11 @@ class Instrumentation {
   // Deoptimize upon pending exception or if the caller requires it. Returns a long jump context if
   // a deoptimization is needed and taken.
-  Context* DeoptimizeIfNeeded(Thread* self,
-                              ArtMethod** sp,
-                              DeoptimizationMethodType type,
-                              JValue result,
-                              bool is_ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  std::unique_ptr<Context> DeoptimizeIfNeeded(Thread* self,
+                                              ArtMethod** sp,
+                                              DeoptimizationMethodType type,
+                                              JValue result,
+                                              bool is_ref) REQUIRES_SHARED(Locks::mutator_lock_);
   // This returns if the caller of runtime method requires a deoptimization. This checks both if the
   // method requires a deopt or if this particular frame needs a deopt because of a class
   // redefinition.
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 8924108f74..beb42a6a05 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -806,7 +806,7 @@ void QuickExceptionHandler::DeoptimizePartialFragmentFixup() {
   }
 }

-Context* QuickExceptionHandler::PrepareLongJump(bool smash_caller_saves) {
+std::unique_ptr<Context> QuickExceptionHandler::PrepareLongJump(bool smash_caller_saves) {
   // Prepare and return the context.
   context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
   CHECK_NE(handler_quick_frame_pc_, 0u);
@@ -826,7 +826,7 @@ Context* QuickExceptionHandler::PrepareLongJump(bool smash_caller_saves) {
   }
   // Clear the dex_pc list so as not to leak memory.
   handler_dex_pc_list_.reset();
-  return context_.release();
+  return std::move(context_);
 }

 void QuickExceptionHandler::DumpFramesWithType(Thread* self, bool details) {
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 733f9e037f..e5cce62b44 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -19,6 +19,7 @@
 #include <android-base/logging.h>

 #include <cstdint>
+#include <memory>
 #include <optional>

 #include "base/array_ref.h"
@@ -77,7 +78,8 @@ class QuickExceptionHandler {
       REQUIRES_SHARED(Locks::mutator_lock_);

   // Prepares a long jump context for a jump to either to a catch handler or to the upcall.
-  Context* PrepareLongJump(bool smash_caller_saves = true) REQUIRES_SHARED(Locks::mutator_lock_);
+  std::unique_ptr<Context> PrepareLongJump(bool smash_caller_saves = true)
+      REQUIRES_SHARED(Locks::mutator_lock_);

   void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) {
     handler_quick_frame_ = handler_quick_frame;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 0ce018ab76..df75f34e59 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -133,8 +133,6 @@ namespace art HIDDEN {
 using android::base::StringAppendV;
 using android::base::StringPrintf;

-extern "C" Context* artDeoptimize(Thread* self, bool skip_method_exit_callbacks);
-
 bool Thread::is_started_ = false;
 pthread_key_t Thread::pthread_key_self_;
 ConditionVariable* Thread::resume_cond_ = nullptr;
@@ -3934,7 +3932,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
   os << offset;
 }

-Context* Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
+std::unique_ptr<Context> Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
   // Get exception from thread.
   ObjPtr<mirror::Throwable> exception = GetException();
   CHECK(exception != nullptr);
@@ -3942,7 +3940,9 @@ Context* Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
     // This wasn't a real exception, so just clear it here. If there was an actual exception it
     // will be recorded in the DeoptimizationContext and it will be restored later.
     ClearException();
-    return artDeoptimize(this, skip_method_exit_callbacks);
+    return Deoptimize(DeoptimizationKind::kFullFrame,
+                      /*single_frame=*/ false,
+                      skip_method_exit_callbacks);
   }

   ReadBarrier::MaybeAssertToSpaceInvariant(exception.Ptr());
@@ -3984,7 +3984,9 @@ Context* Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
           exception,
           /* from_code= */ false,
           method_type);
-      return artDeoptimize(this, skip_method_exit_callbacks);
+      return Deoptimize(DeoptimizationKind::kFullFrame,
+                        /*single_frame=*/ false,
+                        skip_method_exit_callbacks);
     } else {
       LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
                    << visitor.caller->PrettyMethod();
@@ -4012,6 +4014,38 @@ Context* Thread::QuickDeliverException(bool skip_method_exit_callbacks) {
   return exception_handler.PrepareLongJump();
 }

+std::unique_ptr<Context> Thread::Deoptimize(DeoptimizationKind kind,
+                                            bool single_frame,
+                                            bool skip_method_exit_callbacks) {
+  Runtime::Current()->IncrementDeoptimizationCount(kind);
+  if (VLOG_IS_ON(deopt)) {
+    if (single_frame) {
+      // Deopt logging will be in DeoptimizeSingleFrame. It is there to take advantage of the
+      // specialized visitor that will show whether a method is Quick or Shadow.
+    } else {
+      LOG(INFO) << "Deopting:";
+      Dump(LOG_STREAM(INFO));
+    }
+  }
+
+  AssertHasDeoptimizationContext();
+  QuickExceptionHandler exception_handler(this, true);
+  if (single_frame) {
+    exception_handler.DeoptimizeSingleFrame(kind);
+  } else {
+    exception_handler.DeoptimizeStack(skip_method_exit_callbacks);
+  }
+  if (exception_handler.IsFullFragmentDone()) {
+    return exception_handler.PrepareLongJump(/*smash_caller_saves=*/ true);
+  } else {
+    exception_handler.DeoptimizePartialFragmentFixup();
+    // We cannot smash the caller-saves, as we need the ArtMethod in a parameter register that would
+    // be caller-saved. This has the downside that we cannot track incorrect register usage down the
+    // line.
+    return exception_handler.PrepareLongJump(/*smash_caller_saves=*/ false);
+  }
+}
+
 ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc_out,
                                     bool check_suspended,
                                     bool abort_on_error) const {
diff --git a/runtime/thread.h b/runtime/thread.h
index 1969aa1fde..92d1f9e96b 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -688,7 +688,13 @@ class EXPORT Thread {
   // handler. When is_method_exit_exception is true, the exception was thrown by the method exit
   // callback and we should not send method unwind for the method on top of the stack since method
   // exit callback was already called.
-  Context* QuickDeliverException(bool is_method_exit_exception = false)
+  std::unique_ptr<Context> QuickDeliverException(bool is_method_exit_exception = false)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Perform deoptimization. Return a `Context` prepared for a long jump.
+  std::unique_ptr<Context> Deoptimize(DeoptimizationKind kind,
+                                      bool single_frame,
+                                      bool skip_method_exit_callbacks)
       REQUIRES_SHARED(Locks::mutator_lock_);

   // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
diff --git a/tools/check_cfi.py b/tools/check_cfi.py
index ac6f810f2b..7d23821932 100755
--- a/tools/check_cfi.py
+++ b/tools/check_cfi.py
@@ -33,8 +33,8 @@ ARCHES = ["i386", "x86_64", "arm", "aarch64", "riscv64"]
 IGNORE : Dict[str, List[str]] = {
     # Aligns stack.
     "art_quick_osr_stub": ["i386"],
-    # Intermediate invalid CFI while loading all registers.
-    "art_quick_do_long_jump": ["x86_64"],
+    # Unverifiable intermediate CFI after loading the stack pointer from context.
+    "art_quick_do_long_jump": ["arm", "aarch64", "i386", "x86_64", "riscv64"],
     # Saves/restores SP in other register.
     "art_quick_generic_jni_trampoline": ["arm", "i386", "x86_64"],
     # Starts with non-zero offset at the start of the method.
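
The extended `check_cfi.py` ignore list reflects the same unavoidable gap on every architecture: once the stub installs the target frame's stack pointer, the old frame's CFA rule no longer describes reality, so the stub pins the CFA to the new SP and the intermediate state cannot be verified mechanically. An illustrative arm64-flavoured sketch of the pattern (not a verbatim quote of the stub):

    mov  sp, xIP0              // Switch to the long-jump target's stack.
    .cfi_def_cfa_offset 0      // Old frame's CFA rule is stale; pin CFA = new SP.
    br   xIP1                  // Resume at the target PC (the x86 variants instead
                               // pop ESP/RSP and `ret` through the saved EIP/RIP).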