32 files changed, 366 insertions, 194 deletions
diff --git a/libdexfile/dex/standard_dex_file.h b/libdexfile/dex/standard_dex_file.h
index db82a9bf20..3af36f6791 100644
--- a/libdexfile/dex/standard_dex_file.h
+++ b/libdexfile/dex/standard_dex_file.h
@@ -35,6 +35,22 @@ class StandardDexFile : public DexFile {
   struct CodeItem : public dex::CodeItem {
     static constexpr size_t kAlignment = 4;
 
+    static constexpr size_t InsSizeOffset() {
+      return OFFSETOF_MEMBER(CodeItem, ins_size_);
+    }
+
+    static constexpr size_t OutsSizeOffset() {
+      return OFFSETOF_MEMBER(CodeItem, outs_size_);
+    }
+
+    static constexpr size_t RegistersSizeOffset() {
+      return OFFSETOF_MEMBER(CodeItem, registers_size_);
+    }
+
+    static constexpr size_t InsnsOffset() {
+      return OFFSETOF_MEMBER(CodeItem, insns_);
+    }
+
    private:
     CodeItem() = default;
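The constexpr accessors above let the build compute dex code-item field offsets at compile time and export them to assembly via the new code_item.def later in this diff. A minimal sketch of the OFFSETOF_MEMBER pattern, using plain offsetof and a simplified stand-in for the code-item layout (not ART's actual struct):

#include <cstddef>
#include <cstdint>

// Field order and widths follow the dex code_item format.
struct CodeItemSketch {
  uint16_t registers_size_;            // Byte offset 0.
  uint16_t ins_size_;                  // Byte offset 2.
  uint16_t outs_size_;                 // Byte offset 4.
  uint16_t tries_size_;                // Byte offset 6.
  uint32_t debug_info_off_;            // Byte offset 8.
  uint32_t insns_size_in_code_units_;  // Byte offset 12.
  uint16_t insns_[1];                  // Byte offset 16; variable length.

  static constexpr size_t InsnsOffset() {
    return offsetof(CodeItemSketch, insns_);
  }
};

// Because the accessor is a constant expression, it can feed an assembly
// define such as CODE_ITEM_INSNS_OFFSET with no runtime lookup.
static_assert(CodeItemSketch::InsnsOffset() == 16, "dex code_item layout");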
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index e186cd3992..4e7d64ccfa 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -45,9 +45,12 @@ static uint32_t GetInstructionSize(uint8_t* pc) {
   return instr_size;
 }
 
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
+                                             void* context,
                                              ArtMethod** out_method,
-                                             uintptr_t* out_return_pc, uintptr_t* out_sp) {
+                                             uintptr_t* out_return_pc,
+                                             uintptr_t* out_sp,
+                                             bool* out_is_stack_overflow) {
   struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
   struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
   *out_sp = static_cast<uintptr_t>(sc->arm_sp);
@@ -63,9 +66,11 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED
       reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kArm));
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(sc->arm_r0);
+    *out_is_stack_overflow = true;
   } else {
     // The method is at the top of the stack.
     *out_method = reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t*>(*out_sp)[0]);
+    *out_is_stack_overflow = false;
   }
 
   // Work out the return PC. This will be the address of the instruction
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 751c05b7e9..c139e21d05 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -38,9 +38,12 @@ extern "C" void art_quick_implicit_suspend();
 
 namespace art {
 
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
+                                             void* context,
                                              ArtMethod** out_method,
-                                             uintptr_t* out_return_pc, uintptr_t* out_sp) {
+                                             uintptr_t* out_return_pc,
+                                             uintptr_t* out_sp,
+                                             bool* out_is_stack_overflow) {
   struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
   struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
   *out_sp = static_cast<uintptr_t>(sc->sp);
@@ -56,9 +59,11 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED
       reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kArm64));
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(sc->regs[0]);
+    *out_is_stack_overflow = true;
   } else {
     // The method is at the top of the stack.
     *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+    *out_is_stack_overflow = false;
   }
 
   // Work out the return PC. This will be the address of the instruction
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 0354f0c7a6..f55df92517 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -37,9 +37,12 @@ extern "C" void art_quick_throw_null_pointer_exception_from_signal();
 
 namespace art {
 
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo,
+                                             void* context,
                                              ArtMethod** out_method,
-                                             uintptr_t* out_return_pc, uintptr_t* out_sp) {
+                                             uintptr_t* out_return_pc,
+                                             uintptr_t* out_sp,
+                                             bool* out_is_stack_overflow) {
   struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
   struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
   *out_sp = static_cast<uintptr_t>(sc->sc_regs[mips::SP]);
@@ -55,9 +58,11 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
       reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips));
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips::A0]);
+    *out_is_stack_overflow = true;
   } else {
     // The method is at the top of the stack.
     *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+    *out_is_stack_overflow = false;
   }
 
   // Work out the return PC. This will be the address of the instruction
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 6255235a21..ff53fa6965 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -38,9 +38,12 @@ extern "C" void art_quick_throw_null_pointer_exception_from_signal();
 
 namespace art {
 
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo,
+                                             void* context,
                                              ArtMethod** out_method,
-                                             uintptr_t* out_return_pc, uintptr_t* out_sp) {
+                                             uintptr_t* out_return_pc,
+                                             uintptr_t* out_sp,
+                                             bool* out_is_stack_overflow) {
   struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
   struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
   *out_sp = static_cast<uintptr_t>(sc->sc_regs[mips64::SP]);
@@ -56,9 +59,11 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
       reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips64));
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips64::A0]);
+    *out_is_stack_overflow = true;
   } else {
     // The method is at the top of the stack.
     *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+    *out_is_stack_overflow = false;
   }
 
   // Work out the return PC. This will be the address of the instruction
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 26312fb0ca..3a08ec5cd1 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -279,7 +279,9 @@ static uint32_t GetInstructionSize(const uint8_t* pc) {
 
 void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
                                              ArtMethod** out_method,
-                                             uintptr_t* out_return_pc, uintptr_t* out_sp) {
+                                             uintptr_t* out_return_pc,
+                                             uintptr_t* out_sp,
+                                             bool* out_is_stack_overflow) {
   struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
   *out_sp = static_cast<uintptr_t>(uc->CTX_ESP);
   VLOG(signals) << "sp: " << std::hex << *out_sp;
@@ -298,9 +300,11 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
 #endif
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(uc->CTX_METHOD);
+    *out_is_stack_overflow = true;
   } else {
     // The method is at the top of the stack.
     *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+    *out_is_stack_overflow = false;
   }
 
   uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
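All five handlers now report whether the fault came from the implicit stack-overflow probe: generated code touches sp minus the reserved region on method entry, so a fault at exactly that address means the frame has not been set up yet and the ArtMethod* must be taken from the argument register rather than from sp[0]. A minimal sketch of the test, with an assumed reserved size (ART derives the real value from the instruction set):

#include <cstdint>

constexpr uintptr_t kStackOverflowReservedBytes = 8 * 1024;  // Assumed value.

bool IsImplicitStackOverflow(uintptr_t sp, uintptr_t fault_addr) {
  // The entry probe faults at sp - reserved bytes; any other faulting
  // address is an ordinary implicit null check inside the method body.
  return fault_addr == sp - kStackOverflowReservedBytes;
}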
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index b6a64b64ba..596e468830 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -189,4 +189,152 @@ MACRO1(UNPOISON_HEAP_REF, rRef)
 #endif  // USE_HEAP_POISONING
 END_MACRO
 
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly)
+     */
+MACRO0(SETUP_SAVE_REFS_ONLY_FRAME)
+#if defined(__APPLE__)
+    int3
+    int3
+#else
+    // R10 := Runtime::Current()
+    movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
+    movq (%r10), %r10
+    // Save callee and GPR args, mixed together to agree with core spills bitmap.
+    PUSH r15  // Callee save.
+    PUSH r14  // Callee save.
+    PUSH r13  // Callee save.
+    PUSH r12  // Callee save.
+    PUSH rbp  // Callee save.
+    PUSH rbx  // Callee save.
+    // Create space for FPR args, plus space for ArtMethod*.
+    subq LITERAL(8 + 4 * 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(8 + 4 * 8)
+    // Save FPRs.
+    movq %xmm12, 8(%rsp)
+    movq %xmm13, 16(%rsp)
+    movq %xmm14, 24(%rsp)
+    movq %xmm15, 32(%rsp)
+    // R10 := ArtMethod* for refs only callee save frame method.
+    movq RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET(%r10), %r10
+    // Store ArtMethod* to bottom of stack.
+    movq %r10, 0(%rsp)
+    // Store rsp as the top quick frame.
+    movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+    // Ugly compile-time check, but we only have the preprocessor.
+    // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 6 * 8 + 4 * 8 + 8 + 8)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(X86_64) size not as expected."
+#endif
+#endif  // __APPLE__
+END_MACRO
+
+MACRO0(RESTORE_SAVE_REFS_ONLY_FRAME)
+    movq 8(%rsp), %xmm12
+    movq 16(%rsp), %xmm13
+    movq 24(%rsp), %xmm14
+    movq 32(%rsp), %xmm15
+    addq LITERAL(8 + 4*8), %rsp
+    CFI_ADJUST_CFA_OFFSET(-8 - 4*8)
+    // TODO: optimize by not restoring callee-saves restored by the ABI
+    POP rbx
+    POP rbp
+    POP r12
+    POP r13
+    POP r14
+    POP r15
+END_MACRO
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
+     */
+MACRO0(SETUP_SAVE_ALL_CALLEE_SAVES_FRAME)
+#if defined(__APPLE__)
+    int3
+    int3
+#else
+    // R10 := Runtime::Current()
+    movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
+    movq (%r10), %r10
+    // Save callee save registers to agree with core spills bitmap.
+    PUSH r15  // Callee save.
+    PUSH r14  // Callee save.
+    PUSH r13  // Callee save.
+    PUSH r12  // Callee save.
+    PUSH rbp  // Callee save.
+    PUSH rbx  // Callee save.
+    // Create space for FPR args, plus space for ArtMethod*.
+    subq MACRO_LITERAL(4 * 8 + 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(4 * 8 + 8)
+    // Save FPRs.
+    movq %xmm12, 8(%rsp)
+    movq %xmm13, 16(%rsp)
+    movq %xmm14, 24(%rsp)
+    movq %xmm15, 32(%rsp)
+    // R10 := ArtMethod* for save all callee save frame method.
+    movq RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET(%r10), %r10
+    // Store ArtMethod* to bottom of stack.
+    movq %r10, 0(%rsp)
+    // Store rsp as the top quick frame.
+    movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+    // Ugly compile-time check, but we only have the preprocessor.
+    // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 6 * 8 + 4 * 8 + 8 + 8)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(X86_64) size not as expected."
+#endif
+#endif  // __APPLE__
+END_MACRO
+
+MACRO0(SETUP_FP_CALLEE_SAVE_FRAME)
+    // Create space for ART FP callee-saved registers
+    subq MACRO_LITERAL(4 * 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(4 * 8)
+    movq %xmm12, 0(%rsp)
+    movq %xmm13, 8(%rsp)
+    movq %xmm14, 16(%rsp)
+    movq %xmm15, 24(%rsp)
+END_MACRO
+
+MACRO0(RESTORE_FP_CALLEE_SAVE_FRAME)
+    // Restore ART FP callee-saved registers
+    movq 0(%rsp), %xmm12
+    movq 8(%rsp), %xmm13
+    movq 16(%rsp), %xmm14
+    movq 24(%rsp), %xmm15
+    addq MACRO_LITERAL(4 * 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(- 4 * 8)
+END_MACRO
+
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
+     */
+MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
+    // (Thread*) setup
+    movq %gs:THREAD_SELF_OFFSET, %rdi
+    call SYMBOL(artDeliverPendingExceptionFromCode)  // artDeliverPendingExceptionFromCode(Thread*)
+    UNREACHABLE
+END_MACRO
+
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_.
+     */
+MACRO0(DELIVER_PENDING_EXCEPTION)
+    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME  // save callee saves for throw
+    DELIVER_PENDING_EXCEPTION_FRAME_READY
+END_MACRO
+
+MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
+    movq %gs:THREAD_EXCEPTION_OFFSET, %rcx  // get exception field
+    testq %rcx, %rcx                        // rcx == 0 ?
+    jnz 1f                                  // if rcx != 0 goto 1
+    ret                                     // return
+1:                                          // deliver exception on current thread
+    DELIVER_PENDING_EXCEPTION
+END_MACRO
+
 #endif  // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
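The "ugly compile-time check" at the end of each setup macro pins the frame layout: six GPR pushes, four FPR spill slots, the ArtMethod* slot, and the return address pushed by the caller's call. The same arithmetic as a C++ sketch with assumed constant names (the real FRAME_SIZE_* values come from generated headers):

#include <cstddef>

constexpr size_t kGprSpills = 6;      // r15, r14, r13, r12, rbp, rbx.
constexpr size_t kFprSpills = 4;      // xmm12 through xmm15.
constexpr size_t kMethodSlot = 8;     // ArtMethod* stored at 0(%rsp).
constexpr size_t kReturnAddress = 8;  // Pushed implicitly by the call.

constexpr size_t kFrameSizeSaveRefsOnly =
    kGprSpills * 8 + kFprSpills * 8 + kMethodSlot + kReturnAddress;

static_assert(kFrameSizeSaveRefsOnly == 96, "x86-64 SaveRefsOnly frame size");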
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 3b30c37309..f9b6d2e65f 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -26,127 +26,8 @@ MACRO0(ASSERT_USE_READ_BARRIER)
 #endif
 END_MACRO
 
-MACRO0(SETUP_FP_CALLEE_SAVE_FRAME)
-    // Create space for ART FP callee-saved registers
-    subq MACRO_LITERAL(4 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(4 * 8)
-    movq %xmm12, 0(%rsp)
-    movq %xmm13, 8(%rsp)
-    movq %xmm14, 16(%rsp)
-    movq %xmm15, 24(%rsp)
-END_MACRO
-
-MACRO0(RESTORE_FP_CALLEE_SAVE_FRAME)
-    // Restore ART FP callee-saved registers
-    movq 0(%rsp), %xmm12
-    movq 8(%rsp), %xmm13
-    movq 16(%rsp), %xmm14
-    movq 24(%rsp), %xmm15
-    addq MACRO_LITERAL(4 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(- 4 * 8)
-END_MACRO
-
 // For x86, the CFA is esp+4, the address above the pushed return address on the stack.
 
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
-     */
-MACRO0(SETUP_SAVE_ALL_CALLEE_SAVES_FRAME)
-#if defined(__APPLE__)
-    int3
-    int3
-#else
-    // R10 := Runtime::Current()
-    movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
-    movq (%r10), %r10
-    // Save callee save registers to agree with core spills bitmap.
-    PUSH r15  // Callee save.
-    PUSH r14  // Callee save.
-    PUSH r13  // Callee save.
-    PUSH r12  // Callee save.
-    PUSH rbp  // Callee save.
-    PUSH rbx  // Callee save.
-    // Create space for FPR args, plus space for ArtMethod*.
-    subq MACRO_LITERAL(4 * 8 + 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(4 * 8 + 8)
-    // Save FPRs.
-    movq %xmm12, 8(%rsp)
-    movq %xmm13, 16(%rsp)
-    movq %xmm14, 24(%rsp)
-    movq %xmm15, 32(%rsp)
-    // R10 := ArtMethod* for save all callee save frame method.
-    movq RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET(%r10), %r10
-    // Store ArtMethod* to bottom of stack.
-    movq %r10, 0(%rsp)
-    // Store rsp as the top quick frame.
-    movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
-
-    // Ugly compile-time check, but we only have the preprocessor.
-    // Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 6 * 8 + 4 * 8 + 8 + 8)
-#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(X86_64) size not as expected."
-#endif
-#endif  // __APPLE__
-END_MACRO
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly)
-     */
-MACRO0(SETUP_SAVE_REFS_ONLY_FRAME)
-#if defined(__APPLE__)
-    int3
-    int3
-#else
-    // R10 := Runtime::Current()
-    movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
-    movq (%r10), %r10
-    // Save callee and GPR args, mixed together to agree with core spills bitmap.
-    PUSH r15  // Callee save.
-    PUSH r14  // Callee save.
-    PUSH r13  // Callee save.
-    PUSH r12  // Callee save.
-    PUSH rbp  // Callee save.
-    PUSH rbx  // Callee save.
-    // Create space for FPR args, plus space for ArtMethod*.
-    subq LITERAL(8 + 4 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(8 + 4 * 8)
-    // Save FPRs.
-    movq %xmm12, 8(%rsp)
-    movq %xmm13, 16(%rsp)
-    movq %xmm14, 24(%rsp)
-    movq %xmm15, 32(%rsp)
-    // R10 := ArtMethod* for refs only callee save frame method.
-    movq RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET(%r10), %r10
-    // Store ArtMethod* to bottom of stack.
-    movq %r10, 0(%rsp)
-    // Store rsp as the stop quick frame.
-    movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
-
-    // Ugly compile-time check, but we only have the preprocessor.
-    // Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_REFS_ONLY != 6 * 8 + 4 * 8 + 8 + 8)
-#error "FRAME_SIZE_SAVE_REFS_ONLY(X86_64) size not as expected."
-#endif
-#endif  // __APPLE__
-END_MACRO
-
-MACRO0(RESTORE_SAVE_REFS_ONLY_FRAME)
-    movq 8(%rsp), %xmm12
-    movq 16(%rsp), %xmm13
-    movq 24(%rsp), %xmm14
-    movq 32(%rsp), %xmm15
-    addq LITERAL(8 + 4*8), %rsp
-    CFI_ADJUST_CFA_OFFSET(-8 - 4*8)
-    // TODO: optimize by not restoring callee-saves restored by the ABI
-    POP rbx
-    POP rbp
-    POP r12
-    POP r13
-    POP r14
-    POP r15
-END_MACRO
 
     /*
      * Macro that sets up the callee save frame to conform with
@@ -408,26 +289,6 @@ MACRO0(RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX)
   RESTORE_SAVE_EVERYTHING_FRAME_GPRS_EXCEPT_RAX
 END_MACRO
 
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
-     */
-MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
-    // (Thread*) setup
-    movq %gs:THREAD_SELF_OFFSET, %rdi
-    call SYMBOL(artDeliverPendingExceptionFromCode)  // artDeliverPendingExceptionFromCode(Thread*)
-    UNREACHABLE
-END_MACRO
-
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_.
-     */
-MACRO0(DELIVER_PENDING_EXCEPTION)
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME  // save callee saves for throw
-    DELIVER_PENDING_EXCEPTION_FRAME_READY
-END_MACRO
-
 MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
     DEFINE_FUNCTION VAR(c_name)
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME  // save all registers as basis for long jump context
@@ -993,15 +854,6 @@ MACRO0(RETURN_IF_EAX_ZERO)
   DELIVER_PENDING_EXCEPTION
 END_MACRO
 
-MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
-    movq %gs:THREAD_EXCEPTION_OFFSET, %rcx  // get exception field
-    testq %rcx, %rcx                        // rcx == 0 ?
-    jnz 1f                                  // if rcx != 0 goto 1
-    ret                                     // return
-1:                                          // deliver exception on current thread
-    DELIVER_PENDING_EXCEPTION
-END_MACRO
-
 // Generate the allocation entrypoints for each allocator.
 GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
diff --git a/runtime/art_field.h b/runtime/art_field.h
index bc2c399b74..c149003d90 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -96,10 +96,14 @@ class ArtField final {
     return MemberOffset(offset_);
   }
 
-  static MemberOffset OffsetOffset() {
+  static constexpr MemberOffset OffsetOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_));
   }
 
+  static constexpr MemberOffset DeclaringClassOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(ArtField, declaring_class_));
+  }
+
   MemberOffset GetOffsetDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetOffset(MemberOffset num_bytes) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 7b435d5893..ff61065cb0 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -411,6 +411,10 @@ class ArtMethod final {
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, method_index_));
   }
 
+  static constexpr MemberOffset ImtIndexOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, imt_index_));
+  }
+
   uint32_t GetCodeItemOffset() const {
     return dex_code_item_offset_;
   }
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.h b/runtime/entrypoints/quick/quick_entrypoints_enum.h
index 1cf7f8daeb..6240a7ba04 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_enum.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.h
@@ -36,7 +36,7 @@ std::ostream& operator<<(std::ostream& os, const QuickEntrypointEnum& kind);
 
 // Translate a QuickEntrypointEnum value to the corresponding ThreadOffset.
 template <PointerSize pointer_size>
-static ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
+static constexpr ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
   switch (trampoline) {  // NOLINT(whitespace/braces)
 #define ENTRYPOINT_ENUM(name, rettype, ...) case kQuick ## name : \
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index d22f180c7a..5b7fe0ccf4 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -24,13 +24,11 @@ namespace art {
 /*
  * Handle fill array data by copying appropriate part of dex file into array.
  */
-extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array,
-                                              ArtMethod* method, Thread* self)
+extern "C" int artHandleFillArrayDataFromCode(const Instruction::ArrayDataPayload* payload,
+                                              mirror::Array* array,
+                                              Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
-  const uint16_t* const insns = method->DexInstructions().Insns();
-  const Instruction::ArrayDataPayload* payload =
-      reinterpret_cast<const Instruction::ArrayDataPayload*>(insns + payload_offset);
   bool success = FillArrayData(array, payload);
   return success ? 0 : -1;
 }
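artHandleFillArrayDataFromCode now receives the ArrayDataPayload pointer directly; resolving it from the method's instruction stream becomes the caller's job. A sketch of that computation under the dex format rules (the payload offset is in 16-bit code units, relative to the fill-array-data instruction); the types here are simplified stand-ins:

#include <cstdint>

struct ArrayDataPayloadSketch {
  uint16_t ident;          // 0x0300 identifies fill-array-data payloads.
  uint16_t element_width;  // Bytes per element.
  uint32_t element_count;  // Elements that follow this header.
  // uint8_t data[];       // element_width * element_count bytes follow.
};

const ArrayDataPayloadSketch* ResolvePayload(const uint16_t* insns,
                                             uint32_t dex_pc,
                                             int32_t payload_offset) {
  // Pointer arithmetic is in code units because insns is a uint16_t array.
  return reinterpret_cast<const ArrayDataPayloadSketch*>(
      insns + dex_pc + payload_offset);
}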
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 3c65500909..5c9d226a99 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -354,7 +354,7 @@ class QuickArgumentVisitor {
         return stack_map.GetDexPc();
       }
     } else {
-      return current_code->ToDexPc(*caller_sp, outer_pc);
+      return current_code->ToDexPc(caller_sp, outer_pc);
     }
   }
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 5c2830d96b..cae7debc03 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -291,10 +291,11 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
   ArtMethod* method_obj = nullptr;
   uintptr_t return_pc = 0;
   uintptr_t sp = 0;
+  bool is_stack_overflow = false;
 
   // Get the architecture specific method address and return address. These
   // are in architecture specific files in arch/<arch>/fault_handler_<arch>.
-  GetMethodAndReturnPcAndSp(siginfo, context, &method_obj, &return_pc, &sp);
+  GetMethodAndReturnPcAndSp(siginfo, context, &method_obj, &return_pc, &sp, &is_stack_overflow);
 
   // If we don't have a potential method, we're outta here.
   VLOG(signals) << "potential method: " << method_obj;
@@ -336,7 +337,15 @@ bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool che
         reinterpret_cast<uintptr_t>(method_header->GetEntryPoint());
     VLOG(signals) << "pc offset: " << std::hex << sought_offset;
   }
-  uint32_t dexpc = method_header->ToDexPc(method_obj, return_pc, false);
+  uint32_t dexpc = dex::kDexNoIndex;
+  if (is_stack_overflow) {
+    // If it's an implicit stack overflow check, the frame is not setup, so we
+    // just infer the dex PC as zero.
+    dexpc = 0;
+  } else {
+    CHECK_EQ(*reinterpret_cast<ArtMethod**>(sp), method_obj);
+    dexpc = method_header->ToDexPc(reinterpret_cast<ArtMethod**>(sp), return_pc, false);
+  }
   VLOG(signals) << "dexpc: " << dexpc;
   return !check_dex_pc || dexpc != dex::kDexNoIndex;
 }
@@ -380,9 +389,11 @@ bool JavaStackTraceHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* siginfo,
     ArtMethod* method = nullptr;
     uintptr_t return_pc = 0;
     uintptr_t sp = 0;
+    bool is_stack_overflow = false;
     Thread* self = Thread::Current();
 
-    manager_->GetMethodAndReturnPcAndSp(siginfo, context, &method, &return_pc, &sp);
+    manager_->GetMethodAndReturnPcAndSp(
+        siginfo, context, &method, &return_pc, &sp, &is_stack_overflow);
     // Inside of generated code, sp[0] is the method, so sp is the frame.
     self->SetTopOfStack(reinterpret_cast<ArtMethod**>(sp));
     self->DumpJavaStack(LOG_STREAM(ERROR));
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index f6cf2d77d7..8b89c22a0f 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -55,8 +55,12 @@ class FaultManager {
   // The IsInGeneratedCode() function checks that the mutator lock is held before it
   // calls GetMethodAndReturnPCAndSP().
   // TODO: think about adding lock assertions and fake lock and unlock functions.
-  void GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, ArtMethod** out_method,
-                                 uintptr_t* out_return_pc, uintptr_t* out_sp)
+  void GetMethodAndReturnPcAndSp(siginfo_t* siginfo,
+                                 void* context,
+                                 ArtMethod** out_method,
+                                 uintptr_t* out_return_pc,
+                                 uintptr_t* out_sp,
+                                 bool* out_is_stack_overflow)
       NO_THREAD_SAFETY_ANALYSIS;
   bool IsInGeneratedCode(siginfo_t* siginfo, void *context, bool check_dex_pc)
       NO_THREAD_SAFETY_ANALYSIS;
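ToDexPc now takes the quick frame (an ArtMethod**) instead of the bare method, and the fault handler asserts the two agree before using it. The underlying convention, sketched with stand-in types:

#include <cstdint>

struct ArtMethodSketch {};

// Inside generated code, the stack pointer is the quick frame and slot
// zero holds the executing method, so passing the frame both identifies
// the method and gives callees access to the rest of the frame.
ArtMethodSketch* MethodFromFrame(uintptr_t sp) {
  return *reinterpret_cast<ArtMethodSketch**>(sp);
}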
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index ca4895557d..011d947ea2 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -365,9 +365,9 @@ void InstrumentationInstallStack(Thread* thread, void* arg)
         std::string thread_name;
         GetThread()->GetThreadName(thread_name);
         uint32_t dex_pc = dex::kDexNoIndex;
-        if (last_return_pc_ != 0 &&
-            GetCurrentOatQuickMethodHeader() != nullptr) {
-          dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(m, last_return_pc_);
+        if (last_return_pc_ != 0 && GetCurrentOatQuickMethodHeader() != nullptr) {
+          dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(
+              GetCurrentQuickFrame(), last_return_pc_);
         }
         LOG(FATAL) << "While walking " << thread_name << " found unexpected non-runtime method"
                    << " without instrumentation exit return or interpreter frame."
@@ -400,9 +400,8 @@ void InstrumentationInstallStack(Thread* thread, void* arg)
           SetReturnPc(instrumentation_exit_pc_);
         }
         uint32_t dex_pc = dex::kDexNoIndex;
-        if (last_return_pc_ != 0 &&
-            GetCurrentOatQuickMethodHeader() != nullptr) {
-          dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(m, last_return_pc_);
+        if (last_return_pc_ != 0 && GetCurrentOatQuickMethodHeader() != nullptr) {
+          dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(GetCurrentQuickFrame(), last_return_pc_);
         }
         dex_pcs_.push_back(dex_pc);
         last_return_pc_ = return_pc;
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
index 003ea6c8d3..0ada562438 100644
--- a/runtime/interpreter/interpreter_cache.h
+++ b/runtime/interpreter/interpreter_cache.h
@@ -45,10 +45,10 @@ class Thread;
 // Aligned to 16-bytes to make it easier to get the address of the cache
 // from assembly (it ensures that the offset is valid immediate value).
 class ALIGNED(16) InterpreterCache {
+ public:
   // Aligned since we load the whole entry in single assembly instruction.
   typedef std::pair<const void*, size_t> Entry ALIGNED(2 * sizeof(size_t));
 
- public:
   // 2x size increase/decrease corresponds to ~0.5% interpreter performance change.
   // Value of 256 has around 75% cache hit rate.
   static constexpr size_t kSize = 256;
@@ -77,6 +77,10 @@ class ALIGNED(16) InterpreterCache {
     data_[IndexOf(key)] = Entry{key, value};
   }
 
+  std::array<Entry, kSize>& GetArray() {
+    return data_;
+  }
+
  private:
   bool IsCalledFromOwningThread();
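Moving Entry into the public section (and adding GetArray()) exposes the cache layout that both the GC and the assembly fast paths rely on. Because kSize is a power of two and each 16-byte entry is loaded with one instruction, the lookup reduces to a shift and a mask. A sketch of the indexing arithmetic; the exact hash of the key is an assumption here:

#include <cstdint>
#include <utility>

using Entry = std::pair<const void*, size_t>;  // 16 bytes on 64-bit.
constexpr size_t kSize = 256;                  // Power of two.

size_t EntryByteOffset(const void* dex_pc_ptr) {
  uintptr_t key = reinterpret_cast<uintptr_t>(dex_pc_ptr);
  size_t index = (key >> 2) & (kSize - 1);  // Assumed hash: drop low bits.
  // index * sizeof(Entry) equals (key << 2) & (sizeof(Entry) * (kSize - 1)),
  // i.e. one shift plus one AND with the pre-scaled mask that thread.def
  // exports below as THREAD_INTERPRETER_CACHE_SIZE_MASK (16 * 255 = 0xff0).
  return index * sizeof(Entry);
}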
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index db5cbce647..fc0cf245ae 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -991,10 +991,6 @@ inline IterationRange<StrideIterator<ArtField>> Class::GetSFieldsUnchecked() {
   return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
 }
 
-inline MemberOffset Class::EmbeddedVTableOffset(PointerSize pointer_size) {
-  return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + static_cast<size_t>(pointer_size));
-}
-
 inline void Class::CheckPointerSize(PointerSize pointer_size) {
   DCHECK_EQ(pointer_size, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
 }
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index a8b8235ee3..ecbae712fd 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -794,6 +794,11 @@ class MANAGED Class final : public Object {
                         static_cast<size_t>(pointer_size)));
   }
 
+  static constexpr MemberOffset EmbeddedVTableOffset(PointerSize pointer_size) {
+    return MemberOffset(
+        ImtPtrOffset(pointer_size).Uint32Value() + static_cast<size_t>(pointer_size));
+  }
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool ShouldHaveImt() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1357,7 +1362,6 @@ class MANAGED Class final : public Object {
 
   // Check that the pointer size matches the one in the class linker.
   ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
-  static MemberOffset EmbeddedVTableOffset(PointerSize pointer_size);
 
   template <bool kVisitNativeRoots,
             VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
             ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
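EmbeddedVTableOffset moves to the header and becomes constexpr so mirror_class.def (below) can export MIRROR_CLASS_VTABLE_OFFSET_32/64: the embedded vtable starts one pointer past the embedded IMT pointer. The arithmetic, worked with a made-up base offset:

#include <cstdint>

constexpr uint32_t kImtPtrOffsetSketch = 208;  // Hypothetical base offset.

constexpr uint32_t EmbeddedVTableOffset(uint32_t pointer_size) {
  // imt_ptr_ occupies one pointer slot; vtable entries follow directly.
  return kImtPtrOffsetSketch + pointer_size;
}

static_assert(EmbeddedVTableOffset(8) == 216, "64-bit layout in this sketch");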
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 7f473984cc..7a6ebf8075 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -24,9 +24,10 @@
 
 namespace art {
 
-uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
+uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod** frame,
                                        const uintptr_t pc,
                                        bool abort_on_failure) const {
+  ArtMethod* method = *frame;
   const void* entry_point = GetEntryPoint();
   uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
   if (method->IsNative()) {
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 9d0883b556..0d08149693 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -83,7 +83,7 @@ class PACKED(4) OatQuickMethodHeader {
     // ART compiled method are prefixed with header, but we can also easily
     // accidentally use a function pointer to one of the stubs/trampolines.
     // We prefix those with 0xFF in the assembly so that we can do DCHECKs.
-    CHECK_NE(code_size_, 0xFFFFFFFF) << code_;
+    CHECK_NE(code_size_, 0xFFFFFFFF) << code_size_;
     return code_size_ & kCodeSizeMask;
   }
 
@@ -148,7 +148,9 @@ class PACKED(4) OatQuickMethodHeader {
                     bool is_for_catch_handler,
                     bool abort_on_failure = true) const;
 
-  uint32_t ToDexPc(ArtMethod* method, const uintptr_t pc, bool abort_on_failure = true) const;
+  uint32_t ToDexPc(ArtMethod** frame,
+                   const uintptr_t pc,
+                   bool abort_on_failure = true) const;
 
   void SetHasShouldDeoptimizeFlag() {
     DCHECK_EQ(code_size_ & kShouldDeoptimizeMask, 0u);
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 2a07051e36..f3557a383b 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -128,7 +128,7 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
       return stack_map->GetDexPc();
     } else {
       return cur_oat_quick_method_header_->ToDexPc(
-          GetMethod(), cur_quick_frame_pc_, abort_on_failure);
+          GetCurrentQuickFrame(), cur_quick_frame_pc_, abort_on_failure);
     }
   } else {
     return 0;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 6f0776b6b5..c78d4ec46f 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -4037,6 +4037,20 @@ void Thread::VisitRoots(RootVisitor* visitor) {
   for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
     visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
   }
+  for (InterpreterCache::Entry& entry : GetInterpreterCache()->GetArray()) {
+    const Instruction* inst = reinterpret_cast<const Instruction*>(entry.first);
+    if (inst != nullptr &&
+        (inst->Opcode() == Instruction::NEW_INSTANCE ||
+         inst->Opcode() == Instruction::CHECK_CAST ||
+         inst->Opcode() == Instruction::INSTANCE_OF ||
+         inst->Opcode() == Instruction::NEW_ARRAY ||
+         inst->Opcode() == Instruction::CONST_CLASS ||
+         inst->Opcode() == Instruction::CONST_STRING ||
+         inst->Opcode() == Instruction::CONST_STRING_JUMBO)) {
+      visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&entry.second),
+                                  RootInfo(kRootThreadObject, thread_id));
+    }
+  }
 }
 
 void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
diff --git a/runtime/thread.h b/runtime/thread.h
index 29375e51f4..d6faa95824 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -718,7 +718,14 @@ class Thread {
   }
 
  public:
-  static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
+  template<PointerSize pointer_size>
+  static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
+      size_t quick_entrypoint_offset) {
+    return ThreadOffsetFromTlsPtr<pointer_size>(
+        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
+  }
+
+  static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
                                                 PointerSize pointer_size) {
     if (pointer_size == PointerSize::k32) {
       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
@@ -730,12 +737,6 @@ class Thread {
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
-    return ThreadOffsetFromTlsPtr<pointer_size>(
-        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
-  }
-
-  template<PointerSize pointer_size>
   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
   }
 
@@ -743,7 +744,7 @@
   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
   template <PointerSize pointer_size>
-  static int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
+  static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
     DCHECK_LT(reg, 30u);
     // The ReadBarrierMarkRegX entry points are ordered by increasing
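With QuickEntryPointOffset and ReadBarrierMarkEntryPointsOffset now constexpr, thread offsets can be folded into build-time assembly defines instead of being computed at runtime. A sketch of the TLS-offset pattern with stand-in structures (ART does the equivalent over Thread::tlsPtr_ via OFFSETOF_MEMBER):

#include <cstddef>
#include <cstdint>

struct TlsPtrSketch {
  void* self;
  void* quick_entrypoints[10];  // Hypothetical entrypoint table.
};

struct ThreadSketch {
  uint32_t state;
  TlsPtrSketch tlsPtr_;
};

static constexpr size_t QuickEntryPointOffset(size_t entrypoint_index) {
  return offsetof(ThreadSketch, tlsPtr_) +
         offsetof(TlsPtrSketch, quick_entrypoints) +
         entrypoint_index * sizeof(void*);
}

// A constant expression like this can be burned into a define such as
// THREAD_ALLOC_OBJECT_ENTRYPOINT_OFFSET at build time.
static_assert(QuickEntryPointOffset(1) - QuickEntryPointOffset(0) == sizeof(void*),
              "entrypoint table stride");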
diff --git a/tools/cpp-define-generator/art_field.def b/tools/cpp-define-generator/art_field.def
new file mode 100644
index 0000000000..a15076f9c0
--- /dev/null
+++ b/tools/cpp-define-generator/art_field.def
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "art_field.h"
+#endif
+
+ASM_DEFINE(ART_FIELD_OFFSET_OFFSET,
+           art::ArtField::OffsetOffset().Int32Value())
+ASM_DEFINE(ART_FIELD_DECLARING_CLASS_OFFSET,
+           art::ArtField::DeclaringClassOffset().Int32Value())
diff --git a/tools/cpp-define-generator/art_method.def b/tools/cpp-define-generator/art_method.def
index 21859dc9ce..75fbab0c28 100644
--- a/tools/cpp-define-generator/art_method.def
+++ b/tools/cpp-define-generator/art_method.def
@@ -20,6 +20,8 @@
 
 ASM_DEFINE(ART_METHOD_ACCESS_FLAGS_OFFSET,
            art::ArtMethod::AccessFlagsOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_IS_STATIC_FLAG,
+           art::kAccStatic)
 ASM_DEFINE(ART_METHOD_DECLARING_CLASS_OFFSET,
            art::ArtMethod::DeclaringClassOffset().Int32Value())
 ASM_DEFINE(ART_METHOD_JNI_OFFSET_32,
@@ -30,3 +32,9 @@ ASM_DEFINE(ART_METHOD_QUICK_CODE_OFFSET_32,
            art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())
 ASM_DEFINE(ART_METHOD_QUICK_CODE_OFFSET_64,
            art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())
+ASM_DEFINE(ART_METHOD_METHOD_INDEX_OFFSET,
+           art::ArtMethod::MethodIndexOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_IMT_INDEX_OFFSET,
+           art::ArtMethod::ImtIndexOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_HOTNESS_COUNT_OFFSET,
+           art::ArtMethod::HotnessCountOffset().Int32Value())
diff --git a/tools/cpp-define-generator/asm_defines.def b/tools/cpp-define-generator/asm_defines.def
index 9aad8a42ca..9747844ef8 100644
--- a/tools/cpp-define-generator/asm_defines.def
+++ b/tools/cpp-define-generator/asm_defines.def
@@ -19,7 +19,9 @@
 #endif
 
 #include "globals.def"
+#include "art_field.def"
 #include "art_method.def"
+#include "code_item.def"
 #include "lockword.def"
 #include "mirror_array.def"
 #include "mirror_class.def"
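For context, these .def files are consumed by the cpp-define-generator: each ASM_DEFINE typically embeds its name and value as a marker in a compiled object, and a build step scrapes the markers into an assembly header. A simplified rendition of that mechanism, not ART's exact macro:

#include <cstdint>

// Each constant is emitted as an .ascii marker via an "i" (immediate)
// constraint, which forces the expression to be a compile-time constant.
#define ASM_DEFINE(NAME, EXPR)                         \
  void AsmDefineHelperFor_##NAME() {                   \
    asm volatile("\n.ascii \">>" #NAME " %0<<\""       \
                 :: "i" (static_cast<int64_t>(EXPR))); \
  }

ASM_DEFINE(EXAMPLE_CONSTANT, 42)  // Scraped as: #define EXAMPLE_CONSTANT 42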
diff --git a/tools/cpp-define-generator/code_item.def b/tools/cpp-define-generator/code_item.def
new file mode 100644
index 0000000000..01b0e85adf
--- /dev/null
+++ b/tools/cpp-define-generator/code_item.def
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "dex/standard_dex_file.h"
+#endif
+
+ASM_DEFINE(CODE_ITEM_REGISTERS_SIZE_OFFSET,
+           art::StandardDexFile::CodeItem::RegistersSizeOffset())
+ASM_DEFINE(CODE_ITEM_INS_SIZE_OFFSET,
+           art::StandardDexFile::CodeItem::InsSizeOffset())
+ASM_DEFINE(CODE_ITEM_OUTS_SIZE_OFFSET,
+           art::StandardDexFile::CodeItem::OutsSizeOffset())
+ASM_DEFINE(CODE_ITEM_INSNS_OFFSET,
+           art::StandardDexFile::CodeItem::InsnsOffset())
diff --git a/tools/cpp-define-generator/globals.def b/tools/cpp-define-generator/globals.def
index 09d33ce756..ca0c8ba363 100644
--- a/tools/cpp-define-generator/globals.def
+++ b/tools/cpp-define-generator/globals.def
@@ -72,3 +72,5 @@ ASM_DEFINE(STACK_REFERENCE_SIZE,
            sizeof(art::StackReference<art::mirror::Object>))
 ASM_DEFINE(STD_MEMORY_ORDER_RELAXED,
            std::memory_order_relaxed)
+ASM_DEFINE(STACK_OVERFLOW_RESERVED_BYTES,
+           GetStackOverflowReservedBytes(art::kRuntimeISA))
diff --git a/tools/cpp-define-generator/mirror_class.def b/tools/cpp-define-generator/mirror_class.def
index c15ae92ece..6df6c41690 100644
--- a/tools/cpp-define-generator/mirror_class.def
+++ b/tools/cpp-define-generator/mirror_class.def
@@ -36,3 +36,11 @@ ASM_DEFINE(MIRROR_CLASS_STATUS_OFFSET,
            art::mirror::Class::StatusOffset().Int32Value())
 ASM_DEFINE(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT,
            art::mirror::Class::kPrimitiveTypeSizeShiftShift)
+ASM_DEFINE(MIRROR_CLASS_VTABLE_OFFSET_32,
+           art::mirror::Class::EmbeddedVTableOffset(art::PointerSize::k32).Int32Value())
+ASM_DEFINE(MIRROR_CLASS_VTABLE_OFFSET_64,
+           art::mirror::Class::EmbeddedVTableOffset(art::PointerSize::k64).Int32Value())
+ASM_DEFINE(MIRROR_CLASS_IMT_PTR_OFFSET_32,
+           art::mirror::Class::ImtPtrOffset(art::PointerSize::k32).Int32Value())
+ASM_DEFINE(MIRROR_CLASS_IMT_PTR_OFFSET_64,
+           art::mirror::Class::ImtPtrOffset(art::PointerSize::k64).Int32Value())
diff --git a/tools/cpp-define-generator/mirror_object.def b/tools/cpp-define-generator/mirror_object.def
index facb037c97..7d7028b466 100644
--- a/tools/cpp-define-generator/mirror_object.def
+++ b/tools/cpp-define-generator/mirror_object.def
@@ -24,3 +24,10 @@ ASM_DEFINE(MIRROR_OBJECT_HEADER_SIZE,
            sizeof(art::mirror::Object))
 ASM_DEFINE(MIRROR_OBJECT_LOCK_WORD_OFFSET,
            art::mirror::Object::MonitorOffset().Int32Value())
+ASM_DEFINE(GRAY_BYTE_OFFSET,
+           art::mirror::Object::MonitorOffset().Int32Value() +
+               art::LockWord::kReadBarrierStateShift / art::kBitsPerByte)
+ASM_DEFINE(GRAY_BIT_POSITION,
+           art::LockWord::kReadBarrierStateShift % art::kBitsPerByte)
+ASM_DEFINE(READ_BARRIER_TEST_VALUE,
+           static_cast<int8_t>(1 << (art::LockWord::kReadBarrierStateShift % art::kBitsPerByte)))
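GRAY_BYTE_OFFSET, GRAY_BIT_POSITION, and READ_BARRIER_TEST_VALUE let assembly test the read-barrier state with a single one-byte instruction instead of loading the whole 32-bit lock word. Worked numbers, assuming a little-endian lock word with the read-barrier state at bit 28 (the shift value is an assumption here):

#include <cstdint>

constexpr uint32_t kReadBarrierStateShift = 28;  // Assumed bit position.
constexpr uint32_t kBitsPerByte = 8;

constexpr uint32_t kGrayByteOffset = kReadBarrierStateShift / kBitsPerByte;   // Byte 3.
constexpr uint32_t kGrayBitPosition = kReadBarrierStateShift % kBitsPerByte;  // Bit 4.
constexpr int8_t kReadBarrierTestValue =
    static_cast<int8_t>(1u << kGrayBitPosition);  // 0x10.

// Assembly can then do, for example:
//   testb $0x10, (MIRROR_OBJECT_LOCK_WORD_OFFSET + 3)(%obj)
static_assert(kGrayByteOffset == 3 && kGrayBitPosition == 4, "example values");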
diff --git a/tools/cpp-define-generator/thread.def b/tools/cpp-define-generator/thread.def
index 8c91dc8a87..72cd2a98f1 100644
--- a/tools/cpp-define-generator/thread.def
+++ b/tools/cpp-define-generator/thread.def
@@ -15,6 +15,7 @@
  */
 
 #if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "thread.h"
 #endif
 
@@ -36,6 +37,8 @@ ASM_DEFINE(THREAD_INTERPRETER_CACHE_OFFSET,
            art::Thread::InterpreterCacheOffset<art::kRuntimePointerSize>().Int32Value())
 ASM_DEFINE(THREAD_INTERPRETER_CACHE_SIZE_LOG2,
            art::Thread::InterpreterCacheSizeLog2())
+ASM_DEFINE(THREAD_INTERPRETER_CACHE_SIZE_MASK,
+           (sizeof(art::InterpreterCache::Entry) * (art::InterpreterCache::kSize - 1)))
 ASM_DEFINE(THREAD_IS_GC_MARKING_OFFSET,
            art::Thread::IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())
 ASM_DEFINE(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
@@ -60,3 +63,11 @@ ASM_DEFINE(THREAD_USE_MTERP_OFFSET,
            art::Thread::UseMterpOffset<art::kRuntimePointerSize>().Int32Value())
 ASM_DEFINE(THREAD_TOP_QUICK_FRAME_OFFSET,
            art::Thread::TopOfManagedStackOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_ALLOC_OBJECT_ENTRYPOINT_OFFSET,
+           art::GetThreadOffset<art::kRuntimePointerSize>(art::kQuickAllocObjectInitialized)
+               .Int32Value())
+ASM_DEFINE(THREAD_ALLOC_ARRAY_ENTRYPOINT_OFFSET,
+           art::GetThreadOffset<art::kRuntimePointerSize>(art::kQuickAllocArrayResolved)
+               .Int32Value())
+ASM_DEFINE(THREAD_READ_BARRIER_MARK_REG00_OFFSET,
+           art::Thread::ReadBarrierMarkEntryPointsOffset<art::kRuntimePointerSize>(0))
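The new thread.def entries pre-resolve entrypoint offsets at build time; THREAD_READ_BARRIER_MARK_REG00_OFFSET anchors the contiguous ReadBarrierMarkRegX table, from which register X's slot is reached by adding X pointer-sized strides (the ordering is stated in the thread.h comment above). A sketch of that arithmetic with assumed values:

#include <cstddef>
#include <cstdint>

constexpr size_t kPointerSize = 8;                     // 64-bit runtime.
constexpr size_t kReadBarrierMarkReg00Offset = 0x300;  // Hypothetical TLS offset.

constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
  // Entry points are laid out contiguously, ordered by register number.
  return static_cast<int32_t>(kReadBarrierMarkReg00Offset + reg * kPointerSize);
}

static_assert(ReadBarrierMarkEntryPointsOffset(2) == 0x310, "stride example");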