Diffstat (limited to 'runtime')
-rw-r--r--  runtime/arch/arch_test.cc  50
-rw-r--r--  runtime/arch/arm/callee_save_frame_arm.h (renamed from runtime/arch/arm/quick_method_frame_info_arm.h)  91
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S  267
-rw-r--r--  runtime/arch/arm64/callee_save_frame_arm64.h (renamed from runtime/arch/arm64/quick_method_frame_info_arm64.h)  91
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S  113
-rw-r--r--  runtime/arch/mips/callee_save_frame_mips.h (renamed from runtime/arch/mips/quick_method_frame_info_mips.h)  78
-rw-r--r--  runtime/arch/mips/fault_handler_mips.cc  2
-rw-r--r--  runtime/arch/mips64/callee_save_frame_mips64.h (renamed from runtime/arch/mips64/quick_method_frame_info_mips64.h)  78
-rw-r--r--  runtime/arch/mips64/fault_handler_mips64.cc  2
-rw-r--r--  runtime/arch/x86/callee_save_frame_x86.h  98
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S  4
-rw-r--r--  runtime/arch/x86/quick_method_frame_info_x86.h  89
-rw-r--r--  runtime/arch/x86/registers_x86.h  14
-rw-r--r--  runtime/arch/x86_64/callee_save_frame_x86_64.h (renamed from runtime/arch/x86_64/quick_method_frame_info_x86_64.h)  74
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S  4
-rw-r--r--  runtime/check_reference_map_visitor.h  13
-rw-r--r--  runtime/common_throws.cc  13
-rw-r--r--  runtime/common_throws.h  4
-rw-r--r--  runtime/dex/dex_file_annotations.cc  117
-rw-r--r--  runtime/dex/dex_file_annotations.h  37
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h  15
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc  16
-rw-r--r--  runtime/entrypoints/quick/callee_save_frame.h  86
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  107
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc  100
-rw-r--r--  runtime/generated/asm_support_gen.h  18
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc  2
-rw-r--r--  runtime/jit/jit.cc  14
-rw-r--r--  runtime/lock_word.h  4
-rw-r--r--  runtime/method_info.h  10
-rw-r--r--  runtime/mirror/var_handle.cc  31
-rw-r--r--  runtime/mirror/var_handle.h  5
-rw-r--r--  runtime/native/java_lang_Class.cc  8
-rw-r--r--  runtime/native/java_lang_reflect_Constructor.cc  2
-rw-r--r--  runtime/native/java_lang_reflect_Method.cc  2
-rw-r--r--  runtime/oat.h  4
-rw-r--r--  runtime/oat_quick_method_header.cc  17
-rw-r--r--  runtime/oat_quick_method_header.h  5
-rw-r--r--  runtime/quick_exception_handler.cc  47
-rw-r--r--  runtime/runtime-inl.h  11
-rw-r--r--  runtime/runtime.cc  53
-rw-r--r--  runtime/runtime.h  5
-rw-r--r--  runtime/stack.cc  67
-rw-r--r--  runtime/stack_map.cc  113
-rw-r--r--  runtime/stack_map.h  1008
-rw-r--r--  runtime/thread.cc  20
-rw-r--r--  runtime/var_handles.cc  4
47 files changed, 1181 insertions, 1832 deletions
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 1ba4070056..d4ceede07a 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -18,6 +18,7 @@
#include "art_method-inl.h"
#include "base/callee_save_type.h"
+#include "entrypoints/quick/callee_save_frame.h"
#include "common_runtime_test.h"
#include "quick/quick_method_frame_info.h"
@@ -57,21 +58,6 @@ class ArchTest : public CommonRuntimeTest {
void FinalizeSetup() OVERRIDE {
ASSERT_EQ(InstructionSet::kX86_64, Runtime::Current()->GetInstructionSet());
}
-
- static void CheckFrameSize(InstructionSet isa, CalleeSaveType type, uint32_t save_size)
- NO_THREAD_SAFETY_ANALYSIS {
- Runtime* const runtime = Runtime::Current();
- Thread* const self = Thread::Current();
- ScopedObjectAccess soa(self); // So we can create callee-save methods.
-
- runtime->SetInstructionSet(isa);
- ArtMethod* save_method = runtime->CreateCalleeSaveMethod();
- runtime->SetCalleeSaveMethod(save_method, type);
- QuickMethodFrameInfo frame_info = runtime->GetRuntimeMethodFrameInfo(save_method);
- EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
- << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
- << frame_info.FpSpillMask() << std::dec;
- }
};
TEST_F(ArchTest, CheckCommonOffsetsAndSizes) {
@@ -205,26 +191,20 @@ static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
} // namespace x86_64
// Check architecture specific constants are sound.
-#define TEST_ARCH(Arch, arch) \
- TEST_F(ArchTest, Arch) { \
- CheckFrameSize(InstructionSet::k##Arch, \
- CalleeSaveType::kSaveAllCalleeSaves, \
- arch::kFrameSizeSaveAllCalleeSaves); \
- CheckFrameSize(InstructionSet::k##Arch, \
- CalleeSaveType::kSaveRefsOnly, \
- arch::kFrameSizeSaveRefsOnly); \
- CheckFrameSize(InstructionSet::k##Arch, \
- CalleeSaveType::kSaveRefsAndArgs, \
- arch::kFrameSizeSaveRefsAndArgs); \
- CheckFrameSize(InstructionSet::k##Arch, \
- CalleeSaveType::kSaveEverything, \
- arch::kFrameSizeSaveEverything); \
- CheckFrameSize(InstructionSet::k##Arch, \
- CalleeSaveType::kSaveEverythingForClinit, \
- arch::kFrameSizeSaveEverythingForClinit); \
- CheckFrameSize(InstructionSet::k##Arch, \
- CalleeSaveType::kSaveEverythingForSuspendCheck, \
- arch::kFrameSizeSaveEverythingForSuspendCheck); \
+// We expect the return PC to be stored at the highest address slot in the frame.
+#define TEST_ARCH_TYPE(Arch, arch, type) \
+ EXPECT_EQ(arch::Arch##CalleeSaveFrame::GetFrameSize(CalleeSaveType::k##type), \
+ arch::kFrameSize##type); \
+ EXPECT_EQ(arch::Arch##CalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::k##type), \
+ arch::kFrameSize##type - static_cast<size_t>(k##Arch##PointerSize))
+#define TEST_ARCH(Arch, arch) \
+ TEST_F(ArchTest, Arch) { \
+ TEST_ARCH_TYPE(Arch, arch, SaveAllCalleeSaves); \
+ TEST_ARCH_TYPE(Arch, arch, SaveRefsOnly); \
+ TEST_ARCH_TYPE(Arch, arch, SaveRefsAndArgs); \
+ TEST_ARCH_TYPE(Arch, arch, SaveEverything); \
+ TEST_ARCH_TYPE(Arch, arch, SaveEverythingForClinit); \
+ TEST_ARCH_TYPE(Arch, arch, SaveEverythingForSuspendCheck); \
}
TEST_ARCH(Arm, arm)
TEST_ARCH(Arm64, arm64)
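
The old CheckFrameSize helper needed a live runtime to create callee-save methods; the new TEST_ARCH_TYPE reduces each check to constexpr arithmetic. A minimal C++ sketch of the invariant it asserts, using the ARM kSaveRefsOnly core mask (r5-r8, r10, r11, LR) for illustration; the real masks live in the callee_save_frame_<arch>.h headers below:

#include <cstddef>
#include <cstdint>

constexpr size_t kStackAlignment = 16;
constexpr size_t kArmPointerSize = 4;

constexpr size_t PopCount(uint32_t x) { return x == 0u ? 0u : (x & 1u) + PopCount(x >> 1); }
constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

// GPR spills + FPR spills + one slot for the ArtMethod*, rounded to alignment.
constexpr size_t FrameSize(uint32_t core, uint32_t fp) {
  return RoundUp((PopCount(core) + PopCount(fp) + 1u) * kArmPointerSize, kStackAlignment);
}

// The return PC always lands in the highest-addressed slot of the frame.
constexpr size_t ReturnPcOffset(uint32_t core, uint32_t fp) {
  return FrameSize(core, fp) - kArmPointerSize;
}

constexpr uint32_t kRefsOnlyCoreSpills = 0x4DE0u;  // r5-r8, r10, r11, LR
static_assert(FrameSize(kRefsOnlyCoreSpills, 0u) == 28 + 4,
              "matches the FRAME_SIZE_SAVE_REFS_ONLY check in quick_entrypoints_arm.S");
static_assert(ReturnPcOffset(kRefsOnlyCoreSpills, 0u) == 28, "return PC in the last slot");
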
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/callee_save_frame_arm.h
index 5c5b81baae..11eefb9283 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/callee_save_frame_arm.h
@@ -14,13 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
-#define ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
+#ifndef ART_RUNTIME_ARCH_ARM_CALLEE_SAVE_FRAME_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_CALLEE_SAVE_FRAME_ARM_H_
#include "arch/instruction_set.h"
#include "base/bit_utils.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
+#include "base/globals.h"
#include "quick/quick_method_frame_info.h"
#include "registers_arm.h"
@@ -55,56 +56,56 @@ static constexpr uint32_t kArmCalleeSaveFpAllSpills =
static constexpr uint32_t kArmCalleeSaveFpEverythingSpills =
kArmCalleeSaveFpArgSpills | kArmCalleeSaveFpAllSpills;
-constexpr uint32_t ArmCalleeSaveCoreSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveAllSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
-}
+class ArmCalleeSaveFrame {
+ public:
+ static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kArmCalleeSaveAlwaysSpills | kArmCalleeSaveRefSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveEverythingSpills : 0);
+ }
-constexpr uint32_t ArmCalleeSaveFpSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
-}
+ static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
+ }
-constexpr uint32_t ArmCalleeSaveFrameSize(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return RoundUp((POPCOUNT(ArmCalleeSaveCoreSpills(type)) /* gprs */ +
- POPCOUNT(ArmCalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * static_cast<size_t>(kArmPointerSize), kStackAlignment);
-}
+ static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+ POPCOUNT(GetFpSpills(type)) /* fprs */ +
+ 1 /* Method* */) * static_cast<size_t>(kArmPointerSize), kStackAlignment);
+ }
-constexpr QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return QuickMethodFrameInfo(ArmCalleeSaveFrameSize(type),
- ArmCalleeSaveCoreSpills(type),
- ArmCalleeSaveFpSpills(type));
-}
+ static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+ }
-constexpr size_t ArmCalleeSaveFpr1Offset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return ArmCalleeSaveFrameSize(type) -
- (POPCOUNT(ArmCalleeSaveCoreSpills(type)) +
- POPCOUNT(ArmCalleeSaveFpSpills(type))) * static_cast<size_t>(kArmPointerSize);
-}
+ static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ (POPCOUNT(GetCoreSpills(type)) +
+ POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kArmPointerSize);
+ }
-constexpr size_t ArmCalleeSaveGpr1Offset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return ArmCalleeSaveFrameSize(type) -
- POPCOUNT(ArmCalleeSaveCoreSpills(type)) * static_cast<size_t>(kArmPointerSize);
-}
+ static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kArmPointerSize);
+ }
-constexpr size_t ArmCalleeSaveLrOffset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return ArmCalleeSaveFrameSize(type) -
- POPCOUNT(ArmCalleeSaveCoreSpills(type) & (-(1 << LR))) * static_cast<size_t>(kArmPointerSize);
-}
+ static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) - static_cast<size_t>(kArmPointerSize);
+ }
+};
} // namespace arm
} // namespace art
-#endif // ART_RUNTIME_ARCH_ARM_QUICK_METHOD_FRAME_INFO_ARM_H_
+#endif // ART_RUNTIME_ARCH_ARM_CALLEE_SAVE_FRAME_ARM_H_
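
For call sites, the rename turns the old free functions into static members: ArmCalleeSaveFrameSize(type) becomes ArmCalleeSaveFrame::GetFrameSize(type), and the old LR-offset query generalizes to GetReturnPcOffset, which is now simply the frame's last slot. A sketch of migrated usage, with types as declared in this header:

using art::CalleeSaveType;
using art::arm::ArmCalleeSaveFrame;

constexpr CalleeSaveType kType = CalleeSaveType::kSaveRefsAndArgs;

// Old: ArmCalleeSaveMethodFrameInfo(kType) / ArmCalleeSaveLrOffset(kType).
constexpr art::QuickMethodFrameInfo kInfo = ArmCalleeSaveFrame::GetMethodFrameInfo(kType);
constexpr size_t kReturnPc = ArmCalleeSaveFrame::GetReturnPcOffset(kType);
static_assert(kReturnPc == ArmCalleeSaveFrame::GetFrameSize(kType) - 4u,
              "return PC occupies the highest-addressed slot");
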
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index cd00125de5..311e838fb3 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -55,7 +55,7 @@
@ Load kSaveAllCalleeSaves Method* into rTemp.
ldr \rTemp, [\rTemp, #RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+ str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 36 + 64 + 12)
@@ -86,7 +86,7 @@
@ Load kSaveRefsOnly Method* into rTemp.
ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+ str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_REFS_ONLY != 28 + 4)
@@ -147,13 +147,13 @@
@ Load kSaveRefsAndArgs Method* into rTemp.
ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+ str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
.endm
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
str r0, [sp, #0] @ Store ArtMethod* to bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+ str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
.endm
.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
@@ -193,7 +193,7 @@
@ Load kSaveEverything Method* into rTemp.
ldr \rTemp, [\rTemp, #\runtime_method_offset]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+ str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_EVERYTHING != 56 + 128 + 8)
@@ -301,7 +301,7 @@
* exception is Thread::Current()->exception_ when the runtime method frame is ready.
*/
.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
- mov r0, r9 @ pass Thread::Current
+ mov r0, rSELF @ pass Thread::Current
bl artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*)
.endm
@@ -318,7 +318,7 @@
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0 @ save all registers as basis for long jump context
- mov r0, r9 @ pass Thread::Current
+ mov r0, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
@@ -327,7 +327,7 @@ END \c_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_EVERYTHING_FRAME r0 @ save all registers as basis for long jump context
- mov r0, r9 @ pass Thread::Current
+ mov r0, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
@@ -336,7 +336,7 @@ END \c_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r1 @ save all registers as basis for long jump context
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
@@ -345,13 +345,13 @@ END \c_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_EVERYTHING_FRAME r2 @ save all registers as basis for long jump context
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
- ldr \reg, [r9, #THREAD_EXCEPTION_OFFSET] // Get exception field.
+ ldr \reg, [rSELF, #THREAD_EXCEPTION_OFFSET] @ Get exception field.
cbnz \reg, 1f
bx lr
1:
@@ -377,7 +377,7 @@ END \c_name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl \entrypoint @ (uint32_t field_idx, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -389,7 +389,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
bl \entrypoint @ (field_idx, Object*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -401,7 +401,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r3 @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
+ mov r3, rSELF @ pass Thread::Current
bl \entrypoint @ (field_idx, Object*, new_val, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
REFRESH_MARKING_REGISTER
@@ -448,7 +448,7 @@ ENTRY art_quick_throw_null_pointer_exception_from_signal
@ save all registers as basis for long jump context
SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED r1
mov r0, lr @ pass the fault address stored in LR by the fault handler.
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl artThrowNullPointerExceptionFromSignal @ (Thread*)
END art_quick_throw_null_pointer_exception_from_signal
@@ -494,7 +494,7 @@ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFr
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
SETUP_SAVE_REFS_AND_ARGS_FRAME r2 @ save callee saves in case allocation triggers GC
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
mov r3, sp
bl \cxx_name @ (method_idx, this, Thread*, SP)
mov r12, r1 @ save Method*->code_
@@ -682,50 +682,48 @@ TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode,
*/
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
+ ldr r1, [rSELF, #THREAD_ID_OFFSET]
cbz r0, .Lslow_lock
.Lretry_lock:
- ldr r2, [r9, #THREAD_ID_OFFSET]
- ldrex r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- mov r3, r1
- and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
- cbnz r3, .Lnot_unlocked @ already thin locked
- @ unlocked case - r1: original lock word that's zero except for the read barrier bits.
- orr r2, r1, r2 @ r2 holds thread id with count of 0 with preserved read barrier bits
- strex r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- cbnz r3, .Llock_strex_fail @ store failed, retry
- dmb ish @ full (LoadLoad|LoadStore) memory barrier
+ ldrex r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ eor r3, r2, r1 @ Prepare the value to store if unlocked
+ @ (thread id, count of 0 and preserved read barrier bits),
+ @ or prepare to compare thread id for recursive lock check
+ @ (lock_word.ThreadId() ^ self->ThreadId()).
+ ands ip, r2, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ Test the non-gc bits.
+ bne .Lnot_unlocked @ Check if unlocked.
+ @ unlocked case - store r3: original lock word plus thread id, preserved read barrier bits.
+ strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ cbnz r2, .Llock_strex_fail @ If store failed, retry.
+ dmb ish @ Full (LoadLoad|LoadStore) memory barrier.
bx lr
-.Lnot_unlocked: @ r1: original lock word, r2: thread_id with count of 0 and zero read barrier bits
- lsr r3, r1, LOCK_WORD_STATE_SHIFT
- cbnz r3, .Lslow_lock @ if either of the top two bits are set, go slow path
- eor r2, r1, r2 @ lock_word.ThreadId() ^ self->ThreadId()
- uxth r2, r2 @ zero top 16 bits
- cbnz r2, .Lslow_lock @ lock word and self thread id's match -> recursive lock
- @ else contention, go to slow path
- mov r3, r1 @ copy the lock word to check count overflow.
- and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits.
- add r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ increment count in lock word placing in r2 to check overflow
- lsr r3, r2, #LOCK_WORD_GC_STATE_SHIFT @ if the first gc state bit is set, we overflowed.
- cbnz r3, .Lslow_lock @ if we overflow the count go slow path
- add r2, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ increment count for real
- strex r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
- cbnz r3, .Llock_strex_fail @ strex failed, retry
+.Lnot_unlocked: @ r2: original lock word, r1: thread_id, r3: r2 ^ r1
+#if LOCK_WORD_THIN_LOCK_COUNT_SHIFT + LOCK_WORD_THIN_LOCK_COUNT_SIZE != LOCK_WORD_GC_STATE_SHIFT
+#error "Expecting thin lock count and gc state in consecutive bits."
+#endif
+ @ Check lock word state and thread id together,
+ bfc r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #(LOCK_WORD_THIN_LOCK_COUNT_SIZE + LOCK_WORD_GC_STATE_SIZE)
+ cbnz r3, .Lslow_lock @ if either of the top two bits are set, or the lock word's
+ @ thread id did not match, go slow path.
+ add r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ Increment the recursive lock count.
+ @ Extract the new thin lock count for overflow check.
+ ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
+ cbz r2, .Lslow_lock @ Zero as the new count indicates overflow, go slow path.
+ strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits.
+ cbnz r2, .Llock_strex_fail @ If strex failed, retry.
bx lr
.Llock_strex_fail:
b .Lretry_lock @ retry
-.Lslow_lock:
- SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case we block
- mov r1, r9 @ pass Thread::Current
- bl artLockObjectFromCode @ (Object* obj, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+// Note: the slow path is actually the art_quick_lock_object_no_inline (tail call).
END art_quick_lock_object
ENTRY art_quick_lock_object_no_inline
+ // This is also the slow path for art_quick_lock_object. Note that we
+ // need a local label, the assembler complains about target being out of
+ // range if we try to jump to `art_quick_lock_object_no_inline`.
+.Lslow_lock:
SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case we block
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl artLockObjectFromCode @ (Object* obj, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
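
The rewritten fast path hoists the thread-id load out of the retry loop and replaces the separate state/owner/count tests with one XOR plus one bit-clear. A C++ rendering of that control flow (a sketch with an illustrative field layout, not ART's real LockWord API):

#include <atomic>
#include <cstdint>

// Illustrative 32-bit layout: [state:2][gc/read-barrier:2][count:12][owner:16].
constexpr uint32_t kGcBitsMaskToggled = 0xCFFFFFFFu;  // everything but the gc bits
constexpr uint32_t kStateAndOwnerMask = 0xC000FFFFu;  // state bits | owner thread id
constexpr uint32_t kCountOne = 1u << 16;
constexpr uint32_t kCountMask = 0x0FFF0000u;

// Returns true on fast-path success; false means "take the slow path".
bool TryLockFast(std::atomic<uint32_t>& lock_word, uint32_t thread_id) {
  for (;;) {                                                        // .Lretry_lock
    uint32_t old_word = lock_word.load(std::memory_order_relaxed);  // ldrex
    uint32_t xored = old_word ^ thread_id;                          // eor r3, r2, r1
    if ((old_word & kGcBitsMaskToggled) == 0u) {
      // Unlocked: the XOR is already the new word (thread id plus preserved
      // read barrier bits). Install it; on strex failure just retry.
      if (lock_word.compare_exchange_weak(old_word, xored, std::memory_order_acquire)) {
        return true;
      }
      continue;
    }
    // bfc clears the count and gc bits from the XOR; any leftover bit means
    // a fat/forwarding state or a different owner, so go slow.
    if ((xored & kStateAndOwnerMask) != 0u) {
      return false;  // .Lslow_lock
    }
    uint32_t new_word = old_word + kCountOne;  // recursive lock: bump the count
    if ((new_word & kCountMask) == 0u) {
      return false;  // count wrapped to zero: overflow, go slow
    }
    if (lock_word.compare_exchange_weak(old_word, new_word, std::memory_order_relaxed)) {
      return true;
    }
  }
}
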
@@ -739,62 +737,59 @@ END art_quick_lock_object_no_inline
*/
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
+ ldr r1, [rSELF, #THREAD_ID_OFFSET]
cbz r0, .Lslow_unlock
.Lretry_unlock:
#ifndef USE_READ_BARRIER
- ldr r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ ldr r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#else
- ldrex r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ Need to use atomic instructions for read barrier
+ @ Need to use atomic instructions for read barrier.
+ ldrex r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#endif
- lsr r2, r1, #LOCK_WORD_STATE_SHIFT
- cbnz r2, .Lslow_unlock @ if either of the top two bits are set, go slow path
- ldr r2, [r9, #THREAD_ID_OFFSET]
- mov r3, r1 @ copy lock word to check thread id equality
- and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
- eor r3, r3, r2 @ lock_word.ThreadId() ^ self->ThreadId()
- uxth r3, r3 @ zero top 16 bits
- cbnz r3, .Lslow_unlock @ do lock word and self thread id's match?
- mov r3, r1 @ copy lock word to detect transition to unlocked
- and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
- cmp r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
- bpl .Lrecursive_thin_unlock
- @ transition to unlocked
- mov r3, r1
- and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED @ r3: zero except for the preserved gc bits
- dmb ish @ full (LoadStore|StoreStore) memory barrier
+ eor r3, r2, r1 @ Prepare the value to store if simply locked
+ @ (mostly 0s, and preserved read barrier bits),
+ @ or prepare to compare thread id for recursive lock check
+ @ (lock_word.ThreadId() ^ self->ThreadId()).
+ ands ip, r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ Test the non-gc bits.
+ bne .Lnot_simply_locked @ Locked recursively or by other thread?
+ @ Transition to unlocked.
+ dmb ish @ Full (LoadStore|StoreStore) memory barrier.
#ifndef USE_READ_BARRIER
str r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#else
strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
- cbnz r2, .Lunlock_strex_fail @ store failed, retry
+ cbnz r2, .Lunlock_strex_fail @ If the store failed, retry.
#endif
bx lr
-.Lrecursive_thin_unlock: @ r1: original lock word
- sub r1, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ decrement count
+.Lnot_simply_locked: @ r2: original lock word, r1: thread_id, r3: r2 ^ r1
+#if LOCK_WORD_THIN_LOCK_COUNT_SHIFT + LOCK_WORD_THIN_LOCK_COUNT_SIZE != LOCK_WORD_GC_STATE_SHIFT
+#error "Expecting thin lock count and gc state in consecutive bits."
+#endif
+ @ Check lock word state and thread id together,
+ bfc r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #(LOCK_WORD_THIN_LOCK_COUNT_SIZE + LOCK_WORD_GC_STATE_SIZE)
+ cbnz r3, .Lslow_unlock @ if either of the top two bits are set, or the lock word's
+ @ thread id did not match, go slow path.
+ sub r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ Decrement recursive lock count.
#ifndef USE_READ_BARRIER
- str r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ str r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#else
- strex r2, r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
- cbnz r2, .Lunlock_strex_fail @ store failed, retry
+ strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits.
+ cbnz r2, .Lunlock_strex_fail @ If the store failed, retry.
#endif
bx lr
.Lunlock_strex_fail:
b .Lretry_unlock @ retry
-.Lslow_unlock:
- @ save callee saves in case exception allocation triggers GC
- SETUP_SAVE_REFS_ONLY_FRAME r1
- mov r1, r9 @ pass Thread::Current
- bl artUnlockObjectFromCode @ (Object* obj, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+// Note: the slow path is actually the art_quick_unlock_object_no_inline (tail call).
END art_quick_unlock_object
ENTRY art_quick_unlock_object_no_inline
+ // This is also the slow path for art_quick_unlock_object. Note that we
+ // need a local label, the assembler complains about target being out of
+ // range if we try to jump to `art_quick_unlock_object_no_inline`.
+.Lslow_unlock:
@ save callee saves in case exception allocation triggers GC
SETUP_SAVE_REFS_ONLY_FRAME r1
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
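
The unlock fast path mirrors this: the same XOR either yields the fully unlocked word (when the lock is held exactly once by the calling thread, only preserved read barrier bits remain) or feeds the combined state/owner check. A companion sketch, same illustrative layout as above:

bool TryUnlockFast(std::atomic<uint32_t>& lock_word, uint32_t thread_id) {
  for (;;) {                                               // .Lretry_unlock
    uint32_t old_word = lock_word.load(std::memory_order_relaxed);
    uint32_t xored = old_word ^ thread_id;                 // eor r3, r2, r1
    if ((xored & kGcBitsMaskToggled) == 0u) {
      // Simply locked by us: xored is mostly zeros plus the preserved gc
      // bits. Publish it with release semantics (the dmb ish before the store).
      if (lock_word.compare_exchange_weak(old_word, xored, std::memory_order_release)) {
        return true;
      }
      continue;
    }
    if ((xored & kStateAndOwnerMask) != 0u) {
      return false;  // .Lslow_unlock: fat lock or not the owner
    }
    uint32_t new_word = old_word - kCountOne;              // recursive: drop one count
    if (lock_word.compare_exchange_weak(old_word, new_word, std::memory_order_relaxed)) {
      return true;
    }
  }
}
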
@@ -832,7 +827,7 @@ ENTRY art_quick_check_instance_of
.Lthrow_class_cast_exception_for_bitstring_check:
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
bl artThrowClassCastExceptionForObject @ (Object*, Class*, Thread*)
bkpt
END art_quick_check_instance_of
@@ -917,7 +912,7 @@ ENTRY art_quick_aput_obj
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
POISON_HEAP_REF r2
str r2, [r3, r1, lsl #2]
- ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
+ ldr r3, [rSELF, #THREAD_CARD_TABLE_OFFSET]
lsr r0, r0, #CARD_TABLE_CARD_SHIFT
strb r3, [r3, r0]
blx lr
@@ -945,7 +940,7 @@ ENTRY art_quick_aput_obj
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
POISON_HEAP_REF r2
str r2, [r3, r1, lsl #2]
- ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
+ ldr r3, [rSELF, #THREAD_CARD_TABLE_OFFSET]
lsr r0, r0, #CARD_TABLE_CARD_SHIFT
strb r3, [r3, r0]
blx lr
@@ -954,7 +949,7 @@ ENTRY art_quick_aput_obj
/* No need to repeat restore cfi directives, the ones above apply here. */
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r3
mov r1, r2
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
bl artThrowArrayStoreException @ (Class*, Class*, Thread*)
bkpt @ unreached
END art_quick_aput_obj
@@ -964,7 +959,7 @@ END art_quick_aput_obj
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -977,7 +972,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -990,7 +985,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r3 @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
+ mov r3, rSELF @ pass Thread::Current
@ (uint32_t type_idx, Method* method, int32_t component_count, Thread*)
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
@@ -1004,7 +999,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
- str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ str rSELF, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl \entrypoint
add sp, #16 @ strip the extra frame
@@ -1023,7 +1018,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_EVERYTHING_FRAME r1, \runtime_method_offset @ save everything in case of GC
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl \entrypoint @ (uint32_t index, Thread*)
cbz r0, 1f @ If result is null, deliver the OOME.
.cfi_remember_state
@@ -1065,9 +1060,9 @@ ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode,
.extern artGet64StaticFromCompiledCode
ENTRY art_quick_get64_static
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r1, r9 @ pass Thread::Current
- bl artGet64StaticFromCompiledCode @ (uint32_t field_idx, Thread*)
- ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ mov r1, rSELF @ pass Thread::Current
+ bl artGet64StaticFromCompiledCode @ (uint32_t field_idx, Thread*)
+ ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
cbnz r2, 1f @ success if no exception pending
@@ -1091,9 +1086,9 @@ TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCo
.extern artGet64InstanceFromCompiledCode
ENTRY art_quick_get64_instance
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
- bl artGet64InstanceFromCompiledCode @ (field_idx, Object*, Thread*)
- ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ mov r2, rSELF @ pass Thread::Current
+ bl artGet64InstanceFromCompiledCode @ (field_idx, Object*, Thread*)
+ ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
cbnz r2, 1f @ success if no exception pending
@@ -1125,7 +1120,7 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiled
ENTRY art_quick_set64_instance
SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
- str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ str rSELF, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl artSet64InstanceFromCompiledCode @ (field_idx, Object*, new_val, Thread*)
add sp, #16 @ release out args
@@ -1140,7 +1135,7 @@ END art_quick_set64_instance
ENTRY art_quick_set64_static
SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
- str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ str rSELF, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl artSet64StaticFromCompiledCode @ (field_idx, new_val, Thread*)
add sp, #16 @ release out args
@@ -1185,12 +1180,12 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
ENTRY \c_name
// Fast path rosalloc allocation.
- // r0: type/return value, r9: Thread::Current
+ // r0: type/return value, rSELF (r9): Thread::Current
// r1, r2, r3, r12: free.
- ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
+ ldr r3, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// TODO: consider using ldrd.
- ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
+ ldr r12, [rSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp r3, r12
bhs .Lslow_path\c_name
@@ -1208,7 +1203,7 @@ ENTRY \c_name
// from the size. Since the size is
// already aligned we can combine the
// two shifts together.
- add r12, r9, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+ add r12, rSELF, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
// Subtract pointer size since there
// are no runs for 0 byte allocations
// and the size is already aligned.
@@ -1236,9 +1231,9 @@ ENTRY \c_name
// local allocation stack and
// increment the thread local
// allocation stack top.
- ldr r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ ldr r1, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
str r3, [r1], #COMPRESSED_REFERENCE_SIZE // (Increment r1 as a side effect.)
- str r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ str r1, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
// Decrement the size of the free list
// After this "STR" the object is published to the thread local allocation stack,
@@ -1287,7 +1282,7 @@ ENTRY \c_name
.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl \cxx_name @ (mirror::Class* cls, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
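
In the surrounding (unchanged) rosalloc code, the bracket-run lookup packs two steps into one shifted add: index = size / bracket_quantum, then index * sizeof(void*) into the thread's runs array, offset down one slot because no run exists for size 0. A sketch of that address computation; the shift values and offsets here are illustrative, not ART's exact constants:

constexpr size_t kQuantumShift = 4;  // bracket quantum of 16 bytes, illustrative
constexpr size_t kPtrShift = 2;      // 32-bit pointers

uintptr_t BracketRunEntry(uintptr_t self, size_t aligned_size, size_t runs_offset) {
  // (size >> quantum_shift) << ptr_shift folds into one right shift because
  // the size is already quantum-aligned:
  uintptr_t entry = self + (aligned_size >> (kQuantumShift - kPtrShift));
  // runs[] starts at the run for one quantum, so back up one pointer slot.
  return entry + runs_offset - (1u << kPtrShift);
}
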
@@ -1301,7 +1296,7 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
// and art_quick_alloc_object_resolved/initialized_region_tlab.
//
-// r0: type r9: Thread::Current, r1, r2, r3, r12: free.
+// r0: type, rSELF (r9): Thread::Current, r1, r2, r3, r12: free.
// Need to preserve r0 to the slow path.
//
// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
@@ -1313,7 +1308,7 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
#endif
- ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
+ ldrd r12, r3, [rSELF, #THREAD_LOCAL_POS_OFFSET]
sub r12, r3, r12 // Compute the remaining buf size.
ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
cmp r3, r12 // Check if it fits.
@@ -1326,9 +1321,9 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
// Reload old thread_local_pos (r0)
// for the return value.
- ldr r2, [r9, #THREAD_LOCAL_POS_OFFSET]
+ ldr r2, [rSELF, #THREAD_LOCAL_POS_OFFSET]
add r1, r2, r3
- str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ str r1, [rSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
// After this "STR" the object is published to the thread local allocation stack,
// and it will be observable from a runtime internal (eg. Heap::VisitObjects) point of view.
// It is not yet visible to the running (user) compiled code until after the return.
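
This fast path reads thread_local_pos and thread_local_end with a single ldrd (the #error above guards that they stay adjacent and 8-byte aligned) and then bump-allocates. A C++ sketch of the equivalent logic; the struct is illustrative, standing in for the THREAD_LOCAL_* offsets into Thread:

#include <cstddef>

struct TlabView {
  char* pos;       // thread_local_pos \ adjacent and 8-byte aligned, so one
  char* end;       // thread_local_end / ldrd fetches both
  size_t objects;  // thread_local_objects
};

void* AllocObjectTlabFast(TlabView& tlab, size_t object_size) {
  if (object_size > static_cast<size_t>(tlab.end - tlab.pos)) {
    return nullptr;  // does not fit: take the slow path
  }
  void* obj = tlab.pos;     // old thread_local_pos is the return value
  tlab.pos += object_size;  // store new thread_local_pos
  ++tlab.objects;
  // The assembly then stores the class pointer into the object header and
  // issues dmb ish; that store is what publishes the object.
  return obj;
}
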
@@ -1346,9 +1341,9 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
//
// (Note: The actual check is done by checking that the object's class pointer is non-null.
// Also, unlike rosalloc, the object can never be observed as null).
- ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ ldr r1, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add r1, r1, #1
- str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+ str r1, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
POISON_HEAP_REF r0
str r0, [r2, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
// Fence. This is "ish" not "ishst" so
@@ -1375,12 +1370,12 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint, isInitialized
ENTRY \name
// Fast path tlab allocation.
- // r0: type, r9: Thread::Current
+ // r0: type, rSELF (r9): Thread::Current
// r1, r2, r3, r12: free.
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\name, \isInitialized
.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
- mov r1, r9 // Pass Thread::Current.
+ mov r1, rSELF // Pass Thread::Current.
bl \entrypoint // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -1397,7 +1392,7 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
// and art_quick_alloc_array_resolved/initialized_region_tlab.
//
-// r0: type r1: component_count r2: total_size r9: Thread::Current, r3, r12: free.
+// r0: type, r1: component_count, r2: total_size, rSELF (r9): Thread::Current, r3, r12: free.
// Need to preserve r0 and r1 to the slow path.
.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
and r2, r2, #OBJECT_ALIGNMENT_MASK_TOGGLED // Apply alignment mask
@@ -1409,7 +1404,7 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
#endif
- ldrd r3, r12, [r9, #THREAD_LOCAL_POS_OFFSET]
+ ldrd r3, r12, [rSELF, #THREAD_LOCAL_POS_OFFSET]
sub r12, r12, r3 // Compute the remaining buf size.
cmp r2, r12 // Check if the total_size fits.
// The array class is always initialized here. Unlike new-instance,
@@ -1417,10 +1412,10 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
add r2, r2, r3
- str r2, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
- ldr r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ str r2, [rSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ ldr r2, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add r2, r2, #1
- str r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+ str r2, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
POISON_HEAP_REF r0
str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
str r1, [r3, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
@@ -1443,7 +1438,7 @@ ENTRY \name
// Fast path array allocation for region tlab allocation.
// r0: mirror::Class* type
// r1: int32_t component_count
- // r9: thread
+ // rSELF (r9): thread
// r2, r3, r12: free.
\size_setup .Lslow_path\name
ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name
@@ -1452,7 +1447,7 @@ ENTRY \name
// r1: int32_t component_count
// r2: Thread* self
SETUP_SAVE_REFS_ONLY_FRAME r2 // save callee saves in case of GC
- mov r2, r9 // pass Thread::Current
+ mov r2, rSELF // pass Thread::Current
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -1575,10 +1570,10 @@ END art_quick_implicit_suspend
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
- ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
// Tear down the callee-save frame. Skip arg registers.
add sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
.cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
@@ -1706,7 +1701,7 @@ END art_quick_imt_conflict_trampoline
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
SETUP_SAVE_REFS_AND_ARGS_FRAME r2
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickResolutionTrampoline @ (Method* called, receiver, Thread*, SP)
cbz r0, 1f @ is code pointer null? goto exception
@@ -1780,10 +1775,10 @@ ENTRY art_quick_generic_jni_trampoline
blx artQuickGenericJniEndTrampoline
// Restore self pointer.
- mov r9, r11
+ mov rSELF, r11
// Pending exceptions possible.
- ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
cbnz r2, .Lexception_in_native
// Tear down the alloca.
@@ -1804,7 +1799,7 @@ ENTRY art_quick_generic_jni_trampoline
.cfi_adjust_cfa_offset FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY
.Lexception_in_native:
- ldr ip, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]
+ ldr ip, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
add ip, ip, #-1 // Remove the GenericJNI tag. ADD/SUB writing directly to SP is UNPREDICTABLE.
mov sp, ip
.cfi_def_cfa_register sp
@@ -1815,10 +1810,10 @@ END art_quick_generic_jni_trampoline
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
SETUP_SAVE_REFS_AND_ARGS_FRAME r1
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
mov r2, sp @ pass SP
blx artQuickToInterpreterBridge @ (Method* method, Thread*, SP)
- ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
// Tear down the callee-save frame. Skip arg registers.
add sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
.cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
@@ -1846,7 +1841,7 @@ ENTRY art_quick_instrumentation_entry
SETUP_SAVE_REFS_AND_ARGS_FRAME r2
@ preserve r0 (not normally an arg) knowing there is a spare slot in kSaveRefsAndArgs.
str r0, [sp, #4]
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
mov r3, sp @ pass SP
blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, SP)
cbz r0, .Ldeliver_instrumentation_entry_exception
@@ -1872,7 +1867,7 @@ ENTRY art_quick_instrumentation_exit
add r3, sp, #8 @ store fpr_res pointer, in kSaveEverything frame
add r2, sp, #136 @ store gpr_res pointer, in kSaveEverything frame
mov r1, sp @ pass SP
- mov r0, r9 @ pass Thread::Current
+ mov r0, rSELF @ pass Thread::Current
blx artInstrumentationMethodExitFromCode @ (Thread*, SP, gpr_res*, fpr_res*)
cbz r0, .Ldo_deliver_instrumentation_exception
@@ -1901,7 +1896,7 @@ END art_quick_instrumentation_exit
.extern artDeoptimize
ENTRY art_quick_deoptimize
SETUP_SAVE_EVERYTHING_FRAME r0
- mov r0, r9 @ pass Thread::Current
+ mov r0, rSELF @ pass Thread::Current
blx artDeoptimize @ (Thread*)
END art_quick_deoptimize
@@ -1912,7 +1907,7 @@ END art_quick_deoptimize
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
SETUP_SAVE_EVERYTHING_FRAME r1
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
blx artDeoptimizeFromCompiledCode @ (DeoptimizationKind, Thread*)
END art_quick_deoptimize_from_compiled_code
@@ -2691,7 +2686,7 @@ END art_quick_read_barrier_mark_introspection
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
SETUP_SAVE_REFS_AND_ARGS_FRAME r2
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
mov r3, sp @ pass SP
mov r0, #0 @ initialize 64-bit JValue as zero.
str r0, [sp, #-4]!
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/callee_save_frame_arm64.h
index 2d2b500ae9..bc36bfabec 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/callee_save_frame_arm64.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
-#define ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+#ifndef ART_RUNTIME_ARCH_ARM64_CALLEE_SAVE_FRAME_ARM64_H_
+#define ART_RUNTIME_ARCH_ARM64_CALLEE_SAVE_FRAME_ARM64_H_
#include "arch/instruction_set.h"
#include "base/bit_utils.h"
@@ -79,57 +79,56 @@ static constexpr uint32_t kArm64CalleeSaveFpEverythingSpills =
(1 << art::arm64::D27) | (1 << art::arm64::D28) | (1 << art::arm64::D29) |
(1 << art::arm64::D30) | (1 << art::arm64::D31);
-constexpr uint32_t Arm64CalleeSaveCoreSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveAllSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
-}
+class Arm64CalleeSaveFrame {
+ public:
+ static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveEverythingSpills : 0);
+ }
-constexpr uint32_t Arm64CalleeSaveFpSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
-}
+ static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
+ }
-constexpr uint32_t Arm64CalleeSaveFrameSize(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return RoundUp((POPCOUNT(Arm64CalleeSaveCoreSpills(type)) /* gprs */ +
- POPCOUNT(Arm64CalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * static_cast<size_t>(kArm64PointerSize), kStackAlignment);
-}
+ static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+ POPCOUNT(GetFpSpills(type)) /* fprs */ +
+ 1 /* Method* */) * static_cast<size_t>(kArm64PointerSize), kStackAlignment);
+ }
-constexpr QuickMethodFrameInfo Arm64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return QuickMethodFrameInfo(Arm64CalleeSaveFrameSize(type),
- Arm64CalleeSaveCoreSpills(type),
- Arm64CalleeSaveFpSpills(type));
-}
+ static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+ }
-constexpr size_t Arm64CalleeSaveFpr1Offset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return Arm64CalleeSaveFrameSize(type) -
- (POPCOUNT(Arm64CalleeSaveCoreSpills(type)) +
- POPCOUNT(Arm64CalleeSaveFpSpills(type))) * static_cast<size_t>(kArm64PointerSize);
-}
+ static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ (POPCOUNT(GetCoreSpills(type)) +
+ POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kArm64PointerSize);
+ }
-constexpr size_t Arm64CalleeSaveGpr1Offset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return Arm64CalleeSaveFrameSize(type) -
- POPCOUNT(Arm64CalleeSaveCoreSpills(type)) * static_cast<size_t>(kArm64PointerSize);
-}
+ static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kArm64PointerSize);
+ }
-constexpr size_t Arm64CalleeSaveLrOffset(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return Arm64CalleeSaveFrameSize(type) -
- POPCOUNT(Arm64CalleeSaveCoreSpills(type) & (-(1 << LR))) *
- static_cast<size_t>(kArm64PointerSize);
-}
+ static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) - static_cast<size_t>(kArm64PointerSize);
+ }
+};
} // namespace arm64
} // namespace art
-#endif // ART_RUNTIME_ARCH_ARM64_QUICK_METHOD_FRAME_INFO_ARM64_H_
+#endif // ART_RUNTIME_ARCH_ARM64_CALLEE_SAVE_FRAME_ARM64_H_
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index ac5b2b8b88..14d0cc7db4 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1151,45 +1151,36 @@ END art_quick_do_long_jump
*/
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
- cbz w0, .Lslow_lock
- add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET // exclusive load/store has no immediate anymore
+ ldr w1, [xSELF, #THREAD_ID_OFFSET]
+ cbz w0, art_quick_lock_object_no_inline
+ // Exclusive load/store has no immediate anymore.
+ add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET
.Lretry_lock:
- ldr w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
- ldaxr w1, [x4] // acquire needed only in most common case
- and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
- cbnz w3, .Lnot_unlocked // already thin locked
- // unlocked case - x1: original lock word that's zero except for the read barrier bits.
- orr x2, x1, x2 // x2 holds thread id with count of 0 with preserved read barrier bits
- stxr w3, w2, [x4]
- cbnz w3, .Llock_stxr_fail // store failed, retry
+ ldaxr w2, [x4] // Acquire needed only in most common case.
+ eor w3, w2, w1 // Prepare the value to store if unlocked
+ // (thread id, count of 0 and preserved read barrier bits),
+ // or prepare to compare thread id for recursive lock check
+ // (lock_word.ThreadId() ^ self->ThreadId()).
+ tst w2, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // Test the non-gc bits.
+ b.ne .Lnot_unlocked // Check if unlocked.
+ // unlocked case - store w3: original lock word plus thread id, preserved read barrier bits.
+ stxr w2, w3, [x4]
+ cbnz w2, .Lretry_lock // If the store failed, retry.
ret
-.Lnot_unlocked: // x1: original lock word
- lsr w3, w1, LOCK_WORD_STATE_SHIFT
- cbnz w3, .Lslow_lock // if either of the top two bits are set, go slow path
- eor w2, w1, w2 // lock_word.ThreadId() ^ self->ThreadId()
- uxth w2, w2 // zero top 16 bits
- cbnz w2, .Lslow_lock // lock word and self thread id's match -> recursive lock
- // else contention, go to slow path
- and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits.
- add w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count in lock word placing in w2 to check overflow
- lsr w3, w2, #LOCK_WORD_GC_STATE_SHIFT // if the first gc state bit is set, we overflowed.
- cbnz w3, .Lslow_lock // if we overflow the count go slow path
- add w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count for real
- stxr w3, w2, [x4]
- cbnz w3, .Llock_stxr_fail // store failed, retry
+.Lnot_unlocked: // w2: original lock word, w1: thread id, w3: w2 ^ w1
+ // Check lock word state and thread id together,
+ tst w3, #(LOCK_WORD_STATE_MASK_SHIFTED | LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED)
+ b.ne art_quick_lock_object_no_inline
+ add w3, w2, #LOCK_WORD_THIN_LOCK_COUNT_ONE // Increment the recursive lock count.
+ tst w3, #LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED // Test the new thin lock count.
+ b.eq art_quick_lock_object_no_inline // Zero as the new count indicates overflow, go slow path.
+ stxr w2, w3, [x4]
+ cbnz w2, .Lretry_lock // If the store failed, retry.
ret
-.Llock_stxr_fail:
- b .Lretry_lock // retry
-.Lslow_lock:
- SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case we block
- mov x1, xSELF // pass Thread::Current
- bl artLockObjectFromCode // (Object* obj, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- REFRESH_MARKING_REGISTER
- RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object
ENTRY art_quick_lock_object_no_inline
+ // This is also the slow path for art_quick_lock_object.
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case we block
mov x1, xSELF // pass Thread::Current
bl artLockObjectFromCode // (Object* obj, Thread*)
@@ -1206,54 +1197,46 @@ END art_quick_lock_object_no_inline
*/
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
- cbz x0, .Lslow_unlock
- add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET // exclusive load/store has no immediate anymore
+ ldr w1, [xSELF, #THREAD_ID_OFFSET]
+ cbz x0, art_quick_unlock_object_no_inline
+ // Exclusive load/store has no immediate anymore.
+ add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET
.Lretry_unlock:
#ifndef USE_READ_BARRIER
- ldr w1, [x4]
+ ldr w2, [x4]
#else
- ldxr w1, [x4] // Need to use atomic instructions for read barrier
+ ldxr w2, [x4] // Need to use atomic instructions for read barrier.
#endif
- lsr w2, w1, LOCK_WORD_STATE_SHIFT
- cbnz w2, .Lslow_unlock // if either of the top two bits are set, go slow path
- ldr w2, [xSELF, #THREAD_ID_OFFSET]
- and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
- eor w3, w3, w2 // lock_word.ThreadId() ^ self->ThreadId()
- uxth w3, w3 // zero top 16 bits
- cbnz w3, .Lslow_unlock // do lock word and self thread id's match?
- and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
- cmp w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
- bpl .Lrecursive_thin_unlock
- // transition to unlocked
- and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED // w3: zero except for the preserved read barrier bits
+ eor w3, w2, w1 // Prepare the value to store if simply locked
+ // (mostly 0s, and preserved read barrier bits),
+ // or prepare to compare thread id for recursive lock check
+ // (lock_word.ThreadId() ^ self->ThreadId()).
+ tst w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // Test the non-gc bits.
+ b.ne .Lnot_simply_locked // Locked recursively or by other thread?
+ // Transition to unlocked.
#ifndef USE_READ_BARRIER
stlr w3, [x4]
#else
- stlxr w2, w3, [x4] // Need to use atomic instructions for read barrier
- cbnz w2, .Lunlock_stxr_fail // store failed, retry
+ stlxr w2, w3, [x4] // Need to use atomic instructions for read barrier.
+ cbnz w2, .Lretry_unlock // If the store failed, retry.
#endif
ret
-.Lrecursive_thin_unlock: // w1: original lock word
- sub w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE // decrement count
+.Lnot_simply_locked:
+ // Check lock word state and thread id together,
+ tst w3, #(LOCK_WORD_STATE_MASK_SHIFTED | LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED)
+ b.ne art_quick_unlock_object_no_inline
+ sub w3, w2, #LOCK_WORD_THIN_LOCK_COUNT_ONE // decrement count
#ifndef USE_READ_BARRIER
- str w1, [x4]
+ str w3, [x4]
#else
- stxr w2, w1, [x4] // Need to use atomic instructions for read barrier
- cbnz w2, .Lunlock_stxr_fail // store failed, retry
+ stxr w2, w3, [x4] // Need to use atomic instructions for read barrier.
+ cbnz w2, .Lretry_unlock // If the store failed, retry.
#endif
ret
-.Lunlock_stxr_fail:
- b .Lretry_unlock // retry
-.Lslow_unlock:
- SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case exception allocation triggers GC
- mov x1, xSELF // pass Thread::Current
- bl artUnlockObjectFromCode // (Object* obj, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- REFRESH_MARKING_REGISTER
- RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object
ENTRY art_quick_unlock_object_no_inline
+ // This is also the slow path for art_quick_unlock_object.
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case exception allocation triggers GC
mov x1, xSELF // pass Thread::Current
bl artUnlockObjectFromCode // (Object* obj, Thread*)
diff --git a/runtime/arch/mips/quick_method_frame_info_mips.h b/runtime/arch/mips/callee_save_frame_mips.h
index 8c86252152..6e88d08432 100644
--- a/runtime/arch/mips/quick_method_frame_info_mips.h
+++ b/runtime/arch/mips/callee_save_frame_mips.h
@@ -14,13 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+#ifndef ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
#include "arch/instruction_set.h"
#include "base/bit_utils.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
+#include "base/globals.h"
#include "quick/quick_method_frame_info.h"
#include "registers_mips.h"
@@ -80,37 +81,56 @@ static constexpr uint32_t kMipsCalleeSaveFpEverythingSpills =
(1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
(1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1u << art::mips::F31);
-constexpr uint32_t MipsCalleeSaveCoreSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
-}
+class MipsCalleeSaveFrame {
+ public:
+ static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
+ }
-constexpr uint32_t MipsCalleeSaveFPSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
-}
+ static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
+ }
-constexpr uint32_t MipsCalleeSaveFrameSize(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return RoundUp((POPCOUNT(MipsCalleeSaveCoreSpills(type)) /* gprs */ +
- POPCOUNT(MipsCalleeSaveFPSpills(type)) /* fprs */ +
- 1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
-}
+ static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+ POPCOUNT(GetFpSpills(type)) /* fprs */ +
+ 1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
+ }
-constexpr QuickMethodFrameInfo MipsCalleeSaveMethodFrameInfo(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return QuickMethodFrameInfo(MipsCalleeSaveFrameSize(type),
- MipsCalleeSaveCoreSpills(type),
- MipsCalleeSaveFPSpills(type));
-}
+ static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+ }
+
+ static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ (POPCOUNT(GetCoreSpills(type)) +
+ POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kMipsPointerSize);
+ }
+
+ static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kMipsPointerSize);
+ }
+
+ static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) - static_cast<size_t>(kMipsPointerSize);
+ }
+};
} // namespace mips
} // namespace art
-#endif // ART_RUNTIME_ARCH_MIPS_QUICK_METHOD_FRAME_INFO_MIPS_H_
+#endif // ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
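
The rename from free Mips*() functions to a MipsCalleeSaveFrame class of static constexpr accessors (mirrored for mips64, x86, and x86_64 below) keeps every frame query usable in constant expressions. A small usage sketch, assuming the renamed header is on the include path:

    #include "arch/mips/callee_save_frame_mips.h"

    using art::CalleeSaveType;
    using art::mips::MipsCalleeSaveFrame;

    // All of these fold to constants at compile time.
    constexpr auto kType = CalleeSaveType::kSaveRefsOnly;
    static_assert(MipsCalleeSaveFrame::GetFrameSize(kType) % art::kStackAlignment == 0,
                  "frame sizes are rounded up to the stack alignment");
    static_assert(MipsCalleeSaveFrame::GetReturnPcOffset(kType) ==
                      MipsCalleeSaveFrame::GetFrameSize(kType) - 4u,  // kMipsPointerSize
                  "the return PC occupies the last slot in the frame");
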
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index d5a9b1589e..7c8ac288c3 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -17,13 +17,13 @@
#include <sys/ucontext.h>
#include "fault_handler.h"
+#include "arch/mips/callee_save_frame_mips.h"
#include "art_method.h"
#include "base/callee_save_type.h"
#include "base/globals.h"
#include "base/hex_dump.h"
#include "base/logging.h" // For VLOG.
#include "base/macros.h"
-#include "quick_method_frame_info_mips.h"
#include "registers_mips.h"
#include "thread-current-inl.h"
diff --git a/runtime/arch/mips64/quick_method_frame_info_mips64.h b/runtime/arch/mips64/callee_save_frame_mips64.h
index 520f6319d5..59529a0c7b 100644
--- a/runtime/arch/mips64/quick_method_frame_info_mips64.h
+++ b/runtime/arch/mips64/callee_save_frame_mips64.h
@@ -14,13 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
+#ifndef ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
+#define ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
#include "arch/instruction_set.h"
#include "base/bit_utils.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
+#include "base/globals.h"
#include "quick/quick_method_frame_info.h"
#include "registers_mips64.h"
@@ -71,37 +72,56 @@ static constexpr uint32_t kMips64CalleeSaveFpEverythingSpills =
(1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
(1 << art::mips64::F30) | (1 << art::mips64::F31);
-constexpr uint32_t Mips64CalleeSaveCoreSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
-}
+class Mips64CalleeSaveFrame {
+ public:
+ static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
+ }
-constexpr uint32_t Mips64CalleeSaveFpSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kMips64CalleeSaveFpRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
- (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
-}
+ static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kMips64CalleeSaveFpRefSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
+ }
-constexpr uint32_t Mips64CalleeSaveFrameSize(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return RoundUp((POPCOUNT(Mips64CalleeSaveCoreSpills(type)) /* gprs */ +
- POPCOUNT(Mips64CalleeSaveFpSpills(type)) /* fprs */ +
- + 1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
-}
+ static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+ POPCOUNT(GetFpSpills(type)) /* fprs */ +
+ 1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
+ }
-constexpr QuickMethodFrameInfo Mips64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return QuickMethodFrameInfo(Mips64CalleeSaveFrameSize(type),
- Mips64CalleeSaveCoreSpills(type),
- Mips64CalleeSaveFpSpills(type));
-}
+ static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+ }
+
+ static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ (POPCOUNT(GetCoreSpills(type)) +
+ POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kMips64PointerSize);
+ }
+
+ static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kMips64PointerSize);
+ }
+
+ static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) - static_cast<size_t>(kMips64PointerSize);
+ }
+};
} // namespace mips64
} // namespace art
-#endif // ART_RUNTIME_ARCH_MIPS64_QUICK_METHOD_FRAME_INFO_MIPS64_H_
+#endif // ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 695da4794b..85f3528ec4 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -18,13 +18,13 @@
#include <sys/ucontext.h>
+#include "arch/mips64/callee_save_frame_mips64.h"
#include "art_method.h"
#include "base/callee_save_type.h"
#include "base/globals.h"
#include "base/hex_dump.h"
#include "base/logging.h" // For VLOG.
#include "base/macros.h"
-#include "quick_method_frame_info_mips64.h"
#include "registers_mips64.h"
#include "thread-current-inl.h"
diff --git a/runtime/arch/x86/callee_save_frame_x86.h b/runtime/arch/x86/callee_save_frame_x86.h
new file mode 100644
index 0000000000..f336f43aa3
--- /dev/null
+++ b/runtime/arch/x86/callee_save_frame_x86.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_CALLEE_SAVE_FRAME_X86_H_
+#define ART_RUNTIME_ARCH_X86_CALLEE_SAVE_FRAME_X86_H_
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/callee_save_type.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "quick/quick_method_frame_info.h"
+#include "registers_x86.h"
+
+namespace art {
+namespace x86 {
+
+static constexpr uint32_t kX86CalleeSaveAlwaysSpills =
+ (1 << art::x86::kNumberOfCpuRegisters); // Fake return address callee save.
+static constexpr uint32_t kX86CalleeSaveRefSpills =
+ (1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
+static constexpr uint32_t kX86CalleeSaveArgSpills =
+ (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+static constexpr uint32_t kX86CalleeSaveEverythingSpills =
+ (1 << art::x86::EAX) | (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
+
+static constexpr uint32_t kX86CalleeSaveFpArgSpills =
+ (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
+ (1 << art::x86::XMM2) | (1 << art::x86::XMM3);
+static constexpr uint32_t kX86CalleeSaveFpEverythingSpills =
+ (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
+ (1 << art::x86::XMM2) | (1 << art::x86::XMM3) |
+ (1 << art::x86::XMM4) | (1 << art::x86::XMM5) |
+ (1 << art::x86::XMM6) | (1 << art::x86::XMM7);
+
+class X86CalleeSaveFrame {
+ public:
+ static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kX86CalleeSaveAlwaysSpills | kX86CalleeSaveRefSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
+ }
+
+ static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
+ }
+
+ static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+ 2 * POPCOUNT(GetFpSpills(type)) /* fprs */ +
+ 1 /* Method* */) * static_cast<size_t>(kX86PointerSize), kStackAlignment);
+ }
+
+ static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+ }
+
+ static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ (POPCOUNT(GetCoreSpills(type)) +
+ 2 * POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kX86PointerSize);
+ }
+
+ static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kX86PointerSize);
+ }
+
+ static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) - static_cast<size_t>(kX86PointerSize);
+ }
+};
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_X86_CALLEE_SAVE_FRAME_X86_H_
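
Unlike the other ISAs, the x86 frame weights FPR spills by 2 * POPCOUNT: each XMM save is 8 bytes while kX86PointerSize is 4, so one FPR costs two slots. A sketch of the resulting layout invariants, under the same include-path assumption as above:

    #include "arch/x86/callee_save_frame_x86.h"

    using art::CalleeSaveType;
    using art::x86::X86CalleeSaveFrame;

    constexpr auto kType = CalleeSaveType::kSaveRefsAndArgs;
    // XMM spills sit below the GPRs, which sit just under the return PC slot.
    static_assert(X86CalleeSaveFrame::GetFpr1Offset(kType) <
                      X86CalleeSaveFrame::GetGpr1Offset(kType),
                  "XMM spill area precedes the GPR spill area");
    static_assert(X86CalleeSaveFrame::GetReturnPcOffset(kType) ==
                      X86CalleeSaveFrame::GetFrameSize(kType) - 4u,
                  "the return PC is the topmost slot");
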
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 8ab4ce160f..b89d45f617 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1292,7 +1292,7 @@ DEFINE_FUNCTION art_quick_lock_object
jz .Lslow_lock
.Lretry_lock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
- test LITERAL(LOCK_WORD_STATE_MASK), %ecx // test the 2 high bits.
+ test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx // test the 2 high bits.
jne .Lslow_lock // slow path if either of the two high bits is set.
movl %ecx, %edx // save lock word (edx) to keep read barrier bits.
andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
@@ -1362,7 +1362,7 @@ DEFINE_FUNCTION art_quick_unlock_object
.Lretry_unlock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
movl %fs:THREAD_ID_OFFSET, %edx // edx := thread id
- test LITERAL(LOCK_WORD_STATE_MASK), %ecx
+ test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx
jnz .Lslow_unlock // lock word contains a monitor
cmpw %cx, %dx // does the thread id match?
jne .Lslow_unlock
diff --git a/runtime/arch/x86/quick_method_frame_info_x86.h b/runtime/arch/x86/quick_method_frame_info_x86.h
deleted file mode 100644
index 9a6633365c..0000000000
--- a/runtime/arch/x86/quick_method_frame_info_x86.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
-#define ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_x86.h"
-
-namespace art {
-namespace x86 {
-
-enum XMM {
- XMM0 = 0,
- XMM1 = 1,
- XMM2 = 2,
- XMM3 = 3,
- XMM4 = 4,
- XMM5 = 5,
- XMM6 = 6,
- XMM7 = 7,
-};
-
-static constexpr uint32_t kX86CalleeSaveAlwaysSpills =
- (1 << art::x86::kNumberOfCpuRegisters); // Fake return address callee save.
-static constexpr uint32_t kX86CalleeSaveRefSpills =
- (1 << art::x86::EBP) | (1 << art::x86::ESI) | (1 << art::x86::EDI);
-static constexpr uint32_t kX86CalleeSaveArgSpills =
- (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
-static constexpr uint32_t kX86CalleeSaveEverythingSpills =
- (1 << art::x86::EAX) | (1 << art::x86::ECX) | (1 << art::x86::EDX) | (1 << art::x86::EBX);
-
-static constexpr uint32_t kX86CalleeSaveFpArgSpills =
- (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
- (1 << art::x86::XMM2) | (1 << art::x86::XMM3);
-static constexpr uint32_t kX86CalleeSaveFpEverythingSpills =
- (1 << art::x86::XMM0) | (1 << art::x86::XMM1) |
- (1 << art::x86::XMM2) | (1 << art::x86::XMM3) |
- (1 << art::x86::XMM4) | (1 << art::x86::XMM5) |
- (1 << art::x86::XMM6) | (1 << art::x86::XMM7);
-
-constexpr uint32_t X86CalleeSaveCoreSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kX86CalleeSaveAlwaysSpills | kX86CalleeSaveRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveArgSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveEverythingSpills : 0);
-}
-
-constexpr uint32_t X86CalleeSaveFpSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return (type == CalleeSaveType::kSaveRefsAndArgs ? kX86CalleeSaveFpArgSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kX86CalleeSaveFpEverythingSpills : 0);
-}
-
-constexpr uint32_t X86CalleeSaveFrameSize(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return RoundUp((POPCOUNT(X86CalleeSaveCoreSpills(type)) /* gprs */ +
- 2 * POPCOUNT(X86CalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * static_cast<size_t>(kX86PointerSize), kStackAlignment);
-}
-
-constexpr QuickMethodFrameInfo X86CalleeSaveMethodFrameInfo(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return QuickMethodFrameInfo(X86CalleeSaveFrameSize(type),
- X86CalleeSaveCoreSpills(type),
- X86CalleeSaveFpSpills(type));
-}
-
-} // namespace x86
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_X86_QUICK_METHOD_FRAME_INFO_X86_H_
diff --git a/runtime/arch/x86/registers_x86.h b/runtime/arch/x86/registers_x86.h
index 5a5d226319..d3b959fc53 100644
--- a/runtime/arch/x86/registers_x86.h
+++ b/runtime/arch/x86/registers_x86.h
@@ -42,6 +42,20 @@ enum Register {
};
std::ostream& operator<<(std::ostream& os, const Register& rhs);
+enum XmmRegister {
+ XMM0 = 0,
+ XMM1 = 1,
+ XMM2 = 2,
+ XMM3 = 3,
+ XMM4 = 4,
+ XMM5 = 5,
+ XMM6 = 6,
+ XMM7 = 7,
+ kNumberOfXmmRegisters = 8,
+ kNoXmmRegister = -1 // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg);
+
} // namespace x86
} // namespace art
diff --git a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h b/runtime/arch/x86_64/callee_save_frame_x86_64.h
index ebf976ef71..228a902d38 100644
--- a/runtime/arch/x86_64/quick_method_frame_info_x86_64.h
+++ b/runtime/arch/x86_64/callee_save_frame_x86_64.h
@@ -14,13 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
-#define ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
+#ifndef ART_RUNTIME_ARCH_X86_64_CALLEE_SAVE_FRAME_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_CALLEE_SAVE_FRAME_X86_64_H_
#include "arch/instruction_set.h"
#include "base/bit_utils.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
+#include "base/globals.h"
#include "quick/quick_method_frame_info.h"
#include "registers_x86_64.h"
@@ -55,35 +56,54 @@ static constexpr uint32_t kX86_64CalleeSaveFpEverythingSpills =
(1 << art::x86_64::XMM8) | (1 << art::x86_64::XMM9) |
(1 << art::x86_64::XMM10) | (1 << art::x86_64::XMM11);
-constexpr uint32_t X86_64CalleeSaveCoreSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kX86_64CalleeSaveAlwaysSpills | kX86_64CalleeSaveRefSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
-}
+class X86_64CalleeSaveFrame {
+ public:
+ static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kX86_64CalleeSaveAlwaysSpills | kX86_64CalleeSaveRefSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveArgSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveEverythingSpills : 0);
+ }
-constexpr uint32_t X86_64CalleeSaveFpSpills(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return kX86_64CalleeSaveFpSpills |
- (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
- (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
-}
+ static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return kX86_64CalleeSaveFpSpills |
+ (type == CalleeSaveType::kSaveRefsAndArgs ? kX86_64CalleeSaveFpArgSpills : 0) |
+ (type == CalleeSaveType::kSaveEverything ? kX86_64CalleeSaveFpEverythingSpills : 0);
+ }
-constexpr uint32_t X86_64CalleeSaveFrameSize(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return RoundUp((POPCOUNT(X86_64CalleeSaveCoreSpills(type)) /* gprs */ +
- POPCOUNT(X86_64CalleeSaveFpSpills(type)) /* fprs */ +
- 1 /* Method* */) * static_cast<size_t>(kX86_64PointerSize), kStackAlignment);
-}
+ static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
+ POPCOUNT(GetFpSpills(type)) /* fprs */ +
+ 1 /* Method* */) * static_cast<size_t>(kX86_64PointerSize), kStackAlignment);
+ }
-constexpr QuickMethodFrameInfo X86_64CalleeSaveMethodFrameInfo(CalleeSaveType type) {
- type = GetCanonicalCalleeSaveType(type);
- return QuickMethodFrameInfo(X86_64CalleeSaveFrameSize(type),
- X86_64CalleeSaveCoreSpills(type),
- X86_64CalleeSaveFpSpills(type));
-}
+ static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
+ }
+
+ static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ (POPCOUNT(GetCoreSpills(type)) +
+ POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kX86_64PointerSize);
+ }
+
+ static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) -
+ POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kX86_64PointerSize);
+ }
+
+ static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
+ type = GetCanonicalCalleeSaveType(type);
+ return GetFrameSize(type) - static_cast<size_t>(kX86_64PointerSize);
+ }
+};
} // namespace x86_64
} // namespace art
-#endif // ART_RUNTIME_ARCH_X86_64_QUICK_METHOD_FRAME_INFO_X86_64_H_
+#endif // ART_RUNTIME_ARCH_X86_64_CALLEE_SAVE_FRAME_X86_64_H_
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index eb945ed366..c179033e6b 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1312,7 +1312,7 @@ DEFINE_FUNCTION art_quick_lock_object
jz .Lslow_lock
.Lretry_lock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx // ecx := lock word.
- test LITERAL(LOCK_WORD_STATE_MASK), %ecx // Test the 2 high bits.
+ test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx // Test the 2 high bits.
jne .Lslow_lock // Slow path if either of the two high bits is set.
movl %ecx, %edx // save lock word (edx) to keep read barrier bits.
andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
@@ -1362,7 +1362,7 @@ DEFINE_FUNCTION art_quick_unlock_object
.Lretry_unlock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx // ecx := lock word
movl %gs:THREAD_ID_OFFSET, %edx // edx := thread id
- test LITERAL(LOCK_WORD_STATE_MASK), %ecx
+ test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx
jnz .Lslow_unlock // lock word contains a monitor
cmpw %cx, %dx // does the thread id match?
jne .Lslow_unlock
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index e2ad7fd83f..6917899bff 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -64,20 +64,19 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- CodeInfo code_info = GetCurrentOatQuickMethodHeader()->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ CodeInfo code_info(GetCurrentOatQuickMethodHeader());
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
CodeItemDataAccessor accessor(m->DexInstructionData());
uint16_t number_of_dex_registers = accessor.RegistersSize();
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
- uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
- BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
for (int i = 0; i < number_of_references; ++i) {
int reg = registers[i];
CHECK_LT(reg, accessor.RegistersSize());
DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
- reg, number_of_dex_registers, code_info, encoding);
+ reg, number_of_dex_registers, code_info);
switch (location.GetKind()) {
case DexRegisterLocation::Kind::kNone:
// Not set, should not be a reference.
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 8fd95ed890..657a78bd2f 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -880,11 +880,14 @@ void ThrowVerifyError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
void ThrowWrongMethodTypeException(ObjPtr<mirror::MethodType> expected_type,
ObjPtr<mirror::MethodType> actual_type) {
- ThrowException("Ljava/lang/invoke/WrongMethodTypeException;",
- nullptr,
- StringPrintf("Expected %s but was %s",
- expected_type->PrettyDescriptor().c_str(),
- actual_type->PrettyDescriptor().c_str()).c_str());
+ ThrowWrongMethodTypeException(expected_type->PrettyDescriptor(), actual_type->PrettyDescriptor());
+}
+
+void ThrowWrongMethodTypeException(const std::string& expected_descriptor,
+ const std::string& actual_descriptor) {
+ std::ostringstream msg;
+ msg << "Expected " << expected_descriptor << " but was " << actual_descriptor;
+ ThrowException("Ljava/lang/invoke/WrongMethodTypeException;", nullptr, msg.str().c_str());
}
} // namespace art
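
The new overload lets callers that only have descriptor strings in hand raise the error without first materializing mirror::MethodType objects. A hypothetical call-site sketch (CheckDescriptors is illustrative, not ART API; per the REQUIRES_SHARED annotation in the header, the caller must hold the mutator lock):

    #include <string>
    #include "common_throws.h"

    void CheckDescriptors(const std::string& expected, const std::string& actual) {
      if (expected != actual) {
        art::ThrowWrongMethodTypeException(expected, actual);  // new string overload
      }
    }
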
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 29a056e9ea..6acff6f222 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -274,6 +274,10 @@ void ThrowWrongMethodTypeException(ObjPtr<mirror::MethodType> callee_type,
ObjPtr<mirror::MethodType> callsite_type)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+void ThrowWrongMethodTypeException(const std::string& expected_descriptor,
+ const std::string& actual_descriptor)
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+
} // namespace art
#endif // ART_RUNTIME_COMMON_THROWS_H_
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index c399b1c7fe..95b42d27d2 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -29,6 +29,7 @@
#include "mirror/field.h"
#include "mirror/method.h"
#include "oat_file.h"
+#include "obj_ptr-inl.h"
#include "reflection.h"
#include "thread.h"
#include "well_known_classes.h"
@@ -116,9 +117,9 @@ class ClassData {
DISALLOW_COPY_AND_ASSIGN(ClassData);
};
-mirror::Object* CreateAnnotationMember(const ClassData& klass,
- Handle<mirror::Class> annotation_class,
- const uint8_t** annotation)
+ObjPtr<mirror::Object> CreateAnnotationMember(const ClassData& klass,
+ Handle<mirror::Class> annotation_class,
+ const uint8_t** annotation)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsVisibilityCompatible(uint32_t actual, uint32_t expected) {
@@ -333,7 +334,7 @@ const DexFile::AnnotationSetItem* FindAnnotationSetForClass(const ClassData& kla
return dex_file.GetClassAnnotationSet(annotations_dir);
}
-mirror::Object* ProcessEncodedAnnotation(const ClassData& klass, const uint8_t** annotation)
+ObjPtr<mirror::Object> ProcessEncodedAnnotation(const ClassData& klass, const uint8_t** annotation)
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t type_index = DecodeUnsignedLeb128(annotation);
uint32_t size = DecodeUnsignedLeb128(annotation);
@@ -355,13 +356,13 @@ mirror::Object* ProcessEncodedAnnotation(const ClassData& klass, const uint8_t**
}
ObjPtr<mirror::Class> annotation_member_class =
- soa.Decode<mirror::Class>(WellKnownClasses::libcore_reflect_AnnotationMember).Ptr();
- mirror::Class* annotation_member_array_class =
+ soa.Decode<mirror::Class>(WellKnownClasses::libcore_reflect_AnnotationMember);
+ ObjPtr<mirror::Class> annotation_member_array_class =
class_linker->FindArrayClass(self, &annotation_member_class);
if (annotation_member_array_class == nullptr) {
return nullptr;
}
- mirror::ObjectArray<mirror::Object>* element_array = nullptr;
+ ObjPtr<mirror::ObjectArray<mirror::Object>> element_array = nullptr;
if (size > 0) {
element_array =
mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_member_array_class, size);
@@ -373,7 +374,7 @@ mirror::Object* ProcessEncodedAnnotation(const ClassData& klass, const uint8_t**
Handle<mirror::ObjectArray<mirror::Object>> h_element_array(hs.NewHandle(element_array));
for (uint32_t i = 0; i < size; ++i) {
- mirror::Object* new_member = CreateAnnotationMember(klass, annotation_class, annotation);
+ ObjPtr<mirror::Object> new_member = CreateAnnotationMember(klass, annotation_class, annotation);
if (new_member == nullptr) {
return nullptr;
}
@@ -605,7 +606,7 @@ bool ProcessAnnotationValue(const ClassData& klass,
return false;
}
if (!component_type->IsPrimitive()) {
- mirror::Object* obj = new_annotation_value.value_.GetL();
+ ObjPtr<mirror::Object> obj = new_annotation_value.value_.GetL();
new_array->AsObjectArray<mirror::Object>()->
SetWithoutChecks<kTransactionActive>(i, obj);
} else {
@@ -682,20 +683,20 @@ bool ProcessAnnotationValue(const ClassData& klass,
*annotation_ptr = annotation;
if (result_style == DexFile::kAllObjects && primitive_type != Primitive::kPrimVoid) {
- element_object = BoxPrimitive(primitive_type, annotation_value->value_).Ptr();
+ element_object = BoxPrimitive(primitive_type, annotation_value->value_);
set_object = true;
}
if (set_object) {
- annotation_value->value_.SetL(element_object.Ptr());
+ annotation_value->value_.SetL(element_object);
}
return true;
}
-mirror::Object* CreateAnnotationMember(const ClassData& klass,
- Handle<mirror::Class> annotation_class,
- const uint8_t** annotation) {
+ObjPtr<mirror::Object> CreateAnnotationMember(const ClassData& klass,
+ Handle<mirror::Class> annotation_class,
+ const uint8_t** annotation) {
const DexFile& dex_file = klass.GetDexFile();
Thread* self = Thread::Current();
ScopedObjectAccessUnchecked soa(self);
@@ -799,7 +800,7 @@ const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet(
return nullptr;
}
-mirror::Object* GetAnnotationObjectFromAnnotationSet(
+ObjPtr<mirror::Object> GetAnnotationObjectFromAnnotationSet(
const ClassData& klass,
const DexFile::AnnotationSetItem* annotation_set,
uint32_t visibility,
@@ -814,11 +815,11 @@ mirror::Object* GetAnnotationObjectFromAnnotationSet(
return ProcessEncodedAnnotation(klass, &annotation);
}
-mirror::Object* GetAnnotationValue(const ClassData& klass,
- const DexFile::AnnotationItem* annotation_item,
- const char* annotation_name,
- Handle<mirror::Class> array_class,
- uint32_t expected_type)
+ObjPtr<mirror::Object> GetAnnotationValue(const ClassData& klass,
+ const DexFile::AnnotationItem* annotation_item,
+ const char* annotation_name,
+ Handle<mirror::Class> array_class,
+ uint32_t expected_type)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
const uint8_t* annotation =
@@ -864,7 +865,7 @@ mirror::ObjectArray<mirror::String>* GetSignatureValue(const ClassData& klass,
if (string_array_class == nullptr) {
return nullptr;
}
- mirror::Object* obj =
+ ObjPtr<mirror::Object> obj =
GetAnnotationValue(klass, annotation_item, "value", string_array_class,
DexFile::kDexAnnotationArray);
if (obj == nullptr) {
@@ -873,8 +874,9 @@ mirror::ObjectArray<mirror::String>* GetSignatureValue(const ClassData& klass,
return obj->AsObjectArray<mirror::String>();
}
-mirror::ObjectArray<mirror::Class>* GetThrowsValue(const ClassData& klass,
- const DexFile::AnnotationSetItem* annotation_set)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetThrowsValue(
+ const ClassData& klass,
+ const DexFile::AnnotationSetItem* annotation_set)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
StackHandleScope<1> hs(Thread::Current());
@@ -890,7 +892,7 @@ mirror::ObjectArray<mirror::Class>* GetThrowsValue(const ClassData& klass,
if (class_array_class == nullptr) {
return nullptr;
}
- mirror::Object* obj =
+ ObjPtr<mirror::Object> obj =
GetAnnotationValue(klass, annotation_item, "value", class_array_class,
DexFile::kDexAnnotationArray);
if (obj == nullptr) {
@@ -899,7 +901,7 @@ mirror::ObjectArray<mirror::Class>* GetThrowsValue(const ClassData& klass,
return obj->AsObjectArray<mirror::Class>();
}
-mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(
+ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSet(
const ClassData& klass,
const DexFile::AnnotationSetItem* annotation_set,
uint32_t visibility)
@@ -930,7 +932,7 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(
continue;
}
const uint8_t* annotation = annotation_item->annotation_;
- mirror::Object* annotation_obj = ProcessEncodedAnnotation(klass, &annotation);
+ ObjPtr<mirror::Object> annotation_obj = ProcessEncodedAnnotation(klass, &annotation);
if (annotation_obj != nullptr) {
result->SetWithoutChecks<false>(dest_index, annotation_obj);
++dest_index;
@@ -943,21 +945,21 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(
return result.Get();
}
- mirror::ObjectArray<mirror::Object>* trimmed_result =
+ ObjPtr<mirror::ObjectArray<mirror::Object>> trimmed_result =
mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), dest_index);
if (trimmed_result == nullptr) {
return nullptr;
}
for (uint32_t i = 0; i < dest_index; ++i) {
- mirror::Object* obj = result->GetWithoutChecks(i);
+ ObjPtr<mirror::Object> obj = result->GetWithoutChecks(i);
trimmed_result->SetWithoutChecks<false>(i, obj);
}
return trimmed_result;
}
-mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
+ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSetRefList(
const ClassData& klass,
const DexFile::AnnotationSetRefList* set_ref_list,
uint32_t size)
@@ -968,7 +970,7 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
StackHandleScope<1> hs(self);
ObjPtr<mirror::Class> annotation_array_class =
soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
- mirror::Class* annotation_array_array_class =
+ ObjPtr<mirror::Class> annotation_array_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(self, &annotation_array_class);
if (annotation_array_array_class == nullptr) {
return nullptr;
@@ -982,8 +984,9 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
for (uint32_t index = 0; index < size; ++index) {
const DexFile::AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index];
const DexFile::AnnotationSetItem* set_item = dex_file.GetSetRefItemItem(set_ref_item);
- mirror::Object* annotation_set = ProcessAnnotationSet(klass, set_item,
- DexFile::kDexVisibilityRuntime);
+ ObjPtr<mirror::Object> annotation_set = ProcessAnnotationSet(klass,
+ set_item,
+ DexFile::kDexVisibilityRuntime);
if (annotation_set == nullptr) {
return nullptr;
}
@@ -995,7 +998,8 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
namespace annotations {
-mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field,
+ Handle<mirror::Class> annotation_class) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
if (annotation_set == nullptr) {
return nullptr;
@@ -1008,7 +1012,7 @@ mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> ann
annotation_class);
}
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
StackHandleScope<1> hs(Thread::Current());
const ClassData field_class(hs, field);
@@ -1037,7 +1041,7 @@ bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_
return annotation_item != nullptr;
}
-mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) {
+ObjPtr<mirror::Object> GetAnnotationDefaultValue(ArtMethod* method) {
const ClassData klass(method);
const DexFile* dex_file = &klass.GetDexFile();
const DexFile::AnnotationsDirectoryItem* annotations_dir =
@@ -1081,7 +1085,8 @@ mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) {
return annotation_value.value_.GetL();
}
-mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForMethod(ArtMethod* method,
+ Handle<mirror::Class> annotation_class) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
return nullptr;
@@ -1090,14 +1095,14 @@ mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class>
DexFile::kDexVisibilityRuntime, annotation_class);
}
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForMethod(ArtMethod* method) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
return ProcessAnnotationSet(ClassData(method),
annotation_set,
DexFile::kDexVisibilityRuntime);
}
-mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetExceptionTypesForMethod(ArtMethod* method) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
return nullptr;
@@ -1105,7 +1110,7 @@ mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method
return GetThrowsValue(ClassData(method), annotation_set);
}
-mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetParameterAnnotations(ArtMethod* method) {
const DexFile* dex_file = method->GetDexFile();
const DexFile::ParameterAnnotationsItem* parameter_annotations =
FindAnnotationsItemForMethod(method);
@@ -1136,9 +1141,9 @@ uint32_t GetNumberOfAnnotatedMethodParameters(ArtMethod* method) {
return set_ref_list->size_;
}
-mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
- uint32_t parameter_idx,
- Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForMethodParameter(ArtMethod* method,
+ uint32_t parameter_idx,
+ Handle<mirror::Class> annotation_class) {
const DexFile* dex_file = method->GetDexFile();
const DexFile::ParameterAnnotationsItem* parameter_annotations =
FindAnnotationsItemForMethod(method);
@@ -1307,8 +1312,8 @@ uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
return access_flags;
}
-mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
- Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
+ Handle<mirror::Class> annotation_class) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
@@ -1320,13 +1325,13 @@ mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
annotation_class);
}
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror::Class> klass) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
return ProcessAnnotationSet(data, annotation_set, DexFile::kDexVisibilityRuntime);
}
-mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) {
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
@@ -1345,7 +1350,7 @@ mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> kla
if (class_array_class == nullptr) {
return nullptr;
}
- mirror::Object* obj =
+ ObjPtr<mirror::Object> obj =
GetAnnotationValue(data, annotation_item, "value", class_array_class,
DexFile::kDexAnnotationArray);
if (obj == nullptr) {
@@ -1354,7 +1359,7 @@ mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> kla
return obj->AsObjectArray<mirror::Class>();
}
-mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) {
+ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
@@ -1366,17 +1371,19 @@ mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) {
if (annotation_item == nullptr) {
return nullptr;
}
- mirror::Object* obj = GetAnnotationValue(data, annotation_item, "value",
- ScopedNullHandle<mirror::Class>(),
- DexFile::kDexAnnotationType);
+ ObjPtr<mirror::Object> obj = GetAnnotationValue(data,
+ annotation_item,
+ "value",
+ ScopedNullHandle<mirror::Class>(),
+ DexFile::kDexAnnotationType);
if (obj == nullptr) {
return nullptr;
}
return obj->AsClass();
}
-mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) {
- mirror::Class* declaring_class = GetDeclaringClass(klass);
+ObjPtr<mirror::Class> GetEnclosingClass(Handle<mirror::Class> klass) {
+ ObjPtr<mirror::Class> declaring_class = GetDeclaringClass(klass);
if (declaring_class != nullptr) {
return declaring_class;
}
@@ -1420,7 +1427,7 @@ mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) {
return method->GetDeclaringClass();
}
-mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) {
+ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
@@ -1438,7 +1445,7 @@ mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) {
DexFile::kDexAnnotationMethod);
}
-bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) {
+bool GetInnerClass(Handle<mirror::Class> klass, ObjPtr<mirror::String>* name) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
diff --git a/runtime/dex/dex_file_annotations.h b/runtime/dex/dex_file_annotations.h
index 4bb0d75a57..9645a7febd 100644
--- a/runtime/dex/dex_file_annotations.h
+++ b/runtime/dex/dex_file_annotations.h
@@ -22,6 +22,7 @@
#include "handle.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array.h"
+#include "obj_ptr.h"
namespace art {
@@ -35,9 +36,10 @@ class ClassLinker;
namespace annotations {
// Field annotations.
-mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field,
+ Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field)
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -45,21 +47,22 @@ bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_
REQUIRES_SHARED(Locks::mutator_lock_);
// Method annotations.
-mirror::Object* GetAnnotationDefaultValue(ArtMethod* method)
+ObjPtr<mirror::Object> GetAnnotationDefaultValue(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForMethod(ArtMethod* method,
+ Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetExceptionTypesForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetParameterAnnotations(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetNumberOfAnnotatedMethodParameters(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
- uint32_t parameter_idx,
- Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForMethodParameter(ArtMethod* method,
+ uint32_t parameter_idx,
+ Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
bool GetParametersMetadataForMethod(ArtMethod* method,
MutableHandle<mirror::ObjectArray<mirror::String>>* names,
@@ -85,20 +88,20 @@ uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
uint32_t method_index);
// Class annotations.
-mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
+ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::Class> GetEnclosingClass(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass)
+ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
-bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name)
+bool GetInnerClass(Handle<mirror::Class> klass, ObjPtr<mirror::String>* name)
REQUIRES_SHARED(Locks::mutator_lock_);
bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags)
REQUIRES_SHARED(Locks::mutator_lock_);
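
The sweep from raw mirror::* return types to ObjPtr<> is mechanical but meaningful: ObjPtr is a value-type wrapper that, in debug builds, can flag a reference held across a thread state change where a raw pointer would silently go stale. A call-site sketch (DoSomethingWith is hypothetical):

    // Inside code that already holds the mutator lock:
    ObjPtr<mirror::Object> value = annotations::GetAnnotationDefaultValue(method);
    if (value != nullptr) {            // ObjPtr compares against nullptr directly
      DoSomethingWith(value.Ptr());    // decay to a raw pointer only at the boundary
    }
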
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index d4e7492f00..f6b1c73230 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -47,7 +47,6 @@ namespace art {
inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
const MethodInfo& method_info,
const InlineInfo& inline_info,
- const InlineInfoEncoding& encoding,
uint8_t inlining_depth)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!outer_method->IsObsolete());
@@ -57,12 +56,12 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
// suspended while executing it.
ScopedAssertNoThreadSuspension sants(__FUNCTION__);
- if (inline_info.EncodesArtMethodAtDepth(encoding, inlining_depth)) {
- return inline_info.GetArtMethodAtDepth(encoding, inlining_depth);
+ if (inline_info.EncodesArtMethodAtDepth(inlining_depth)) {
+ return inline_info.GetArtMethodAtDepth(inlining_depth);
}
- uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, inlining_depth);
- if (inline_info.GetDexPcAtDepth(encoding, inlining_depth) == static_cast<uint32_t>(-1)) {
+ uint32_t method_index = inline_info.GetMethodIndexAtDepth(method_info, inlining_depth);
+ if (inline_info.GetDexPcAtDepth(inlining_depth) == static_cast<uint32_t>(-1)) {
// "charAt" special case. It is the only non-leaf method we inline across dex files.
ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
DCHECK_EQ(inlined_method->GetDexMethodIndex(), method_index);
@@ -73,9 +72,9 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ArtMethod* method = outer_method;
for (uint32_t depth = 0, end = inlining_depth + 1u; depth != end; ++depth) {
- DCHECK(!inline_info.EncodesArtMethodAtDepth(encoding, depth));
- DCHECK_NE(inline_info.GetDexPcAtDepth(encoding, depth), static_cast<uint32_t>(-1));
- method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, depth);
+ DCHECK(!inline_info.EncodesArtMethodAtDepth(depth));
+ DCHECK_NE(inline_info.GetDexPcAtDepth(depth), static_cast<uint32_t>(-1));
+ method_index = inline_info.GetMethodIndexAtDepth(method_info, depth);
ArtMethod* inlined_method = class_linker->LookupResolvedMethod(method_index,
method->GetDexCache(),
method->GetClassLoader());
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 7fc8db375b..7f9b385a0a 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -180,10 +180,10 @@ static inline std::pair<ArtMethod*, uintptr_t> DoGetCalleeSaveMethodOuterCallerA
ArtMethod** sp, CalleeSaveType type) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
- const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
+ const size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
auto** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
- const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
+ const size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type);
uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
(reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
ArtMethod* outer_method = *caller_sp;
@@ -201,18 +201,16 @@ static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method,
DCHECK(current_code != nullptr);
DCHECK(current_code->IsOptimized());
uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
- CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+ CodeInfo code_info(current_code);
MethodInfo method_info = current_code->GetOptimizedMethodInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+ if (stack_map.HasInlineInfo()) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
caller = GetResolvedMethod(outer_method,
method_info,
inline_info,
- encoding.inline_info.encoding,
- inline_info.GetDepth(encoding.inline_info.encoding) - 1);
+ inline_info.GetDepth() - 1);
}
}
if (kIsDebugBuild && do_caller_check) {
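
The stack map rewrite this hunk depends on removes the separately threaded CodeInfoEncoding: decoding happens once in the CodeInfo constructor, and every query drops its encoding argument. A side-by-side sketch using the names from this diff:

    // Old API:
    //   CodeInfo code_info = current_code->GetOptimizedCodeInfo();
    //   CodeInfoEncoding encoding = code_info.ExtractEncoding();
    //   StackMap map = code_info.GetStackMapForNativePcOffset(pc_offset, encoding);
    //   InlineInfo ii = code_info.GetInlineInfoOf(map, encoding);
    // New API:
    CodeInfo code_info(current_code);  // decodes the method header once
    StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
    if (map.HasInlineInfo()) {
      InlineInfo inline_info = code_info.GetInlineInfoOf(map);
      uint32_t depth = inline_info.GetDepth();  // no encoding parameter anywhere
    }
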
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index ef27ca325c..6f1bbaa093 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -21,16 +21,17 @@
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/mutex.h"
+#include "quick/quick_method_frame_info.h"
#include "thread-inl.h"
// Specific frame size code is in architecture-specific files. We include this to compile-time
// specialize the code.
-#include "arch/arm/quick_method_frame_info_arm.h"
-#include "arch/arm64/quick_method_frame_info_arm64.h"
-#include "arch/mips/quick_method_frame_info_mips.h"
-#include "arch/mips64/quick_method_frame_info_mips64.h"
-#include "arch/x86/quick_method_frame_info_x86.h"
-#include "arch/x86_64/quick_method_frame_info_x86_64.h"
+#include "arch/arm/callee_save_frame_arm.h"
+#include "arch/arm64/callee_save_frame_arm64.h"
+#include "arch/mips/callee_save_frame_mips.h"
+#include "arch/mips64/callee_save_frame_mips64.h"
+#include "arch/x86/callee_save_frame_x86.h"
+#include "arch/x86_64/callee_save_frame_x86_64.h"
namespace art {
class ArtMethod;
@@ -67,57 +68,28 @@ class ScopedQuickEntrypointChecks {
bool exit_check_;
};
-static constexpr size_t GetCalleeSaveFrameSize(InstructionSet isa, CalleeSaveType type) {
- switch (isa) {
- case InstructionSet::kArm:
- case InstructionSet::kThumb2:
- return arm::ArmCalleeSaveFrameSize(type);
- case InstructionSet::kArm64:
- return arm64::Arm64CalleeSaveFrameSize(type);
- case InstructionSet::kMips:
- return mips::MipsCalleeSaveFrameSize(type);
- case InstructionSet::kMips64:
- return mips64::Mips64CalleeSaveFrameSize(type);
- case InstructionSet::kX86:
- return x86::X86CalleeSaveFrameSize(type);
- case InstructionSet::kX86_64:
- return x86_64::X86_64CalleeSaveFrameSize(type);
- case InstructionSet::kNone:
- LOG(FATAL) << "kNone has no frame size";
- UNREACHABLE();
- }
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
-}
-
-// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
-static constexpr PointerSize GetConstExprPointerSize(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm:
- case InstructionSet::kThumb2:
- return kArmPointerSize;
- case InstructionSet::kArm64:
- return kArm64PointerSize;
- case InstructionSet::kMips:
- return kMipsPointerSize;
- case InstructionSet::kMips64:
- return kMips64PointerSize;
- case InstructionSet::kX86:
- return kX86PointerSize;
- case InstructionSet::kX86_64:
- return kX86_64PointerSize;
- case InstructionSet::kNone:
- LOG(FATAL) << "kNone has no pointer size";
- UNREACHABLE();
- }
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
-}
-
-// Note: this specialized statement is sanity-checked in the quick-trampoline gtest.
-static constexpr size_t GetCalleeSaveReturnPcOffset(InstructionSet isa, CalleeSaveType type) {
- return GetCalleeSaveFrameSize(isa, type) - static_cast<size_t>(GetConstExprPointerSize(isa));
-}
+namespace detail_ {
+
+template <InstructionSet>
+struct CSFSelector; // No definition for unspecialized callee save frame selector.
+
+// Note: kThumb2 is never the kRuntimeISA.
+template <>
+struct CSFSelector<InstructionSet::kArm> { using type = arm::ArmCalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kArm64> { using type = arm64::Arm64CalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kMips> { using type = mips::MipsCalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kMips64> { using type = mips64::Mips64CalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kX86> { using type = x86::X86CalleeSaveFrame; };
+template <>
+struct CSFSelector<InstructionSet::kX86_64> { using type = x86_64::X86_64CalleeSaveFrame; };
+
+} // namespace detail_
+
+using RuntimeCalleeSaveFrame = detail_::CSFSelector<kRuntimeISA>::type;
} // namespace art
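
detail_::CSFSelector replaces the old switch over kRuntimeISA: instead of constexpr functions with LOG(FATAL) fallbacks, an undefined primary template plus one specialization per ISA turns an unsupported ISA into a compile error. A self-contained sketch of the pattern (Isa, FrameA, and FrameB are stand-ins, not ART types):

    #include <cstddef>

    enum class Isa { kA, kB };

    struct FrameA { static constexpr size_t kFrameSize = 64; };
    struct FrameB { static constexpr size_t kFrameSize = 96; };

    template <Isa>
    struct Selector;  // No definition: selecting an unsupported ISA fails to compile.
    template <> struct Selector<Isa::kA> { using type = FrameA; };
    template <> struct Selector<Isa::kB> { using type = FrameB; };

    constexpr Isa kCurrentIsa = Isa::kA;  // stand-in for kRuntimeISA
    using CurrentFrame = Selector<kCurrentIsa>::type;
    static_assert(CurrentFrame::kFrameSize == 64, "resolved entirely at compile time");
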
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 0a186f4dc5..ff85f477ec 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -26,6 +26,7 @@
#include "dex/dex_instruction-inl.h"
#include "dex/method_reference.h"
#include "entrypoints/entrypoint_utils-inl.h"
+#include "entrypoints/quick/callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "imt_conflict_table.h"
@@ -61,7 +62,16 @@ class QuickArgumentVisitor {
static constexpr size_t kBytesStackArgLocation = 4;
// Frame size in bytes of a callee-save frame for RefsAndArgs.
static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_FrameSize =
- GetCalleeSaveFrameSize(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs);
+ RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
+ // Offset of first GPR arg.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
+ RuntimeCalleeSaveFrame::GetGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
+ // Offset of first FPR arg.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
+ RuntimeCalleeSaveFrame::GetFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
+ // Offset of return address.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset =
+ RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs);
#if defined(__arm__)
// The callee save frame is pointed to by SP.
// | argN | |
@@ -89,12 +99,6 @@ class QuickArgumentVisitor {
static constexpr size_t kNumQuickGprArgs = 3;
static constexpr size_t kNumQuickFprArgs = 16;
static constexpr bool kGprFprLockstep = false;
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
- arm::ArmCalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs); // Offset of first FPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
- arm::ArmCalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs); // Offset of first GPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
- arm::ArmCalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs); // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -127,15 +131,6 @@ class QuickArgumentVisitor {
static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
static constexpr bool kGprFprLockstep = false;
- // Offset of first FPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
- arm64::Arm64CalleeSaveFpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
- // Offset of first GPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
- arm64::Arm64CalleeSaveGpr1Offset(CalleeSaveType::kSaveRefsAndArgs);
- // Offset of return address.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
- arm64::Arm64CalleeSaveLrOffset(CalleeSaveType::kSaveRefsAndArgs);
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -179,9 +174,6 @@ class QuickArgumentVisitor {
// passed only in even numbered registers and each
// double occupies two registers.
static constexpr bool kGprFprLockstep = false;
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 8; // Offset of first FPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 56; // Offset of first GPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 108; // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -223,9 +215,6 @@ class QuickArgumentVisitor {
static constexpr size_t kNumQuickFprArgs = 7; // 7 arguments passed in FPRs.
static constexpr bool kGprFprLockstep = true;
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 24; // Offset of first FPR arg (F13).
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80; // Offset of first GPR arg (A1).
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 200; // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -256,9 +245,6 @@ class QuickArgumentVisitor {
static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 4; // 4 arguments passed in FPRs.
static constexpr bool kGprFprLockstep = false;
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 4; // Offset of first FPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 4 + 4*8; // Offset of first GPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 28 + 4*8; // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -298,9 +284,6 @@ class QuickArgumentVisitor {
static constexpr size_t kNumQuickGprArgs = 5; // 5 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
static constexpr bool kGprFprLockstep = false;
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 80 + 4*8; // Offset of first GPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 168 + 4*8; // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
switch (gpr_index) {
case 0: return (4 * GetBytesPerGprSpillLocation(kRuntimeISA));
@@ -347,8 +330,8 @@ class QuickArgumentVisitor {
static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
- const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA,
- CalleeSaveType::kSaveRefsAndArgs);
+ constexpr size_t callee_frame_size =
+ RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
@@ -356,16 +339,14 @@ class QuickArgumentVisitor {
uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
if (current_code->IsOptimized()) {
- CodeInfo code_info = current_code->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
+ CodeInfo code_info(current_code);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding,
- inline_info.GetDepth(encoding.inline_info.encoding)-1);
+ if (stack_map.HasInlineInfo()) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+        return inline_info.GetDexPcAtDepth(inline_info.GetDepth() - 1);
} else {
- return stack_map.GetDexPc(encoding.stack_map.encoding);
+ return stack_map.GetDexPc();
}
} else {
return current_code->ToDexPc(*caller_sp, outer_pc);
@@ -375,8 +356,8 @@ class QuickArgumentVisitor {
static bool GetInvokeType(ArtMethod** sp, InvokeType* invoke_type, uint32_t* dex_method_index)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
- const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA,
- CalleeSaveType::kSaveRefsAndArgs);
+ constexpr size_t callee_frame_size =
+ RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
uintptr_t outer_pc = QuickArgumentVisitor::GetCallingPc(sp);
@@ -385,13 +366,12 @@ class QuickArgumentVisitor {
return false;
}
uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
- CodeInfo code_info = current_code->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(current_code);
MethodInfo method_info = current_code->GetOptimizedMethodInfo();
- InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding));
+ InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset));
if (invoke.IsValid()) {
- *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding));
- *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info);
+ *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType());
+ *dex_method_index = invoke.GetMethodIndex(method_info);
return true;
}
return false;
@@ -400,8 +380,9 @@ class QuickArgumentVisitor {
// For the given quick ref and args quick frame, return the caller's PC.
static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
- uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
- return *reinterpret_cast<uintptr_t*>(lr);
+  uint8_t* return_address_spill =
+      reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset;
+  return *reinterpret_cast<uintptr_t*>(return_address_spill);
}
QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
@@ -1159,8 +1140,8 @@ extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self,
CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
<< self->GetException()->Dump();
// Compute address of return PC and sanity check that it currently holds 0.
- size_t return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA,
- CalleeSaveType::kSaveEverything);
+ constexpr size_t return_pc_offset =
+ RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything);
uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
return_pc_offset);
CHECK_EQ(*return_pc, 0U);
@@ -1212,10 +1193,10 @@ static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutato
constexpr CalleeSaveType type = CalleeSaveType::kSaveRefsAndArgs;
CHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
- const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
+ constexpr size_t callee_frame_size = RuntimeCalleeSaveFrame::GetFrameSize(type);
auto** caller_sp = reinterpret_cast<ArtMethod**>(
reinterpret_cast<uintptr_t>(sp) + callee_frame_size);
- const size_t callee_return_pc_offset = GetCalleeSaveReturnPcOffset(kRuntimeISA, type);
+ constexpr size_t callee_return_pc_offset = RuntimeCalleeSaveFrame::GetReturnPcOffset(type);
uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(
(reinterpret_cast<uint8_t*>(sp) + callee_return_pc_offset));
ArtMethod* outer_method = *caller_sp;
@@ -1230,12 +1211,11 @@ static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutato
CHECK(current_code != nullptr);
CHECK(current_code->IsOptimized());
uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
- CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+ CodeInfo code_info(current_code);
MethodInfo method_info = current_code->GetOptimizedMethodInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
CHECK(stack_map.IsValid());
- uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map.encoding);
+ uint32_t dex_pc = stack_map.GetDexPc();
// Log the outer method and its associated dex file and class table pointer which can be used
// to find out if the inlined methods were defined by other dex file(s) or class loader(s).
@@ -1249,20 +1229,17 @@ static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutato
LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(outer_method, dex_pc);
ArtMethod* caller = outer_method;
- if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- const InlineInfoEncoding& inline_info_encoding = encoding.inline_info.encoding;
- size_t depth = inline_info.GetDepth(inline_info_encoding);
+ if (stack_map.HasInlineInfo()) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ size_t depth = inline_info.GetDepth();
for (size_t d = 0; d < depth; ++d) {
const char* tag = "";
- dex_pc = inline_info.GetDexPcAtDepth(inline_info_encoding, d);
- if (inline_info.EncodesArtMethodAtDepth(inline_info_encoding, d)) {
+ dex_pc = inline_info.GetDexPcAtDepth(d);
+ if (inline_info.EncodesArtMethodAtDepth(d)) {
tag = "encoded ";
- caller = inline_info.GetArtMethodAtDepth(inline_info_encoding, d);
+ caller = inline_info.GetArtMethodAtDepth(d);
} else {
- uint32_t method_index = inline_info.GetMethodIndexAtDepth(inline_info_encoding,
- method_info,
- d);
+ uint32_t method_index = inline_info.GetMethodIndexAtDepth(method_info, d);
if (dex_pc == static_cast<uint32_t>(-1)) {
tag = "special ";
CHECK_EQ(d + 1u, depth);
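Note: the encoding-free InlineInfo API used in this hunk reduces the old three-object dance (CodeInfo, CodeInfoEncoding, per-field encodings) to direct accessors. A condensed sketch of the inline-frame walk, using only names from this diff (diagnostics omitted):

    CodeInfo code_info(current_code);
    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
    if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
      InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
      for (size_t d = 0, depth = inline_info.GetDepth(); d != depth; ++d) {
        uint32_t dex_pc = inline_info.GetDexPcAtDepth(d);
        if (inline_info.EncodesArtMethodAtDepth(d)) {
          ArtMethod* m = inline_info.GetArtMethodAtDepth(d);  // direct pointer
          // ... use m and dex_pc ...
        } else {
          // Method index; resolving it against the dex cache is caller-specific.
          uint32_t method_index = inline_info.GetMethodIndexAtDepth(method_info, d);
          // ... use method_index and dex_pc ...
        }
      }
    }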
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index 77b3132bdc..89694e351a 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -54,15 +54,6 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
return save_method;
}
- static void CheckFrameSize(InstructionSet isa, CalleeSaveType type, uint32_t save_size)
- NO_THREAD_SAFETY_ANALYSIS {
- ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
- QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(save_method);
- EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
- << type << " core spills=" << std::hex << frame_info.CoreSpillMask() << " fp spills="
- << frame_info.FpSpillMask() << std::dec << " ISA " << isa;
- }
-
static void CheckPCOffset(InstructionSet isa, CalleeSaveType type, size_t pc_offset)
NO_THREAD_SAFETY_ANALYSIS {
ArtMethod* save_method = CreateCalleeSaveMethod(isa, type);
@@ -74,79 +65,36 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
}
};
-// Note: these tests are all runtime tests. They let the Runtime create the corresponding ArtMethod
-// and check against it. Technically we know and expect certain values, but the Runtime code is
-// not constexpr, so we cannot make this compile-time checks (and I want the Runtime code tested).
-
-// This test ensures that kQuickCalleeSaveFrame_RefAndArgs_FrameSize is correct.
-TEST_F(QuickTrampolineEntrypointsTest, FrameSize) {
- // We have to use a define here as the callee_save_frame.h functions are constexpr.
-#define CHECK_FRAME_SIZE(isa) \
- CheckFrameSize(isa, \
- CalleeSaveType::kSaveRefsAndArgs, \
- GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveRefsAndArgs)); \
- CheckFrameSize(isa, \
- CalleeSaveType::kSaveRefsOnly, \
- GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveRefsOnly)); \
- CheckFrameSize(isa, \
- CalleeSaveType::kSaveAllCalleeSaves, \
- GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveAllCalleeSaves)); \
- CheckFrameSize(isa, \
- CalleeSaveType::kSaveEverything, \
- GetCalleeSaveFrameSize(isa, CalleeSaveType::kSaveEverything)); \
- CheckFrameSize(isa, \
- CalleeSaveType::kSaveEverythingForClinit, \
- GetCalleeSaveFrameSize(isa, \
- CalleeSaveType::kSaveEverythingForClinit)); \
- CheckFrameSize(isa, \
- CalleeSaveType::kSaveEverythingForSuspendCheck, \
- GetCalleeSaveFrameSize( \
- isa, CalleeSaveType::kSaveEverythingForSuspendCheck))
-
- CHECK_FRAME_SIZE(InstructionSet::kArm);
- CHECK_FRAME_SIZE(InstructionSet::kArm64);
- CHECK_FRAME_SIZE(InstructionSet::kMips);
- CHECK_FRAME_SIZE(InstructionSet::kMips64);
- CHECK_FRAME_SIZE(InstructionSet::kX86);
- CHECK_FRAME_SIZE(InstructionSet::kX86_64);
-}
-
-// This test ensures that GetConstExprPointerSize is correct with respect to
-// GetInstructionSetPointerSize.
-TEST_F(QuickTrampolineEntrypointsTest, PointerSize) {
- EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kArm),
- GetConstExprPointerSize(InstructionSet::kArm));
- EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kArm64),
- GetConstExprPointerSize(InstructionSet::kArm64));
- EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kMips),
- GetConstExprPointerSize(InstructionSet::kMips));
- EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kMips64),
- GetConstExprPointerSize(InstructionSet::kMips64));
- EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kX86),
- GetConstExprPointerSize(InstructionSet::kX86));
- EXPECT_EQ(GetInstructionSetPointerSize(InstructionSet::kX86_64),
- GetConstExprPointerSize(InstructionSet::kX86_64));
-}
-
// This test ensures that the constexpr specialization of the return PC offset computation in
// GetCalleeSavePCOffset is correct.
TEST_F(QuickTrampolineEntrypointsTest, ReturnPC) {
  // Ensure that the computation in callee_save_frame.h is correct.
// Note: we can only check against the kRuntimeISA, because the ArtMethod computation uses
// sizeof(void*), which is wrong when the target bitwidth is not the same as the host's.
- CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveRefsAndArgs));
- CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveRefsOnly,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveRefsOnly));
- CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveAllCalleeSaves,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveAllCalleeSaves));
- CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveEverything,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveEverything));
- CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveEverythingForClinit,
- GetCalleeSaveReturnPcOffset(kRuntimeISA, CalleeSaveType::kSaveEverythingForClinit));
- CheckPCOffset(kRuntimeISA, CalleeSaveType::kSaveEverythingForSuspendCheck,
- GetCalleeSaveReturnPcOffset(kRuntimeISA,
- CalleeSaveType::kSaveEverythingForSuspendCheck));
+ CheckPCOffset(
+ kRuntimeISA,
+ CalleeSaveType::kSaveRefsAndArgs,
+ RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsAndArgs));
+ CheckPCOffset(
+ kRuntimeISA,
+ CalleeSaveType::kSaveRefsOnly,
+ RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveRefsOnly));
+ CheckPCOffset(
+ kRuntimeISA,
+ CalleeSaveType::kSaveAllCalleeSaves,
+ RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveAllCalleeSaves));
+ CheckPCOffset(
+ kRuntimeISA,
+ CalleeSaveType::kSaveEverything,
+ RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything));
+ CheckPCOffset(
+ kRuntimeISA,
+ CalleeSaveType::kSaveEverythingForClinit,
+ RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverythingForClinit));
+ CheckPCOffset(
+ kRuntimeISA,
+ CalleeSaveType::kSaveEverythingForSuspendCheck,
+ RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverythingForSuspendCheck));
}
} // namespace art
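Note: the surviving ReturnPC test still pins the invariant the removed constexpr helper encoded directly: the return PC lives in the topmost frame slot. A hedged compile-time restatement (kRuntimePointerSize is ART's host pointer-size constant):

    constexpr size_t kFrameSize =
        RuntimeCalleeSaveFrame::GetFrameSize(CalleeSaveType::kSaveEverything);
    constexpr size_t kReturnPcOffset =
        RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything);
    // Mirrors the deleted GetCalleeSaveReturnPcOffset(): frame size minus one slot.
    static_assert(kReturnPcOffset == kFrameSize - static_cast<size_t>(kRuntimePointerSize),
                  "return PC is spilled in the last slot");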
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 46630dbeef..464c2b749f 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -90,16 +90,24 @@ DEFINE_CHECK_EQ(static_cast<size_t>(CARD_TABLE_CARD_SHIFT), (static_cast<size_t>
DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
#define LOCK_WORD_STATE_SHIFT 30
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
-#define LOCK_WORD_STATE_MASK 0xc0000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
+#define LOCK_WORD_STATE_MASK_SHIFTED 0xc0000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
#define LOCK_WORD_READ_BARRIER_STATE_SHIFT 28
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_READ_BARRIER_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kReadBarrierStateShift)))
#define LOCK_WORD_READ_BARRIER_STATE_MASK 0x10000000
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShifted)))
#define LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED 0xefffffff
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled)))
-#define LOCK_WORD_THIN_LOCK_COUNT_ONE 65536
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<int32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_THIN_LOCK_COUNT_SIZE 12
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SIZE), (static_cast<int32_t>(art::LockWord::kThinLockCountSize)))
+#define LOCK_WORD_THIN_LOCK_COUNT_SHIFT 16
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SHIFT), (static_cast<int32_t>(art::LockWord::kThinLockCountShift)))
+#define LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED 0xfff0000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockCountMaskShifted)))
+#define LOCK_WORD_THIN_LOCK_COUNT_ONE 0x10000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<uint32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED 0xffff
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockOwnerMaskShifted)))
#define LOCK_WORD_STATE_FORWARDING_ADDRESS 0x3
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddress)))
#define LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW 0x40000000
@@ -110,6 +118,8 @@ DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT),
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShifted)))
#define LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED 0xcfffffff
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShiftedToggled)))
+#define LOCK_WORD_GC_STATE_SIZE 2
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SIZE), (static_cast<int32_t>(art::LockWord::kGCStateSize)))
#define LOCK_WORD_GC_STATE_SHIFT 28
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kGCStateShift)))
#define LOCK_WORD_MARK_BIT_SHIFT 29
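Note: taken together, the new defines fix the 32-bit thin-lock word layout as owner (16 bits) | count (12) | GC state (2) | lock state (2). A quick recomputation of the shifted masks from the field sizes, matching the generated values above:

    constexpr uint32_t kOwnerSize = 16, kCountSize = 12, kGcStateSize = 2, kStateSize = 2;
    constexpr uint32_t kCountShift   = kOwnerSize;                    // 16
    constexpr uint32_t kGcStateShift = kOwnerSize + kCountSize;       // 28
    constexpr uint32_t kStateShift   = kGcStateShift + kGcStateSize;  // 30
    static_assert(((1u << kOwnerSize) - 1u) == 0xffffu, "owner mask");
    static_assert((((1u << kCountSize) - 1u) << kCountShift) == 0x0fff0000u, "count mask");
    static_assert((1u << kCountShift) == 0x10000u, "count one");
    static_assert((((1u << kStateSize) - 1u) << kStateShift) == 0xc0000000u, "state mask");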
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 791ebf09b7..0e429a63f6 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -510,7 +510,7 @@ void UnstartedRuntime::UnstartedClassIsAnonymousClass(
result->SetZ(false);
return;
}
- mirror::String* class_name = nullptr;
+ ObjPtr<mirror::String> class_name = nullptr;
if (!annotations::GetInnerClass(klass, &class_name)) {
result->SetZ(false);
return;
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 86e69f49a4..5d4b9e8cc9 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -473,11 +473,10 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
return false;
}
- CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(osr_method);
// Find stack map starting at the target dex_pc.
- StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
+ StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset);
if (!stack_map.IsValid()) {
// There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
// hope that the next branch has one.
@@ -494,7 +493,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
// We found a stack map, now fill the frame with dex register values from the interpreter's
// shadow frame.
DexRegisterMap vreg_map =
- code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+ code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
frame_size = osr_method->GetFrameSizeInBytes();
@@ -516,7 +515,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
} else {
for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
DexRegisterLocation::Kind location =
- vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+ vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
if (location == DexRegisterLocation::Kind::kNone) {
// Dex register is dead or uninitialized.
continue;
@@ -532,15 +531,14 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
int32_t vreg_value = shadow_frame->GetVReg(vreg);
int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
number_of_vregs,
- code_info,
- encoding);
+ code_info);
DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
DCHECK_GT(slot_offset, 0);
(reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
}
}
- native_pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) +
+ native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
osr_method->GetEntryPoint();
VLOG(jit) << "Jumping to "
<< method_name
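Note: the OSR path now follows the same two-step pattern as every other caller in this change: build a CodeInfo straight from the compiled method, then query it without threading a CodeInfoEncoding through each call. A minimal sketch of the lookup (error handling and frame filling elided):

    CodeInfo code_info(osr_method);  // replaces GetOptimizedCodeInfo() + ExtractEncoding()
    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset);
    if (stack_map.IsValid()) {
      DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
      uint32_t native_pc_offset = stack_map.GetNativePcOffset(kRuntimeISA);
      // ... copy vregs from the shadow frame, then resume at
      //     osr_method->GetEntryPoint() + native_pc_offset.
    }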
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 09d856f89b..ce7fe34b22 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -75,16 +75,18 @@ class LockWord {
// Remaining bits are the recursive lock count.
kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize - kReadBarrierStateSize -
kMarkBitStateSize,
- // Thin lock bits. Owner in lowest bits.
+ // Thin lock bits. Owner in lowest bits.
kThinLockOwnerShift = 0,
kThinLockOwnerMask = (1 << kThinLockOwnerSize) - 1,
+ kThinLockOwnerMaskShifted = kThinLockOwnerMask << kThinLockOwnerShift,
kThinLockMaxOwner = kThinLockOwnerMask,
// Count in higher bits.
kThinLockCountShift = kThinLockOwnerSize + kThinLockOwnerShift,
kThinLockCountMask = (1 << kThinLockCountSize) - 1,
kThinLockMaxCount = kThinLockCountMask,
kThinLockCountOne = 1 << kThinLockCountShift, // == 65536 (0x10000)
+ kThinLockCountMaskShifted = kThinLockCountMask << kThinLockCountShift,
// State in the highest bits.
kStateShift = kReadBarrierStateSize + kThinLockCountSize + kThinLockCountShift +
diff --git a/runtime/method_info.h b/runtime/method_info.h
index b00ddc660f..6f74678e42 100644
--- a/runtime/method_info.h
+++ b/runtime/method_info.h
@@ -21,7 +21,7 @@
#include "base/leb128.h"
#include "base/macros.h"
-#include "base/memory_region.h"
+#include "base/bit_memory_region.h"
namespace art {
@@ -35,8 +35,8 @@ class MethodInfo {
explicit MethodInfo(const uint8_t* ptr) {
if (ptr != nullptr) {
num_method_indices_ = DecodeUnsignedLeb128(&ptr);
- region_ = MemoryRegion(const_cast<uint8_t*>(ptr),
- num_method_indices_ * sizeof(MethodIndexType));
+ region_ = BitMemoryRegion(
+ MemoryRegion(const_cast<uint8_t*>(ptr), num_method_indices_ * sizeof(MethodIndexType)));
}
}
@@ -44,7 +44,7 @@ class MethodInfo {
MethodInfo(uint8_t* ptr, size_t num_method_indices) : num_method_indices_(num_method_indices) {
DCHECK(ptr != nullptr);
ptr = EncodeUnsignedLeb128(ptr, num_method_indices_);
- region_ = MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType));
+ region_ = BitMemoryRegion(MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType)));
}
static size_t ComputeSize(size_t num_method_indices) {
@@ -71,7 +71,7 @@ class MethodInfo {
private:
size_t num_method_indices_ = 0u;
- MemoryRegion region_;
+ BitMemoryRegion region_;
};
} // namespace art
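Note: MethodInfo keeps its byte layout (a ULEB128 count followed by fixed-width method indices); only the accessor type switches to BitMemoryRegion so reads share the bit-level primitives used by the reworked stack maps. A sketch of the size computation consistent with the constructors above (assuming UnsignedLeb128Size from base/leb128.h):

    // Layout: [ULEB128 num_method_indices][num_method_indices * sizeof(MethodIndexType)].
    static size_t ComputeSize(size_t num_method_indices) {
      return UnsignedLeb128Size(num_method_indices) +
             num_method_indices * sizeof(MethodIndexType);
    }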
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index d31e06cf36..44c819aaf7 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -1545,6 +1545,37 @@ MethodType* VarHandle::GetMethodTypeForAccessMode(Thread* self, AccessMode acces
return GetMethodTypeForAccessMode(self, this, access_mode);
}
+std::string VarHandle::PrettyDescriptorForAccessMode(AccessMode access_mode) {
+  // Effect MethodType::PrettyDescriptor() without first creating a MethodType.
+ std::ostringstream oss;
+ oss << '(';
+
+ AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode);
+ ObjPtr<Class> var_type = GetVarType();
+ ObjPtr<Class> ctypes[2] = { GetCoordinateType0(), GetCoordinateType1() };
+ const int32_t ptypes_count = GetNumberOfParameters(access_mode_template, ctypes[0], ctypes[1]);
+ int32_t ptypes_done = 0;
+ for (ObjPtr<Class> ctype : ctypes) {
+ if (!ctype.IsNull()) {
+ if (ptypes_done != 0) {
+ oss << ", ";
+ }
+      oss << ctype->PrettyDescriptor();
+ ptypes_done++;
+ }
+ }
+ while (ptypes_done != ptypes_count) {
+ if (ptypes_done != 0) {
+ oss << ", ";
+ }
+ oss << var_type->PrettyDescriptor();
+ ptypes_done++;
+ }
+ ObjPtr<Class> rtype = GetReturnType(access_mode_template, var_type);
+ oss << ')' << rtype->PrettyDescriptor();
+ return oss.str();
+}
+
bool VarHandle::Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
const InstructionOperands* const operands,
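Note: the new PrettyDescriptorForAccessMode lets diagnostics describe the MethodType an access mode expects without allocating one. A hypothetical call site (everything here except the VarHandle method is an assumption for illustration):

    std::string expected = var_handle->PrettyDescriptorForAccessMode(access_mode);
    LOG(ERROR) << "access mode expects method type " << expected;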
diff --git a/runtime/mirror/var_handle.h b/runtime/mirror/var_handle.h
index eb3704ee48..5186d43830 100644
--- a/runtime/mirror/var_handle.h
+++ b/runtime/mirror/var_handle.h
@@ -124,6 +124,11 @@ class MANAGED VarHandle : public Object {
MethodType* GetMethodTypeForAccessMode(Thread* self, AccessMode accessMode)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns a string representing the descriptor of the MethodType associated with
+ // this AccessMode.
+ std::string PrettyDescriptorForAccessMode(AccessMode access_mode)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
bool Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
const InstructionOperands* const operands,
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 68024cd1c2..9f595b1c29 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -648,7 +648,7 @@ static jobjectArray Class_getDeclaredAnnotations(JNIEnv* env, jobject javaThis)
// Return an empty array instead of a null pointer.
ObjPtr<mirror::Class> annotation_array_class =
soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
- mirror::ObjectArray<mirror::Object>* empty_array =
+ ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array =
mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
annotation_array_class.Ptr(),
0);
@@ -661,7 +661,7 @@ static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
- mirror::ObjectArray<mirror::Class>* classes = nullptr;
+ ObjPtr<mirror::ObjectArray<mirror::Class>> classes = nullptr;
if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) {
classes = annotations::GetDeclaredClasses(klass);
}
@@ -738,7 +738,7 @@ static jstring Class_getInnerClassName(JNIEnv* env, jobject javaThis) {
if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
return nullptr;
}
- mirror::String* class_name = nullptr;
+ ObjPtr<mirror::String> class_name = nullptr;
if (!annotations::GetInnerClass(klass, &class_name)) {
return nullptr;
}
@@ -763,7 +763,7 @@ static jboolean Class_isAnonymousClass(JNIEnv* env, jobject javaThis) {
if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
return false;
}
- mirror::String* class_name = nullptr;
+ ObjPtr<mirror::String> class_name = nullptr;
if (!annotations::GetInnerClass(klass, &class_name)) {
return false;
}
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index a5d6c9704d..13a8d28267 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -38,7 +38,7 @@ static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMetho
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod)
->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- mirror::ObjectArray<mirror::Class>* result_array =
+ ObjPtr<mirror::ObjectArray<mirror::Class>> result_array =
annotations::GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
// Return an empty array instead of a null pointer.
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 2503b3cb44..52e04941c6 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -62,7 +62,7 @@ static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
klass->GetProxyThrows()->Get(throws_index);
return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self()));
} else {
- mirror::ObjectArray<mirror::Class>* result_array =
+ ObjPtr<mirror::ObjectArray<mirror::Class>> result_array =
annotations::GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
// Return an empty array instead of a null pointer
diff --git a/runtime/oat.h b/runtime/oat.h
index 6c683f1541..7b8f71a3f3 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: compiler support const-method-handle
- static constexpr uint8_t kOatVersion[] = { '1', '4', '3', '\0' };
+ // Last oat version changed reason: Refactor stackmap encoding.
+ static constexpr uint8_t kOatVersion[] = { '1', '4', '4', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 98238e5600..aed6bc57b3 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -19,6 +19,7 @@
#include "art_method.h"
#include "dex/dex_file_types.h"
#include "scoped_thread_state_change-inl.h"
+#include "stack_map.h"
#include "thread.h"
namespace art {
@@ -42,11 +43,10 @@ uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
const void* entry_point = GetEntryPoint();
uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
if (IsOptimized()) {
- CodeInfo code_info = GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
+ CodeInfo code_info(this);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset);
if (stack_map.IsValid()) {
- return stack_map.GetDexPc(encoding.stack_map.encoding);
+ return stack_map.GetDexPc();
}
} else {
DCHECK(method->IsNative());
@@ -71,18 +71,17 @@ uintptr_t OatQuickMethodHeader::ToNativeQuickPc(ArtMethod* method,
DCHECK(!method->IsNative());
DCHECK(IsOptimized());
// Search for the dex-to-pc mapping in stack maps.
- CodeInfo code_info = GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(this);
// All stack maps are stored in the same CodeItem section, safepoint stack
// maps first, then catch stack maps. We use `is_for_catch_handler` to select
// the order of iteration.
StackMap stack_map =
- LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
- : code_info.GetStackMapForDexPc(dex_pc, encoding);
+ LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc)
+ : code_info.GetStackMapForDexPc(dex_pc);
if (stack_map.IsValid()) {
return reinterpret_cast<uintptr_t>(entry_point) +
- stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA);
+ stack_map.GetNativePcOffset(kRuntimeISA);
}
if (abort_on_failure) {
ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index f0966b7bfa..d6762d6bc6 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -22,7 +22,6 @@
#include "base/utils.h"
#include "method_info.h"
#include "quick/quick_method_frame_info.h"
-#include "stack_map.h"
namespace art {
@@ -75,10 +74,6 @@ class PACKED(4) OatQuickMethodHeader {
return code_ - vmap_table_offset_;
}
- CodeInfo GetOptimizedCodeInfo() const {
- return CodeInfo(GetOptimizedCodeInfoPtr());
- }
-
const void* GetOptimizedMethodInfoPtr() const {
DCHECK(IsOptimized());
return reinterpret_cast<const void*>(code_ - method_info_offset_);
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 077aa33925..c555fca23c 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -224,30 +224,29 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
CodeItemDataAccessor accessor(handler_method_->DexInstructionData());
const size_t number_of_vregs = accessor.RegistersSize();
- CodeInfo code_info = handler_method_header_->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(handler_method_header_);
// Find stack map of the catch block.
- StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc(), encoding);
+ StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc());
DCHECK(catch_stack_map.IsValid());
DexRegisterMap catch_vreg_map =
- code_info.GetDexRegisterMapOf(catch_stack_map, encoding, number_of_vregs);
+ code_info.GetDexRegisterMapOf(catch_stack_map, number_of_vregs);
if (!catch_vreg_map.IsValid()) {
return;
}
// Find stack map of the throwing instruction.
StackMap throw_stack_map =
- code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset(), encoding);
+ code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset());
DCHECK(throw_stack_map.IsValid());
DexRegisterMap throw_vreg_map =
- code_info.GetDexRegisterMapOf(throw_stack_map, encoding, number_of_vregs);
+ code_info.GetDexRegisterMapOf(throw_stack_map, number_of_vregs);
DCHECK(throw_vreg_map.IsValid());
// Copy values between them.
for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
DexRegisterLocation::Kind catch_location =
- catch_vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+ catch_vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
if (catch_location == DexRegisterLocation::Kind::kNone) {
continue;
}
@@ -257,8 +256,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
uint32_t vreg_value;
VRegKind vreg_kind = ToVRegKind(throw_vreg_map.GetLocationKind(vreg,
number_of_vregs,
- code_info,
- encoding));
+ code_info));
bool get_vreg_success = stack_visitor->GetVReg(stack_visitor->GetMethod(),
vreg,
vreg_kind,
@@ -271,8 +269,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
// Copy value to the catch phi's stack slot.
int32_t slot_offset = catch_vreg_map.GetStackOffsetInBytes(vreg,
number_of_vregs,
- code_info,
- encoding);
+ code_info);
ArtMethod** frame_top = stack_visitor->GetCurrentQuickFrame();
uint8_t* slot_address = reinterpret_cast<uint8_t*>(frame_top) + slot_offset;
uint32_t* slot_ptr = reinterpret_cast<uint32_t*>(slot_address);
@@ -404,20 +401,18 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
const bool* updated_vregs)
REQUIRES_SHARED(Locks::mutator_lock_) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- CodeInfo code_info = method_header->GetOptimizedCodeInfo();
+ CodeInfo code_info(method_header);
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
CodeItemDataAccessor accessor(m->DexInstructionData());
const size_t number_of_vregs = accessor.RegistersSize();
- uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
- BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+ uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
DexRegisterMap vreg_map = IsInInlinedFrame()
? code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1,
- code_info.GetInlineInfoOf(stack_map, encoding),
- encoding,
+ code_info.GetInlineInfoOf(stack_map),
number_of_vregs)
- : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+ : code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
if (!vreg_map.IsValid()) {
return;
@@ -430,7 +425,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
DexRegisterLocation::Kind location =
- vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+ vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
static constexpr uint32_t kDeadValue = 0xEBADDE09;
uint32_t value = kDeadValue;
bool is_reference = false;
@@ -439,12 +434,11 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
case DexRegisterLocation::Kind::kInStack: {
const int32_t offset = vreg_map.GetStackOffsetInBytes(vreg,
number_of_vregs,
- code_info,
- encoding);
+ code_info);
const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
value = *reinterpret_cast<const uint32_t*>(addr);
uint32_t bit = (offset >> 2);
- if (bit < encoding.stack_mask.encoding.BitSize() && stack_mask.LoadBit(bit)) {
+ if (bit < code_info.GetNumberOfStackMaskBits() && stack_mask.LoadBit(bit)) {
is_reference = true;
}
break;
@@ -453,7 +447,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
case DexRegisterLocation::Kind::kInRegisterHigh:
case DexRegisterLocation::Kind::kInFpuRegister:
case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
- uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info, encoding);
+ uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info);
bool result = GetRegisterIfAccessible(reg, ToVRegKind(location), &value);
CHECK(result);
if (location == DexRegisterLocation::Kind::kInRegister) {
@@ -464,7 +458,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
break;
}
case DexRegisterLocation::Kind::kConstant: {
- value = vreg_map.GetConstant(vreg, number_of_vregs, code_info, encoding);
+ value = vreg_map.GetConstant(vreg, number_of_vregs, code_info);
if (value == 0) {
// Make it a reference for extra safety.
is_reference = true;
@@ -479,8 +473,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
<< "Unexpected location kind "
<< vreg_map.GetLocationInternalKind(vreg,
number_of_vregs,
- code_info,
- encoding);
+ code_info);
UNREACHABLE();
}
}
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 4584351a6a..374591eeb0 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -19,8 +19,10 @@
#include "runtime.h"
+#include "arch/instruction_set.h"
#include "art_method.h"
#include "base/callee_save_type.h"
+#include "entrypoints/quick/callee_save_frame.h"
#include "gc_root-inl.h"
#include "obj_ptr-inl.h"
@@ -38,21 +40,22 @@ inline mirror::Object* Runtime::GetClearedJniWeakGlobal() {
inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(ArtMethod* method) {
DCHECK(method != nullptr);
+ DCHECK_EQ(instruction_set_, kRuntimeISA);
// Cannot be imt-conflict-method or resolution-method.
DCHECK_NE(method, GetImtConflictMethod());
DCHECK_NE(method, GetResolutionMethod());
// Don't use GetCalleeSaveMethod(), some tests don't set all callee save methods.
if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveRefsAndArgs)) {
- return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+ return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
} else if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveAllCalleeSaves)) {
- return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveAllCalleeSaves);
+ return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveAllCalleeSaves);
} else if (method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveRefsOnly)) {
- return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsOnly);
+ return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsOnly);
} else {
DCHECK(method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveEverything) ||
method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveEverythingForClinit) ||
method == GetCalleeSaveMethodUnchecked(CalleeSaveType::kSaveEverythingForSuspendCheck));
- return GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveEverything);
+ return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveEverything);
}
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4142cb0c95..14027493d8 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -39,18 +39,12 @@
#include "android-base/strings.h"
#include "aot_class_linker.h"
-#include "arch/arm/quick_method_frame_info_arm.h"
#include "arch/arm/registers_arm.h"
-#include "arch/arm64/quick_method_frame_info_arm64.h"
#include "arch/arm64/registers_arm64.h"
#include "arch/instruction_set_features.h"
-#include "arch/mips/quick_method_frame_info_mips.h"
#include "arch/mips/registers_mips.h"
-#include "arch/mips64/quick_method_frame_info_mips64.h"
#include "arch/mips64/registers_mips64.h"
-#include "arch/x86/quick_method_frame_info_x86.h"
#include "arch/x86/registers_x86.h"
-#include "arch/x86_64/quick_method_frame_info_x86_64.h"
#include "arch/x86_64/registers_x86_64.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
@@ -2203,38 +2197,21 @@ void Runtime::BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint) {
void Runtime::SetInstructionSet(InstructionSet instruction_set) {
instruction_set_ = instruction_set;
- if ((instruction_set_ == InstructionSet::kThumb2) || (instruction_set_ == InstructionSet::kArm)) {
- for (int i = 0; i != kCalleeSaveSize; ++i) {
- CalleeSaveType type = static_cast<CalleeSaveType>(i);
- callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
- }
- } else if (instruction_set_ == InstructionSet::kMips) {
- for (int i = 0; i != kCalleeSaveSize; ++i) {
- CalleeSaveType type = static_cast<CalleeSaveType>(i);
- callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
- }
- } else if (instruction_set_ == InstructionSet::kMips64) {
- for (int i = 0; i != kCalleeSaveSize; ++i) {
- CalleeSaveType type = static_cast<CalleeSaveType>(i);
- callee_save_method_frame_infos_[i] = mips64::Mips64CalleeSaveMethodFrameInfo(type);
- }
- } else if (instruction_set_ == InstructionSet::kX86) {
- for (int i = 0; i != kCalleeSaveSize; ++i) {
- CalleeSaveType type = static_cast<CalleeSaveType>(i);
- callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
- }
- } else if (instruction_set_ == InstructionSet::kX86_64) {
- for (int i = 0; i != kCalleeSaveSize; ++i) {
- CalleeSaveType type = static_cast<CalleeSaveType>(i);
- callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
- }
- } else if (instruction_set_ == InstructionSet::kArm64) {
- for (int i = 0; i != kCalleeSaveSize; ++i) {
- CalleeSaveType type = static_cast<CalleeSaveType>(i);
- callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
- }
- } else {
- UNIMPLEMENTED(FATAL) << instruction_set_;
+ switch (instruction_set) {
+ case InstructionSet::kThumb2:
+      // kThumb2 is the same as kArm; use the canonical value.
+ instruction_set_ = InstructionSet::kArm;
+ break;
+ case InstructionSet::kArm:
+ case InstructionSet::kArm64:
+ case InstructionSet::kMips:
+ case InstructionSet::kMips64:
+ case InstructionSet::kX86:
+ case InstructionSet::kX86_64:
+ break;
+ default:
+ UNIMPLEMENTED(FATAL) << instruction_set_;
+ UNREACHABLE();
}
}
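Note: with the per-ISA frame tables gone from Runtime, SetInstructionSet only canonicalizes and validates; folding kThumb2 into kArm keeps the comparison against kRuntimeISA (see the DCHECK added in runtime-inl.h above) valid. For instance:

    runtime->SetInstructionSet(InstructionSet::kThumb2);
    // instruction_set_ is now InstructionSet::kArm; frame layout queries go
    // through RuntimeCalleeSaveFrame rather than the removed
    // callee_save_method_frame_infos_ table.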
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 953acbb948..10f72e7c5b 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -399,10 +399,6 @@ class Runtime {
ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_);
- QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
- return callee_save_method_frame_infos_[static_cast<size_t>(type)];
- }
-
QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -831,7 +827,6 @@ class Runtime {
GcRoot<mirror::Object> sentinel_;
InstructionSet instruction_set_;
- QuickMethodFrameInfo callee_save_method_frame_infos_[kCalleeSaveSize];
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 229238e0f7..7d1cb5cc4b 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -25,6 +25,7 @@
#include "base/hex_dump.h"
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils-inl.h"
+#include "entrypoints/quick/callee_save_frame.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
@@ -75,15 +76,14 @@ StackVisitor::StackVisitor(Thread* thread,
}
}
-static InlineInfo GetCurrentInlineInfo(const OatQuickMethodHeader* method_header,
+static InlineInfo GetCurrentInlineInfo(CodeInfo& code_info,
+ const OatQuickMethodHeader* method_header,
uintptr_t cur_quick_frame_pc)
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc);
- CodeInfo code_info = method_header->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(stack_map.IsValid());
- return code_info.GetInlineInfoOf(stack_map, encoding);
+ return code_info.GetInlineInfoOf(stack_map);
}
ArtMethod* StackVisitor::GetMethod() const {
@@ -92,16 +92,16 @@ ArtMethod* StackVisitor::GetMethod() const {
} else if (cur_quick_frame_ != nullptr) {
if (IsInInlinedFrame()) {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
- InlineInfo inline_info = GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(),
- cur_quick_frame_pc_);
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
+ CodeInfo code_info(method_header);
+ InlineInfo inline_info = GetCurrentInlineInfo(code_info,
+ method_header,
+ cur_quick_frame_pc_);
MethodInfo method_info = method_header->GetOptimizedMethodInfo();
DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
return GetResolvedMethod(*GetCurrentQuickFrame(),
method_info,
inline_info,
- encoding.inline_info.encoding,
depth_in_stack_map);
} else {
return *cur_quick_frame_;
@@ -115,11 +115,11 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
return cur_shadow_frame_->GetDexPC();
} else if (cur_quick_frame_ != nullptr) {
if (IsInInlinedFrame()) {
- size_t depth_in_stack_map = current_inlining_depth_ - 1;
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
- return GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(), cur_quick_frame_pc_).
- GetDexPcAtDepth(encoding.inline_info.encoding, depth_in_stack_map);
+ CodeInfo code_info(method_header);
+ size_t depth_in_stack_map = current_inlining_depth_ - 1;
+ return GetCurrentInlineInfo(code_info, method_header, cur_quick_frame_pc_).
+ GetDexPcAtDepth(depth_in_stack_map);
} else if (cur_oat_quick_method_header_ == nullptr) {
return dex::kDexNoIndex;
} else {
@@ -229,32 +229,29 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
uint16_t number_of_dex_registers = accessor.RegistersSize();
DCHECK_LT(vreg, number_of_dex_registers);
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- CodeInfo code_info = method_header->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(method_header);
uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(stack_map.IsValid());
size_t depth_in_stack_map = current_inlining_depth_ - 1;
DexRegisterMap dex_register_map = IsInInlinedFrame()
? code_info.GetDexRegisterMapAtDepth(depth_in_stack_map,
- code_info.GetInlineInfoOf(stack_map, encoding),
- encoding,
+ code_info.GetInlineInfoOf(stack_map),
number_of_dex_registers)
- : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+ : code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
if (!dex_register_map.IsValid()) {
return false;
}
DexRegisterLocation::Kind location_kind =
- dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info, encoding);
+ dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
switch (location_kind) {
case DexRegisterLocation::Kind::kInStack: {
const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg,
number_of_dex_registers,
- code_info,
- encoding);
+ code_info);
const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
*val = *reinterpret_cast<const uint32_t*>(addr);
return true;
@@ -264,11 +261,11 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
case DexRegisterLocation::Kind::kInFpuRegister:
case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
uint32_t reg =
- dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info, encoding);
+ dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info);
return GetRegisterIfAccessible(reg, kind, val);
}
case DexRegisterLocation::Kind::kConstant:
- *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info, encoding);
+ *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info);
return true;
case DexRegisterLocation::Kind::kNone:
return false;
@@ -277,8 +274,7 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
<< "Unexpected location kind "
<< dex_register_map.GetLocationInternalKind(vreg,
number_of_dex_registers,
- code_info,
- encoding);
+ code_info);
UNREACHABLE();
}
}
@@ -718,7 +714,7 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
Runtime* runtime = Runtime::Current();
if (method->IsAbstract()) {
- return runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+ return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
}
// This goes before IsProxyMethod since runtime methods have a null declaring class.
@@ -732,7 +728,7 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
// compiled method without any stubs. Therefore the method must have a OatQuickMethodHeader.
DCHECK(!method->IsDirect() && !method->IsConstructor())
<< "Constructors of proxy classes must have a OatQuickMethodHeader";
- return runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+ return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
}
// The only remaining case is if the method is native and uses the generic JNI stub,
@@ -751,8 +747,8 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
// Generic JNI frame.
uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
size_t scope_size = HandleScope::SizeOf(handle_refs);
- QuickMethodFrameInfo callee_info =
- runtime->GetCalleeSaveMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
+ constexpr QuickMethodFrameInfo callee_info =
+ RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
// Callee saves + handle scope + method ref + alignment
// Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
@@ -830,15 +826,14 @@ void StackVisitor::WalkStack(bool include_transitions) {
if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
&& (cur_oat_quick_method_header_ != nullptr)
&& cur_oat_quick_method_header_->IsOptimized()) {
- CodeInfo code_info = cur_oat_quick_method_header_->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(cur_oat_quick_method_header_);
uint32_t native_pc_offset =
cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
- if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
DCHECK_EQ(current_inlining_depth_, 0u);
- for (current_inlining_depth_ = inline_info.GetDepth(encoding.inline_info.encoding);
+ for (current_inlining_depth_ = inline_info.GetDepth();
current_inlining_depth_ != 0;
--current_inlining_depth_) {
bool should_continue = VisitFrame();
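For orientation before the stack_map changes below: the net effect on callers is that a CodeInfo is now constructed straight from the method header and queried directly, with no CodeInfoEncoding threaded through every call. A minimal sketch of the new calling convention, using hypothetical `header` and `pc` locals in place of the visitor's cached state:

    CodeInfo code_info(header);  // header is a const OatQuickMethodHeader*.
    uint32_t native_pc_offset = header->NativeQuickPcOffset(pc);
    StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
    if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
      InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
      for (uint32_t depth = inline_info.GetDepth(); depth != 0; --depth) {
        // Visit the inlined frame at this depth.
      }
    }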
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 9c7b6875cf..2b7e8dd748 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -25,8 +25,6 @@
namespace art {
constexpr size_t DexRegisterLocationCatalog::kNoLocationEntryIndex;
-constexpr uint32_t StackMap::kNoDexRegisterMap;
-constexpr uint32_t StackMap::kNoInlineInfo;
std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation::Kind& kind) {
using Kind = DexRegisterLocation::Kind;
@@ -56,27 +54,25 @@ std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation::Kind&
DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(
uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
DexRegisterLocationCatalog dex_register_location_catalog =
- code_info.GetDexRegisterLocationCatalog(enc);
+ code_info.GetDexRegisterLocationCatalog();
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
dex_register_number,
number_of_dex_registers,
- code_info.GetNumberOfLocationCatalogEntries(enc));
+ code_info.GetNumberOfLocationCatalogEntries());
return dex_register_location_catalog.GetLocationInternalKind(location_catalog_entry_index);
}
DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
DexRegisterLocationCatalog dex_register_location_catalog =
- code_info.GetDexRegisterLocationCatalog(enc);
+ code_info.GetDexRegisterLocationCatalog();
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
dex_register_number,
number_of_dex_registers,
- code_info.GetNumberOfLocationCatalogEntries(enc));
+ code_info.GetNumberOfLocationCatalogEntries());
return dex_register_location_catalog.GetDexRegisterLocation(location_catalog_entry_index);
}
@@ -90,27 +86,28 @@ static void DumpRegisterMapping(std::ostream& os,
<< " (" << location.GetValue() << ")" << suffix << '\n';
}
-void StackMapEncoding::Dump(VariableIndentationOutputStream* vios) const {
+void StackMap::DumpEncoding(const BitTable<6>& table,
+ VariableIndentationOutputStream* vios) {
vios->Stream()
<< "StackMapEncoding"
- << " (native_pc_bit_offset=" << static_cast<uint32_t>(kNativePcBitOffset)
- << ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
- << ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
- << ", inline_info_bit_offset=" << static_cast<uint32_t>(inline_info_bit_offset_)
- << ", register_mask_bit_offset=" << static_cast<uint32_t>(register_mask_index_bit_offset_)
- << ", stack_mask_index_bit_offset=" << static_cast<uint32_t>(stack_mask_index_bit_offset_)
- << ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_)
+ << " (NativePcOffsetBits=" << table.NumColumnBits(kNativePcOffset)
+ << ", DexPcBits=" << table.NumColumnBits(kDexPc)
+ << ", DexRegisterMapOffsetBits=" << table.NumColumnBits(kDexRegisterMapOffset)
+ << ", InlineInfoIndexBits=" << table.NumColumnBits(kInlineInfoIndex)
+ << ", RegisterMaskIndexBits=" << table.NumColumnBits(kRegisterMaskIndex)
+ << ", StackMaskIndexBits=" << table.NumColumnBits(kStackMaskIndex)
<< ")\n";
}
-void InlineInfoEncoding::Dump(VariableIndentationOutputStream* vios) const {
+void InlineInfo::DumpEncoding(const BitTable<5>& table,
+ VariableIndentationOutputStream* vios) {
vios->Stream()
<< "InlineInfoEncoding"
- << " (method_index_bit_offset=" << static_cast<uint32_t>(kMethodIndexBitOffset)
- << ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
- << ", extra_data_bit_offset=" << static_cast<uint32_t>(extra_data_bit_offset_)
- << ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
- << ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_)
+ << " (IsLastBits=" << table.NumColumnBits(kIsLast)
+ << ", MethodIndexIdxBits=" << table.NumColumnBits(kMethodIndexIdx)
+ << ", DexPcBits=" << table.NumColumnBits(kDexPc)
+ << ", ExtraDataBits=" << table.NumColumnBits(kExtraData)
+ << ", DexRegisterMapOffsetBits=" << table.NumColumnBits(kDexRegisterMapOffset)
<< ")\n";
}
@@ -120,26 +117,24 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios,
bool dump_stack_maps,
InstructionSet instruction_set,
const MethodInfo& method_info) const {
- CodeInfoEncoding encoding = ExtractEncoding();
- size_t number_of_stack_maps = GetNumberOfStackMaps(encoding);
+ size_t number_of_stack_maps = GetNumberOfStackMaps();
vios->Stream()
<< "Optimized CodeInfo (number_of_dex_registers=" << number_of_dex_registers
<< ", number_of_stack_maps=" << number_of_stack_maps
<< ")\n";
ScopedIndentation indent1(vios);
- encoding.stack_map.encoding.Dump(vios);
- if (HasInlineInfo(encoding)) {
- encoding.inline_info.encoding.Dump(vios);
+ StackMap::DumpEncoding(stack_maps_, vios);
+ if (HasInlineInfo()) {
+ InlineInfo::DumpEncoding(inline_infos_, vios);
}
// Display the Dex register location catalog.
- GetDexRegisterLocationCatalog(encoding).Dump(vios, *this);
+ GetDexRegisterLocationCatalog().Dump(vios, *this);
// Display stack maps along with (live) Dex register maps.
if (dump_stack_maps) {
for (size_t i = 0; i < number_of_stack_maps; ++i) {
- StackMap stack_map = GetStackMapAt(i, encoding);
+ StackMap stack_map = GetStackMapAt(i);
stack_map.Dump(vios,
*this,
- encoding,
method_info,
code_offset,
number_of_dex_registers,
@@ -153,9 +148,8 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios,
void DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info) {
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
- size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize(encoding);
+ size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
+ size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize();
vios->Stream()
<< "DexRegisterLocationCatalog (number_of_entries=" << number_of_location_catalog_entries
<< ", size_in_bytes=" << location_catalog_size_in_bytes << ")\n";
@@ -169,8 +163,7 @@ void DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios,
void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
uint16_t number_of_dex_registers) const {
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+ size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
// TODO: Display the bit mask of live Dex registers.
for (size_t j = 0; j < number_of_dex_registers; ++j) {
if (IsDexRegisterLive(j)) {
@@ -178,8 +171,7 @@ void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
j, number_of_dex_registers, number_of_location_catalog_entries);
DexRegisterLocation location = GetDexRegisterLocation(j,
number_of_dex_registers,
- code_info,
- encoding);
+ code_info);
ScopedIndentation indent1(vios);
DumpRegisterMapping(
vios->Stream(), j, location, "v",
@@ -190,38 +182,35 @@ void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
void StackMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
- const CodeInfoEncoding& encoding,
const MethodInfo& method_info,
uint32_t code_offset,
uint16_t number_of_dex_registers,
InstructionSet instruction_set,
const std::string& header_suffix) const {
- StackMapEncoding stack_map_encoding = encoding.stack_map.encoding;
- const uint32_t pc_offset = GetNativePcOffset(stack_map_encoding, instruction_set);
+ const uint32_t pc_offset = GetNativePcOffset(instruction_set);
vios->Stream()
<< "StackMap" << header_suffix
<< std::hex
<< " [native_pc=0x" << code_offset + pc_offset << "]"
- << " [entry_size=0x" << encoding.stack_map.encoding.BitSize() << " bits]"
- << " (dex_pc=0x" << GetDexPc(stack_map_encoding)
+ << " (dex_pc=0x" << GetDexPc()
<< ", native_pc_offset=0x" << pc_offset
- << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(stack_map_encoding)
- << ", inline_info_offset=0x" << GetInlineInfoIndex(stack_map_encoding)
- << ", register_mask=0x" << code_info.GetRegisterMaskOf(encoding, *this)
+ << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset()
+ << ", inline_info_offset=0x" << GetInlineInfoIndex()
+ << ", register_mask=0x" << code_info.GetRegisterMaskOf(*this)
<< std::dec
<< ", stack_mask=0b";
- BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, *this);
- for (size_t i = 0, e = encoding.stack_mask.encoding.BitSize(); i < e; ++i) {
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(*this);
+ for (size_t i = 0, e = code_info.GetNumberOfStackMaskBits(); i < e; ++i) {
vios->Stream() << stack_mask.LoadBit(e - i - 1);
}
vios->Stream() << ")\n";
- if (HasDexRegisterMap(stack_map_encoding)) {
+ if (HasDexRegisterMap()) {
DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
- *this, encoding, number_of_dex_registers);
+ *this, number_of_dex_registers);
dex_register_map.Dump(vios, code_info, number_of_dex_registers);
}
- if (HasInlineInfo(stack_map_encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(*this, encoding);
+ if (HasInlineInfo()) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(*this);
// We do not know the length of the dex register maps of inlined frames
// at this level, so we just pass null to `InlineInfo::Dump` to tell
// it not to look at these maps.
@@ -233,29 +222,27 @@ void InlineInfo::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
const MethodInfo& method_info,
uint16_t number_of_dex_registers[]) const {
- InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info.encoding;
vios->Stream() << "InlineInfo with depth "
- << static_cast<uint32_t>(GetDepth(inline_info_encoding))
+ << static_cast<uint32_t>(GetDepth())
<< "\n";
- for (size_t i = 0; i < GetDepth(inline_info_encoding); ++i) {
+ for (size_t i = 0; i < GetDepth(); ++i) {
vios->Stream()
<< " At depth " << i
<< std::hex
- << " (dex_pc=0x" << GetDexPcAtDepth(inline_info_encoding, i);
- if (EncodesArtMethodAtDepth(inline_info_encoding, i)) {
+ << " (dex_pc=0x" << GetDexPcAtDepth(i);
+ if (EncodesArtMethodAtDepth(i)) {
ScopedObjectAccess soa(Thread::Current());
- vios->Stream() << ", method=" << GetArtMethodAtDepth(inline_info_encoding, i)->PrettyMethod();
+ vios->Stream() << ", method=" << GetArtMethodAtDepth(i)->PrettyMethod();
} else {
vios->Stream()
<< std::dec
- << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, method_info, i);
+ << ", method_index=" << GetMethodIndexAtDepth(method_info, i);
}
vios->Stream() << ")\n";
- if (HasDexRegisterMapAtDepth(inline_info_encoding, i) && (number_of_dex_registers != nullptr)) {
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ if (HasDexRegisterMapAtDepth(i) && (number_of_dex_registers != nullptr)) {
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapAtDepth(i, *this, encoding, number_of_dex_registers[i]);
+ code_info.GetDexRegisterMapAtDepth(i, *this, number_of_dex_registers[i]);
ScopedIndentation indent1(vios);
dex_register_map.Dump(vios, code_info, number_of_dex_registers[i]);
}
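The mask queries in the dump code above lose their encoding arguments as well: the stack-mask width is now a property of the CodeInfo itself. A short sketch of reading one map's stack mask bit by bit, under the same assumed `code_info` and `stack_map` locals as the sketch above:

    BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
    for (size_t i = 0, bits = code_info.GetNumberOfStackMaskBits(); i < bits; ++i) {
      bool slot_holds_reference = stack_mask.LoadBit(i);  // One bit per spill slot.
    }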
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 38397643b6..91cecf0690 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -21,12 +21,14 @@
#include "arch/code_offset.h"
#include "base/bit_memory_region.h"
+#include "base/bit_table.h"
#include "base/bit_utils.h"
#include "base/bit_vector.h"
#include "base/leb128.h"
#include "base/memory_region.h"
#include "dex/dex_file_types.h"
#include "method_info.h"
+#include "oat_quick_method_header.h"
namespace art {
@@ -37,13 +39,8 @@ class VariableIndentationOutputStream;
// (signed) values.
static constexpr ssize_t kFrameSlotSize = 4;
-// Size of Dex virtual registers.
-static constexpr size_t kVRegSize = 4;
-
class ArtMethod;
class CodeInfo;
-class StackMapEncoding;
-struct CodeInfoEncoding;
/**
* Classes in the following file are wrappers around stack map information backed
@@ -452,35 +449,31 @@ class DexRegisterMap {
explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
DexRegisterMap() {}
- bool IsValid() const { return region_.pointer() != nullptr; }
+ bool IsValid() const { return region_.IsValid(); }
// Get the surface kind of Dex register `dex_register_number`.
DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
return DexRegisterLocation::ConvertToSurfaceKind(
- GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info, enc));
+ GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info));
}
// Get the internal kind of Dex register `dex_register_number`.
DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const;
+ const CodeInfo& code_info) const;
// Get the Dex register location `dex_register_number`.
DexRegisterLocation GetDexRegisterLocation(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const;
+ const CodeInfo& code_info) const;
int32_t GetStackOffsetInBytes(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
DexRegisterLocation location =
- GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
// GetDexRegisterLocation returns the offset in bytes.
return location.GetValue();
@@ -488,20 +481,18 @@ class DexRegisterMap {
int32_t GetConstant(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
DexRegisterLocation location =
- GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
DCHECK_EQ(location.GetKind(), DexRegisterLocation::Kind::kConstant);
return location.GetValue();
}
int32_t GetMachineRegister(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
DexRegisterLocation location =
- GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister ||
location.GetInternalKind() == DexRegisterLocation::Kind::kInRegisterHigh ||
location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister ||
@@ -627,7 +618,7 @@ class DexRegisterMap {
// Return the size of the DexRegisterMap object, in bytes.
size_t Size() const {
- return region_.size();
+ return BitsToBytesRoundUp(region_.size_in_bits());
}
void Dump(VariableIndentationOutputStream* vios,
@@ -650,143 +641,12 @@ class DexRegisterMap {
static constexpr int kFixedSize = 0;
- MemoryRegion region_;
+ BitMemoryRegion region_;
friend class CodeInfo;
friend class StackMapStream;
};
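The DexRegisterMap query surface keeps its shape; only the trailing CodeInfoEncoding parameter is gone. A hedged sketch of resolving a single vreg, assuming `code_info`, `stack_map`, `vreg`, and `number_of_dex_registers` come from a surrounding frame walk:

    DexRegisterMap map =
        code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
    if (map.IsValid() && map.IsDexRegisterLive(vreg)) {
      DexRegisterLocation::Kind kind =
          map.GetLocationKind(vreg, number_of_dex_registers, code_info);
      if (kind == DexRegisterLocation::Kind::kInStack) {
        // The location value is a byte offset within the quick frame.
        int32_t offset =
            map.GetStackOffsetInBytes(vreg, number_of_dex_registers, code_info);
      }
    }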
-// Represents bit range of bit-packed integer field.
-// We reuse the idea from ULEB128p1 to support encoding of -1 (aka 0xFFFFFFFF).
-// If min_value is set to -1, we implicitly subtract one from any loaded value,
-// and add one to any stored value. This is generalized to any negative values.
-// In other words, min_value acts as a base and the stored value is added to it.
-struct FieldEncoding {
- FieldEncoding(size_t start_offset, size_t end_offset, int32_t min_value = 0)
- : start_offset_(start_offset), end_offset_(end_offset), min_value_(min_value) {
- DCHECK_LE(start_offset_, end_offset_);
- DCHECK_LE(BitSize(), 32u);
- }
-
- ALWAYS_INLINE size_t BitSize() const { return end_offset_ - start_offset_; }
-
- template <typename Region>
- ALWAYS_INLINE int32_t Load(const Region& region) const {
- DCHECK_LE(end_offset_, region.size_in_bits());
- return static_cast<int32_t>(region.LoadBits(start_offset_, BitSize())) + min_value_;
- }
-
- template <typename Region>
- ALWAYS_INLINE void Store(Region region, int32_t value) const {
- region.StoreBits(start_offset_, value - min_value_, BitSize());
- DCHECK_EQ(Load(region), value);
- }
-
- private:
- size_t start_offset_;
- size_t end_offset_;
- int32_t min_value_;
-};
-
-class StackMapEncoding {
- public:
- StackMapEncoding()
- : dex_pc_bit_offset_(0),
- dex_register_map_bit_offset_(0),
- inline_info_bit_offset_(0),
- register_mask_index_bit_offset_(0),
- stack_mask_index_bit_offset_(0),
- total_bit_size_(0) {}
-
- // Set stack map bit layout based on given sizes.
- // Returns the size of stack map in bits.
- size_t SetFromSizes(size_t native_pc_max,
- size_t dex_pc_max,
- size_t dex_register_map_size,
- size_t number_of_inline_info,
- size_t number_of_register_masks,
- size_t number_of_stack_masks) {
- total_bit_size_ = 0;
- DCHECK_EQ(kNativePcBitOffset, total_bit_size_);
- total_bit_size_ += MinimumBitsToStore(native_pc_max);
-
- dex_pc_bit_offset_ = total_bit_size_;
- // Note: We're not encoding the dex pc if there is none. That's the case
- // for an intrinsified native method, such as String.charAt().
- if (dex_pc_max != dex::kDexNoIndex) {
- total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
- }
-
- // We also need +1 for kNoDexRegisterMap, but since the size is strictly
- // greater than any offset we might try to encode, we already implicitly have it.
- dex_register_map_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
-
- // We also need +1 for kNoInlineInfo, but since the inline_info_size is strictly
- // greater than the offset we might try to encode, we already implicitly have it.
- // If inline_info_size is zero, we can encode only kNoInlineInfo (in zero bits).
- inline_info_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(number_of_inline_info);
-
- register_mask_index_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(number_of_register_masks);
-
- stack_mask_index_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(number_of_stack_masks);
-
- return total_bit_size_;
- }
-
- ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const {
- return FieldEncoding(kNativePcBitOffset, dex_pc_bit_offset_);
- }
- ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
- return FieldEncoding(dex_pc_bit_offset_, dex_register_map_bit_offset_, -1 /* min_value */);
- }
- ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
- return FieldEncoding(dex_register_map_bit_offset_, inline_info_bit_offset_, -1 /* min_value */);
- }
- ALWAYS_INLINE FieldEncoding GetInlineInfoEncoding() const {
- return FieldEncoding(inline_info_bit_offset_,
- register_mask_index_bit_offset_,
- -1 /* min_value */);
- }
- ALWAYS_INLINE FieldEncoding GetRegisterMaskIndexEncoding() const {
- return FieldEncoding(register_mask_index_bit_offset_, stack_mask_index_bit_offset_);
- }
- ALWAYS_INLINE FieldEncoding GetStackMaskIndexEncoding() const {
- return FieldEncoding(stack_mask_index_bit_offset_, total_bit_size_);
- }
- ALWAYS_INLINE size_t BitSize() const {
- return total_bit_size_;
- }
-
- // Encode the encoding into the vector.
- template<typename Vector>
- void Encode(Vector* dest) const {
- static_assert(alignof(StackMapEncoding) == 1, "Should not require alignment");
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- dest->insert(dest->end(), ptr, ptr + sizeof(*this));
- }
-
- // Decode the encoding from a pointer, updates the pointer.
- void Decode(const uint8_t** ptr) {
- *this = *reinterpret_cast<const StackMapEncoding*>(*ptr);
- *ptr += sizeof(*this);
- }
-
- void Dump(VariableIndentationOutputStream* vios) const;
-
- private:
- static constexpr size_t kNativePcBitOffset = 0;
- uint8_t dex_pc_bit_offset_;
- uint8_t dex_register_map_bit_offset_;
- uint8_t inline_info_bit_offset_;
- uint8_t register_mask_index_bit_offset_;
- uint8_t stack_mask_index_bit_offset_;
- uint8_t total_bit_size_;
-};
-
/**
* A Stack Map holds compilation information for a specific PC necessary for:
* - Mapping it to a dex PC,
@@ -794,248 +654,101 @@ class StackMapEncoding {
* - Knowing which registers hold objects,
* - Knowing the inlining information,
* - Knowing the values of dex registers.
- *
- * The information is of the form:
- *
- * [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_index, register_mask_index,
- * stack_mask_index].
*/
-class StackMap {
+class StackMap : public BitTable<6>::Accessor {
public:
- StackMap() {}
- explicit StackMap(BitMemoryRegion region) : region_(region) {}
-
- ALWAYS_INLINE bool IsValid() const { return region_.pointer() != nullptr; }
-
- ALWAYS_INLINE uint32_t GetDexPc(const StackMapEncoding& encoding) const {
- return encoding.GetDexPcEncoding().Load(region_);
- }
+ enum Field {
+ kNativePcOffset,
+ kDexPc,
+ kDexRegisterMapOffset,
+ kInlineInfoIndex,
+ kRegisterMaskIndex,
+ kStackMaskIndex,
+ kCount,
+ };
- ALWAYS_INLINE void SetDexPc(const StackMapEncoding& encoding, uint32_t dex_pc) {
- encoding.GetDexPcEncoding().Store(region_, dex_pc);
- }
+ StackMap() : BitTable<kCount>::Accessor(nullptr, -1) {}
+ StackMap(const BitTable<kCount>* table, uint32_t row)
+ : BitTable<kCount>::Accessor(table, row) {}
- ALWAYS_INLINE uint32_t GetNativePcOffset(const StackMapEncoding& encoding,
- InstructionSet instruction_set) const {
- CodeOffset offset(
- CodeOffset::FromCompressedOffset(encoding.GetNativePcEncoding().Load(region_)));
+ ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
+ CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>()));
return offset.Uint32Value(instruction_set);
}
- ALWAYS_INLINE void SetNativePcCodeOffset(const StackMapEncoding& encoding,
- CodeOffset native_pc_offset) {
- encoding.GetNativePcEncoding().Store(region_, native_pc_offset.CompressedValue());
- }
-
- ALWAYS_INLINE uint32_t GetDexRegisterMapOffset(const StackMapEncoding& encoding) const {
- return encoding.GetDexRegisterMapEncoding().Load(region_);
- }
-
- ALWAYS_INLINE void SetDexRegisterMapOffset(const StackMapEncoding& encoding, uint32_t offset) {
- encoding.GetDexRegisterMapEncoding().Store(region_, offset);
- }
-
- ALWAYS_INLINE uint32_t GetInlineInfoIndex(const StackMapEncoding& encoding) const {
- return encoding.GetInlineInfoEncoding().Load(region_);
- }
-
- ALWAYS_INLINE void SetInlineInfoIndex(const StackMapEncoding& encoding, uint32_t index) {
- encoding.GetInlineInfoEncoding().Store(region_, index);
- }
-
- ALWAYS_INLINE uint32_t GetRegisterMaskIndex(const StackMapEncoding& encoding) const {
- return encoding.GetRegisterMaskIndexEncoding().Load(region_);
- }
-
- ALWAYS_INLINE void SetRegisterMaskIndex(const StackMapEncoding& encoding, uint32_t mask) {
- encoding.GetRegisterMaskIndexEncoding().Store(region_, mask);
- }
+ uint32_t GetDexPc() const { return Get<kDexPc>(); }
- ALWAYS_INLINE uint32_t GetStackMaskIndex(const StackMapEncoding& encoding) const {
- return encoding.GetStackMaskIndexEncoding().Load(region_);
- }
+ uint32_t GetDexRegisterMapOffset() const { return Get<kDexRegisterMapOffset>(); }
+ bool HasDexRegisterMap() const { return GetDexRegisterMapOffset() != kNoValue; }
- ALWAYS_INLINE void SetStackMaskIndex(const StackMapEncoding& encoding, uint32_t mask) {
- encoding.GetStackMaskIndexEncoding().Store(region_, mask);
- }
+ uint32_t GetInlineInfoIndex() const { return Get<kInlineInfoIndex>(); }
+ bool HasInlineInfo() const { return GetInlineInfoIndex() != kNoValue; }
- ALWAYS_INLINE bool HasDexRegisterMap(const StackMapEncoding& encoding) const {
- return GetDexRegisterMapOffset(encoding) != kNoDexRegisterMap;
- }
+ uint32_t GetRegisterMaskIndex() const { return Get<kRegisterMaskIndex>(); }
- ALWAYS_INLINE bool HasInlineInfo(const StackMapEncoding& encoding) const {
- return GetInlineInfoIndex(encoding) != kNoInlineInfo;
- }
-
- ALWAYS_INLINE bool Equals(const StackMap& other) const {
- return region_.pointer() == other.region_.pointer() &&
- region_.size() == other.region_.size() &&
- region_.BitOffset() == other.region_.BitOffset();
- }
+ uint32_t GetStackMaskIndex() const { return Get<kStackMaskIndex>(); }
+ static void DumpEncoding(const BitTable<6>& table, VariableIndentationOutputStream* vios);
void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
- const CodeInfoEncoding& encoding,
const MethodInfo& method_info,
uint32_t code_offset,
uint16_t number_of_dex_registers,
InstructionSet instruction_set,
const std::string& header_suffix = "") const;
-
- // Special (invalid) offset for the DexRegisterMapOffset field meaning
- // that there is no Dex register map for this stack map.
- static constexpr uint32_t kNoDexRegisterMap = -1;
-
- // Special (invalid) offset for the InlineDescriptorOffset field meaning
- // that there is no inline info for this stack map.
- static constexpr uint32_t kNoInlineInfo = -1;
-
- private:
- static constexpr int kFixedSize = 0;
-
- BitMemoryRegion region_;
-
- friend class StackMapStream;
-};
-
-class InlineInfoEncoding {
- public:
- void SetFromSizes(size_t method_index_idx_max,
- size_t dex_pc_max,
- size_t extra_data_max,
- size_t dex_register_map_size) {
- total_bit_size_ = kMethodIndexBitOffset;
- total_bit_size_ += MinimumBitsToStore(method_index_idx_max);
-
- dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
- // Note: We're not encoding the dex pc if there is none. That's the case
- // for an intrinsified native method, such as String.charAt().
- if (dex_pc_max != dex::kDexNoIndex) {
- total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
- }
-
- extra_data_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
- total_bit_size_ += MinimumBitsToStore(extra_data_max);
-
- // We also need +1 for kNoDexRegisterMap, but since the size is strictly
- // greater than any offset we might try to encode, we already implicitly have it.
- dex_register_map_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
- total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
- }
-
- ALWAYS_INLINE FieldEncoding GetMethodIndexIdxEncoding() const {
- return FieldEncoding(kMethodIndexBitOffset, dex_pc_bit_offset_);
- }
- ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
- return FieldEncoding(dex_pc_bit_offset_, extra_data_bit_offset_, -1 /* min_value */);
- }
- ALWAYS_INLINE FieldEncoding GetExtraDataEncoding() const {
- return FieldEncoding(extra_data_bit_offset_, dex_register_map_bit_offset_);
- }
- ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
- return FieldEncoding(dex_register_map_bit_offset_, total_bit_size_, -1 /* min_value */);
- }
- ALWAYS_INLINE size_t BitSize() const {
- return total_bit_size_;
- }
-
- void Dump(VariableIndentationOutputStream* vios) const;
-
- // Encode the encoding into the vector.
- template<typename Vector>
- void Encode(Vector* dest) const {
- static_assert(alignof(InlineInfoEncoding) == 1, "Should not require alignment");
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- dest->insert(dest->end(), ptr, ptr + sizeof(*this));
- }
-
- // Decode the encoding from a pointer, updates the pointer.
- void Decode(const uint8_t** ptr) {
- *this = *reinterpret_cast<const InlineInfoEncoding*>(*ptr);
- *ptr += sizeof(*this);
- }
-
- private:
- static constexpr uint8_t kIsLastBitOffset = 0;
- static constexpr uint8_t kMethodIndexBitOffset = 1;
- uint8_t dex_pc_bit_offset_;
- uint8_t extra_data_bit_offset_;
- uint8_t dex_register_map_bit_offset_;
- uint8_t total_bit_size_;
};
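With the accessor in place, each StackMap getter is a single column read on one row of the six-column table, and the old kNoDexRegisterMap/kNoInlineInfo sentinels collapse into the BitTable-wide kNoValue. An illustrative sketch, assuming a decoded `code_info`:

    StackMap map = code_info.GetStackMapAt(0);
    uint32_t dex_pc = map.GetDexPc();                        // Get<kDexPc>().
    uint32_t pc_offset = map.GetNativePcOffset(kRuntimeISA);
    if (map.HasDexRegisterMap()) {                           // Offset != kNoValue.
      uint32_t map_offset = map.GetDexRegisterMapOffset();
    }
    uint32_t register_mask = code_info.GetRegisterMaskOf(map);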
/**
- * Inline information for a specific PC. The information is of the form:
- *
- * [is_last,
- * method_index (or ArtMethod high bits),
- * dex_pc,
- * extra_data (ArtMethod low bits or 1),
- * dex_register_map_offset]+.
+ * Inline information for a specific PC.
+ * The row referenced from the StackMap holds information at depth 0.
+ * Following rows hold information for further depths.
*/
-class InlineInfo {
+class InlineInfo : public BitTable<5>::Accessor {
public:
- explicit InlineInfo(BitMemoryRegion region) : region_(region) {}
-
- ALWAYS_INLINE uint32_t GetDepth(const InlineInfoEncoding& encoding) const {
- size_t depth = 0;
- while (!GetRegionAtDepth(encoding, depth++).LoadBit(0)) { } // Check is_last bit.
- return depth;
- }
-
- ALWAYS_INLINE void SetDepth(const InlineInfoEncoding& encoding, uint32_t depth) {
- DCHECK_GT(depth, 0u);
- for (size_t d = 0; d < depth; ++d) {
- GetRegionAtDepth(encoding, d).StoreBit(0, d == depth - 1); // Set is_last bit.
- }
- }
+ enum Field {
+ kIsLast, // Determines if there are further rows for further depths.
+ kMethodIndexIdx, // Method index or ArtMethod high bits.
+ kDexPc,
+ kExtraData, // ArtMethod low bits or 1.
+ kDexRegisterMapOffset,
+ kCount,
+ };
+ static constexpr uint32_t kLast = -1;
+ static constexpr uint32_t kMore = 0;
- ALWAYS_INLINE uint32_t GetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- DCHECK(!EncodesArtMethodAtDepth(encoding, depth));
- return encoding.GetMethodIndexIdxEncoding().Load(GetRegionAtDepth(encoding, depth));
- }
+ InlineInfo(const BitTable<kCount>* table, uint32_t row)
+ : BitTable<kCount>::Accessor(table, row) {}
- ALWAYS_INLINE void SetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth,
- uint32_t index) {
- encoding.GetMethodIndexIdxEncoding().Store(GetRegionAtDepth(encoding, depth), index);
+ ALWAYS_INLINE InlineInfo AtDepth(uint32_t depth) const {
+ return InlineInfo(table_, this->row_ + depth);
}
-
- ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
- const MethodInfo& method_info,
- uint32_t depth) const {
- return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(encoding, depth));
+ uint32_t GetDepth() const {
+ size_t depth = 0;
+ while (AtDepth(depth++).Get<kIsLast>() == kMore) { }
+ return depth;
}
- ALWAYS_INLINE uint32_t GetDexPcAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- return encoding.GetDexPcEncoding().Load(GetRegionAtDepth(encoding, depth));
+ uint32_t GetMethodIndexIdxAtDepth(uint32_t depth) const {
+ DCHECK(!EncodesArtMethodAtDepth(depth));
+ return AtDepth(depth).Get<kMethodIndexIdx>();
}
- ALWAYS_INLINE void SetDexPcAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth,
- uint32_t dex_pc) {
- encoding.GetDexPcEncoding().Store(GetRegionAtDepth(encoding, depth), dex_pc);
+ uint32_t GetMethodIndexAtDepth(const MethodInfo& method_info, uint32_t depth) const {
+ return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(depth));
}
- ALWAYS_INLINE bool EncodesArtMethodAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- return (encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth)) & 1) == 0;
+ uint32_t GetDexPcAtDepth(uint32_t depth) const {
+ return AtDepth(depth).Get<kDexPc>();
}
- ALWAYS_INLINE void SetExtraDataAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth,
- uint32_t extra_data) {
- encoding.GetExtraDataEncoding().Store(GetRegionAtDepth(encoding, depth), extra_data);
+ bool EncodesArtMethodAtDepth(uint32_t depth) const {
+ return (AtDepth(depth).Get<kExtraData>() & 1) == 0;
}
- ALWAYS_INLINE ArtMethod* GetArtMethodAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- uint32_t low_bits = encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth));
- uint32_t high_bits = encoding.GetMethodIndexIdxEncoding().Load(
- GetRegionAtDepth(encoding, depth));
+ ArtMethod* GetArtMethodAtDepth(uint32_t depth) const {
+ uint32_t low_bits = AtDepth(depth).Get<kExtraData>();
+ uint32_t high_bits = AtDepth(depth).Get<kMethodIndexIdx>();
if (high_bits == 0) {
return reinterpret_cast<ArtMethod*>(low_bits);
} else {
@@ -1045,411 +758,132 @@ class InlineInfo {
}
}
- ALWAYS_INLINE uint32_t GetDexRegisterMapOffsetAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- return encoding.GetDexRegisterMapEncoding().Load(GetRegionAtDepth(encoding, depth));
- }
-
- ALWAYS_INLINE void SetDexRegisterMapOffsetAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth,
- uint32_t offset) {
- encoding.GetDexRegisterMapEncoding().Store(GetRegionAtDepth(encoding, depth), offset);
+ uint32_t GetDexRegisterMapOffsetAtDepth(uint32_t depth) const {
+ return AtDepth(depth).Get<kDexRegisterMapOffset>();
}
- ALWAYS_INLINE bool HasDexRegisterMapAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- return GetDexRegisterMapOffsetAtDepth(encoding, depth) != StackMap::kNoDexRegisterMap;
+ bool HasDexRegisterMapAtDepth(uint32_t depth) const {
+ return GetDexRegisterMapOffsetAtDepth(depth) != StackMap::kNoValue;
}
+ static void DumpEncoding(const BitTable<5>& table, VariableIndentationOutputStream* vios);
void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& info,
const MethodInfo& method_info,
uint16_t* number_of_dex_registers) const;
-
- private:
- ALWAYS_INLINE BitMemoryRegion GetRegionAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- size_t entry_size = encoding.BitSize();
- DCHECK_GT(entry_size, 0u);
- return region_.Subregion(depth * entry_size, entry_size);
- }
-
- BitMemoryRegion region_;
};
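Because each inlining depth is simply the next row of the table, AtDepth() is plain row arithmetic and GetDepth() scans forward until a row's kIsLast column stops reading kMore. A sketch of walking one inline chain, reusing the assumed locals from the sketches above plus a `method_info`:

    InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
    for (uint32_t d = 0, depth = inline_info.GetDepth(); d < depth; ++d) {
      uint32_t dex_pc = inline_info.GetDexPcAtDepth(d);
      if (inline_info.EncodesArtMethodAtDepth(d)) {
        // The ArtMethod* is reassembled from the kExtraData and kMethodIndexIdx columns.
        ArtMethod* method = inline_info.GetArtMethodAtDepth(d);
      } else {
        uint32_t method_index = inline_info.GetMethodIndexAtDepth(method_info, d);
      }
    }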
-// Bit sized region encoding, may be more than 255 bits.
-class BitRegionEncoding {
+class InvokeInfo : public BitTable<3>::Accessor {
public:
- uint32_t num_bits = 0;
-
- ALWAYS_INLINE size_t BitSize() const {
- return num_bits;
- }
-
- template<typename Vector>
- void Encode(Vector* dest) const {
- EncodeUnsignedLeb128(dest, num_bits); // Use leb in case num_bits is greater than 255.
- }
-
- void Decode(const uint8_t** ptr) {
- num_bits = DecodeUnsignedLeb128(ptr);
- }
-};
-
-// A table of bit sized encodings.
-template <typename Encoding>
-struct BitEncodingTable {
- static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
- // How the encoding is laid out (serialized).
- Encoding encoding;
-
- // Number of entries in the table (serialized).
- size_t num_entries;
-
- // Bit offset for the base of the table (computed).
- size_t bit_offset = kInvalidOffset;
-
- template<typename Vector>
- void Encode(Vector* dest) const {
- EncodeUnsignedLeb128(dest, num_entries);
- encoding.Encode(dest);
- }
-
- ALWAYS_INLINE void Decode(const uint8_t** ptr) {
- num_entries = DecodeUnsignedLeb128(ptr);
- encoding.Decode(ptr);
- }
-
- // Set the bit offset in the table and adds the space used by the table to offset.
- void UpdateBitOffset(size_t* offset) {
- DCHECK(offset != nullptr);
- bit_offset = *offset;
- *offset += encoding.BitSize() * num_entries;
- }
-
- // Return the bit region for the map at index i.
- ALWAYS_INLINE BitMemoryRegion BitRegion(MemoryRegion region, size_t index) const {
- DCHECK_NE(bit_offset, kInvalidOffset) << "Invalid table offset";
- DCHECK_LT(index, num_entries);
- const size_t map_size = encoding.BitSize();
- return BitMemoryRegion(region, bit_offset + index * map_size, map_size);
- }
-};
-
-// A byte sized table of possible variable sized encodings.
-struct ByteSizedTable {
- static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
-
- // Number of entries in the table (serialized).
- size_t num_entries = 0;
-
- // Number of bytes of the table (serialized).
- size_t num_bytes;
-
- // Bit offset for the base of the table (computed).
- size_t byte_offset = kInvalidOffset;
-
- template<typename Vector>
- void Encode(Vector* dest) const {
- EncodeUnsignedLeb128(dest, num_entries);
- EncodeUnsignedLeb128(dest, num_bytes);
- }
-
- ALWAYS_INLINE void Decode(const uint8_t** ptr) {
- num_entries = DecodeUnsignedLeb128(ptr);
- num_bytes = DecodeUnsignedLeb128(ptr);
- }
-
- // Set the bit offset of the table. Adds the total bit size of the table to offset.
- void UpdateBitOffset(size_t* offset) {
- DCHECK(offset != nullptr);
- DCHECK_ALIGNED(*offset, kBitsPerByte);
- byte_offset = *offset / kBitsPerByte;
- *offset += num_bytes * kBitsPerByte;
- }
-};
-
-// Format is [native pc, invoke type, method index].
-class InvokeInfoEncoding {
- public:
- void SetFromSizes(size_t native_pc_max,
- size_t invoke_type_max,
- size_t method_index_max) {
- total_bit_size_ = 0;
- DCHECK_EQ(kNativePcBitOffset, total_bit_size_);
- total_bit_size_ += MinimumBitsToStore(native_pc_max);
- invoke_type_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(invoke_type_max);
- method_index_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(method_index_max);
- }
-
- ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const {
- return FieldEncoding(kNativePcBitOffset, invoke_type_bit_offset_);
- }
-
- ALWAYS_INLINE FieldEncoding GetInvokeTypeEncoding() const {
- return FieldEncoding(invoke_type_bit_offset_, method_index_bit_offset_);
- }
-
- ALWAYS_INLINE FieldEncoding GetMethodIndexEncoding() const {
- return FieldEncoding(method_index_bit_offset_, total_bit_size_);
- }
-
- ALWAYS_INLINE size_t BitSize() const {
- return total_bit_size_;
- }
-
- template<typename Vector>
- void Encode(Vector* dest) const {
- static_assert(alignof(InvokeInfoEncoding) == 1, "Should not require alignment");
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- dest->insert(dest->end(), ptr, ptr + sizeof(*this));
- }
-
- void Decode(const uint8_t** ptr) {
- *this = *reinterpret_cast<const InvokeInfoEncoding*>(*ptr);
- *ptr += sizeof(*this);
- }
-
- private:
- static constexpr uint8_t kNativePcBitOffset = 0;
- uint8_t invoke_type_bit_offset_;
- uint8_t method_index_bit_offset_;
- uint8_t total_bit_size_;
-};
+ enum Field {
+ kNativePcOffset,
+ kInvokeType,
+ kMethodIndexIdx,
+ kCount,
+ };
-class InvokeInfo {
- public:
- explicit InvokeInfo(BitMemoryRegion region) : region_(region) {}
+ InvokeInfo(const BitTable<kCount>* table, uint32_t row)
+ : BitTable<kCount>::Accessor(table, row) {}
- ALWAYS_INLINE uint32_t GetNativePcOffset(const InvokeInfoEncoding& encoding,
- InstructionSet instruction_set) const {
- CodeOffset offset(
- CodeOffset::FromCompressedOffset(encoding.GetNativePcEncoding().Load(region_)));
+ ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
+ CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>()));
return offset.Uint32Value(instruction_set);
}
- ALWAYS_INLINE void SetNativePcCodeOffset(const InvokeInfoEncoding& encoding,
- CodeOffset native_pc_offset) {
- encoding.GetNativePcEncoding().Store(region_, native_pc_offset.CompressedValue());
- }
-
- ALWAYS_INLINE uint32_t GetInvokeType(const InvokeInfoEncoding& encoding) const {
- return encoding.GetInvokeTypeEncoding().Load(region_);
- }
-
- ALWAYS_INLINE void SetInvokeType(const InvokeInfoEncoding& encoding, uint32_t invoke_type) {
- encoding.GetInvokeTypeEncoding().Store(region_, invoke_type);
- }
-
- ALWAYS_INLINE uint32_t GetMethodIndexIdx(const InvokeInfoEncoding& encoding) const {
- return encoding.GetMethodIndexEncoding().Load(region_);
- }
-
- ALWAYS_INLINE void SetMethodIndexIdx(const InvokeInfoEncoding& encoding,
- uint32_t method_index_idx) {
- encoding.GetMethodIndexEncoding().Store(region_, method_index_idx);
- }
-
- ALWAYS_INLINE uint32_t GetMethodIndex(const InvokeInfoEncoding& encoding,
- MethodInfo method_info) const {
- return method_info.GetMethodIndex(GetMethodIndexIdx(encoding));
- }
-
- bool IsValid() const { return region_.pointer() != nullptr; }
-
- private:
- BitMemoryRegion region_;
-};
-
-// Most of the fields are encoded as ULEB128 to save space.
-struct CodeInfoEncoding {
- using SizeType = uint32_t;
-
- static constexpr SizeType kInvalidSize = std::numeric_limits<SizeType>::max();
-
- // Byte sized tables go first to avoid unnecessary alignment bits.
- ByteSizedTable dex_register_map;
- ByteSizedTable location_catalog;
- BitEncodingTable<StackMapEncoding> stack_map;
- BitEncodingTable<BitRegionEncoding> register_mask;
- BitEncodingTable<BitRegionEncoding> stack_mask;
- BitEncodingTable<InvokeInfoEncoding> invoke_info;
- BitEncodingTable<InlineInfoEncoding> inline_info;
-
- CodeInfoEncoding() {}
-
- explicit CodeInfoEncoding(const void* data) {
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(data);
- dex_register_map.Decode(&ptr);
- location_catalog.Decode(&ptr);
- stack_map.Decode(&ptr);
- register_mask.Decode(&ptr);
- stack_mask.Decode(&ptr);
- invoke_info.Decode(&ptr);
- if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
- inline_info.Decode(&ptr);
- } else {
- inline_info = BitEncodingTable<InlineInfoEncoding>();
- }
- cache_header_size =
- dchecked_integral_cast<SizeType>(ptr - reinterpret_cast<const uint8_t*>(data));
- ComputeTableOffsets();
- }
-
- // Compress is not const since it calculates cache_header_size. This is used by PrepareForFillIn.
- template<typename Vector>
- void Compress(Vector* dest) {
- dex_register_map.Encode(dest);
- location_catalog.Encode(dest);
- stack_map.Encode(dest);
- register_mask.Encode(dest);
- stack_mask.Encode(dest);
- invoke_info.Encode(dest);
- if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
- inline_info.Encode(dest);
- }
- cache_header_size = dest->size();
- }
-
- ALWAYS_INLINE void ComputeTableOffsets() {
- // Skip the header.
- size_t bit_offset = HeaderSize() * kBitsPerByte;
- // The byte tables must be aligned so they must go first.
- dex_register_map.UpdateBitOffset(&bit_offset);
- location_catalog.UpdateBitOffset(&bit_offset);
- // Other tables don't require alignment.
- stack_map.UpdateBitOffset(&bit_offset);
- register_mask.UpdateBitOffset(&bit_offset);
- stack_mask.UpdateBitOffset(&bit_offset);
- invoke_info.UpdateBitOffset(&bit_offset);
- inline_info.UpdateBitOffset(&bit_offset);
- cache_non_header_size = RoundUp(bit_offset, kBitsPerByte) / kBitsPerByte - HeaderSize();
- }
+ uint32_t GetInvokeType() const { return Get<kInvokeType>(); }
- ALWAYS_INLINE size_t HeaderSize() const {
- DCHECK_NE(cache_header_size, kInvalidSize) << "Uninitialized";
- return cache_header_size;
- }
+ uint32_t GetMethodIndexIdx() const { return Get<kMethodIndexIdx>(); }
- ALWAYS_INLINE size_t NonHeaderSize() const {
- DCHECK_NE(cache_non_header_size, kInvalidSize) << "Uninitialized";
- return cache_non_header_size;
+ uint32_t GetMethodIndex(MethodInfo method_info) const {
+ return method_info.GetMethodIndex(GetMethodIndexIdx());
}
-
- private:
- // Computed fields (not serialized).
- // Header size in bytes, cached to avoid needing to re-decoding the encoding in HeaderSize.
- SizeType cache_header_size = kInvalidSize;
- // Non header size in bytes, cached to avoid needing to re-decoding the encoding in NonHeaderSize.
- SizeType cache_non_header_size = kInvalidSize;
};
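A missed InvokeInfo lookup now yields an accessor on row -1 rather than an empty bit region, so validity checks fall out of the shared accessor state. A sketch, assuming GetInvokeInfoForNativePcOffset as declared further down and that the inherited IsValid() reports a live row:

    InvokeInfo invoke = code_info.GetInvokeInfoForNativePcOffset(native_pc_offset);
    if (invoke.IsValid()) {
      uint32_t invoke_type = invoke.GetInvokeType();
      uint32_t method_index = invoke.GetMethodIndex(method_info);
    }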
/**
* Wrapper around all compiler information collected for a method.
* The information is of the form:
*
- * [CodeInfoEncoding, DexRegisterMap+, DexLocationCatalog+, StackMap+, RegisterMask+, StackMask+,
- * InlineInfo*]
- *
- * where CodeInfoEncoding is of the form:
+ * [BitTable<Header>, BitTable<StackMap>, BitTable<RegisterMask>, BitTable<InlineInfo>,
+ * BitTable<InvokeInfo>, BitTable<StackMask>, DexRegisterMap, DexLocationCatalog]
*
- * [ByteSizedTable(dex_register_map), ByteSizedTable(location_catalog),
- * BitEncodingTable<StackMapEncoding>, BitEncodingTable<BitRegionEncoding>,
- * BitEncodingTable<BitRegionEncoding>, BitEncodingTable<InlineInfoEncoding>]
*/
class CodeInfo {
public:
- explicit CodeInfo(MemoryRegion region) : region_(region) {
- }
-
explicit CodeInfo(const void* data) {
- CodeInfoEncoding encoding = CodeInfoEncoding(data);
- region_ = MemoryRegion(const_cast<void*>(data),
- encoding.HeaderSize() + encoding.NonHeaderSize());
+ Decode(reinterpret_cast<const uint8_t*>(data));
}
- CodeInfoEncoding ExtractEncoding() const {
- CodeInfoEncoding encoding(region_.begin());
- AssertValidStackMap(encoding);
- return encoding;
+ explicit CodeInfo(MemoryRegion region) : CodeInfo(region.begin()) {
+ DCHECK_EQ(size_, region.size());
}
- bool HasInlineInfo(const CodeInfoEncoding& encoding) const {
- return encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0;
+ explicit CodeInfo(const OatQuickMethodHeader* header)
+ : CodeInfo(header->GetOptimizedCodeInfoPtr()) {
}
- DexRegisterLocationCatalog GetDexRegisterLocationCatalog(const CodeInfoEncoding& encoding) const {
- return DexRegisterLocationCatalog(region_.Subregion(encoding.location_catalog.byte_offset,
- encoding.location_catalog.num_bytes));
+ size_t Size() const {
+ return size_;
}
- ALWAYS_INLINE size_t GetNumberOfStackMaskBits(const CodeInfoEncoding& encoding) const {
- return encoding.stack_mask.encoding.BitSize();
+ bool HasInlineInfo() const {
+ return stack_maps_.NumColumnBits(StackMap::kInlineInfoIndex) != 0;
}
- ALWAYS_INLINE StackMap GetStackMapAt(size_t index, const CodeInfoEncoding& encoding) const {
- return StackMap(encoding.stack_map.BitRegion(region_, index));
+ DexRegisterLocationCatalog GetDexRegisterLocationCatalog() const {
+ return DexRegisterLocationCatalog(location_catalog_);
}
- BitMemoryRegion GetStackMask(size_t index, const CodeInfoEncoding& encoding) const {
- return encoding.stack_mask.BitRegion(region_, index);
+ ALWAYS_INLINE size_t GetNumberOfStackMaskBits() const {
+ return stack_mask_bits_;
}
- BitMemoryRegion GetStackMaskOf(const CodeInfoEncoding& encoding,
- const StackMap& stack_map) const {
- return GetStackMask(stack_map.GetStackMaskIndex(encoding.stack_map.encoding), encoding);
+ ALWAYS_INLINE StackMap GetStackMapAt(size_t index) const {
+ return StackMap(&stack_maps_, index);
}
- BitMemoryRegion GetRegisterMask(size_t index, const CodeInfoEncoding& encoding) const {
- return encoding.register_mask.BitRegion(region_, index);
+ BitMemoryRegion GetStackMask(size_t index) const {
+ return stack_masks_.Subregion(index * stack_mask_bits_, stack_mask_bits_);
}
- uint32_t GetRegisterMaskOf(const CodeInfoEncoding& encoding, const StackMap& stack_map) const {
- size_t index = stack_map.GetRegisterMaskIndex(encoding.stack_map.encoding);
- return GetRegisterMask(index, encoding).LoadBits(0u, encoding.register_mask.encoding.BitSize());
+ BitMemoryRegion GetStackMaskOf(const StackMap& stack_map) const {
+ return GetStackMask(stack_map.GetStackMaskIndex());
}
- uint32_t GetNumberOfLocationCatalogEntries(const CodeInfoEncoding& encoding) const {
- return encoding.location_catalog.num_entries;
+ uint32_t GetRegisterMaskOf(const StackMap& stack_map) const {
+ return register_masks_.Get(stack_map.GetRegisterMaskIndex());
}
- uint32_t GetDexRegisterLocationCatalogSize(const CodeInfoEncoding& encoding) const {
- return encoding.location_catalog.num_bytes;
+ uint32_t GetNumberOfLocationCatalogEntries() const {
+ return location_catalog_entries_;
}
- uint32_t GetNumberOfStackMaps(const CodeInfoEncoding& encoding) const {
- return encoding.stack_map.num_entries;
+ uint32_t GetDexRegisterLocationCatalogSize() const {
+ return location_catalog_.size();
}
- // Get the size of all the stack maps of this CodeInfo object, in bits. Not byte aligned.
- ALWAYS_INLINE size_t GetStackMapsSizeInBits(const CodeInfoEncoding& encoding) const {
- return encoding.stack_map.encoding.BitSize() * GetNumberOfStackMaps(encoding);
+ uint32_t GetNumberOfStackMaps() const {
+ return stack_maps_.NumRows();
}
- InvokeInfo GetInvokeInfo(const CodeInfoEncoding& encoding, size_t index) const {
- return InvokeInfo(encoding.invoke_info.BitRegion(region_, index));
+ InvokeInfo GetInvokeInfo(size_t index) const {
+ return InvokeInfo(&invoke_infos_, index);
}
DexRegisterMap GetDexRegisterMapOf(StackMap stack_map,
- const CodeInfoEncoding& encoding,
size_t number_of_dex_registers) const {
- if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
+ if (!stack_map.HasDexRegisterMap()) {
return DexRegisterMap();
}
- const uint32_t offset = encoding.dex_register_map.byte_offset +
- stack_map.GetDexRegisterMapOffset(encoding.stack_map.encoding);
- size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
- return DexRegisterMap(region_.Subregion(offset, size));
+ const uint32_t offset = stack_map.GetDexRegisterMapOffset();
+ size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+ return DexRegisterMap(dex_register_maps_.Subregion(offset, size));
}
- size_t GetDexRegisterMapsSize(const CodeInfoEncoding& encoding,
- uint32_t number_of_dex_registers) const {
+ size_t GetDexRegisterMapsSize(uint32_t number_of_dex_registers) const {
size_t total = 0;
- for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
- StackMap stack_map = GetStackMapAt(i, encoding);
- DexRegisterMap map(GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers));
+ for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap stack_map = GetStackMapAt(i);
+ DexRegisterMap map(GetDexRegisterMapOf(stack_map, number_of_dex_registers));
total += map.Size();
}
return total;
@@ -1458,38 +892,30 @@ class CodeInfo {
// Return the `DexRegisterMap` pointed to by `inline_info` at depth `depth`.
DexRegisterMap GetDexRegisterMapAtDepth(uint8_t depth,
InlineInfo inline_info,
- const CodeInfoEncoding& encoding,
uint32_t number_of_dex_registers) const {
- if (!inline_info.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, depth)) {
+ if (!inline_info.HasDexRegisterMapAtDepth(depth)) {
return DexRegisterMap();
} else {
- uint32_t offset = encoding.dex_register_map.byte_offset +
- inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding, depth);
- size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
- return DexRegisterMap(region_.Subregion(offset, size));
+ uint32_t offset = inline_info.GetDexRegisterMapOffsetAtDepth(depth);
+ size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+ return DexRegisterMap(dex_register_maps_.Subregion(offset, size));
}
}
- InlineInfo GetInlineInfo(size_t index, const CodeInfoEncoding& encoding) const {
- // Since we do not know the depth, we just return the whole remaining map. The caller may
- // access the inline info for arbitrary depths. To return the precise inline info we would need
- // to count the depth before returning.
- // TODO: Clean this up.
- const size_t bit_offset = encoding.inline_info.bit_offset +
- index * encoding.inline_info.encoding.BitSize();
- return InlineInfo(BitMemoryRegion(region_, bit_offset, region_.size_in_bits() - bit_offset));
+ InlineInfo GetInlineInfo(size_t index) const {
+ return InlineInfo(&inline_infos_, index);
}
- InlineInfo GetInlineInfoOf(StackMap stack_map, const CodeInfoEncoding& encoding) const {
- DCHECK(stack_map.HasInlineInfo(encoding.stack_map.encoding));
- uint32_t index = stack_map.GetInlineInfoIndex(encoding.stack_map.encoding);
- return GetInlineInfo(index, encoding);
+ InlineInfo GetInlineInfoOf(StackMap stack_map) const {
+ DCHECK(stack_map.HasInlineInfo());
+ uint32_t index = stack_map.GetInlineInfoIndex();
+ return GetInlineInfo(index);
}
- StackMap GetStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
- for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
- StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
+ StackMap GetStackMapForDexPc(uint32_t dex_pc) const {
+ for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap stack_map = GetStackMapAt(i);
+ if (stack_map.GetDexPc() == dex_pc) {
return stack_map;
}
}
@@ -1498,40 +924,39 @@ class CodeInfo {
// Searches the stack map list backwards because catch stack maps are stored
// at the end.
- StackMap GetCatchStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
- for (size_t i = GetNumberOfStackMaps(encoding); i > 0; --i) {
- StackMap stack_map = GetStackMapAt(i - 1, encoding);
- if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
+ StackMap GetCatchStackMapForDexPc(uint32_t dex_pc) const {
+ for (size_t i = GetNumberOfStackMaps(); i > 0; --i) {
+ StackMap stack_map = GetStackMapAt(i - 1);
+ if (stack_map.GetDexPc() == dex_pc) {
return stack_map;
}
}
return StackMap();
}
- StackMap GetOsrStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
- size_t e = GetNumberOfStackMaps(encoding);
+ StackMap GetOsrStackMapForDexPc(uint32_t dex_pc) const {
+ size_t e = GetNumberOfStackMaps();
if (e == 0) {
// There cannot be an OSR stack map if there are no stack maps.
return StackMap();
}
// Walk over all stack maps. If two consecutive stack maps are identical, then we
// have found a stack map suitable for OSR.
- const StackMapEncoding& stack_map_encoding = encoding.stack_map.encoding;
for (size_t i = 0; i < e - 1; ++i) {
- StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetDexPc(stack_map_encoding) == dex_pc) {
- StackMap other = GetStackMapAt(i + 1, encoding);
- if (other.GetDexPc(stack_map_encoding) == dex_pc &&
- other.GetNativePcOffset(stack_map_encoding, kRuntimeISA) ==
- stack_map.GetNativePcOffset(stack_map_encoding, kRuntimeISA)) {
- DCHECK_EQ(other.GetDexRegisterMapOffset(stack_map_encoding),
- stack_map.GetDexRegisterMapOffset(stack_map_encoding));
- DCHECK(!stack_map.HasInlineInfo(stack_map_encoding));
+ StackMap stack_map = GetStackMapAt(i);
+ if (stack_map.GetDexPc() == dex_pc) {
+ StackMap other = GetStackMapAt(i + 1);
+ if (other.GetDexPc() == dex_pc &&
+ other.GetNativePcOffset(kRuntimeISA) ==
+ stack_map.GetNativePcOffset(kRuntimeISA)) {
+ DCHECK_EQ(other.GetDexRegisterMapOffset(),
+ stack_map.GetDexRegisterMapOffset());
+ DCHECK(!stack_map.HasInlineInfo());
if (i < e - 2) {
// Make sure there are not three identical stack maps following each other.
DCHECK_NE(
- stack_map.GetNativePcOffset(stack_map_encoding, kRuntimeISA),
- GetStackMapAt(i + 2, encoding).GetNativePcOffset(stack_map_encoding, kRuntimeISA));
+ stack_map.GetNativePcOffset(kRuntimeISA),
+ GetStackMapAt(i + 2).GetNativePcOffset(kRuntimeISA));
}
return stack_map;
}
@@ -1540,30 +965,27 @@ class CodeInfo {
return StackMap();
}
- StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset,
- const CodeInfoEncoding& encoding) const {
+ StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset) const {
// TODO: Safepoint stack maps are sorted by native_pc_offset but catch stack
// maps are not. If we knew that the method does not have try/catch,
// we could do binary search.
- for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
- StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) ==
- native_pc_offset) {
+ for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap stack_map = GetStackMapAt(i);
+ if (stack_map.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
return stack_map;
}
}
return StackMap();
}
- InvokeInfo GetInvokeInfoForNativePcOffset(uint32_t native_pc_offset,
- const CodeInfoEncoding& encoding) {
- for (size_t index = 0; index < encoding.invoke_info.num_entries; index++) {
- InvokeInfo item = GetInvokeInfo(encoding, index);
- if (item.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA) == native_pc_offset) {
+ InvokeInfo GetInvokeInfoForNativePcOffset(uint32_t native_pc_offset) {
+ for (size_t index = 0; index < invoke_infos_.NumRows(); index++) {
+ InvokeInfo item = GetInvokeInfo(index);
+ if (item.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
return item;
}
}
- return InvokeInfo(BitMemoryRegion());
+ return InvokeInfo(&invoke_infos_, -1);
}
// Dump this CodeInfo object on `os`. `code_offset` is the (absolute)
@@ -1578,23 +1000,10 @@ class CodeInfo {
InstructionSet instruction_set,
const MethodInfo& method_info) const;
- // Check that the code info has valid stack map and abort if it does not.
- void AssertValidStackMap(const CodeInfoEncoding& encoding) const {
- if (region_.size() != 0 && region_.size_in_bits() < GetStackMapsSizeInBits(encoding)) {
- LOG(FATAL) << region_.size() << "\n"
- << encoding.HeaderSize() << "\n"
- << encoding.NonHeaderSize() << "\n"
- << encoding.location_catalog.num_entries << "\n"
- << encoding.stack_map.num_entries << "\n"
- << encoding.stack_map.encoding.BitSize();
- }
- }
-
private:
// Compute the size of the Dex register map associated to the stack map at
// `dex_register_map_offset_in_code_info`.
- size_t ComputeDexRegisterMapSizeOf(const CodeInfoEncoding& encoding,
- uint32_t dex_register_map_offset_in_code_info,
+ size_t ComputeDexRegisterMapSizeOf(uint32_t dex_register_map_offset,
uint16_t number_of_dex_registers) const {
// Offset where the actual mapping data starts within art::DexRegisterMap.
size_t location_mapping_data_offset_in_dex_register_map =
@@ -1602,12 +1011,12 @@ class CodeInfo {
// Create a temporary art::DexRegisterMap to be able to call
// art::DexRegisterMap::GetNumberOfLiveDexRegisters on it.
DexRegisterMap dex_register_map_without_locations(
- MemoryRegion(region_.Subregion(dex_register_map_offset_in_code_info,
- location_mapping_data_offset_in_dex_register_map)));
+ MemoryRegion(dex_register_maps_.Subregion(dex_register_map_offset,
+ location_mapping_data_offset_in_dex_register_map)));
size_t number_of_live_dex_registers =
dex_register_map_without_locations.GetNumberOfLiveDexRegisters(number_of_dex_registers);
size_t location_mapping_data_size_in_bits =
- DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries(encoding))
+ DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries())
* number_of_live_dex_registers;
size_t location_mapping_data_size_in_bytes =
RoundUp(location_mapping_data_size_in_bits, kBitsPerByte) / kBitsPerByte;
@@ -1616,37 +1025,42 @@ class CodeInfo {
return dex_register_map_size;
}
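The arithmetic above: a dex register map is a fixed-offset prefix (liveness bitmap and friends), followed by one location-catalog index per live register, packed at SingleEntrySizeInBits bits each and rounded up to whole bytes. A self-contained sketch of that size computation:

    #include <cstddef>

    // mapping_data_offset: byte offset where the packed catalog indices start.
    // Each live dex register stores one catalog index of `bits_per_index` bits.
    size_t DexRegisterMapSize(size_t mapping_data_offset,
                              size_t live_registers,
                              size_t bits_per_index) {
      size_t mapping_bits = live_registers * bits_per_index;
      size_t mapping_bytes = (mapping_bits + 7) / 8;  // Round up to whole bytes.
      return mapping_data_offset + mapping_bytes;
    }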
- // Compute the size of a Dex register location catalog starting at offset `origin`
- // in `region_` and containing `number_of_dex_locations` entries.
- size_t ComputeDexRegisterLocationCatalogSize(uint32_t origin,
- uint32_t number_of_dex_locations) const {
- // TODO: Ideally, we would like to use art::DexRegisterLocationCatalog::Size or
- // art::DexRegisterLocationCatalog::FindLocationOffset, but the
- // DexRegisterLocationCatalog is not yet built. Try to factor common code.
- size_t offset = origin + DexRegisterLocationCatalog::kFixedSize;
-
- // Skip the first `number_of_dex_locations - 1` entries.
- for (uint16_t i = 0; i < number_of_dex_locations; ++i) {
- // Read the first next byte and inspect its first 3 bits to decide
- // whether it is a short or a large location.
- DexRegisterLocationCatalog::ShortLocation first_byte =
- region_.LoadUnaligned<DexRegisterLocationCatalog::ShortLocation>(offset);
- DexRegisterLocation::Kind kind =
- DexRegisterLocationCatalog::ExtractKindFromShortLocation(first_byte);
- if (DexRegisterLocation::IsShortLocationKind(kind)) {
- // Short location. Skip the current byte.
- offset += DexRegisterLocationCatalog::SingleShortEntrySize();
- } else {
- // Large location. Skip the 5 next bytes.
- offset += DexRegisterLocationCatalog::SingleLargeEntrySize();
- }
- }
- size_t size = offset - origin;
- return size;
- }
-
- MemoryRegion region_;
- friend class StackMapStream;
+ MemoryRegion DecodeMemoryRegion(MemoryRegion& region, size_t* bit_offset) {
+ size_t length = DecodeVarintBits(BitMemoryRegion(region), bit_offset);
+ size_t offset = BitsToBytesRoundUp(*bit_offset);
+ *bit_offset = (offset + length) * kBitsPerByte;
+ return region.Subregion(offset, length);
+ }
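DecodeMemoryRegion frames each sub-region as a varint length followed by the payload, rounding the bit cursor up to a byte boundary before the payload starts. A self-contained sketch of the same framing using a plain LEB128 length (a simplification: the real DecodeVarintBits reads its varint at bit granularity):

    #include <cstddef>
    #include <cstdint>

    struct Region { const uint8_t* data; size_t length; };

    // Reads [LEB128 length][length bytes] at *offset and advances past it.
    Region DecodeRegionSketch(const uint8_t* base, size_t* offset) {
      size_t length = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = base[(*offset)++];
        length |= static_cast<size_t>(byte & 0x7f) << shift;
        shift += 7;
      } while (byte & 0x80);
      Region region{base + *offset, length};
      *offset += length;  // The next region starts right after this one.
      return region;
    }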
+
+ void Decode(const uint8_t* data) {
+ size_t non_header_size = DecodeUnsignedLeb128(&data);
+ MemoryRegion region(const_cast<uint8_t*>(data), non_header_size);
+ BitMemoryRegion bit_region(region);
+ size_t bit_offset = 0;
+ size_ = UnsignedLeb128Size(non_header_size) + non_header_size;
+ dex_register_maps_ = DecodeMemoryRegion(region, &bit_offset);
+ location_catalog_entries_ = DecodeVarintBits(bit_region, &bit_offset);
+ location_catalog_ = DecodeMemoryRegion(region, &bit_offset);
+ stack_maps_.Decode(bit_region, &bit_offset);
+ invoke_infos_.Decode(bit_region, &bit_offset);
+ inline_infos_.Decode(bit_region, &bit_offset);
+ register_masks_.Decode(bit_region, &bit_offset);
+ stack_mask_bits_ = DecodeVarintBits(bit_region, &bit_offset);
+ stack_masks_ = bit_region.Subregion(bit_offset, non_header_size * kBitsPerByte - bit_offset);
+ }
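Read top to bottom, Decode() fixes the new encoded layout of an optimized CodeInfo; summarized as a comment, with the fields in exactly the order decoded above:

    // ULEB128   non_header_size           // byte size of everything below
    // region    dex_register_maps_        // varint length + byte-aligned bytes
    // varint    location_catalog_entries_
    // region    location_catalog_
    // BitTable  stack_maps_
    // BitTable  invoke_infos_
    // BitTable  inline_infos_
    // BitTable  register_masks_
    // varint    stack_mask_bits_          // bits per stack mask
    // bits      stack_masks_              // remainder of the region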
+
+ size_t size_;
+ MemoryRegion dex_register_maps_;
+ uint32_t location_catalog_entries_;
+ MemoryRegion location_catalog_;
+ BitTable<StackMap::Field::kCount> stack_maps_;
+ BitTable<InvokeInfo::Field::kCount> invoke_infos_;
+ BitTable<InlineInfo::Field::kCount> inline_infos_;
+ BitTable<1> register_masks_;
+ uint32_t stack_mask_bits_ = 0;
+ BitMemoryRegion stack_masks_;
+
+ friend class OatDumper;
};
#undef ELEMENT_BYTE_OFFSET_AFTER
diff --git a/runtime/thread.cc b/runtime/thread.cc
index eada24d257..81ed722fcc 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3559,16 +3559,15 @@ class ReferenceMapVisitor : public StackVisitor {
StackReference<mirror::Object>* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
reinterpret_cast<uintptr_t>(cur_quick_frame));
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
- CodeInfo code_info = method_header->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ CodeInfo code_info(method_header);
+ StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(map.IsValid());
- T vreg_info(m, code_info, encoding, map, visitor_);
+ T vreg_info(m, code_info, map, visitor_);
// Visit stack entries that hold pointers.
- const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding);
- BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, map);
+ const size_t number_of_bits = code_info.GetNumberOfStackMaskBits();
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(map);
for (size_t i = 0; i < number_of_bits; ++i) {
if (stack_mask.LoadBit(i)) {
StackReference<mirror::Object>* ref_addr = vreg_base + i;
@@ -3583,7 +3582,7 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
// Visit callee-save registers that hold pointers.
- uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, map);
+ uint32_t register_mask = code_info.GetRegisterMaskOf(map);
for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
if (register_mask & (1 << i)) {
mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
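Both loops in this hunk follow the same pattern: walk a bitmask and treat each set bit as a slot that holds a reference; stack-mask bits index stack slots relative to vreg_base, register-mask bits index callee-save GPRs. A generic sketch of that walk:

    #include <cstddef>
    #include <cstdint>

    // Calls `visit(i)` for every set bit i; the runtime uses this shape for
    // both the per-stack-map stack mask and the 32-bit register mask.
    template <typename Visitor>
    void ForEachSetBit(uint32_t mask, Visitor&& visit) {
      for (size_t i = 0; i < 32; ++i) {
        if ((mask & (1u << i)) != 0) {
          visit(i);
        }
      }
    }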
@@ -3631,7 +3630,6 @@ class ReferenceMapVisitor : public StackVisitor {
struct UndefinedVRegInfo {
UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED,
const CodeInfo& code_info ATTRIBUTE_UNUSED,
- const CodeInfoEncoding& encoding ATTRIBUTE_UNUSED,
const StackMap& map ATTRIBUTE_UNUSED,
RootVisitor& _visitor)
: visitor(_visitor) {
@@ -3662,14 +3660,11 @@ class ReferenceMapVisitor : public StackVisitor {
struct StackMapVRegInfo {
StackMapVRegInfo(ArtMethod* method,
const CodeInfo& _code_info,
- const CodeInfoEncoding& _encoding,
const StackMap& map,
RootVisitor& _visitor)
: number_of_dex_registers(method->DexInstructionData().RegistersSize()),
code_info(_code_info),
- encoding(_encoding),
dex_register_map(code_info.GetDexRegisterMapOf(map,
- encoding,
number_of_dex_registers)),
visitor(_visitor) {
}
@@ -3684,7 +3679,7 @@ class ReferenceMapVisitor : public StackVisitor {
bool found = false;
for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
- dex_reg, number_of_dex_registers, code_info, encoding);
+ dex_reg, number_of_dex_registers, code_info);
if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
visitor(ref, dex_reg, stack_visitor);
found = true;
@@ -3718,7 +3713,6 @@ class ReferenceMapVisitor : public StackVisitor {
size_t number_of_dex_registers;
const CodeInfo& code_info;
- const CodeInfoEncoding& encoding;
DexRegisterMap dex_register_map;
RootVisitor& visitor;
};
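With the encoding parameter gone, the vreg search in StackMapVRegInfo reduces to a plain scan over dex registers, comparing each decoded location against the (kind, value) pair being visited. A sketch of that lookup as a free function (hypothetical helper, assuming the new GetDexRegisterLocation signature shown above):

    // Returns the dex register whose location matches (kind, value), or -1.
    int FindDexRegister(const DexRegisterMap& map,
                        size_t number_of_dex_registers,
                        const CodeInfo& code_info,
                        DexRegisterLocation::Kind kind,
                        int32_t value) {
      for (size_t reg = 0; reg < number_of_dex_registers; ++reg) {
        DexRegisterLocation loc =
            map.GetDexRegisterLocation(reg, number_of_dex_registers, code_info);
        if (loc.GetKind() == kind && loc.GetValue() == value) {
          return static_cast<int>(reg);
        }
      }
      return -1;
    }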
diff --git a/runtime/var_handles.cc b/runtime/var_handles.cc
index e6730c6114..f08742fcd7 100644
--- a/runtime/var_handles.cc
+++ b/runtime/var_handles.cc
@@ -89,8 +89,8 @@ bool VarHandleInvokeAccessor(Thread* self,
result);
} else {
DCHECK_EQ(match_kind, mirror::VarHandle::MatchKind::kNone);
- ThrowWrongMethodTypeException(var_handle->GetMethodTypeForAccessMode(self, access_mode),
- callsite_type.Get());
+ ThrowWrongMethodTypeException(var_handle->PrettyDescriptorForAccessMode(access_mode),
+ callsite_type->PrettyDescriptor());
return false;
}
}
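The var_handles.cc change passes string descriptors instead of MethodType objects, so the mismatch path no longer needs to materialize a MethodType just to throw. A hypothetical sketch of the message this enables (the exact ThrowWrongMethodTypeException format lives in common_throws.cc and is not shown in this diff):

    #include <string>

    std::string WrongMethodTypeMessage(const std::string& expected,
                                       const std::string& actual) {
      // Assumed format; the real wording is defined by common_throws.cc.
      return "Expected " + expected + " but was " + actual;
    }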