summaryrefslogtreecommitdiff
path: root/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'runtime')
-rw-r--r--runtime/Android.bp15
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S267
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S113
-rw-r--r--runtime/arch/code_offset.h2
-rw-r--r--runtime/arch/instruction_set.cc201
-rw-r--r--runtime/arch/instruction_set.h302
-rw-r--r--runtime/arch/instruction_set_features.h2
-rw-r--r--runtime/arch/instruction_set_test.cc70
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S4
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S4
-rw-r--r--runtime/art_method.cc17
-rw-r--r--runtime/art_method.h4
-rw-r--r--runtime/base/file_utils.cc281
-rw-r--r--runtime/base/file_utils.h82
-rw-r--r--runtime/base/file_utils_test.cc104
-rw-r--r--runtime/check_reference_map_visitor.h13
-rw-r--r--runtime/class_linker.cc128
-rw-r--r--runtime/class_linker.h25
-rw-r--r--runtime/common_dex_operations.h1
-rw-r--r--runtime/common_throws.cc13
-rw-r--r--runtime/common_throws.h4
-rw-r--r--runtime/dex/art_dex_file_loader.cc547
-rw-r--r--runtime/dex/art_dex_file_loader.h140
-rw-r--r--runtime/dex/art_dex_file_loader_test.cc456
-rw-r--r--runtime/dex/dex_file_annotations.cc117
-rw-r--r--runtime/dex/dex_file_annotations.h37
-rw-r--r--runtime/elf_file.cc23
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h15
-rw-r--r--runtime/entrypoints/entrypoint_utils.cc14
-rw-r--r--runtime/entrypoints/entrypoint_utils.h2
-rw-r--r--runtime/entrypoints/quick/quick_dexcache_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc101
-rw-r--r--runtime/generated/asm_support_gen.h18
-rw-r--r--runtime/hidden_api.cc25
-rw-r--r--runtime/hidden_api.h16
-rw-r--r--runtime/interpreter/interpreter_common.cc99
-rw-r--r--runtime/interpreter/interpreter_common.h2
-rw-r--r--runtime/interpreter/interpreter_switch_impl.cc2
-rw-r--r--runtime/interpreter/mterp/mterp.cc3
-rw-r--r--runtime/interpreter/unstarted_runtime.cc2
-rw-r--r--runtime/jit/jit.cc93
-rw-r--r--runtime/jit/jit.h229
-rw-r--r--runtime/jvalue-inl.h2
-rw-r--r--runtime/jvalue.h3
-rw-r--r--runtime/lock_word.h4
-rw-r--r--runtime/method_handles-inl.h2
-rw-r--r--runtime/method_handles.cc1
-rw-r--r--runtime/method_info.h10
-rw-r--r--runtime/mirror/dex_cache-inl.h14
-rw-r--r--runtime/mirror/dex_cache.h6
-rw-r--r--runtime/mirror/dex_cache_test.cc4
-rw-r--r--runtime/mirror/var_handle.cc84
-rw-r--r--runtime/mirror/var_handle.h33
-rw-r--r--runtime/mirror/var_handle_test.cc264
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc5
-rw-r--r--runtime/native/java_lang_Class.cc8
-rw-r--r--runtime/native/java_lang_reflect_Constructor.cc2
-rw-r--r--runtime/native/java_lang_reflect_Field.cc1
-rw-r--r--runtime/native/java_lang_reflect_Method.cc2
-rw-r--r--runtime/oat.h4
-rw-r--r--runtime/oat_file.cc8
-rw-r--r--runtime/oat_file_assistant.cc13
-rw-r--r--runtime/oat_file_assistant_test.cc43
-rw-r--r--runtime/oat_quick_method_header.cc17
-rw-r--r--runtime/oat_quick_method_header.h5
-rw-r--r--runtime/parsed_options.cc3
-rw-r--r--runtime/quick_exception_handler.cc47
-rw-r--r--runtime/reflection.cc1
-rw-r--r--runtime/runtime.cc2
-rw-r--r--runtime/runtime_options.def1
-rw-r--r--runtime/stack.cc58
-rw-r--r--runtime/stack_map.cc113
-rw-r--r--runtime/stack_map.h1008
-rw-r--r--runtime/thread.cc20
-rw-r--r--runtime/var_handles.cc98
-rw-r--r--runtime/var_handles.h35
-rw-r--r--runtime/verifier/method_verifier.cc10
-rw-r--r--runtime/well_known_classes.cc59
-rw-r--r--runtime/well_known_classes.h6
79 files changed, 1568 insertions, 4023 deletions
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 64e6796ba0..92607f51a0 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -23,7 +23,7 @@ JIT_DEBUG_REGISTER_CODE_LDFLAGS = [
"-Wl,--keep-unique,__dex_debug_register_code"
]
-cc_defaults {
+libart_cc_defaults {
name: "libart_defaults",
defaults: ["art_defaults"],
host_supported: true,
@@ -32,7 +32,6 @@ cc_defaults {
"art_field.cc",
"art_method.cc",
"barrier.cc",
- "base/file_utils.cc",
"base/mem_map_arena_pool.cc",
"base/mutex.cc",
"base/quasi_atomic.cc",
@@ -46,7 +45,6 @@ cc_defaults {
"compiler_filter.cc",
"debug_print.cc",
"debugger.cc",
- "dex/art_dex_file_loader.cc",
"dex/dex_file_annotations.cc",
"dex_to_dex_decompiler.cc",
"elf_file.cc",
@@ -196,6 +194,7 @@ cc_defaults {
"ti/agent.cc",
"trace.cc",
"transaction.cc",
+ "var_handles.cc",
"vdex_file.cc",
"verifier/instruction_flags.cc",
"verifier/method_verifier.cc",
@@ -207,7 +206,6 @@ cc_defaults {
"well_known_classes.cc",
"arch/context.cc",
- "arch/instruction_set.cc",
"arch/instruction_set_features.cc",
"arch/memcmp16.cc",
"arch/arm/instruction_set_features_arm.cc",
@@ -396,7 +394,6 @@ cc_defaults {
"libbacktrace",
"liblz4",
"liblog",
- "libmetricslogger",
// For atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
"libcutils",
// For common macros.
@@ -420,7 +417,6 @@ gensrcs {
cmd: "$(location generate_operator_out) art/runtime $(in) > $(out)",
tools: ["generate_operator_out"],
srcs: [
- "arch/instruction_set.h",
"base/mutex.h",
"class_loader_context.h",
"class_status.h",
@@ -467,9 +463,9 @@ art_cc_library {
keep_symbols: true,
},
whole_static_libs: [
- "libartbase",
],
shared_libs: [
+ "libartbase",
"libdexfile",
"libprofile",
],
@@ -492,9 +488,9 @@ art_cc_library {
"libart_defaults",
],
whole_static_libs: [
- "libartbased",
],
shared_libs: [
+ "libartbased",
"libdexfiled",
"libprofiled",
],
@@ -531,7 +527,6 @@ art_cc_test {
],
srcs: [
"arch/arch_test.cc",
- "arch/instruction_set_test.cc",
"arch/instruction_set_features_test.cc",
"arch/memcmp16_test.cc",
"arch/stub_test.cc",
@@ -542,7 +537,6 @@ art_cc_test {
"arch/x86/instruction_set_features_x86_test.cc",
"arch/x86_64/instruction_set_features_x86_64_test.cc",
"barrier_test.cc",
- "base/file_utils_test.cc",
"base/mutex_test.cc",
"base/timing_logger_test.cc",
"cha_test.cc",
@@ -550,7 +544,6 @@ art_cc_test {
"class_loader_context_test.cc",
"class_table_test.cc",
"compiler_filter_test.cc",
- "dex/art_dex_file_loader_test.cc",
"entrypoints/math_entrypoints_test.cc",
"entrypoints/quick/quick_trampoline_entrypoints_test.cc",
"entrypoints_order_test.cc",
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index cd00125de5..311e838fb3 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -55,7 +55,7 @@
@ Load kSaveAllCalleeSaves Method* into rTemp.
ldr \rTemp, [\rTemp, #RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+ str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 36 + 64 + 12)
@@ -86,7 +86,7 @@
@ Load kSaveRefsOnly Method* into rTemp.
ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+ str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_REFS_ONLY != 28 + 4)
@@ -147,13 +147,13 @@
@ Load kSaveRefsAndArgs Method* into rTemp.
ldr \rTemp, [\rTemp, #RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+ str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
.endm
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
str r0, [sp, #0] @ Store ArtMethod* to bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+ str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
.endm
.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
@@ -193,7 +193,7 @@
@ Load kSaveEverything Method* into rTemp.
ldr \rTemp, [\rTemp, #\runtime_method_offset]
str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
+ str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_EVERYTHING != 56 + 128 + 8)
@@ -301,7 +301,7 @@
* exception is Thread::Current()->exception_ when the runtime method frame is ready.
*/
.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
- mov r0, r9 @ pass Thread::Current
+ mov r0, rSELF @ pass Thread::Current
bl artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*)
.endm
@@ -318,7 +318,7 @@
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0 @ save all registers as basis for long jump context
- mov r0, r9 @ pass Thread::Current
+ mov r0, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
@@ -327,7 +327,7 @@ END \c_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_EVERYTHING_FRAME r0 @ save all registers as basis for long jump context
- mov r0, r9 @ pass Thread::Current
+ mov r0, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
@@ -336,7 +336,7 @@ END \c_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r1 @ save all registers as basis for long jump context
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
@@ -345,13 +345,13 @@ END \c_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_EVERYTHING_FRAME r2 @ save all registers as basis for long jump context
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
bl \cxx_name @ \cxx_name(Thread*)
END \c_name
.endm
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
- ldr \reg, [r9, #THREAD_EXCEPTION_OFFSET] // Get exception field.
+ ldr \reg, [rSELF, #THREAD_EXCEPTION_OFFSET] @ Get exception field.
cbnz \reg, 1f
bx lr
1:
@@ -377,7 +377,7 @@ END \c_name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl \entrypoint @ (uint32_t field_idx, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -389,7 +389,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
bl \entrypoint @ (field_idx, Object*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -401,7 +401,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r3 @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
+ mov r3, rSELF @ pass Thread::Current
bl \entrypoint @ (field_idx, Object*, new_val, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
REFRESH_MARKING_REGISTER
@@ -448,7 +448,7 @@ ENTRY art_quick_throw_null_pointer_exception_from_signal
@ save all registers as basis for long jump context
SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED r1
mov r0, lr @ pass the fault address stored in LR by the fault handler.
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl artThrowNullPointerExceptionFromSignal @ (Thread*)
END art_quick_throw_null_pointer_exception_from_signal
@@ -494,7 +494,7 @@ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFr
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
SETUP_SAVE_REFS_AND_ARGS_FRAME r2 @ save callee saves in case allocation triggers GC
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
mov r3, sp
bl \cxx_name @ (method_idx, this, Thread*, SP)
mov r12, r1 @ save Method*->code_
@@ -682,50 +682,48 @@ TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode,
*/
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
+ ldr r1, [rSELF, #THREAD_ID_OFFSET]
cbz r0, .Lslow_lock
.Lretry_lock:
- ldr r2, [r9, #THREAD_ID_OFFSET]
- ldrex r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- mov r3, r1
- and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
- cbnz r3, .Lnot_unlocked @ already thin locked
- @ unlocked case - r1: original lock word that's zero except for the read barrier bits.
- orr r2, r1, r2 @ r2 holds thread id with count of 0 with preserved read barrier bits
- strex r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- cbnz r3, .Llock_strex_fail @ store failed, retry
- dmb ish @ full (LoadLoad|LoadStore) memory barrier
+ ldrex r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ eor r3, r2, r1 @ Prepare the value to store if unlocked
+ @ (thread id, count of 0 and preserved read barrier bits),
+ @ or prepare to compare thread id for recursive lock check
+ @ (lock_word.ThreadId() ^ self->ThreadId()).
+ ands ip, r2, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ Test the non-gc bits.
+ bne .Lnot_unlocked @ Check if unlocked.
+ @ unlocked case - store r3: original lock word plus thread id, preserved read barrier bits.
+ strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ cbnz r2, .Llock_strex_fail @ If store failed, retry.
+ dmb ish @ Full (LoadLoad|LoadStore) memory barrier.
bx lr
-.Lnot_unlocked: @ r1: original lock word, r2: thread_id with count of 0 and zero read barrier bits
- lsr r3, r1, LOCK_WORD_STATE_SHIFT
- cbnz r3, .Lslow_lock @ if either of the top two bits are set, go slow path
- eor r2, r1, r2 @ lock_word.ThreadId() ^ self->ThreadId()
- uxth r2, r2 @ zero top 16 bits
- cbnz r2, .Lslow_lock @ lock word and self thread id's match -> recursive lock
- @ else contention, go to slow path
- mov r3, r1 @ copy the lock word to check count overflow.
- and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits.
- add r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ increment count in lock word placing in r2 to check overflow
- lsr r3, r2, #LOCK_WORD_GC_STATE_SHIFT @ if the first gc state bit is set, we overflowed.
- cbnz r3, .Lslow_lock @ if we overflow the count go slow path
- add r2, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ increment count for real
- strex r3, r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
- cbnz r3, .Llock_strex_fail @ strex failed, retry
+.Lnot_unlocked: @ r2: original lock word, r1: thread_id, r3: r2 ^ r1
+#if LOCK_WORD_THIN_LOCK_COUNT_SHIFT + LOCK_WORD_THIN_LOCK_COUNT_SIZE != LOCK_WORD_GC_STATE_SHIFT
+#error "Expecting thin lock count and gc state in consecutive bits."
+#endif
+ @ Check lock word state and thread id together,
+ bfc r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #(LOCK_WORD_THIN_LOCK_COUNT_SIZE + LOCK_WORD_GC_STATE_SIZE)
+ cbnz r3, .Lslow_lock @ if either of the top two bits are set, or the lock word's
+ @ thread id did not match, go slow path.
+ add r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ Increment the recursive lock count.
+ @ Extract the new thin lock count for overflow check.
+ ubfx r2, r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #LOCK_WORD_THIN_LOCK_COUNT_SIZE
+ cbz r2, .Lslow_lock @ Zero as the new count indicates overflow, go slow path.
+ strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits.
+ cbnz r2, .Llock_strex_fail @ If strex failed, retry.
bx lr
.Llock_strex_fail:
b .Lretry_lock @ retry
-.Lslow_lock:
- SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case we block
- mov r1, r9 @ pass Thread::Current
- bl artLockObjectFromCode @ (Object* obj, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+// Note: the slow path is actually the art_quick_lock_object_no_inline (tail call).
END art_quick_lock_object
ENTRY art_quick_lock_object_no_inline
+ // This is also the slow path for art_quick_lock_object. Note that we
+ // need a local label, the assembler complains about target being out of
+ // range if we try to jump to `art_quick_lock_object_no_inline`.
+.Lslow_lock:
SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case we block
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl artLockObjectFromCode @ (Object* obj, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -739,62 +737,59 @@ END art_quick_lock_object_no_inline
*/
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
+ ldr r1, [rSELF, #THREAD_ID_OFFSET]
cbz r0, .Lslow_unlock
.Lretry_unlock:
#ifndef USE_READ_BARRIER
- ldr r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ ldr r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#else
- ldrex r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ Need to use atomic instructions for read barrier
+ @ Need to use atomic instructions for read barrier.
+ ldrex r2, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#endif
- lsr r2, r1, #LOCK_WORD_STATE_SHIFT
- cbnz r2, .Lslow_unlock @ if either of the top two bits are set, go slow path
- ldr r2, [r9, #THREAD_ID_OFFSET]
- mov r3, r1 @ copy lock word to check thread id equality
- and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
- eor r3, r3, r2 @ lock_word.ThreadId() ^ self->ThreadId()
- uxth r3, r3 @ zero top 16 bits
- cbnz r3, .Lslow_unlock @ do lock word and self thread id's match?
- mov r3, r1 @ copy lock word to detect transition to unlocked
- and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ zero the gc bits
- cmp r3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
- bpl .Lrecursive_thin_unlock
- @ transition to unlocked
- mov r3, r1
- and r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED @ r3: zero except for the preserved gc bits
- dmb ish @ full (LoadStore|StoreStore) memory barrier
+ eor r3, r2, r1 @ Prepare the value to store if simply locked
+ @ (mostly 0s, and preserved read barrier bits),
+ @ or prepare to compare thread id for recursive lock check
+ @ (lock_word.ThreadId() ^ self->ThreadId()).
+ ands ip, r3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED @ Test the non-gc bits.
+ bne .Lnot_simply_locked @ Locked recursively or by other thread?
+ @ Transition to unlocked.
+ dmb ish @ Full (LoadStore|StoreStore) memory barrier.
#ifndef USE_READ_BARRIER
str r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#else
strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
- cbnz r2, .Lunlock_strex_fail @ store failed, retry
+ cbnz r2, .Lunlock_strex_fail @ If the store failed, retry.
#endif
bx lr
-.Lrecursive_thin_unlock: @ r1: original lock word
- sub r1, r1, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ decrement count
+.Lnot_simply_locked: @ r2: original lock word, r1: thread_id, r3: r2 ^ r1
+#if LOCK_WORD_THIN_LOCK_COUNT_SHIFT + LOCK_WORD_THIN_LOCK_COUNT_SIZE != LOCK_WORD_GC_STATE_SHIFT
+#error "Expecting thin lock count and gc state in consecutive bits."
+#endif
+ @ Check lock word state and thread id together,
+ bfc r3, #LOCK_WORD_THIN_LOCK_COUNT_SHIFT, #(LOCK_WORD_THIN_LOCK_COUNT_SIZE + LOCK_WORD_GC_STATE_SIZE)
+ cbnz r3, .Lslow_unlock @ if either of the top two bits are set, or the lock word's
+ @ thread id did not match, go slow path.
+ sub r3, r2, #LOCK_WORD_THIN_LOCK_COUNT_ONE @ Decrement recursive lock count.
#ifndef USE_READ_BARRIER
- str r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ str r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
#else
- strex r2, r1, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits
- cbnz r2, .Lunlock_strex_fail @ store failed, retry
+ strex r2, r3, [r0, #MIRROR_OBJECT_LOCK_WORD_OFFSET] @ strex necessary for read barrier bits.
+ cbnz r2, .Lunlock_strex_fail @ If the store failed, retry.
#endif
bx lr
.Lunlock_strex_fail:
b .Lretry_unlock @ retry
-.Lslow_unlock:
- @ save callee saves in case exception allocation triggers GC
- SETUP_SAVE_REFS_ONLY_FRAME r1
- mov r1, r9 @ pass Thread::Current
- bl artUnlockObjectFromCode @ (Object* obj, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+// Note: the slow path is actually the art_quick_unlock_object_no_inline (tail call).
END art_quick_unlock_object
ENTRY art_quick_unlock_object_no_inline
+ // This is also the slow path for art_quick_unlock_object. Note that we
+ // need a local label, the assembler complains about target being out of
+ // range if we try to jump to `art_quick_unlock_object_no_inline`.
+.Lslow_unlock:
@ save callee saves in case exception allocation triggers GC
SETUP_SAVE_REFS_ONLY_FRAME r1
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -832,7 +827,7 @@ ENTRY art_quick_check_instance_of
.Lthrow_class_cast_exception_for_bitstring_check:
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
bl artThrowClassCastExceptionForObject @ (Object*, Class*, Thread*)
bkpt
END art_quick_check_instance_of
@@ -917,7 +912,7 @@ ENTRY art_quick_aput_obj
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
POISON_HEAP_REF r2
str r2, [r3, r1, lsl #2]
- ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
+ ldr r3, [rSELF, #THREAD_CARD_TABLE_OFFSET]
lsr r0, r0, #CARD_TABLE_CARD_SHIFT
strb r3, [r3, r0]
blx lr
@@ -945,7 +940,7 @@ ENTRY art_quick_aput_obj
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
POISON_HEAP_REF r2
str r2, [r3, r1, lsl #2]
- ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
+ ldr r3, [rSELF, #THREAD_CARD_TABLE_OFFSET]
lsr r0, r0, #CARD_TABLE_CARD_SHIFT
strb r3, [r3, r0]
blx lr
@@ -954,7 +949,7 @@ ENTRY art_quick_aput_obj
/* No need to repeat restore cfi directives, the ones above apply here. */
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r3
mov r1, r2
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
bl artThrowArrayStoreException @ (Class*, Class*, Thread*)
bkpt @ unreached
END art_quick_aput_obj
@@ -964,7 +959,7 @@ END art_quick_aput_obj
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r1 @ save callee saves in case of GC
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -977,7 +972,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -990,7 +985,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r3 @ save callee saves in case of GC
- mov r3, r9 @ pass Thread::Current
+ mov r3, rSELF @ pass Thread::Current
@ (uint32_t type_idx, Method* method, int32_t component_count, Thread*)
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
@@ -1004,7 +999,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
- str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ str rSELF, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl \entrypoint
add sp, #16 @ strip the extra frame
@@ -1023,7 +1018,7 @@ END \name
.extern \entrypoint
ENTRY \name
SETUP_SAVE_EVERYTHING_FRAME r1, \runtime_method_offset @ save everything in case of GC
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl \entrypoint @ (uint32_t index, Thread*)
cbz r0, 1f @ If result is null, deliver the OOME.
.cfi_remember_state
@@ -1065,9 +1060,9 @@ ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode,
.extern artGet64StaticFromCompiledCode
ENTRY art_quick_get64_static
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r1, r9 @ pass Thread::Current
- bl artGet64StaticFromCompiledCode @ (uint32_t field_idx, Thread*)
- ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ mov r1, rSELF @ pass Thread::Current
+ bl artGet64StaticFromCompiledCode @ (uint32_t field_idx, Thread*)
+ ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
cbnz r2, 1f @ success if no exception pending
@@ -1091,9 +1086,9 @@ TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCo
.extern artGet64InstanceFromCompiledCode
ENTRY art_quick_get64_instance
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r2, r9 @ pass Thread::Current
- bl artGet64InstanceFromCompiledCode @ (field_idx, Object*, Thread*)
- ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ mov r2, rSELF @ pass Thread::Current
+ bl artGet64InstanceFromCompiledCode @ (field_idx, Object*, Thread*)
+ ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
cbnz r2, 1f @ success if no exception pending
@@ -1125,7 +1120,7 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiled
ENTRY art_quick_set64_instance
SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
- str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ str rSELF, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl artSet64InstanceFromCompiledCode @ (field_idx, Object*, new_val, Thread*)
add sp, #16 @ release out args
@@ -1140,7 +1135,7 @@ END art_quick_set64_instance
ENTRY art_quick_set64_static
SETUP_SAVE_REFS_ONLY_FRAME r12 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
- str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
+ str rSELF, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl artSet64StaticFromCompiledCode @ (field_idx, new_val, Thread*)
add sp, #16 @ release out args
@@ -1185,12 +1180,12 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
ENTRY \c_name
// Fast path rosalloc allocation.
- // r0: type/return value, r9: Thread::Current
+ // r0: type/return value, rSELF (r9): Thread::Current
// r1, r2, r3, r12: free.
- ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
+ ldr r3, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// TODO: consider using ldrd.
- ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
+ ldr r12, [rSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp r3, r12
bhs .Lslow_path\c_name
@@ -1208,7 +1203,7 @@ ENTRY \c_name
// from the size. Since the size is
// already aligned we can combine the
// two shifts together.
- add r12, r9, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+ add r12, rSELF, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
// Subtract pointer size since ther
// are no runs for 0 byte allocations
// and the size is already aligned.
@@ -1236,9 +1231,9 @@ ENTRY \c_name
// local allocation stack and
// increment the thread local
// allocation stack top.
- ldr r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ ldr r1, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
str r3, [r1], #COMPRESSED_REFERENCE_SIZE // (Increment r1 as a side effect.)
- str r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ str r1, [rSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
// Decrement the size of the free list
// After this "STR" the object is published to the thread local allocation stack,
@@ -1287,7 +1282,7 @@ ENTRY \c_name
.Lslow_path\c_name:
SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
bl \cxx_name @ (mirror::Class* cls, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -1301,7 +1296,7 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
// and art_quick_alloc_object_resolved/initialized_region_tlab.
//
-// r0: type r9: Thread::Current, r1, r2, r3, r12: free.
+// r0: type, rSELF (r9): Thread::Current, r1, r2, r3, r12: free.
// Need to preserve r0 to the slow path.
//
// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
@@ -1313,7 +1308,7 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
#endif
- ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
+ ldrd r12, r3, [rSELF, #THREAD_LOCAL_POS_OFFSET]
sub r12, r3, r12 // Compute the remaining buf size.
ldr r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
cmp r3, r12 // Check if it fits.
@@ -1326,9 +1321,9 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
// Reload old thread_local_pos (r0)
// for the return value.
- ldr r2, [r9, #THREAD_LOCAL_POS_OFFSET]
+ ldr r2, [rSELF, #THREAD_LOCAL_POS_OFFSET]
add r1, r2, r3
- str r1, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ str r1, [rSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
// After this "STR" the object is published to the thread local allocation stack,
// and it will be observable from a runtime internal (eg. Heap::VisitObjects) point of view.
// It is not yet visible to the running (user) compiled code until after the return.
@@ -1346,9 +1341,9 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
//
// (Note: The actual check is done by checking that the object's class pointer is non-null.
// Also, unlike rosalloc, the object can never be observed as null).
- ldr r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ ldr r1, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add r1, r1, #1
- str r1, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+ str r1, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
POISON_HEAP_REF r0
str r0, [r2, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
// Fence. This is "ish" not "ishst" so
@@ -1375,12 +1370,12 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint, isInitialized
ENTRY \name
// Fast path tlab allocation.
- // r0: type, r9: Thread::Current
+ // r0: type, rSELF (r9): Thread::Current
// r1, r2, r3, r12: free.
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path\name, \isInitialized
.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME r2 // Save callee saves in case of GC.
- mov r1, r9 // Pass Thread::Current.
+ mov r1, rSELF // Pass Thread::Current.
bl \entrypoint // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -1397,7 +1392,7 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
// and art_quick_alloc_array_resolved/initialized_region_tlab.
//
-// r0: type r1: component_count r2: total_size r9: Thread::Current, r3, r12: free.
+// r0: type, r1: component_count, r2: total_size, rSELF (r9): Thread::Current, r3, r12: free.
// Need to preserve r0 and r1 to the slow path.
.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
and r2, r2, #OBJECT_ALIGNMENT_MASK_TOGGLED // Apply alignment mask
@@ -1409,7 +1404,7 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
#if !((THREAD_LOCAL_POS_OFFSET + 4 == THREAD_LOCAL_END_OFFSET) && (THREAD_LOCAL_POS_OFFSET % 8 == 0))
#error "Thread::thread_local_pos/end must be consecutive and are 8 byte aligned for performance"
#endif
- ldrd r3, r12, [r9, #THREAD_LOCAL_POS_OFFSET]
+ ldrd r3, r12, [rSELF, #THREAD_LOCAL_POS_OFFSET]
sub r12, r12, r3 // Compute the remaining buf size.
cmp r2, r12 // Check if the total_size fits.
// The array class is always initialized here. Unlike new-instance,
@@ -1417,10 +1412,10 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
add r2, r2, r3
- str r2, [r9, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
- ldr r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
+ str r2, [rSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ ldr r2, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add r2, r2, #1
- str r2, [r9, #THREAD_LOCAL_OBJECTS_OFFSET]
+ str r2, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
POISON_HEAP_REF r0
str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
str r1, [r3, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
@@ -1443,7 +1438,7 @@ ENTRY \name
// Fast path array allocation for region tlab allocation.
// r0: mirror::Class* type
// r1: int32_t component_count
- // r9: thread
+ // rSELF (r9): thread
// r2, r3, r12: free.
\size_setup .Lslow_path\name
ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name
@@ -1452,7 +1447,7 @@ ENTRY \name
// r1: int32_t component_count
// r2: Thread* self
SETUP_SAVE_REFS_ONLY_FRAME r2 // save callee saves in case of GC
- mov r2, r9 // pass Thread::Current
+ mov r2, rSELF // pass Thread::Current
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
@@ -1575,10 +1570,10 @@ END art_quick_implicit_suspend
.extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_R0
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
- ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
// Tear down the callee-save frame. Skip arg registers.
add sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
.cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
@@ -1706,7 +1701,7 @@ END art_quick_imt_conflict_trampoline
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
SETUP_SAVE_REFS_AND_ARGS_FRAME r2
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickResolutionTrampoline @ (Method* called, receiver, Thread*, SP)
cbz r0, 1f @ is code pointer null? goto exception
@@ -1780,10 +1775,10 @@ ENTRY art_quick_generic_jni_trampoline
blx artQuickGenericJniEndTrampoline
// Restore self pointer.
- mov r9, r11
+ mov rSELF, r11
// Pending exceptions possible.
- ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
cbnz r2, .Lexception_in_native
// Tear down the alloca.
@@ -1804,7 +1799,7 @@ ENTRY art_quick_generic_jni_trampoline
.cfi_adjust_cfa_offset FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY
.Lexception_in_native:
- ldr ip, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET]
+ ldr ip, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
add ip, ip, #-1 // Remove the GenericJNI tag. ADD/SUB writing directly to SP is UNPREDICTABLE.
mov sp, ip
.cfi_def_cfa_register sp
@@ -1815,10 +1810,10 @@ END art_quick_generic_jni_trampoline
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
SETUP_SAVE_REFS_AND_ARGS_FRAME r1
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
mov r2, sp @ pass SP
blx artQuickToInterpreterBridge @ (Method* method, Thread*, SP)
- ldr r2, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr r2, [rSELF, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
// Tear down the callee-save frame. Skip arg registers.
add sp, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
.cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
@@ -1846,7 +1841,7 @@ ENTRY art_quick_instrumentation_entry
SETUP_SAVE_REFS_AND_ARGS_FRAME r2
@ preserve r0 (not normally an arg) knowing there is a spare slot in kSaveRefsAndArgs.
str r0, [sp, #4]
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
mov r3, sp @ pass SP
blx artInstrumentationMethodEntryFromCode @ (Method*, Object*, Thread*, SP)
cbz r0, .Ldeliver_instrumentation_entry_exception
@@ -1872,7 +1867,7 @@ ENTRY art_quick_instrumentation_exit
add r3, sp, #8 @ store fpr_res pointer, in kSaveEverything frame
add r2, sp, #136 @ store gpr_res pointer, in kSaveEverything frame
mov r1, sp @ pass SP
- mov r0, r9 @ pass Thread::Current
+ mov r0, rSELF @ pass Thread::Current
blx artInstrumentationMethodExitFromCode @ (Thread*, SP, gpr_res*, fpr_res*)
cbz r0, .Ldo_deliver_instrumentation_exception
@@ -1901,7 +1896,7 @@ END art_quick_instrumentation_exit
.extern artDeoptimize
ENTRY art_quick_deoptimize
SETUP_SAVE_EVERYTHING_FRAME r0
- mov r0, r9 @ pass Thread::Current
+ mov r0, rSELF @ pass Thread::Current
blx artDeoptimize @ (Thread*)
END art_quick_deoptimize
@@ -1912,7 +1907,7 @@ END art_quick_deoptimize
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
SETUP_SAVE_EVERYTHING_FRAME r1
- mov r1, r9 @ pass Thread::Current
+ mov r1, rSELF @ pass Thread::Current
blx artDeoptimizeFromCompiledCode @ (DeoptimizationKind, Thread*)
END art_quick_deoptimize_from_compiled_code
@@ -2691,7 +2686,7 @@ END art_quick_read_barrier_mark_introspection
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
SETUP_SAVE_REFS_AND_ARGS_FRAME r2
- mov r2, r9 @ pass Thread::Current
+ mov r2, rSELF @ pass Thread::Current
mov r3, sp @ pass SP
mov r0, #0 @ initialize 64-bit JValue as zero.
str r0, [sp, #-4]!
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index ac5b2b8b88..14d0cc7db4 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1151,45 +1151,36 @@ END art_quick_do_long_jump
*/
.extern artLockObjectFromCode
ENTRY art_quick_lock_object
- cbz w0, .Lslow_lock
- add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET // exclusive load/store has no immediate anymore
+ ldr w1, [xSELF, #THREAD_ID_OFFSET]
+ cbz w0, art_quick_lock_object_no_inline
+ // Exclusive load/store has no immediate anymore.
+ add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET
.Lretry_lock:
- ldr w2, [xSELF, #THREAD_ID_OFFSET] // TODO: Can the thread ID really change during the loop?
- ldaxr w1, [x4] // acquire needed only in most common case
- and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
- cbnz w3, .Lnot_unlocked // already thin locked
- // unlocked case - x1: original lock word that's zero except for the read barrier bits.
- orr x2, x1, x2 // x2 holds thread id with count of 0 with preserved read barrier bits
- stxr w3, w2, [x4]
- cbnz w3, .Llock_stxr_fail // store failed, retry
+ ldaxr w2, [x4] // Acquire needed only in most common case.
+ eor w3, w2, w1 // Prepare the value to store if unlocked
+ // (thread id, count of 0 and preserved read barrier bits),
+ // or prepare to compare thread id for recursive lock check
+ // (lock_word.ThreadId() ^ self->ThreadId()).
+ tst w2, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // Test the non-gc bits.
+ b.ne .Lnot_unlocked // Check if unlocked.
+ // unlocked case - store w3: original lock word plus thread id, preserved read barrier bits.
+ stxr w2, w3, [x4]
+ cbnz w2, .Lretry_lock // If the store failed, retry.
ret
-.Lnot_unlocked: // x1: original lock word
- lsr w3, w1, LOCK_WORD_STATE_SHIFT
- cbnz w3, .Lslow_lock // if either of the top two bits are set, go slow path
- eor w2, w1, w2 // lock_word.ThreadId() ^ self->ThreadId()
- uxth w2, w2 // zero top 16 bits
- cbnz w2, .Lslow_lock // lock word and self thread id's match -> recursive lock
- // else contention, go to slow path
- and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits.
- add w2, w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count in lock word placing in w2 to check overflow
- lsr w3, w2, #LOCK_WORD_GC_STATE_SHIFT // if the first gc state bit is set, we overflowed.
- cbnz w3, .Lslow_lock // if we overflow the count go slow path
- add w2, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE // increment count for real
- stxr w3, w2, [x4]
- cbnz w3, .Llock_stxr_fail // store failed, retry
+.Lnot_unlocked: // w2: original lock word, w1: thread id, w3: w2 ^ w1
+ // Check lock word state and thread id together,
+ tst w3, #(LOCK_WORD_STATE_MASK_SHIFTED | LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED)
+ b.ne art_quick_lock_object_no_inline
+ add w3, w2, #LOCK_WORD_THIN_LOCK_COUNT_ONE // Increment the recursive lock count.
+ tst w3, #LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED // Test the new thin lock count.
+ b.eq art_quick_lock_object_no_inline // Zero as the new count indicates overflow, go slow path.
+ stxr w2, w3, [x4]
+ cbnz w2, .Lretry_lock // If the store failed, retry.
ret
-.Llock_stxr_fail:
- b .Lretry_lock // retry
-.Lslow_lock:
- SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case we block
- mov x1, xSELF // pass Thread::Current
- bl artLockObjectFromCode // (Object* obj, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- REFRESH_MARKING_REGISTER
- RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object
ENTRY art_quick_lock_object_no_inline
+ // This is also the slow path for art_quick_lock_object.
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case we block
mov x1, xSELF // pass Thread::Current
bl artLockObjectFromCode // (Object* obj, Thread*)
@@ -1206,54 +1197,46 @@ END art_quick_lock_object_no_inline
*/
.extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
- cbz x0, .Lslow_unlock
- add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET // exclusive load/store has no immediate anymore
+ ldr w1, [xSELF, #THREAD_ID_OFFSET]
+ cbz x0, art_quick_unlock_object_no_inline
+ // Exclusive load/store has no immediate anymore.
+ add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET
.Lretry_unlock:
#ifndef USE_READ_BARRIER
- ldr w1, [x4]
+ ldr w2, [x4]
#else
- ldxr w1, [x4] // Need to use atomic instructions for read barrier
+ ldxr w2, [x4] // Need to use atomic instructions for read barrier.
#endif
- lsr w2, w1, LOCK_WORD_STATE_SHIFT
- cbnz w2, .Lslow_unlock // if either of the top two bits are set, go slow path
- ldr w2, [xSELF, #THREAD_ID_OFFSET]
- and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
- eor w3, w3, w2 // lock_word.ThreadId() ^ self->ThreadId()
- uxth w3, w3 // zero top 16 bits
- cbnz w3, .Lslow_unlock // do lock word and self thread id's match?
- and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // zero the gc bits
- cmp w3, #LOCK_WORD_THIN_LOCK_COUNT_ONE
- bpl .Lrecursive_thin_unlock
- // transition to unlocked
- and w3, w1, #LOCK_WORD_GC_STATE_MASK_SHIFTED // w3: zero except for the preserved read barrier bits
+ eor w3, w2, w1 // Prepare the value to store if simply locked
+ // (mostly 0s, and preserved read barrier bits),
+ // or prepare to compare thread id for recursive lock check
+ // (lock_word.ThreadId() ^ self->ThreadId()).
+ tst w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED // Test the non-gc bits.
+ b.ne .Lnot_simply_locked // Locked recursively or by other thread?
+ // Transition to unlocked.
#ifndef USE_READ_BARRIER
stlr w3, [x4]
#else
- stlxr w2, w3, [x4] // Need to use atomic instructions for read barrier
- cbnz w2, .Lunlock_stxr_fail // store failed, retry
+ stlxr w2, w3, [x4] // Need to use atomic instructions for read barrier.
+ cbnz w2, .Lretry_unlock // If the store failed, retry.
#endif
ret
-.Lrecursive_thin_unlock: // w1: original lock word
- sub w1, w1, #LOCK_WORD_THIN_LOCK_COUNT_ONE // decrement count
+.Lnot_simply_locked:
+ // Check lock word state and thread id together,
+ tst w3, #(LOCK_WORD_STATE_MASK_SHIFTED | LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED)
+ b.ne art_quick_unlock_object_no_inline
+ sub w3, w2, #LOCK_WORD_THIN_LOCK_COUNT_ONE // decrement count
#ifndef USE_READ_BARRIER
- str w1, [x4]
+ str w3, [x4]
#else
- stxr w2, w1, [x4] // Need to use atomic instructions for read barrier
- cbnz w2, .Lunlock_stxr_fail // store failed, retry
+ stxr w2, w3, [x4] // Need to use atomic instructions for read barrier.
+ cbnz w2, .Lretry_unlock // If the store failed, retry.
#endif
ret
-.Lunlock_stxr_fail:
- b .Lretry_unlock // retry
-.Lslow_unlock:
- SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case exception allocation triggers GC
- mov x1, xSELF // pass Thread::Current
- bl artUnlockObjectFromCode // (Object* obj, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- REFRESH_MARKING_REGISTER
- RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object
ENTRY art_quick_unlock_object_no_inline
+ // This is also the slow path for art_quick_unlock_object.
SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case exception allocation triggers GC
mov x1, xSELF // pass Thread::Current
bl artUnlockObjectFromCode // (Object* obj, Thread*)
diff --git a/runtime/arch/code_offset.h b/runtime/arch/code_offset.h
index 8e8dde4c4c..f0c6d22ef2 100644
--- a/runtime/arch/code_offset.h
+++ b/runtime/arch/code_offset.h
@@ -21,9 +21,9 @@
#include <android-base/logging.h>
+#include "arch/instruction_set.h"
#include "base/bit_utils.h"
#include "base/macros.h"
-#include "instruction_set.h"
namespace art {
diff --git a/runtime/arch/instruction_set.cc b/runtime/arch/instruction_set.cc
deleted file mode 100644
index b848eb27fc..0000000000
--- a/runtime/arch/instruction_set.cc
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set.h"
-
-// Explicitly include our own elf.h to avoid Linux and other dependencies.
-#include "../elf.h"
-#include "android-base/logging.h"
-#include "base/bit_utils.h"
-#include "base/globals.h"
-
-namespace art {
-
-void InstructionSetAbort(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm:
- case InstructionSet::kThumb2:
- case InstructionSet::kArm64:
- case InstructionSet::kX86:
- case InstructionSet::kX86_64:
- case InstructionSet::kMips:
- case InstructionSet::kMips64:
- case InstructionSet::kNone:
- LOG(FATAL) << "Unsupported instruction set " << isa;
- UNREACHABLE();
- }
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
-}
-
-const char* GetInstructionSetString(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm:
- case InstructionSet::kThumb2:
- return "arm";
- case InstructionSet::kArm64:
- return "arm64";
- case InstructionSet::kX86:
- return "x86";
- case InstructionSet::kX86_64:
- return "x86_64";
- case InstructionSet::kMips:
- return "mips";
- case InstructionSet::kMips64:
- return "mips64";
- case InstructionSet::kNone:
- return "none";
- }
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
-}
-
-InstructionSet GetInstructionSetFromString(const char* isa_str) {
- CHECK(isa_str != nullptr);
-
- if (strcmp("arm", isa_str) == 0) {
- return InstructionSet::kArm;
- } else if (strcmp("arm64", isa_str) == 0) {
- return InstructionSet::kArm64;
- } else if (strcmp("x86", isa_str) == 0) {
- return InstructionSet::kX86;
- } else if (strcmp("x86_64", isa_str) == 0) {
- return InstructionSet::kX86_64;
- } else if (strcmp("mips", isa_str) == 0) {
- return InstructionSet::kMips;
- } else if (strcmp("mips64", isa_str) == 0) {
- return InstructionSet::kMips64;
- }
-
- return InstructionSet::kNone;
-}
-
-InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags) {
- switch (e_machine) {
- case EM_ARM:
- return InstructionSet::kArm;
- case EM_AARCH64:
- return InstructionSet::kArm64;
- case EM_386:
- return InstructionSet::kX86;
- case EM_X86_64:
- return InstructionSet::kX86_64;
- case EM_MIPS: {
- if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R2 ||
- (e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
- return InstructionSet::kMips;
- } else if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) {
- return InstructionSet::kMips64;
- }
- break;
- }
- }
- return InstructionSet::kNone;
-}
-
-size_t GetInstructionSetAlignment(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm:
- // Fall-through.
- case InstructionSet::kThumb2:
- return kArmAlignment;
- case InstructionSet::kArm64:
- return kArm64Alignment;
- case InstructionSet::kX86:
- // Fall-through.
- case InstructionSet::kX86_64:
- return kX86Alignment;
- case InstructionSet::kMips:
- // Fall-through.
- case InstructionSet::kMips64:
- return kMipsAlignment;
- case InstructionSet::kNone:
- LOG(FATAL) << "ISA kNone does not have alignment.";
- UNREACHABLE();
- }
- LOG(FATAL) << "Unknown ISA " << isa;
- UNREACHABLE();
-}
-
-#if !defined(ART_STACK_OVERFLOW_GAP_arm) || !defined(ART_STACK_OVERFLOW_GAP_arm64) || \
- !defined(ART_STACK_OVERFLOW_GAP_mips) || !defined(ART_STACK_OVERFLOW_GAP_mips64) || \
- !defined(ART_STACK_OVERFLOW_GAP_x86) || !defined(ART_STACK_OVERFLOW_GAP_x86_64)
-#error "Missing defines for stack overflow gap"
-#endif
-
-static constexpr size_t kArmStackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_arm;
-static constexpr size_t kArm64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_arm64;
-static constexpr size_t kMipsStackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips;
-static constexpr size_t kMips64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips64;
-static constexpr size_t kX86StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86;
-static constexpr size_t kX86_64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86_64;
-
-static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes), "ARM gap not page aligned");
-static_assert(IsAligned<kPageSize>(kArm64StackOverflowReservedBytes), "ARM64 gap not page aligned");
-static_assert(IsAligned<kPageSize>(kMipsStackOverflowReservedBytes), "Mips gap not page aligned");
-static_assert(IsAligned<kPageSize>(kMips64StackOverflowReservedBytes),
- "Mips64 gap not page aligned");
-static_assert(IsAligned<kPageSize>(kX86StackOverflowReservedBytes), "X86 gap not page aligned");
-static_assert(IsAligned<kPageSize>(kX86_64StackOverflowReservedBytes),
- "X86_64 gap not page aligned");
-
-#if !defined(ART_FRAME_SIZE_LIMIT)
-#error "ART frame size limit missing"
-#endif
-
-// TODO: Should we require an extra page (RoundUp(SIZE) + kPageSize)?
-static_assert(ART_FRAME_SIZE_LIMIT < kArmStackOverflowReservedBytes, "Frame size limit too large");
-static_assert(ART_FRAME_SIZE_LIMIT < kArm64StackOverflowReservedBytes,
- "Frame size limit too large");
-static_assert(ART_FRAME_SIZE_LIMIT < kMipsStackOverflowReservedBytes,
- "Frame size limit too large");
-static_assert(ART_FRAME_SIZE_LIMIT < kMips64StackOverflowReservedBytes,
- "Frame size limit too large");
-static_assert(ART_FRAME_SIZE_LIMIT < kX86StackOverflowReservedBytes,
- "Frame size limit too large");
-static_assert(ART_FRAME_SIZE_LIMIT < kX86_64StackOverflowReservedBytes,
- "Frame size limit too large");
-
-size_t GetStackOverflowReservedBytes(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm: // Intentional fall-through.
- case InstructionSet::kThumb2:
- return kArmStackOverflowReservedBytes;
-
- case InstructionSet::kArm64:
- return kArm64StackOverflowReservedBytes;
-
- case InstructionSet::kMips:
- return kMipsStackOverflowReservedBytes;
-
- case InstructionSet::kMips64:
- return kMips64StackOverflowReservedBytes;
-
- case InstructionSet::kX86:
- return kX86StackOverflowReservedBytes;
-
- case InstructionSet::kX86_64:
- return kX86_64StackOverflowReservedBytes;
-
- case InstructionSet::kNone:
- LOG(FATAL) << "kNone has no stack overflow size";
- UNREACHABLE();
- }
- LOG(FATAL) << "Unknown instruction set" << isa;
- UNREACHABLE();
-}
-
-} // namespace art
diff --git a/runtime/arch/instruction_set.h b/runtime/arch/instruction_set.h
deleted file mode 100644
index 6434005dda..0000000000
--- a/runtime/arch/instruction_set.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
-#define ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
-
-#include <iosfwd>
-#include <string>
-
-#include "base/enums.h"
-#include "base/macros.h"
-
-namespace art {
-
-enum class InstructionSet {
- kNone,
- kArm,
- kArm64,
- kThumb2,
- kX86,
- kX86_64,
- kMips,
- kMips64,
- kLast = kMips64
-};
-std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
-
-#if defined(__arm__)
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm;
-#elif defined(__aarch64__)
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm64;
-#elif defined(__mips__) && !defined(__LP64__)
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips;
-#elif defined(__mips__) && defined(__LP64__)
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips64;
-#elif defined(__i386__)
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86;
-#elif defined(__x86_64__)
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86_64;
-#else
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kNone;
-#endif
-
-// Architecture-specific pointer sizes
-static constexpr PointerSize kArmPointerSize = PointerSize::k32;
-static constexpr PointerSize kArm64PointerSize = PointerSize::k64;
-static constexpr PointerSize kMipsPointerSize = PointerSize::k32;
-static constexpr PointerSize kMips64PointerSize = PointerSize::k64;
-static constexpr PointerSize kX86PointerSize = PointerSize::k32;
-static constexpr PointerSize kX86_64PointerSize = PointerSize::k64;
-
-// ARM instruction alignment. ARM processors require code to be 4-byte aligned,
-// but ARM ELF requires 8..
-static constexpr size_t kArmAlignment = 8;
-
-// ARM64 instruction alignment. This is the recommended alignment for maximum performance.
-static constexpr size_t kArm64Alignment = 16;
-
-// MIPS instruction alignment. MIPS processors require code to be 4-byte aligned,
-// but 64-bit literals must be 8-byte aligned.
-static constexpr size_t kMipsAlignment = 8;
-
-// X86 instruction alignment. This is the recommended alignment for maximum performance.
-static constexpr size_t kX86Alignment = 16;
-
-// Different than code alignment since code alignment is only first instruction of method.
-static constexpr size_t kThumb2InstructionAlignment = 2;
-static constexpr size_t kArm64InstructionAlignment = 4;
-static constexpr size_t kX86InstructionAlignment = 1;
-static constexpr size_t kX86_64InstructionAlignment = 1;
-static constexpr size_t kMipsInstructionAlignment = 4;
-static constexpr size_t kMips64InstructionAlignment = 4;
-
-const char* GetInstructionSetString(InstructionSet isa);
-
-// Note: Returns kNone when the string cannot be parsed to a known value.
-InstructionSet GetInstructionSetFromString(const char* instruction_set);
-
-InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags);
-
-// Fatal logging out of line to keep the header clean of logging.h.
-NO_RETURN void InstructionSetAbort(InstructionSet isa);
-
-constexpr PointerSize GetInstructionSetPointerSize(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm:
- // Fall-through.
- case InstructionSet::kThumb2:
- return kArmPointerSize;
- case InstructionSet::kArm64:
- return kArm64PointerSize;
- case InstructionSet::kX86:
- return kX86PointerSize;
- case InstructionSet::kX86_64:
- return kX86_64PointerSize;
- case InstructionSet::kMips:
- return kMipsPointerSize;
- case InstructionSet::kMips64:
- return kMips64PointerSize;
-
- case InstructionSet::kNone:
- break;
- }
- InstructionSetAbort(isa);
-}
-
-constexpr size_t GetInstructionSetInstructionAlignment(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm:
- // Fall-through.
- case InstructionSet::kThumb2:
- return kThumb2InstructionAlignment;
- case InstructionSet::kArm64:
- return kArm64InstructionAlignment;
- case InstructionSet::kX86:
- return kX86InstructionAlignment;
- case InstructionSet::kX86_64:
- return kX86_64InstructionAlignment;
- case InstructionSet::kMips:
- return kMipsInstructionAlignment;
- case InstructionSet::kMips64:
- return kMips64InstructionAlignment;
-
- case InstructionSet::kNone:
- break;
- }
- InstructionSetAbort(isa);
-}
-
-constexpr bool IsValidInstructionSet(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm:
- case InstructionSet::kThumb2:
- case InstructionSet::kArm64:
- case InstructionSet::kX86:
- case InstructionSet::kX86_64:
- case InstructionSet::kMips:
- case InstructionSet::kMips64:
- return true;
-
- case InstructionSet::kNone:
- return false;
- }
- return false;
-}
-
-size_t GetInstructionSetAlignment(InstructionSet isa);
-
-constexpr bool Is64BitInstructionSet(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm:
- case InstructionSet::kThumb2:
- case InstructionSet::kX86:
- case InstructionSet::kMips:
- return false;
-
- case InstructionSet::kArm64:
- case InstructionSet::kX86_64:
- case InstructionSet::kMips64:
- return true;
-
- case InstructionSet::kNone:
- break;
- }
- InstructionSetAbort(isa);
-}
-
-constexpr PointerSize InstructionSetPointerSize(InstructionSet isa) {
- return Is64BitInstructionSet(isa) ? PointerSize::k64 : PointerSize::k32;
-}
-
-constexpr size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm:
- // Fall-through.
- case InstructionSet::kThumb2:
- return 4;
- case InstructionSet::kArm64:
- return 8;
- case InstructionSet::kX86:
- return 4;
- case InstructionSet::kX86_64:
- return 8;
- case InstructionSet::kMips:
- return 4;
- case InstructionSet::kMips64:
- return 8;
-
- case InstructionSet::kNone:
- break;
- }
- InstructionSetAbort(isa);
-}
-
-constexpr size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
- switch (isa) {
- case InstructionSet::kArm:
- // Fall-through.
- case InstructionSet::kThumb2:
- return 4;
- case InstructionSet::kArm64:
- return 8;
- case InstructionSet::kX86:
- return 8;
- case InstructionSet::kX86_64:
- return 8;
- case InstructionSet::kMips:
- return 4;
- case InstructionSet::kMips64:
- return 8;
-
- case InstructionSet::kNone:
- break;
- }
- InstructionSetAbort(isa);
-}
-
-size_t GetStackOverflowReservedBytes(InstructionSet isa);
-
-// The following definitions create return types for two word-sized entities that will be passed
-// in registers so that memory operations for the interface trampolines can be avoided. The entities
-// are the resolved method and the pointer to the code to be invoked.
-//
-// On x86, ARM32 and MIPS, this is given for a *scalar* 64bit value. The definition thus *must* be
-// uint64_t or long long int.
-//
-// On x86_64, ARM64 and MIPS64, structs are decomposed for allocation, so we can create a structs of
-// two size_t-sized values.
-//
-// We need two operations:
-//
-// 1) A flag value that signals failure. The assembly stubs expect the lower part to be "0".
-// GetTwoWordFailureValue() will return a value that has lower part == 0.
-//
-// 2) A value that combines two word-sized values.
-// GetTwoWordSuccessValue() constructs this.
-//
-// IMPORTANT: If you use this to transfer object pointers, it is your responsibility to ensure
-// that the object does not move or the value is updated. Simple use of this is NOT SAFE
-// when the garbage collector can move objects concurrently. Ensure that required locks
-// are held when using!
-
-#if defined(__i386__) || defined(__arm__) || (defined(__mips__) && !defined(__LP64__))
-typedef uint64_t TwoWordReturn;
-
-// Encodes method_ptr==nullptr and code_ptr==nullptr
-static inline constexpr TwoWordReturn GetTwoWordFailureValue() {
- return 0;
-}
-
-// Use the lower 32b for the method pointer and the upper 32b for the code pointer.
-static inline constexpr TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
- static_assert(sizeof(uint32_t) == sizeof(uintptr_t), "Unexpected size difference");
- uint32_t lo32 = lo;
- uint64_t hi64 = static_cast<uint64_t>(hi);
- return ((hi64 << 32) | lo32);
-}
-
-#elif defined(__x86_64__) || defined(__aarch64__) || (defined(__mips__) && defined(__LP64__))
-
-// Note: TwoWordReturn can't be constexpr for 64-bit targets. We'd need a constexpr constructor,
-// which would violate C-linkage in the entrypoint functions.
-
-struct TwoWordReturn {
- uintptr_t lo;
- uintptr_t hi;
-};
-
-// Encodes method_ptr==nullptr. Leaves random value in code pointer.
-static inline TwoWordReturn GetTwoWordFailureValue() {
- TwoWordReturn ret;
- ret.lo = 0;
- return ret;
-}
-
-// Write values into their respective members.
-static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) {
- TwoWordReturn ret;
- ret.lo = lo;
- ret.hi = hi;
- return ret;
-}
-#else
-#error "Unsupported architecture"
-#endif
-
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_INSTRUCTION_SET_H_
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index 5f1a507f7a..c31c927668 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -21,8 +21,8 @@
#include <ostream>
#include <vector>
+#include "arch/instruction_set.h"
#include "base/macros.h"
-#include "instruction_set.h"
namespace art {
diff --git a/runtime/arch/instruction_set_test.cc b/runtime/arch/instruction_set_test.cc
deleted file mode 100644
index 12a117d7a1..0000000000
--- a/runtime/arch/instruction_set_test.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set.h"
-
-#include <gtest/gtest.h>
-
-#include "base/enums.h"
-
-namespace art {
-
-TEST(InstructionSetTest, GetInstructionSetFromString) {
- EXPECT_EQ(InstructionSet::kArm, GetInstructionSetFromString("arm"));
- EXPECT_EQ(InstructionSet::kArm64, GetInstructionSetFromString("arm64"));
- EXPECT_EQ(InstructionSet::kX86, GetInstructionSetFromString("x86"));
- EXPECT_EQ(InstructionSet::kX86_64, GetInstructionSetFromString("x86_64"));
- EXPECT_EQ(InstructionSet::kMips, GetInstructionSetFromString("mips"));
- EXPECT_EQ(InstructionSet::kMips64, GetInstructionSetFromString("mips64"));
- EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("none"));
- EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("random-string"));
-}
-
-TEST(InstructionSetTest, GetInstructionSetString) {
- EXPECT_STREQ("arm", GetInstructionSetString(InstructionSet::kArm));
- EXPECT_STREQ("arm", GetInstructionSetString(InstructionSet::kThumb2));
- EXPECT_STREQ("arm64", GetInstructionSetString(InstructionSet::kArm64));
- EXPECT_STREQ("x86", GetInstructionSetString(InstructionSet::kX86));
- EXPECT_STREQ("x86_64", GetInstructionSetString(InstructionSet::kX86_64));
- EXPECT_STREQ("mips", GetInstructionSetString(InstructionSet::kMips));
- EXPECT_STREQ("mips64", GetInstructionSetString(InstructionSet::kMips64));
- EXPECT_STREQ("none", GetInstructionSetString(InstructionSet::kNone));
-}
-
-TEST(InstructionSetTest, GetInstructionSetInstructionAlignment) {
- EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kThumb2),
- kThumb2InstructionAlignment);
- EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kArm64),
- kArm64InstructionAlignment);
- EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kX86),
- kX86InstructionAlignment);
- EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kX86_64),
- kX86_64InstructionAlignment);
- EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kMips),
- kMipsInstructionAlignment);
- EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kMips64),
- kMips64InstructionAlignment);
-}
-
-TEST(InstructionSetTest, TestRoundTrip) {
- EXPECT_EQ(kRuntimeISA, GetInstructionSetFromString(GetInstructionSetString(kRuntimeISA)));
-}
-
-TEST(InstructionSetTest, PointerSize) {
- EXPECT_EQ(kRuntimePointerSize, GetInstructionSetPointerSize(kRuntimeISA));
-}
-
-} // namespace art
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 8ab4ce160f..b89d45f617 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1292,7 +1292,7 @@ DEFINE_FUNCTION art_quick_lock_object
jz .Lslow_lock
.Lretry_lock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
- test LITERAL(LOCK_WORD_STATE_MASK), %ecx // test the 2 high bits.
+ test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx // test the 2 high bits.
jne .Lslow_lock // slow path if either of the two high bits are set.
movl %ecx, %edx // save lock word (edx) to keep read barrier bits.
andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
@@ -1362,7 +1362,7 @@ DEFINE_FUNCTION art_quick_unlock_object
.Lretry_unlock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax), %ecx // ecx := lock word
movl %fs:THREAD_ID_OFFSET, %edx // edx := thread id
- test LITERAL(LOCK_WORD_STATE_MASK), %ecx
+ test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx
jnz .Lslow_unlock // lock word contains a monitor
cmpw %cx, %dx // does the thread id match?
jne .Lslow_unlock
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index eb945ed366..c179033e6b 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1312,7 +1312,7 @@ DEFINE_FUNCTION art_quick_lock_object
jz .Lslow_lock
.Lretry_lock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx // ecx := lock word.
- test LITERAL(LOCK_WORD_STATE_MASK), %ecx // Test the 2 high bits.
+ test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx // Test the 2 high bits.
jne .Lslow_lock // Slow path if either of the two high bits are set.
movl %ecx, %edx // save lock word (edx) to keep read barrier bits.
andl LITERAL(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), %ecx // zero the gc bits.
@@ -1362,7 +1362,7 @@ DEFINE_FUNCTION art_quick_unlock_object
.Lretry_unlock:
movl MIRROR_OBJECT_LOCK_WORD_OFFSET(%edi), %ecx // ecx := lock word
movl %gs:THREAD_ID_OFFSET, %edx // edx := thread id
- test LITERAL(LOCK_WORD_STATE_MASK), %ecx
+ test LITERAL(LOCK_WORD_STATE_MASK_SHIFTED), %ecx
jnz .Lslow_unlock // lock word contains a monitor
cmpw %cx, %dx // does the thread id match?
jne .Lslow_unlock
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 87fcb20698..608e33cf65 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -710,6 +710,23 @@ bool ArtMethod::HasAnyCompiledCode() {
return GetOatMethodQuickCode(runtime->GetClassLinker()->GetImagePointerSize()) != nullptr;
}
+void ArtMethod::SetNotIntrinsic() {
+ if (!IsIntrinsic()) {
+ return;
+ }
+
+ // Query the hidden API access flags of the intrinsic.
+ HiddenApiAccessFlags::ApiList intrinsic_api_list = GetHiddenApiAccessFlags();
+
+ // Clear intrinsic-related access flags.
+ ClearAccessFlags(kAccIntrinsic | kAccIntrinsicBits);
+
+ // Re-apply hidden API access flags now that the method is not an intrinsic.
+ SetAccessFlags(HiddenApiAccessFlags::EncodeForRuntime(GetAccessFlags(), intrinsic_api_list));
+ DCHECK_EQ(GetHiddenApiAccessFlags(), intrinsic_api_list);
+}
+
+
void ArtMethod::CopyFrom(ArtMethod* src, PointerSize image_pointer_size) {
memcpy(reinterpret_cast<void*>(this), reinterpret_cast<const void*>(src),
Size(image_pointer_size));
diff --git a/runtime/art_method.h b/runtime/art_method.h
index acaa4a68a1..012d706756 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -194,9 +194,7 @@ class ArtMethod FINAL {
return (GetAccessFlags() & kAccIntrinsicBits) >> kAccFlagsShift;
}
- void SetNotIntrinsic() REQUIRES_SHARED(Locks::mutator_lock_) {
- ClearAccessFlags(kAccIntrinsic | kAccIntrinsicBits);
- }
+ void SetNotIntrinsic() REQUIRES_SHARED(Locks::mutator_lock_);
bool IsCopied() {
static_assert((kAccCopied & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
diff --git a/runtime/base/file_utils.cc b/runtime/base/file_utils.cc
deleted file mode 100644
index 537216c198..0000000000
--- a/runtime/base/file_utils.cc
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "file_utils.h"
-
-#include <inttypes.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-// We need dladdr.
-#ifndef __APPLE__
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#define DEFINED_GNU_SOURCE
-#endif
-#include <dlfcn.h>
-#include <libgen.h>
-#ifdef DEFINED_GNU_SOURCE
-#undef _GNU_SOURCE
-#undef DEFINED_GNU_SOURCE
-#endif
-#endif
-
-
-#include <memory>
-
-#include "android-base/stringprintf.h"
-#include "android-base/strings.h"
-
-#include "base/bit_utils.h"
-#include "base/globals.h"
-#include "base/os.h"
-#include "base/stl_util.h"
-#include "base/unix_file/fd_file.h"
-#include "dex/dex_file_loader.h"
-
-#if defined(__APPLE__)
-#include <crt_externs.h>
-#include <sys/syscall.h>
-#include "AvailabilityMacros.h" // For MAC_OS_X_VERSION_MAX_ALLOWED
-#endif
-
-#if defined(__linux__)
-#include <linux/unistd.h>
-#endif
-
-namespace art {
-
-using android::base::StringAppendF;
-using android::base::StringPrintf;
-
-bool ReadFileToString(const std::string& file_name, std::string* result) {
- File file(file_name, O_RDONLY, false);
- if (!file.IsOpened()) {
- return false;
- }
-
- std::vector<char> buf(8 * KB);
- while (true) {
- int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[0], buf.size()));
- if (n == -1) {
- return false;
- }
- if (n == 0) {
- return true;
- }
- result->append(&buf[0], n);
- }
-}
-
-std::string GetAndroidRootSafe(std::string* error_msg) {
- // Prefer ANDROID_ROOT if it's set.
- const char* android_dir = getenv("ANDROID_ROOT");
- if (android_dir != nullptr) {
- if (!OS::DirectoryExists(android_dir)) {
- *error_msg = StringPrintf("Failed to find ANDROID_ROOT directory %s", android_dir);
- return "";
- }
- return android_dir;
- }
-
- // Check where libart is from, and derive from there. Only do this for non-Mac.
-#ifndef __APPLE__
- {
- Dl_info info;
- if (dladdr(reinterpret_cast<const void*>(&GetAndroidRootSafe), /* out */ &info) != 0) {
- // Make a duplicate of the fname so dirname can modify it.
- UniqueCPtr<char> fname(strdup(info.dli_fname));
-
- char* dir1 = dirname(fname.get()); // This is the lib directory.
- char* dir2 = dirname(dir1); // This is the "system" directory.
- if (OS::DirectoryExists(dir2)) {
- std::string tmp = dir2; // Make a copy here so that fname can be released.
- return tmp;
- }
- }
- }
-#endif
-
- // Try "/system".
- if (!OS::DirectoryExists("/system")) {
- *error_msg = "Failed to find ANDROID_ROOT directory /system";
- return "";
- }
- return "/system";
-}
-
-std::string GetAndroidRoot() {
- std::string error_msg;
- std::string ret = GetAndroidRootSafe(&error_msg);
- if (ret.empty()) {
- LOG(FATAL) << error_msg;
- UNREACHABLE();
- }
- return ret;
-}
-
-
-static const char* GetAndroidDirSafe(const char* env_var,
- const char* default_dir,
- std::string* error_msg) {
- const char* android_dir = getenv(env_var);
- if (android_dir == nullptr) {
- if (OS::DirectoryExists(default_dir)) {
- android_dir = default_dir;
- } else {
- *error_msg = StringPrintf("%s not set and %s does not exist", env_var, default_dir);
- return nullptr;
- }
- }
- if (!OS::DirectoryExists(android_dir)) {
- *error_msg = StringPrintf("Failed to find %s directory %s", env_var, android_dir);
- return nullptr;
- }
- return android_dir;
-}
-
-static const char* GetAndroidDir(const char* env_var, const char* default_dir) {
- std::string error_msg;
- const char* dir = GetAndroidDirSafe(env_var, default_dir, &error_msg);
- if (dir != nullptr) {
- return dir;
- } else {
- LOG(FATAL) << error_msg;
- return nullptr;
- }
-}
-
-const char* GetAndroidData() {
- return GetAndroidDir("ANDROID_DATA", "/data");
-}
-
-const char* GetAndroidDataSafe(std::string* error_msg) {
- return GetAndroidDirSafe("ANDROID_DATA", "/data", error_msg);
-}
-
-std::string GetDefaultBootImageLocation(std::string* error_msg) {
- std::string android_root = GetAndroidRootSafe(error_msg);
- if (android_root.empty()) {
- return "";
- }
- return StringPrintf("%s/framework/boot.art", android_root.c_str());
-}
-
-void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
- bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache) {
- CHECK(subdir != nullptr);
- std::string error_msg;
- const char* android_data = GetAndroidDataSafe(&error_msg);
- if (android_data == nullptr) {
- *have_android_data = false;
- *dalvik_cache_exists = false;
- *is_global_cache = false;
- return;
- } else {
- *have_android_data = true;
- }
- const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
- *dalvik_cache = dalvik_cache_root + subdir;
- *dalvik_cache_exists = OS::DirectoryExists(dalvik_cache->c_str());
- *is_global_cache = strcmp(android_data, "/data") == 0;
- if (create_if_absent && !*dalvik_cache_exists && !*is_global_cache) {
- // Don't create the system's /data/dalvik-cache/... because it needs special permissions.
- *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
- (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
- }
-}
-
-std::string GetDalvikCache(const char* subdir) {
- CHECK(subdir != nullptr);
- const char* android_data = GetAndroidData();
- const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
- const std::string dalvik_cache = dalvik_cache_root + subdir;
- if (!OS::DirectoryExists(dalvik_cache.c_str())) {
- // TODO: Check callers. Traditional behavior is to not abort.
- return "";
- }
- return dalvik_cache;
-}
-
-bool GetDalvikCacheFilename(const char* location, const char* cache_location,
- std::string* filename, std::string* error_msg) {
- if (location[0] != '/') {
- *error_msg = StringPrintf("Expected path in location to be absolute: %s", location);
- return false;
- }
- std::string cache_file(&location[1]); // skip leading slash
- if (!android::base::EndsWith(location, ".dex") &&
- !android::base::EndsWith(location, ".art") &&
- !android::base::EndsWith(location, ".oat")) {
- cache_file += "/";
- cache_file += DexFileLoader::kClassesDex;
- }
- std::replace(cache_file.begin(), cache_file.end(), '/', '@');
- *filename = StringPrintf("%s/%s", cache_location, cache_file.c_str());
- return true;
-}
-
-std::string GetVdexFilename(const std::string& oat_location) {
- return ReplaceFileExtension(oat_location, "vdex");
-}
-
-static void InsertIsaDirectory(const InstructionSet isa, std::string* filename) {
- // in = /foo/bar/baz
- // out = /foo/bar/<isa>/baz
- size_t pos = filename->rfind('/');
- CHECK_NE(pos, std::string::npos) << *filename << " " << isa;
- filename->insert(pos, "/", 1);
- filename->insert(pos + 1, GetInstructionSetString(isa));
-}
-
-std::string GetSystemImageFilename(const char* location, const InstructionSet isa) {
- // location = /system/framework/boot.art
- // filename = /system/framework/<isa>/boot.art
- std::string filename(location);
- InsertIsaDirectory(isa, &filename);
- return filename;
-}
-
-std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension) {
- const size_t last_ext = filename.find_last_of("./");
- if (last_ext == std::string::npos || filename[last_ext] != '.') {
- return filename + "." + new_extension;
- } else {
- return filename.substr(0, last_ext + 1) + new_extension;
- }
-}
-
-bool LocationIsOnSystem(const char* path) {
- UniqueCPtr<const char[]> full_path(realpath(path, nullptr));
- return path != nullptr && android::base::StartsWith(full_path.get(), GetAndroidRoot().c_str());
-}
-
-bool LocationIsOnSystemFramework(const char* full_path) {
- std::string error_msg;
- std::string root_path = GetAndroidRootSafe(&error_msg);
- if (root_path.empty()) {
- // Could not find Android root.
- // TODO(dbrazdil): change to stricter GetAndroidRoot() once b/76452688 is resolved.
- return false;
- }
- std::string framework_path = root_path + "/framework/";
- return android::base::StartsWith(full_path, framework_path);
-}
-
-} // namespace art
diff --git a/runtime/base/file_utils.h b/runtime/base/file_utils.h
deleted file mode 100644
index d4f6c576c0..0000000000
--- a/runtime/base/file_utils.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_BASE_FILE_UTILS_H_
-#define ART_RUNTIME_BASE_FILE_UTILS_H_
-
-#include <stdlib.h>
-
-#include <string>
-
-#include <android-base/logging.h>
-
-#include "arch/instruction_set.h"
-
-namespace art {
-
-bool ReadFileToString(const std::string& file_name, std::string* result);
-
-// Find $ANDROID_ROOT, /system, or abort.
-std::string GetAndroidRoot();
-// Find $ANDROID_ROOT, /system, or return an empty string.
-std::string GetAndroidRootSafe(std::string* error_msg);
-
-// Find $ANDROID_DATA, /data, or abort.
-const char* GetAndroidData();
-// Find $ANDROID_DATA, /data, or return null.
-const char* GetAndroidDataSafe(std::string* error_msg);
-
-// Returns the default boot image location (ANDROID_ROOT/framework/boot.art).
-// Returns an empty string if ANDROID_ROOT is not set.
-std::string GetDefaultBootImageLocation(std::string* error_msg);
-
-// Returns the dalvik-cache location, with subdir appended. Returns the empty string if the cache
-// could not be found.
-std::string GetDalvikCache(const char* subdir);
-// Return true if we found the dalvik cache and stored it in the dalvik_cache argument.
-// have_android_data will be set to true if we have an ANDROID_DATA that exists,
-// dalvik_cache_exists will be true if there is a dalvik-cache directory that is present.
-// The flag is_global_cache tells whether this cache is /data/dalvik-cache.
-void GetDalvikCache(const char* subdir, bool create_if_absent, std::string* dalvik_cache,
- bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache);
-
-// Returns the absolute dalvik-cache path for a DexFile or OatFile. The path returned will be
-// rooted at cache_location.
-bool GetDalvikCacheFilename(const char* file_location, const char* cache_location,
- std::string* filename, std::string* error_msg);
-
-// Returns the system location for an image
-std::string GetSystemImageFilename(const char* location, InstructionSet isa);
-
-// Returns the vdex filename for the given oat filename.
-std::string GetVdexFilename(const std::string& oat_filename);
-
-// Returns `filename` with the text after the last occurrence of '.' replaced with
-// `extension`. If `filename` does not contain a period, returns a string containing `filename`,
-// a period, and `new_extension`.
-// Example: ReplaceFileExtension("foo.bar", "abc") == "foo.abc"
-// ReplaceFileExtension("foo", "abc") == "foo.abc"
-std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension);
-
-// Return whether the location is on system (i.e. android root).
-bool LocationIsOnSystem(const char* location);
-
-// Return whether the location is on system/framework (i.e. android_root/framework).
-bool LocationIsOnSystemFramework(const char* location);
-
-} // namespace art
-
-#endif // ART_RUNTIME_BASE_FILE_UTILS_H_
diff --git a/runtime/base/file_utils_test.cc b/runtime/base/file_utils_test.cc
deleted file mode 100644
index e74dfe5e64..0000000000
--- a/runtime/base/file_utils_test.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/file_utils.h"
-
-#include <libgen.h>
-#include <stdlib.h>
-
-#include "base/stl_util.h"
-#include "common_runtime_test.h"
-
-namespace art {
-
-class FileUtilsTest : public CommonRuntimeTest {};
-
-TEST_F(FileUtilsTest, GetDalvikCacheFilename) {
- std::string name;
- std::string error;
-
- EXPECT_TRUE(GetDalvikCacheFilename("/system/app/Foo.apk", "/foo", &name, &error)) << error;
- EXPECT_EQ("/foo/system@app@Foo.apk@classes.dex", name);
-
- EXPECT_TRUE(GetDalvikCacheFilename("/data/app/foo-1.apk", "/foo", &name, &error)) << error;
- EXPECT_EQ("/foo/data@app@foo-1.apk@classes.dex", name);
-
- EXPECT_TRUE(GetDalvikCacheFilename("/system/framework/core.jar", "/foo", &name, &error)) << error;
- EXPECT_EQ("/foo/system@framework@core.jar@classes.dex", name);
-
- EXPECT_TRUE(GetDalvikCacheFilename("/system/framework/boot.art", "/foo", &name, &error)) << error;
- EXPECT_EQ("/foo/system@framework@boot.art", name);
-
- EXPECT_TRUE(GetDalvikCacheFilename("/system/framework/boot.oat", "/foo", &name, &error)) << error;
- EXPECT_EQ("/foo/system@framework@boot.oat", name);
-}
-
-TEST_F(FileUtilsTest, GetDalvikCache) {
- EXPECT_STREQ("", GetDalvikCache("should-not-exist123").c_str());
-
- EXPECT_STREQ((android_data_ + "/dalvik-cache/.").c_str(), GetDalvikCache(".").c_str());
-}
-
-
-TEST_F(FileUtilsTest, GetSystemImageFilename) {
- EXPECT_STREQ("/system/framework/arm/boot.art",
- GetSystemImageFilename("/system/framework/boot.art", InstructionSet::kArm).c_str());
-}
-
-TEST_F(FileUtilsTest, GetAndroidRootSafe) {
- std::string error_msg;
-
- // We don't expect null returns for most cases, so don't check and let std::string crash.
-
- // CommonRuntimeTest sets ANDROID_ROOT, so expect this to be the same.
- std::string android_root = GetAndroidRootSafe(&error_msg);
- std::string android_root_env = getenv("ANDROID_ROOT");
- EXPECT_EQ(android_root, android_root_env);
-
- // Set ANDROID_ROOT to something else (but the directory must exist). So use dirname.
- char* root_dup = strdup(android_root_env.c_str());
- char* dir = dirname(root_dup);
- ASSERT_EQ(0, setenv("ANDROID_ROOT", dir, 1 /* overwrite */));
- std::string android_root2 = GetAndroidRootSafe(&error_msg);
- EXPECT_STREQ(dir, android_root2.c_str());
- free(root_dup);
-
- // Set a bogus value for ANDROID_ROOT. This should be an error.
- ASSERT_EQ(0, setenv("ANDROID_ROOT", "/this/is/obviously/bogus", 1 /* overwrite */));
- EXPECT_TRUE(GetAndroidRootSafe(&error_msg) == nullptr);
-
- // Unset ANDROID_ROOT and see that it still returns something (as libart code is running).
- ASSERT_EQ(0, unsetenv("ANDROID_ROOT"));
- std::string android_root3 = GetAndroidRootSafe(&error_msg);
- // This should be the same as the other root (modulo realpath), otherwise the test setup is
- // broken.
- UniqueCPtr<char> real_root(realpath(android_root.c_str(), nullptr));
- UniqueCPtr<char> real_root3(realpath(android_root3.c_str(), nullptr));
- EXPECT_STREQ(real_root.get(), real_root3.get());
-
-
- // Reset ANDROID_ROOT, as other things may depend on it.
- ASSERT_EQ(0, setenv("ANDROID_ROOT", android_root_env.c_str(), 1 /* overwrite */));
-}
-
-TEST_F(FileUtilsTest, ReplaceFileExtension) {
- EXPECT_EQ("/directory/file.vdex", ReplaceFileExtension("/directory/file.oat", "vdex"));
- EXPECT_EQ("/.directory/file.vdex", ReplaceFileExtension("/.directory/file.oat", "vdex"));
- EXPECT_EQ("/directory/file.vdex", ReplaceFileExtension("/directory/file", "vdex"));
- EXPECT_EQ("/.directory/file.vdex", ReplaceFileExtension("/.directory/file", "vdex"));
-}
-
-} // namespace art
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index e2ad7fd83f..6917899bff 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -64,20 +64,19 @@ class CheckReferenceMapVisitor : public StackVisitor {
void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- CodeInfo code_info = GetCurrentOatQuickMethodHeader()->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ CodeInfo code_info(GetCurrentOatQuickMethodHeader());
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
CodeItemDataAccessor accessor(m->DexInstructionData());
uint16_t number_of_dex_registers = accessor.RegistersSize();
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
- uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
- BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
for (int i = 0; i < number_of_references; ++i) {
int reg = registers[i];
CHECK_LT(reg, accessor.RegistersSize());
DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
- reg, number_of_dex_registers, code_info, encoding);
+ reg, number_of_dex_registers, code_info);
switch (location.GetKind()) {
case DexRegisterLocation::Kind::kNone:
// Not set, should not be a reference.
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4141a37366..b88aa5e07a 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -828,9 +828,24 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b
return true;
}
+static void CreateStringInitBindings(Thread* self, ClassLinker* class_linker)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Find String.<init> -> StringFactory bindings.
+ ObjPtr<mirror::Class> string_factory_class =
+ class_linker->FindSystemClass(self, "Ljava/lang/StringFactory;");
+ CHECK(string_factory_class != nullptr);
+ ObjPtr<mirror::Class> string_class =
+ class_linker->GetClassRoot(ClassLinker::ClassRoot::kJavaLangString);
+ WellKnownClasses::InitStringInit(string_class, string_factory_class);
+ // Update the primordial thread.
+ self->InitStringEntryPoints();
+}
+
void ClassLinker::FinishInit(Thread* self) {
VLOG(startup) << "ClassLinker::FinishInit entering";
+ CreateStringInitBindings(self, this);
+
// Let the heap know some key offsets into java.lang.ref instances
// Note: we hard code the field indexes here rather than using FindInstanceField
// as the types of the field can't be resolved prior to the runtime being
@@ -4850,6 +4865,9 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
const uint32_t field_idx = field->GetDexFieldIndex();
ArtField* resolved_field = dex_cache->GetResolvedField(field_idx, image_pointer_size_);
if (resolved_field == nullptr) {
+ // Populating cache of a dex file which defines `klass` should always be allowed.
+ DCHECK_EQ(hiddenapi::GetMemberAction(
+ field, class_loader.Get(), dex_cache.Get(), hiddenapi::kNone), hiddenapi::kAllow);
dex_cache->SetResolvedField(field_idx, field, image_pointer_size_);
} else {
DCHECK_EQ(field, resolved_field);
@@ -8053,26 +8071,8 @@ ArtField* ClassLinker::LookupResolvedField(uint32_t field_idx,
return nullptr;
}
DCHECK(klass->IsResolved());
- Thread* self = is_static ? Thread::Current() : nullptr;
- // First try to find a field declared directly by `klass` by the field index.
- ArtField* resolved_field = is_static
- ? mirror::Class::FindStaticField(self, klass, dex_cache, field_idx)
- : klass->FindInstanceField(dex_cache, field_idx);
-
- if (resolved_field == nullptr) {
- // If not found in `klass` by field index, search the class hierarchy using the name and type.
- const char* name = dex_file.GetFieldName(field_id);
- const char* type = dex_file.GetFieldTypeDescriptor(field_id);
- resolved_field = is_static
- ? mirror::Class::FindStaticField(self, klass, name, type)
- : klass->FindInstanceField(name, type);
- }
-
- if (resolved_field != nullptr) {
- dex_cache->SetResolvedField(field_idx, resolved_field, image_pointer_size_);
- }
- return resolved_field;
+ return FindResolvedField(klass, dex_cache, class_loader, field_idx, is_static);
}
ArtField* ClassLinker::ResolveField(uint32_t field_idx,
@@ -8087,39 +8087,18 @@ ArtField* ClassLinker::ResolveField(uint32_t field_idx,
}
const DexFile& dex_file = *dex_cache->GetDexFile();
const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
- Thread* const self = Thread::Current();
ObjPtr<mirror::Class> klass = ResolveType(field_id.class_idx_, dex_cache, class_loader);
if (klass == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
}
- if (is_static) {
- resolved = mirror::Class::FindStaticField(self, klass, dex_cache.Get(), field_idx);
- } else {
- resolved = klass->FindInstanceField(dex_cache.Get(), field_idx);
- }
-
+ resolved = FindResolvedField(klass, dex_cache.Get(), class_loader.Get(), field_idx, is_static);
if (resolved == nullptr) {
const char* name = dex_file.GetFieldName(field_id);
const char* type = dex_file.GetFieldTypeDescriptor(field_id);
- if (is_static) {
- resolved = mirror::Class::FindStaticField(self, klass, name, type);
- } else {
- resolved = klass->FindInstanceField(name, type);
- }
- }
-
- if (resolved == nullptr ||
- hiddenapi::GetMemberAction(
- resolved, class_loader.Get(), dex_cache.Get(), hiddenapi::kLinking) == hiddenapi::kDeny) {
- const char* name = dex_file.GetFieldName(field_id);
- const char* type = dex_file.GetFieldTypeDescriptor(field_id);
ThrowNoSuchFieldError(is_static ? "static " : "instance ", klass, type, name);
- return nullptr;
}
-
- dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
return resolved;
}
@@ -8134,32 +8113,83 @@ ArtField* ClassLinker::ResolveFieldJLS(uint32_t field_idx,
}
const DexFile& dex_file = *dex_cache->GetDexFile();
const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
- Thread* self = Thread::Current();
ObjPtr<mirror::Class> klass = ResolveType(field_id.class_idx_, dex_cache, class_loader);
if (klass == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
return nullptr;
}
- StringPiece name(dex_file.GetFieldName(field_id));
- StringPiece type(dex_file.GetFieldTypeDescriptor(field_id));
+ resolved = FindResolvedFieldJLS(klass, dex_cache.Get(), class_loader.Get(), field_idx);
+ if (resolved == nullptr) {
+ const char* name = dex_file.GetFieldName(field_id);
+ const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+ ThrowNoSuchFieldError("", klass, type, name);
+ }
+ return resolved;
+}
+
+ArtField* ClassLinker::FindResolvedField(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader,
+ uint32_t field_idx,
+ bool is_static) {
+ ArtField* resolved = nullptr;
+ Thread* self = is_static ? Thread::Current() : nullptr;
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+
+ resolved = is_static ? mirror::Class::FindStaticField(self, klass, dex_cache, field_idx)
+ : klass->FindInstanceField(dex_cache, field_idx);
+
+ if (resolved == nullptr) {
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+ const char* name = dex_file.GetFieldName(field_id);
+ const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+ resolved = is_static ? mirror::Class::FindStaticField(self, klass, name, type)
+ : klass->FindInstanceField(name, type);
+ }
+
+ if (resolved != nullptr &&
+ hiddenapi::GetMemberAction(
+ resolved, class_loader, dex_cache, hiddenapi::kLinking) == hiddenapi::kDeny) {
+ resolved = nullptr;
+ }
+
+ if (resolved != nullptr) {
+ dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
+ }
+
+ return resolved;
+}
+
+ArtField* ClassLinker::FindResolvedFieldJLS(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader,
+ uint32_t field_idx) {
+ ArtField* resolved = nullptr;
+ Thread* self = Thread::Current();
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+
+ const char* name = dex_file.GetFieldName(field_id);
+ const char* type = dex_file.GetFieldTypeDescriptor(field_id);
resolved = mirror::Class::FindField(self, klass, name, type);
+
if (resolved != nullptr &&
hiddenapi::GetMemberAction(
- resolved, class_loader.Get(), dex_cache.Get(), hiddenapi::kLinking) == hiddenapi::kDeny) {
+ resolved, class_loader, dex_cache, hiddenapi::kLinking) == hiddenapi::kDeny) {
resolved = nullptr;
}
+
if (resolved != nullptr) {
dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
- } else {
- ThrowNoSuchFieldError("", klass, type, name);
}
+
return resolved;
}
ObjPtr<mirror::MethodType> ClassLinker::ResolveMethodType(
Thread* self,
- uint32_t proto_idx,
+ dex::ProtoIndex proto_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader) {
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
@@ -8221,7 +8251,7 @@ ObjPtr<mirror::MethodType> ClassLinker::ResolveMethodType(
}
ObjPtr<mirror::MethodType> ClassLinker::ResolveMethodType(Thread* self,
- uint32_t proto_idx,
+ dex::ProtoIndex proto_idx,
ArtMethod* referrer) {
StackHandleScope<2> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index e935d1dfb8..52ecf82c86 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -392,17 +392,38 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
+ // Find a field with a given ID from the DexFile associated with the given DexCache
+ // and ClassLoader, storing the result in DexCache. The declaring class is assumed
+ // to have been already resolved into `klass`. The `is_static` argument is used to
+ // determine if we are resolving a static or non-static field.
+ ArtField* FindResolvedField(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader,
+ uint32_t field_idx,
+ bool is_static)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Find a field with a given ID from the DexFile associated with the given DexCache
+ // and ClassLoader, storing the result in DexCache. The declaring class is assumed
+ // to have been already resolved into `klass`. No is_static argument is provided
+ // so that Java field resolution semantics are followed.
+ ArtField* FindResolvedFieldJLS(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader,
+ uint32_t field_idx)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Resolve a method type with a given ID from the DexFile associated with a given DexCache
// and ClassLoader, storing the result in the DexCache.
ObjPtr<mirror::MethodType> ResolveMethodType(Thread* self,
- uint32_t proto_idx,
+ dex::ProtoIndex proto_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
ObjPtr<mirror::MethodType> ResolveMethodType(Thread* self,
- uint32_t proto_idx,
+ dex::ProtoIndex proto_idx,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index 37e074d552..9c2a40b50d 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -29,6 +29,7 @@
#include "instrumentation.h"
#include "interpreter/shadow_frame.h"
#include "interpreter/unstarted_runtime.h"
+#include "jvalue-inl.h"
#include "mirror/class.h"
#include "mirror/object.h"
#include "obj_ptr-inl.h"
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 8fd95ed890..657a78bd2f 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -880,11 +880,14 @@ void ThrowVerifyError(ObjPtr<mirror::Class> referrer, const char* fmt, ...) {
void ThrowWrongMethodTypeException(ObjPtr<mirror::MethodType> expected_type,
ObjPtr<mirror::MethodType> actual_type) {
- ThrowException("Ljava/lang/invoke/WrongMethodTypeException;",
- nullptr,
- StringPrintf("Expected %s but was %s",
- expected_type->PrettyDescriptor().c_str(),
- actual_type->PrettyDescriptor().c_str()).c_str());
+ ThrowWrongMethodTypeException(expected_type->PrettyDescriptor(), actual_type->PrettyDescriptor());
+}
+
+void ThrowWrongMethodTypeException(const std::string& expected_descriptor,
+ const std::string& actual_descriptor) {
+ std::ostringstream msg;
+ msg << "Expected " << expected_descriptor << " but was " << actual_descriptor;
+ ThrowException("Ljava/lang/invoke/WrongMethodTypeException;", nullptr, msg.str().c_str());
}
} // namespace art
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 29a056e9ea..6acff6f222 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -274,6 +274,10 @@ void ThrowWrongMethodTypeException(ObjPtr<mirror::MethodType> callee_type,
ObjPtr<mirror::MethodType> callsite_type)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+void ThrowWrongMethodTypeException(const std::string& expected_descriptor,
+ const std::string& actual_descriptor)
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+
} // namespace art
#endif // ART_RUNTIME_COMMON_THROWS_H_
diff --git a/runtime/dex/art_dex_file_loader.cc b/runtime/dex/art_dex_file_loader.cc
deleted file mode 100644
index 392ce1e7f5..0000000000
--- a/runtime/dex/art_dex_file_loader.cc
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "art_dex_file_loader.h"
-
-#include <sys/mman.h> // For the PROT_* and MAP_* constants.
-#include <sys/stat.h>
-
-#include "android-base/stringprintf.h"
-
-#include "base/file_magic.h"
-#include "base/file_utils.h"
-#include "base/stl_util.h"
-#include "base/systrace.h"
-#include "base/unix_file/fd_file.h"
-#include "base/zip_archive.h"
-#include "dex/compact_dex_file.h"
-#include "dex/dex_file.h"
-#include "dex/dex_file_verifier.h"
-#include "dex/standard_dex_file.h"
-
-namespace art {
-
-namespace {
-
-class MemMapContainer : public DexFileContainer {
- public:
- explicit MemMapContainer(std::unique_ptr<MemMap>&& mem_map) : mem_map_(std::move(mem_map)) { }
- virtual ~MemMapContainer() OVERRIDE { }
-
- int GetPermissions() OVERRIDE {
- if (mem_map_.get() == nullptr) {
- return 0;
- } else {
- return mem_map_->GetProtect();
- }
- }
-
- bool IsReadOnly() OVERRIDE {
- return GetPermissions() == PROT_READ;
- }
-
- bool EnableWrite() OVERRIDE {
- CHECK(IsReadOnly());
- if (mem_map_.get() == nullptr) {
- return false;
- } else {
- return mem_map_->Protect(PROT_READ | PROT_WRITE);
- }
- }
-
- bool DisableWrite() OVERRIDE {
- CHECK(!IsReadOnly());
- if (mem_map_.get() == nullptr) {
- return false;
- } else {
- return mem_map_->Protect(PROT_READ);
- }
- }
-
- private:
- std::unique_ptr<MemMap> mem_map_;
- DISALLOW_COPY_AND_ASSIGN(MemMapContainer);
-};
-
-} // namespace
-
-using android::base::StringPrintf;
-
-static constexpr OatDexFile* kNoOatDexFile = nullptr;
-
-
-bool ArtDexFileLoader::GetMultiDexChecksums(const char* filename,
- std::vector<uint32_t>* checksums,
- std::string* error_msg,
- int zip_fd,
- bool* zip_file_only_contains_uncompressed_dex) const {
- CHECK(checksums != nullptr);
- uint32_t magic;
-
- File fd;
- if (zip_fd != -1) {
- if (ReadMagicAndReset(zip_fd, &magic, error_msg)) {
- fd = File(zip_fd, false /* check_usage */);
- }
- } else {
- fd = OpenAndReadMagic(filename, &magic, error_msg);
- }
- if (fd.Fd() == -1) {
- DCHECK(!error_msg->empty());
- return false;
- }
- if (IsZipMagic(magic)) {
- std::unique_ptr<ZipArchive> zip_archive(
- ZipArchive::OpenFromFd(fd.Release(), filename, error_msg));
- if (zip_archive.get() == nullptr) {
- *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", filename,
- error_msg->c_str());
- return false;
- }
-
- uint32_t i = 0;
- std::string zip_entry_name = GetMultiDexClassesDexName(i++);
- std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name.c_str(), error_msg));
- if (zip_entry.get() == nullptr) {
- *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", filename,
- zip_entry_name.c_str(), error_msg->c_str());
- return false;
- }
-
- if (zip_file_only_contains_uncompressed_dex != nullptr) {
- // Start by assuming everything is uncompressed.
- *zip_file_only_contains_uncompressed_dex = true;
- }
-
- do {
- if (zip_file_only_contains_uncompressed_dex != nullptr) {
- if (!(zip_entry->IsUncompressed() && zip_entry->IsAlignedTo(alignof(DexFile::Header)))) {
- *zip_file_only_contains_uncompressed_dex = false;
- }
- }
- checksums->push_back(zip_entry->GetCrc32());
- zip_entry_name = GetMultiDexClassesDexName(i++);
- zip_entry.reset(zip_archive->Find(zip_entry_name.c_str(), error_msg));
- } while (zip_entry.get() != nullptr);
- return true;
- }
- if (IsMagicValid(magic)) {
- std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
- filename,
- /* verify */ false,
- /* verify_checksum */ false,
- /* mmap_shared */ false,
- error_msg));
- if (dex_file == nullptr) {
- return false;
- }
- checksums->push_back(dex_file->GetHeader().checksum_);
- return true;
- }
- *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
- return false;
-}
-
-std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const {
- ScopedTrace trace(std::string("Open dex file from RAM ") + location);
- return OpenCommon(base,
- size,
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
- location,
- location_checksum,
- oat_dex_file,
- verify,
- verify_checksum,
- error_msg,
- /*container*/ nullptr,
- /*verify_result*/ nullptr);
-}
-
-std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& location,
- uint32_t location_checksum,
- std::unique_ptr<MemMap> map,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const {
- ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
- CHECK(map.get() != nullptr);
-
- if (map->Size() < sizeof(DexFile::Header)) {
- *error_msg = StringPrintf(
- "DexFile: failed to open dex file '%s' that is too short to have a header",
- location.c_str());
- return nullptr;
- }
-
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
- location,
- location_checksum,
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg,
- std::make_unique<MemMapContainer>(std::move(map)),
- /*verify_result*/ nullptr);
- // Opening CompactDex is only supported from vdex files.
- if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
- *error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files",
- location.c_str());
- return nullptr;
- }
- return dex_file;
-}
-
-bool ArtDexFileLoader::Open(const char* filename,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
- ScopedTrace trace(std::string("Open dex file ") + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
- uint32_t magic;
- File fd = OpenAndReadMagic(filename, &magic, error_msg);
- if (fd.Fd() == -1) {
- DCHECK(!error_msg->empty());
- return false;
- }
- if (IsZipMagic(magic)) {
- return OpenZip(fd.Release(), location, verify, verify_checksum, error_msg, dex_files);
- }
- if (IsMagicValid(magic)) {
- std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
- location,
- verify,
- verify_checksum,
- /* mmap_shared */ false,
- error_msg));
- if (dex_file.get() != nullptr) {
- dex_files->push_back(std::move(dex_file));
- return true;
- } else {
- return false;
- }
- }
- *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
- return false;
-}
-
-std::unique_ptr<const DexFile> ArtDexFileLoader::OpenDex(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- bool mmap_shared,
- std::string* error_msg) const {
- ScopedTrace trace("Open dex file " + std::string(location));
- return OpenFile(fd, location, verify, verify_checksum, mmap_shared, error_msg);
-}
-
-bool ArtDexFileLoader::OpenZip(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
- ScopedTrace trace("Dex file open Zip " + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
- if (zip_archive.get() == nullptr) {
- DCHECK(!error_msg->empty());
- return false;
- }
- return OpenAllDexFilesFromZip(
- *zip_archive, location, verify, verify_checksum, error_msg, dex_files);
-}
-
-std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- bool mmap_shared,
- std::string* error_msg) const {
- ScopedTrace trace(std::string("Open dex file ") + std::string(location));
- CHECK(!location.empty());
- std::unique_ptr<MemMap> map;
- {
- File delayed_close(fd, /* check_usage */ false);
- struct stat sbuf;
- memset(&sbuf, 0, sizeof(sbuf));
- if (fstat(fd, &sbuf) == -1) {
- *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location.c_str(),
- strerror(errno));
- return nullptr;
- }
- if (S_ISDIR(sbuf.st_mode)) {
- *error_msg = StringPrintf("Attempt to mmap directory '%s'", location.c_str());
- return nullptr;
- }
- size_t length = sbuf.st_size;
- map.reset(MemMap::MapFile(length,
- PROT_READ,
- mmap_shared ? MAP_SHARED : MAP_PRIVATE,
- fd,
- 0,
- /*low_4gb*/false,
- location.c_str(),
- error_msg));
- if (map == nullptr) {
- DCHECK(!error_msg->empty());
- return nullptr;
- }
- }
-
- if (map->Size() < sizeof(DexFile::Header)) {
- *error_msg = StringPrintf(
- "DexFile: failed to open dex file '%s' that is too short to have a header",
- location.c_str());
- return nullptr;
- }
-
- const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(map->Begin());
-
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
- location,
- dex_header->checksum_,
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg,
- std::make_unique<MemMapContainer>(std::move(map)),
- /*verify_result*/ nullptr);
-
- // Opening CompactDex is only supported from vdex files.
- if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
- *error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files",
- location.c_str());
- return nullptr;
- }
- return dex_file;
-}
-
-std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
- const ZipArchive& zip_archive,
- const char* entry_name,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- ZipOpenErrorCode* error_code) const {
- ScopedTrace trace("Dex file open from Zip Archive " + std::string(location));
- CHECK(!location.empty());
- std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
- if (zip_entry == nullptr) {
- *error_code = ZipOpenErrorCode::kEntryNotFound;
- return nullptr;
- }
- if (zip_entry->GetUncompressedLength() == 0) {
- *error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str());
- *error_code = ZipOpenErrorCode::kDexFileError;
- return nullptr;
- }
-
- std::unique_ptr<MemMap> map;
- if (zip_entry->IsUncompressed()) {
- if (!zip_entry->IsAlignedTo(alignof(DexFile::Header))) {
- // Do not mmap unaligned ZIP entries because
- // doing so would fail dex verification which requires 4 byte alignment.
- LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
- << "please zipalign to " << alignof(DexFile::Header) << " bytes. "
- << "Falling back to extracting file.";
- } else {
- // Map uncompressed files within zip as file-backed to avoid a dirty copy.
- map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg));
- if (map == nullptr) {
- LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
- << "is your ZIP file corrupted? Falling back to extraction.";
- // Try again with Extraction which still has a chance of recovery.
- }
- }
- }
-
- if (map == nullptr) {
- // Default path for compressed ZIP entries,
- // and fallback for stored ZIP entries.
- map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
- }
-
- if (map == nullptr) {
- *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
- error_msg->c_str());
- *error_code = ZipOpenErrorCode::kExtractToMemoryError;
- return nullptr;
- }
- VerifyResult verify_result;
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- /*data_base*/ nullptr,
- /*data_size*/ 0u,
- location,
- zip_entry->GetCrc32(),
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg,
- std::make_unique<MemMapContainer>(std::move(map)),
- &verify_result);
- if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
- *error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files",
- location.c_str());
- return nullptr;
- }
- if (dex_file == nullptr) {
- if (verify_result == VerifyResult::kVerifyNotAttempted) {
- *error_code = ZipOpenErrorCode::kDexFileError;
- } else {
- *error_code = ZipOpenErrorCode::kVerifyError;
- }
- return nullptr;
- }
- if (!dex_file->DisableWrite()) {
- *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
- *error_code = ZipOpenErrorCode::kMakeReadOnlyError;
- return nullptr;
- }
- CHECK(dex_file->IsReadOnly()) << location;
- if (verify_result != VerifyResult::kVerifySucceeded) {
- *error_code = ZipOpenErrorCode::kVerifyError;
- return nullptr;
- }
- *error_code = ZipOpenErrorCode::kNoError;
- return dex_file;
-}
-
-// Technically we do not have a limitation with respect to the number of dex files that can be in a
-// multidex APK. However, it's bad practice, as each dex file requires its own tables for symbols
-// (types, classes, methods, ...) and dex caches. So warn the user that we open a zip with what
-// seems an excessive number.
-static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
-
-bool ArtDexFileLoader::OpenAllDexFilesFromZip(
- const ZipArchive& zip_archive,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
- ScopedTrace trace("Dex file open from Zip " + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
- ZipOpenErrorCode error_code;
- std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
- kClassesDex,
- location,
- verify,
- verify_checksum,
- error_msg,
- &error_code));
- if (dex_file.get() == nullptr) {
- return false;
- } else {
- // Had at least classes.dex.
- dex_files->push_back(std::move(dex_file));
-
- // Now try some more.
-
- // We could try to avoid std::string allocations by working on a char array directly. As we
- // do not expect a lot of iterations, this seems too involved and brittle.
-
- for (size_t i = 1; ; ++i) {
- std::string name = GetMultiDexClassesDexName(i);
- std::string fake_location = GetMultiDexLocation(i, location.c_str());
- std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive,
- name.c_str(),
- fake_location,
- verify,
- verify_checksum,
- error_msg,
- &error_code));
- if (next_dex_file.get() == nullptr) {
- if (error_code != ZipOpenErrorCode::kEntryNotFound) {
- LOG(WARNING) << "Zip open failed: " << *error_msg;
- }
- break;
- } else {
- dex_files->push_back(std::move(next_dex_file));
- }
-
- if (i == kWarnOnManyDexFilesThreshold) {
- LOG(WARNING) << location << " has in excess of " << kWarnOnManyDexFilesThreshold
- << " dex files. Please consider coalescing and shrinking the number to "
- " avoid runtime overhead.";
- }
-
- if (i == std::numeric_limits<size_t>::max()) {
- LOG(ERROR) << "Overflow in number of dex files!";
- break;
- }
- }
-
- return true;
- }
-}
-
-std::unique_ptr<DexFile> ArtDexFileLoader::OpenCommon(const uint8_t* base,
- size_t size,
- const uint8_t* data_base,
- size_t data_size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::unique_ptr<DexFileContainer> container,
- VerifyResult* verify_result) {
- std::unique_ptr<DexFile> dex_file = DexFileLoader::OpenCommon(base,
- size,
- data_base,
- data_size,
- location,
- location_checksum,
- oat_dex_file,
- verify,
- verify_checksum,
- error_msg,
- std::move(container),
- verify_result);
-
- // Check if this dex file is located in the framework directory.
- // If it is, set a flag on the dex file. This is used by hidden API
- // policy decision logic.
- // Location can contain multidex suffix, so fetch its canonical version. Note
- // that this will call `realpath`.
- std::string path = DexFileLoader::GetDexCanonicalLocation(location.c_str());
- if (dex_file != nullptr && LocationIsOnSystemFramework(path.c_str())) {
- dex_file->SetIsPlatformDexFile();
- }
-
- return dex_file;
-}
-
-} // namespace art
diff --git a/runtime/dex/art_dex_file_loader.h b/runtime/dex/art_dex_file_loader.h
deleted file mode 100644
index 7577945632..0000000000
--- a/runtime/dex/art_dex_file_loader.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
-#define ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
-
-#include <cstdint>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "base/macros.h"
-#include "dex/dex_file_loader.h"
-
-namespace art {
-
-class DexFile;
-class DexFileContainer;
-class MemMap;
-class OatDexFile;
-class ZipArchive;
-
-// Class that is used to open dex files and deal with corresponding multidex and location logic.
-class ArtDexFileLoader : public DexFileLoader {
- public:
- virtual ~ArtDexFileLoader() { }
-
- // Returns the checksums of a file for comparison with GetLocationChecksum().
- // For .dex files, this is the single header checksum.
- // For zip files, this is the zip entry CRC32 checksum for classes.dex and
- // each additional multidex entry classes2.dex, classes3.dex, etc.
- // If a valid zip_fd is provided the file content will be read directly from
- // the descriptor and `filename` will be used as alias for error logging. If
- // zip_fd is -1, the method will try to open the `filename` and read the
- // content from it.
- // Return true if the checksums could be found, false otherwise.
- bool GetMultiDexChecksums(const char* filename,
- std::vector<uint32_t>* checksums,
- std::string* error_msg,
- int zip_fd = -1,
- bool* only_contains_uncompressed_dex = nullptr) const OVERRIDE;
-
- // Opens .dex file, backed by existing memory
- std::unique_ptr<const DexFile> Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const OVERRIDE;
-
- // Opens .dex file that has been memory-mapped by the caller.
- std::unique_ptr<const DexFile> Open(const std::string& location,
- uint32_t location_checkum,
- std::unique_ptr<MemMap> mem_map,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const;
-
- // Opens all .dex files found in the file, guessing the container format based on file extension.
- bool Open(const char* filename,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
-
- // Open a single dex file from an fd. This function closes the fd.
- std::unique_ptr<const DexFile> OpenDex(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- bool mmap_shared,
- std::string* error_msg) const;
-
- // Opens dex files from within a .jar, .zip, or .apk file
- bool OpenZip(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
-
- private:
- std::unique_ptr<const DexFile> OpenFile(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- bool mmap_shared,
- std::string* error_msg) const;
-
- // Open all classesXXX.dex files from a zip archive.
- bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
-
- // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-null
- // return.
- std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
- const char* entry_name,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- ZipOpenErrorCode* error_code) const;
-
- static std::unique_ptr<DexFile> OpenCommon(const uint8_t* base,
- size_t size,
- const uint8_t* data_base,
- size_t data_size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::unique_ptr<DexFileContainer> container,
- VerifyResult* verify_result);
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
diff --git a/runtime/dex/art_dex_file_loader_test.cc b/runtime/dex/art_dex_file_loader_test.cc
deleted file mode 100644
index 274a6df702..0000000000
--- a/runtime/dex/art_dex_file_loader_test.cc
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <sys/mman.h>
-
-#include <fstream>
-#include <memory>
-
-#include "art_dex_file_loader.h"
-#include "base/file_utils.h"
-#include "base/mem_map.h"
-#include "base/os.h"
-#include "base/stl_util.h"
-#include "base/unix_file/fd_file.h"
-#include "common_runtime_test.h"
-#include "dex/base64_test_util.h"
-#include "dex/code_item_accessors-inl.h"
-#include "dex/descriptors_names.h"
-#include "dex/dex_file.h"
-#include "dex/dex_file-inl.h"
-#include "dex/dex_file_loader.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread-current-inl.h"
-
-namespace art {
-
-static void Copy(const std::string& src, const std::string& dst) {
- std::ifstream src_stream(src, std::ios::binary);
- std::ofstream dst_stream(dst, std::ios::binary);
- dst_stream << src_stream.rdbuf();
-}
-
-class ArtDexFileLoaderTest : public CommonRuntimeTest {
- public:
- virtual void SetUp() {
- CommonRuntimeTest::SetUp();
-
- std::string dex_location = GetTestDexFileName("Main");
- std::string multidex_location = GetTestDexFileName("MultiDex");
-
- data_location_path_ = android_data_ + "/foo.jar";
- system_location_path_ = GetAndroidRoot() + "/foo.jar";
- system_framework_location_path_ = GetAndroidRoot() + "/framework/foo.jar";
- data_multi_location_path_ = android_data_ + "/multifoo.jar";
- system_multi_location_path_ = GetAndroidRoot() + "/multifoo.jar";
- system_framework_multi_location_path_ = GetAndroidRoot() + "/framework/multifoo.jar";
-
- Copy(dex_location, data_location_path_);
- Copy(dex_location, system_location_path_);
- Copy(dex_location, system_framework_location_path_);
-
- Copy(multidex_location, data_multi_location_path_);
- Copy(multidex_location, system_multi_location_path_);
- Copy(multidex_location, system_framework_multi_location_path_);
- }
-
- virtual void TearDown() {
- remove(data_location_path_.c_str());
- remove(system_location_path_.c_str());
- remove(system_framework_location_path_.c_str());
- remove(data_multi_location_path_.c_str());
- remove(system_multi_location_path_.c_str());
- remove(system_framework_multi_location_path_.c_str());
- CommonRuntimeTest::TearDown();
- }
-
- protected:
- std::string data_location_path_;
- std::string system_location_path_;
- std::string system_framework_location_path_;
- std::string data_multi_location_path_;
- std::string system_multi_location_path_;
- std::string system_framework_multi_location_path_;
-};
-
-// TODO: Port OpenTestDexFile(s) need to be ported to use non-ART utilities, and
-// the tests that depend upon them should be moved to dex_file_loader_test.cc
-
-TEST_F(ArtDexFileLoaderTest, Open) {
- ScopedObjectAccess soa(Thread::Current());
- std::unique_ptr<const DexFile> dex(OpenTestDexFile("Nested"));
- ASSERT_TRUE(dex.get() != nullptr);
-}
-
-TEST_F(ArtDexFileLoaderTest, GetLocationChecksum) {
- ScopedObjectAccess soa(Thread::Current());
- std::unique_ptr<const DexFile> raw(OpenTestDexFile("Main"));
- EXPECT_NE(raw->GetHeader().checksum_, raw->GetLocationChecksum());
-}
-
-TEST_F(ArtDexFileLoaderTest, GetChecksum) {
- std::vector<uint32_t> checksums;
- ScopedObjectAccess soa(Thread::Current());
- std::string error_msg;
- const ArtDexFileLoader dex_file_loader;
- EXPECT_TRUE(dex_file_loader.GetMultiDexChecksums(GetLibCoreDexFileNames()[0].c_str(),
- &checksums,
- &error_msg))
- << error_msg;
- ASSERT_EQ(1U, checksums.size());
- EXPECT_EQ(java_lang_dex_file_->GetLocationChecksum(), checksums[0]);
-}
-
-TEST_F(ArtDexFileLoaderTest, GetMultiDexChecksums) {
- std::string error_msg;
- std::vector<uint32_t> checksums;
- std::string multidex_file = GetTestDexFileName("MultiDex");
- const ArtDexFileLoader dex_file_loader;
- EXPECT_TRUE(dex_file_loader.GetMultiDexChecksums(multidex_file.c_str(),
- &checksums,
- &error_msg)) << error_msg;
-
- std::vector<std::unique_ptr<const DexFile>> dexes = OpenTestDexFiles("MultiDex");
- ASSERT_EQ(2U, dexes.size());
- ASSERT_EQ(2U, checksums.size());
-
- EXPECT_EQ(dexes[0]->GetLocation(), DexFileLoader::GetMultiDexLocation(0, multidex_file.c_str()));
- EXPECT_EQ(dexes[0]->GetLocationChecksum(), checksums[0]);
-
- EXPECT_EQ(dexes[1]->GetLocation(), DexFileLoader::GetMultiDexLocation(1, multidex_file.c_str()));
- EXPECT_EQ(dexes[1]->GetLocationChecksum(), checksums[1]);
-}
-
-TEST_F(ArtDexFileLoaderTest, ClassDefs) {
- ScopedObjectAccess soa(Thread::Current());
- std::unique_ptr<const DexFile> raw(OpenTestDexFile("Nested"));
- ASSERT_TRUE(raw.get() != nullptr);
- EXPECT_EQ(3U, raw->NumClassDefs());
-
- const DexFile::ClassDef& c0 = raw->GetClassDef(0);
- EXPECT_STREQ("LNested$1;", raw->GetClassDescriptor(c0));
-
- const DexFile::ClassDef& c1 = raw->GetClassDef(1);
- EXPECT_STREQ("LNested$Inner;", raw->GetClassDescriptor(c1));
-
- const DexFile::ClassDef& c2 = raw->GetClassDef(2);
- EXPECT_STREQ("LNested;", raw->GetClassDescriptor(c2));
-}
-
-TEST_F(ArtDexFileLoaderTest, GetMethodSignature) {
- ScopedObjectAccess soa(Thread::Current());
- std::unique_ptr<const DexFile> raw(OpenTestDexFile("GetMethodSignature"));
- ASSERT_TRUE(raw.get() != nullptr);
- EXPECT_EQ(1U, raw->NumClassDefs());
-
- const DexFile::ClassDef& class_def = raw->GetClassDef(0);
- ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
-
- const uint8_t* class_data = raw->GetClassData(class_def);
- ASSERT_TRUE(class_data != nullptr);
- ClassDataItemIterator it(*raw, class_data);
-
- EXPECT_EQ(1u, it.NumDirectMethods());
-
- // Check the signature for the static initializer.
- {
- ASSERT_EQ(1U, it.NumDirectMethods());
- const DexFile::MethodId& method_id = raw->GetMethodId(it.GetMemberIndex());
- const char* name = raw->StringDataByIdx(method_id.name_idx_);
- ASSERT_STREQ("<init>", name);
- std::string signature(raw->GetMethodSignature(method_id).ToString());
- ASSERT_EQ("()V", signature);
- }
-
- // Check all virtual methods.
- struct Result {
- const char* name;
- const char* signature;
- const char* pretty_method;
- };
- static const Result results[] = {
- {
- "m1",
- "(IDJLjava/lang/Object;)Ljava/lang/Float;",
- "java.lang.Float GetMethodSignature.m1(int, double, long, java.lang.Object)"
- },
- {
- "m2",
- "(ZSC)LGetMethodSignature;",
- "GetMethodSignature GetMethodSignature.m2(boolean, short, char)"
- },
- {
- "m3",
- "()V",
- "void GetMethodSignature.m3()"
- },
- {
- "m4",
- "(I)V",
- "void GetMethodSignature.m4(int)"
- },
- {
- "m5",
- "(II)V",
- "void GetMethodSignature.m5(int, int)"
- },
- {
- "m6",
- "(II[[I)V",
- "void GetMethodSignature.m6(int, int, int[][])"
- },
- {
- "m7",
- "(II[[ILjava/lang/Object;)V",
- "void GetMethodSignature.m7(int, int, int[][], java.lang.Object)"
- },
- {
- "m8",
- "(II[[ILjava/lang/Object;[[Ljava/lang/Object;)V",
- "void GetMethodSignature.m8(int, int, int[][], java.lang.Object, java.lang.Object[][])"
- },
- {
- "m9",
- "()I",
- "int GetMethodSignature.m9()"
- },
- {
- "mA",
- "()[[I",
- "int[][] GetMethodSignature.mA()"
- },
- {
- "mB",
- "()[[Ljava/lang/Object;",
- "java.lang.Object[][] GetMethodSignature.mB()"
- },
- };
- ASSERT_EQ(arraysize(results), it.NumVirtualMethods());
- for (const Result& r : results) {
- it.Next();
- const DexFile::MethodId& method_id = raw->GetMethodId(it.GetMemberIndex());
-
- const char* name = raw->StringDataByIdx(method_id.name_idx_);
- ASSERT_STREQ(r.name, name);
-
- std::string signature(raw->GetMethodSignature(method_id).ToString());
- ASSERT_EQ(r.signature, signature);
-
- std::string plain_method = std::string("GetMethodSignature.") + r.name;
- ASSERT_EQ(plain_method, raw->PrettyMethod(it.GetMemberIndex(), /* with_signature */ false));
- ASSERT_EQ(r.pretty_method, raw->PrettyMethod(it.GetMemberIndex(), /* with_signature */ true));
- }
-}
-
-TEST_F(ArtDexFileLoaderTest, FindStringId) {
- ScopedObjectAccess soa(Thread::Current());
- std::unique_ptr<const DexFile> raw(OpenTestDexFile("GetMethodSignature"));
- ASSERT_TRUE(raw.get() != nullptr);
- EXPECT_EQ(1U, raw->NumClassDefs());
-
- const char* strings[] = { "LGetMethodSignature;", "Ljava/lang/Float;", "Ljava/lang/Object;",
- "D", "I", "J", nullptr };
- for (size_t i = 0; strings[i] != nullptr; i++) {
- const char* str = strings[i];
- const DexFile::StringId* str_id = raw->FindStringId(str);
- const char* dex_str = raw->GetStringData(*str_id);
- EXPECT_STREQ(dex_str, str);
- }
-}
-
-TEST_F(ArtDexFileLoaderTest, FindTypeId) {
- for (size_t i = 0; i < java_lang_dex_file_->NumTypeIds(); i++) {
- const char* type_str = java_lang_dex_file_->StringByTypeIdx(dex::TypeIndex(i));
- const DexFile::StringId* type_str_id = java_lang_dex_file_->FindStringId(type_str);
- ASSERT_TRUE(type_str_id != nullptr);
- dex::StringIndex type_str_idx = java_lang_dex_file_->GetIndexForStringId(*type_str_id);
- const DexFile::TypeId* type_id = java_lang_dex_file_->FindTypeId(type_str_idx);
- ASSERT_EQ(type_id, java_lang_dex_file_->FindTypeId(type_str));
- ASSERT_TRUE(type_id != nullptr);
- EXPECT_EQ(java_lang_dex_file_->GetIndexForTypeId(*type_id).index_, i);
- }
-}
-
-TEST_F(ArtDexFileLoaderTest, FindProtoId) {
- for (size_t i = 0; i < java_lang_dex_file_->NumProtoIds(); i++) {
- const DexFile::ProtoId& to_find = java_lang_dex_file_->GetProtoId(i);
- const DexFile::TypeList* to_find_tl = java_lang_dex_file_->GetProtoParameters(to_find);
- std::vector<dex::TypeIndex> to_find_types;
- if (to_find_tl != nullptr) {
- for (size_t j = 0; j < to_find_tl->Size(); j++) {
- to_find_types.push_back(to_find_tl->GetTypeItem(j).type_idx_);
- }
- }
- const DexFile::ProtoId* found =
- java_lang_dex_file_->FindProtoId(to_find.return_type_idx_, to_find_types);
- ASSERT_TRUE(found != nullptr);
- EXPECT_EQ(java_lang_dex_file_->GetIndexForProtoId(*found), i);
- }
-}
-
-TEST_F(ArtDexFileLoaderTest, FindMethodId) {
- for (size_t i = 0; i < java_lang_dex_file_->NumMethodIds(); i++) {
- const DexFile::MethodId& to_find = java_lang_dex_file_->GetMethodId(i);
- const DexFile::TypeId& klass = java_lang_dex_file_->GetTypeId(to_find.class_idx_);
- const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
- const DexFile::ProtoId& signature = java_lang_dex_file_->GetProtoId(to_find.proto_idx_);
- const DexFile::MethodId* found = java_lang_dex_file_->FindMethodId(klass, name, signature);
- ASSERT_TRUE(found != nullptr) << "Didn't find method " << i << ": "
- << java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
- << java_lang_dex_file_->GetStringData(name)
- << java_lang_dex_file_->GetMethodSignature(to_find);
- EXPECT_EQ(java_lang_dex_file_->GetIndexForMethodId(*found), i);
- }
-}
-
-TEST_F(ArtDexFileLoaderTest, FindFieldId) {
- for (size_t i = 0; i < java_lang_dex_file_->NumFieldIds(); i++) {
- const DexFile::FieldId& to_find = java_lang_dex_file_->GetFieldId(i);
- const DexFile::TypeId& klass = java_lang_dex_file_->GetTypeId(to_find.class_idx_);
- const DexFile::StringId& name = java_lang_dex_file_->GetStringId(to_find.name_idx_);
- const DexFile::TypeId& type = java_lang_dex_file_->GetTypeId(to_find.type_idx_);
- const DexFile::FieldId* found = java_lang_dex_file_->FindFieldId(klass, name, type);
- ASSERT_TRUE(found != nullptr) << "Didn't find field " << i << ": "
- << java_lang_dex_file_->StringByTypeIdx(to_find.type_idx_) << " "
- << java_lang_dex_file_->StringByTypeIdx(to_find.class_idx_) << "."
- << java_lang_dex_file_->GetStringData(name);
- EXPECT_EQ(java_lang_dex_file_->GetIndexForFieldId(*found), i);
- }
-}
-
-TEST_F(ArtDexFileLoaderTest, GetDexCanonicalLocation) {
- ScratchFile file;
- UniqueCPtr<const char[]> dex_location_real(realpath(file.GetFilename().c_str(), nullptr));
- std::string dex_location(dex_location_real.get());
-
- ASSERT_EQ(dex_location, DexFileLoader::GetDexCanonicalLocation(dex_location.c_str()));
- std::string multidex_location = DexFileLoader::GetMultiDexLocation(1, dex_location.c_str());
- ASSERT_EQ(multidex_location, DexFileLoader::GetDexCanonicalLocation(multidex_location.c_str()));
-
- std::string dex_location_sym = dex_location + "symlink";
- ASSERT_EQ(0, symlink(dex_location.c_str(), dex_location_sym.c_str()));
-
- ASSERT_EQ(dex_location, DexFileLoader::GetDexCanonicalLocation(dex_location_sym.c_str()));
-
- std::string multidex_location_sym = DexFileLoader::GetMultiDexLocation(
- 1, dex_location_sym.c_str());
- ASSERT_EQ(multidex_location,
- DexFileLoader::GetDexCanonicalLocation(multidex_location_sym.c_str()));
-
- ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
-}
-
-TEST_F(ArtDexFileLoaderTest, IsPlatformDexFile) {
- ArtDexFileLoader loader;
- bool success;
- std::string error_msg;
- std::vector<std::unique_ptr<const DexFile>> dex_files;
-
- // Load file from a non-system directory and check that it is not flagged as framework.
- ASSERT_FALSE(LocationIsOnSystemFramework(data_location_path_.c_str()));
- success = loader.Open(data_location_path_.c_str(),
- data_location_path_,
- /* verify */ false,
- /* verify_checksum */ false,
- &error_msg,
- &dex_files);
- ASSERT_TRUE(success) << error_msg;
- ASSERT_GE(dex_files.size(), 1u);
- for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
- ASSERT_FALSE(dex_file->IsPlatformDexFile());
- }
-
- dex_files.clear();
-
- // Load file from a system, non-framework directory and check that it is not flagged as framework.
- ASSERT_FALSE(LocationIsOnSystemFramework(system_location_path_.c_str()));
- success = loader.Open(system_location_path_.c_str(),
- system_location_path_,
- /* verify */ false,
- /* verify_checksum */ false,
- &error_msg,
- &dex_files);
- ASSERT_TRUE(success);
- ASSERT_GE(dex_files.size(), 1u);
- for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
- ASSERT_FALSE(dex_file->IsPlatformDexFile());
- }
-
- dex_files.clear();
-
- // Load file from a system/framework directory and check that it is flagged as a framework dex.
- ASSERT_TRUE(LocationIsOnSystemFramework(system_framework_location_path_.c_str()));
- success = loader.Open(system_framework_location_path_.c_str(),
- system_framework_location_path_,
- /* verify */ false,
- /* verify_checksum */ false,
- &error_msg,
- &dex_files);
- ASSERT_TRUE(success);
- ASSERT_GE(dex_files.size(), 1u);
- for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
- ASSERT_TRUE(dex_file->IsPlatformDexFile());
- }
-
- dex_files.clear();
-
- // Load multidex file from a non-system directory and check that it is not flagged as framework.
- success = loader.Open(data_multi_location_path_.c_str(),
- data_multi_location_path_,
- /* verify */ false,
- /* verify_checksum */ false,
- &error_msg,
- &dex_files);
- ASSERT_TRUE(success) << error_msg;
- ASSERT_GT(dex_files.size(), 1u);
- for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
- ASSERT_FALSE(dex_file->IsPlatformDexFile());
- }
-
- dex_files.clear();
-
- // Load multidex file from a system, non-framework directory and check that it is not flagged
- // as framework.
- success = loader.Open(system_multi_location_path_.c_str(),
- system_multi_location_path_,
- /* verify */ false,
- /* verify_checksum */ false,
- &error_msg,
- &dex_files);
- ASSERT_TRUE(success);
- ASSERT_GT(dex_files.size(), 1u);
- for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
- ASSERT_FALSE(dex_file->IsPlatformDexFile());
- }
-
- dex_files.clear();
-
- // Load multidex file from a system/framework directory and check that it is flagged as a
- // framework dex.
- success = loader.Open(system_framework_multi_location_path_.c_str(),
- system_framework_multi_location_path_,
- /* verify */ false,
- /* verify_checksum */ false,
- &error_msg,
- &dex_files);
- ASSERT_TRUE(success);
- ASSERT_GT(dex_files.size(), 1u);
- for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
- ASSERT_TRUE(dex_file->IsPlatformDexFile());
- }
-}
-
-} // namespace art
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index c399b1c7fe..95b42d27d2 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -29,6 +29,7 @@
#include "mirror/field.h"
#include "mirror/method.h"
#include "oat_file.h"
+#include "obj_ptr-inl.h"
#include "reflection.h"
#include "thread.h"
#include "well_known_classes.h"
@@ -116,9 +117,9 @@ class ClassData {
DISALLOW_COPY_AND_ASSIGN(ClassData);
};
-mirror::Object* CreateAnnotationMember(const ClassData& klass,
- Handle<mirror::Class> annotation_class,
- const uint8_t** annotation)
+ObjPtr<mirror::Object> CreateAnnotationMember(const ClassData& klass,
+ Handle<mirror::Class> annotation_class,
+ const uint8_t** annotation)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsVisibilityCompatible(uint32_t actual, uint32_t expected) {
@@ -333,7 +334,7 @@ const DexFile::AnnotationSetItem* FindAnnotationSetForClass(const ClassData& kla
return dex_file.GetClassAnnotationSet(annotations_dir);
}
-mirror::Object* ProcessEncodedAnnotation(const ClassData& klass, const uint8_t** annotation)
+ObjPtr<mirror::Object> ProcessEncodedAnnotation(const ClassData& klass, const uint8_t** annotation)
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t type_index = DecodeUnsignedLeb128(annotation);
uint32_t size = DecodeUnsignedLeb128(annotation);
@@ -355,13 +356,13 @@ mirror::Object* ProcessEncodedAnnotation(const ClassData& klass, const uint8_t**
}
ObjPtr<mirror::Class> annotation_member_class =
- soa.Decode<mirror::Class>(WellKnownClasses::libcore_reflect_AnnotationMember).Ptr();
- mirror::Class* annotation_member_array_class =
+ soa.Decode<mirror::Class>(WellKnownClasses::libcore_reflect_AnnotationMember);
+ ObjPtr<mirror::Class> annotation_member_array_class =
class_linker->FindArrayClass(self, &annotation_member_class);
if (annotation_member_array_class == nullptr) {
return nullptr;
}
- mirror::ObjectArray<mirror::Object>* element_array = nullptr;
+ ObjPtr<mirror::ObjectArray<mirror::Object>> element_array = nullptr;
if (size > 0) {
element_array =
mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_member_array_class, size);
@@ -373,7 +374,7 @@ mirror::Object* ProcessEncodedAnnotation(const ClassData& klass, const uint8_t**
Handle<mirror::ObjectArray<mirror::Object>> h_element_array(hs.NewHandle(element_array));
for (uint32_t i = 0; i < size; ++i) {
- mirror::Object* new_member = CreateAnnotationMember(klass, annotation_class, annotation);
+ ObjPtr<mirror::Object> new_member = CreateAnnotationMember(klass, annotation_class, annotation);
if (new_member == nullptr) {
return nullptr;
}
@@ -605,7 +606,7 @@ bool ProcessAnnotationValue(const ClassData& klass,
return false;
}
if (!component_type->IsPrimitive()) {
- mirror::Object* obj = new_annotation_value.value_.GetL();
+ ObjPtr<mirror::Object> obj = new_annotation_value.value_.GetL();
new_array->AsObjectArray<mirror::Object>()->
SetWithoutChecks<kTransactionActive>(i, obj);
} else {
@@ -682,20 +683,20 @@ bool ProcessAnnotationValue(const ClassData& klass,
*annotation_ptr = annotation;
if (result_style == DexFile::kAllObjects && primitive_type != Primitive::kPrimVoid) {
- element_object = BoxPrimitive(primitive_type, annotation_value->value_).Ptr();
+ element_object = BoxPrimitive(primitive_type, annotation_value->value_);
set_object = true;
}
if (set_object) {
- annotation_value->value_.SetL(element_object.Ptr());
+ annotation_value->value_.SetL(element_object);
}
return true;
}
-mirror::Object* CreateAnnotationMember(const ClassData& klass,
- Handle<mirror::Class> annotation_class,
- const uint8_t** annotation) {
+ObjPtr<mirror::Object> CreateAnnotationMember(const ClassData& klass,
+ Handle<mirror::Class> annotation_class,
+ const uint8_t** annotation) {
const DexFile& dex_file = klass.GetDexFile();
Thread* self = Thread::Current();
ScopedObjectAccessUnchecked soa(self);
@@ -799,7 +800,7 @@ const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet(
return nullptr;
}
-mirror::Object* GetAnnotationObjectFromAnnotationSet(
+ObjPtr<mirror::Object> GetAnnotationObjectFromAnnotationSet(
const ClassData& klass,
const DexFile::AnnotationSetItem* annotation_set,
uint32_t visibility,
@@ -814,11 +815,11 @@ mirror::Object* GetAnnotationObjectFromAnnotationSet(
return ProcessEncodedAnnotation(klass, &annotation);
}
-mirror::Object* GetAnnotationValue(const ClassData& klass,
- const DexFile::AnnotationItem* annotation_item,
- const char* annotation_name,
- Handle<mirror::Class> array_class,
- uint32_t expected_type)
+ObjPtr<mirror::Object> GetAnnotationValue(const ClassData& klass,
+ const DexFile::AnnotationItem* annotation_item,
+ const char* annotation_name,
+ Handle<mirror::Class> array_class,
+ uint32_t expected_type)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
const uint8_t* annotation =
@@ -864,7 +865,7 @@ mirror::ObjectArray<mirror::String>* GetSignatureValue(const ClassData& klass,
if (string_array_class == nullptr) {
return nullptr;
}
- mirror::Object* obj =
+ ObjPtr<mirror::Object> obj =
GetAnnotationValue(klass, annotation_item, "value", string_array_class,
DexFile::kDexAnnotationArray);
if (obj == nullptr) {
@@ -873,8 +874,9 @@ mirror::ObjectArray<mirror::String>* GetSignatureValue(const ClassData& klass,
return obj->AsObjectArray<mirror::String>();
}
-mirror::ObjectArray<mirror::Class>* GetThrowsValue(const ClassData& klass,
- const DexFile::AnnotationSetItem* annotation_set)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetThrowsValue(
+ const ClassData& klass,
+ const DexFile::AnnotationSetItem* annotation_set)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile& dex_file = klass.GetDexFile();
StackHandleScope<1> hs(Thread::Current());
@@ -890,7 +892,7 @@ mirror::ObjectArray<mirror::Class>* GetThrowsValue(const ClassData& klass,
if (class_array_class == nullptr) {
return nullptr;
}
- mirror::Object* obj =
+ ObjPtr<mirror::Object> obj =
GetAnnotationValue(klass, annotation_item, "value", class_array_class,
DexFile::kDexAnnotationArray);
if (obj == nullptr) {
@@ -899,7 +901,7 @@ mirror::ObjectArray<mirror::Class>* GetThrowsValue(const ClassData& klass,
return obj->AsObjectArray<mirror::Class>();
}
-mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(
+ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSet(
const ClassData& klass,
const DexFile::AnnotationSetItem* annotation_set,
uint32_t visibility)
@@ -930,7 +932,7 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(
continue;
}
const uint8_t* annotation = annotation_item->annotation_;
- mirror::Object* annotation_obj = ProcessEncodedAnnotation(klass, &annotation);
+ ObjPtr<mirror::Object> annotation_obj = ProcessEncodedAnnotation(klass, &annotation);
if (annotation_obj != nullptr) {
result->SetWithoutChecks<false>(dest_index, annotation_obj);
++dest_index;
@@ -943,21 +945,21 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(
return result.Get();
}
- mirror::ObjectArray<mirror::Object>* trimmed_result =
+ ObjPtr<mirror::ObjectArray<mirror::Object>> trimmed_result =
mirror::ObjectArray<mirror::Object>::Alloc(self, annotation_array_class.Get(), dest_index);
if (trimmed_result == nullptr) {
return nullptr;
}
for (uint32_t i = 0; i < dest_index; ++i) {
- mirror::Object* obj = result->GetWithoutChecks(i);
+ ObjPtr<mirror::Object> obj = result->GetWithoutChecks(i);
trimmed_result->SetWithoutChecks<false>(i, obj);
}
return trimmed_result;
}
-mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
+ObjPtr<mirror::ObjectArray<mirror::Object>> ProcessAnnotationSetRefList(
const ClassData& klass,
const DexFile::AnnotationSetRefList* set_ref_list,
uint32_t size)
@@ -968,7 +970,7 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
StackHandleScope<1> hs(self);
ObjPtr<mirror::Class> annotation_array_class =
soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
- mirror::Class* annotation_array_array_class =
+ ObjPtr<mirror::Class> annotation_array_array_class =
Runtime::Current()->GetClassLinker()->FindArrayClass(self, &annotation_array_class);
if (annotation_array_array_class == nullptr) {
return nullptr;
@@ -982,8 +984,9 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
for (uint32_t index = 0; index < size; ++index) {
const DexFile::AnnotationSetRefItem* set_ref_item = &set_ref_list->list_[index];
const DexFile::AnnotationSetItem* set_item = dex_file.GetSetRefItemItem(set_ref_item);
- mirror::Object* annotation_set = ProcessAnnotationSet(klass, set_item,
- DexFile::kDexVisibilityRuntime);
+ ObjPtr<mirror::Object> annotation_set = ProcessAnnotationSet(klass,
+ set_item,
+ DexFile::kDexVisibilityRuntime);
if (annotation_set == nullptr) {
return nullptr;
}
@@ -995,7 +998,8 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
namespace annotations {
-mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field,
+ Handle<mirror::Class> annotation_class) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
if (annotation_set == nullptr) {
return nullptr;
@@ -1008,7 +1012,7 @@ mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> ann
annotation_class);
}
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
StackHandleScope<1> hs(Thread::Current());
const ClassData field_class(hs, field);
@@ -1037,7 +1041,7 @@ bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_
return annotation_item != nullptr;
}
-mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) {
+ObjPtr<mirror::Object> GetAnnotationDefaultValue(ArtMethod* method) {
const ClassData klass(method);
const DexFile* dex_file = &klass.GetDexFile();
const DexFile::AnnotationsDirectoryItem* annotations_dir =
@@ -1081,7 +1085,8 @@ mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) {
return annotation_value.value_.GetL();
}
-mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForMethod(ArtMethod* method,
+ Handle<mirror::Class> annotation_class) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
return nullptr;
@@ -1090,14 +1095,14 @@ mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class>
DexFile::kDexVisibilityRuntime, annotation_class);
}
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForMethod(ArtMethod* method) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
return ProcessAnnotationSet(ClassData(method),
annotation_set,
DexFile::kDexVisibilityRuntime);
}
-mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetExceptionTypesForMethod(ArtMethod* method) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
if (annotation_set == nullptr) {
return nullptr;
@@ -1105,7 +1110,7 @@ mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method
return GetThrowsValue(ClassData(method), annotation_set);
}
-mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetParameterAnnotations(ArtMethod* method) {
const DexFile* dex_file = method->GetDexFile();
const DexFile::ParameterAnnotationsItem* parameter_annotations =
FindAnnotationsItemForMethod(method);
@@ -1136,9 +1141,9 @@ uint32_t GetNumberOfAnnotatedMethodParameters(ArtMethod* method) {
return set_ref_list->size_;
}
-mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
- uint32_t parameter_idx,
- Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForMethodParameter(ArtMethod* method,
+ uint32_t parameter_idx,
+ Handle<mirror::Class> annotation_class) {
const DexFile* dex_file = method->GetDexFile();
const DexFile::ParameterAnnotationsItem* parameter_annotations =
FindAnnotationsItemForMethod(method);
@@ -1307,8 +1312,8 @@ uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
return access_flags;
}
-mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
- Handle<mirror::Class> annotation_class) {
+ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
+ Handle<mirror::Class> annotation_class) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
@@ -1320,13 +1325,13 @@ mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
annotation_class);
}
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) {
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror::Class> klass) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
return ProcessAnnotationSet(data, annotation_set, DexFile::kDexVisibilityRuntime);
}
-mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) {
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
@@ -1345,7 +1350,7 @@ mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> kla
if (class_array_class == nullptr) {
return nullptr;
}
- mirror::Object* obj =
+ ObjPtr<mirror::Object> obj =
GetAnnotationValue(data, annotation_item, "value", class_array_class,
DexFile::kDexAnnotationArray);
if (obj == nullptr) {
@@ -1354,7 +1359,7 @@ mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> kla
return obj->AsObjectArray<mirror::Class>();
}
-mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) {
+ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
@@ -1366,17 +1371,19 @@ mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) {
if (annotation_item == nullptr) {
return nullptr;
}
- mirror::Object* obj = GetAnnotationValue(data, annotation_item, "value",
- ScopedNullHandle<mirror::Class>(),
- DexFile::kDexAnnotationType);
+ ObjPtr<mirror::Object> obj = GetAnnotationValue(data,
+ annotation_item,
+ "value",
+ ScopedNullHandle<mirror::Class>(),
+ DexFile::kDexAnnotationType);
if (obj == nullptr) {
return nullptr;
}
return obj->AsClass();
}
-mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) {
- mirror::Class* declaring_class = GetDeclaringClass(klass);
+ObjPtr<mirror::Class> GetEnclosingClass(Handle<mirror::Class> klass) {
+ ObjPtr<mirror::Class> declaring_class = GetDeclaringClass(klass);
if (declaring_class != nullptr) {
return declaring_class;
}
@@ -1420,7 +1427,7 @@ mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) {
return method->GetDeclaringClass();
}
-mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) {
+ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
@@ -1438,7 +1445,7 @@ mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) {
DexFile::kDexAnnotationMethod);
}
-bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) {
+bool GetInnerClass(Handle<mirror::Class> klass, ObjPtr<mirror::String>* name) {
ClassData data(klass);
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
diff --git a/runtime/dex/dex_file_annotations.h b/runtime/dex/dex_file_annotations.h
index 4bb0d75a57..9645a7febd 100644
--- a/runtime/dex/dex_file_annotations.h
+++ b/runtime/dex/dex_file_annotations.h
@@ -22,6 +22,7 @@
#include "handle.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array.h"
+#include "obj_ptr.h"
namespace art {
@@ -35,9 +36,10 @@ class ClassLinker;
namespace annotations {
// Field annotations.
-mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForField(ArtField* field,
+ Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForField(ArtField* field)
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -45,21 +47,22 @@ bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_
REQUIRES_SHARED(Locks::mutator_lock_);
// Method annotations.
-mirror::Object* GetAnnotationDefaultValue(ArtMethod* method)
+ObjPtr<mirror::Object> GetAnnotationDefaultValue(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForMethod(ArtMethod* method,
+ Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetExceptionTypesForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetParameterAnnotations(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetNumberOfAnnotatedMethodParameters(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
- uint32_t parameter_idx,
- Handle<mirror::Class> annotation_class)
+ObjPtr<mirror::Object> GetAnnotationForMethodParameter(ArtMethod* method,
+ uint32_t parameter_idx,
+ Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
bool GetParametersMetadataForMethod(ArtMethod* method,
MutableHandle<mirror::ObjectArray<mirror::String>>* names,
@@ -85,20 +88,20 @@ uint32_t GetNativeMethodAnnotationAccessFlags(const DexFile& dex_file,
uint32_t method_index);
// Class annotations.
-mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
+ObjPtr<mirror::Object> GetAnnotationForClass(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass)
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass)
+ObjPtr<mirror::Class> GetEnclosingClass(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
-mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass)
+ObjPtr<mirror::Object> GetEnclosingMethod(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
-bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name)
+bool GetInnerClass(Handle<mirror::Class> klass, ObjPtr<mirror::String>* name)
REQUIRES_SHARED(Locks::mutator_lock_);
bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 719b4af293..026b5da748 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1073,6 +1073,29 @@ bool ElfFileImpl<ElfTypes>::GetLoadedSize(size_t* size, std::string* error_msg)
return true;
}
+static InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags) {
+ switch (e_machine) {
+ case EM_ARM:
+ return InstructionSet::kArm;
+ case EM_AARCH64:
+ return InstructionSet::kArm64;
+ case EM_386:
+ return InstructionSet::kX86;
+ case EM_X86_64:
+ return InstructionSet::kX86_64;
+ case EM_MIPS: {
+ if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R2 ||
+ (e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
+ return InstructionSet::kMips;
+ } else if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) {
+ return InstructionSet::kMips64;
+ }
+ break;
+ }
+ }
+ return InstructionSet::kNone;
+}
+
template <typename ElfTypes>
bool ElfFileImpl<ElfTypes>::Load(File* file,
bool executable,
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index d4e7492f00..f6b1c73230 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -47,7 +47,6 @@ namespace art {
inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
const MethodInfo& method_info,
const InlineInfo& inline_info,
- const InlineInfoEncoding& encoding,
uint8_t inlining_depth)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!outer_method->IsObsolete());
@@ -57,12 +56,12 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
// suspended while executing it.
ScopedAssertNoThreadSuspension sants(__FUNCTION__);
- if (inline_info.EncodesArtMethodAtDepth(encoding, inlining_depth)) {
- return inline_info.GetArtMethodAtDepth(encoding, inlining_depth);
+ if (inline_info.EncodesArtMethodAtDepth(inlining_depth)) {
+ return inline_info.GetArtMethodAtDepth(inlining_depth);
}
- uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, inlining_depth);
- if (inline_info.GetDexPcAtDepth(encoding, inlining_depth) == static_cast<uint32_t>(-1)) {
+ uint32_t method_index = inline_info.GetMethodIndexAtDepth(method_info, inlining_depth);
+ if (inline_info.GetDexPcAtDepth(inlining_depth) == static_cast<uint32_t>(-1)) {
// "charAt" special case. It is the only non-leaf method we inline across dex files.
ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
DCHECK_EQ(inlined_method->GetDexMethodIndex(), method_index);
@@ -73,9 +72,9 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ArtMethod* method = outer_method;
for (uint32_t depth = 0, end = inlining_depth + 1u; depth != end; ++depth) {
- DCHECK(!inline_info.EncodesArtMethodAtDepth(encoding, depth));
- DCHECK_NE(inline_info.GetDexPcAtDepth(encoding, depth), static_cast<uint32_t>(-1));
- method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, depth);
+ DCHECK(!inline_info.EncodesArtMethodAtDepth(depth));
+ DCHECK_NE(inline_info.GetDexPcAtDepth(depth), static_cast<uint32_t>(-1));
+ method_index = inline_info.GetMethodIndexAtDepth(method_info, depth);
ArtMethod* inlined_method = class_linker->LookupResolvedMethod(method_index,
method->GetDexCache(),
method->GetClassLoader());
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 7536910cee..7f9b385a0a 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -201,18 +201,16 @@ static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method,
DCHECK(current_code != nullptr);
DCHECK(current_code->IsOptimized());
uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
- CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+ CodeInfo code_info(current_code);
MethodInfo method_info = current_code->GetOptimizedMethodInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+ if (stack_map.HasInlineInfo()) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
caller = GetResolvedMethod(outer_method,
method_info,
inline_info,
- encoding.inline_info.encoding,
- inline_info.GetDepth(encoding.inline_info.encoding) - 1);
+ inline_info.GetDepth() - 1);
}
}
if (kIsDebugBuild && do_caller_check) {
@@ -268,7 +266,7 @@ ObjPtr<mirror::MethodHandle> ResolveMethodHandleFromCode(ArtMethod* referrer,
}
ObjPtr<mirror::MethodType> ResolveMethodTypeFromCode(ArtMethod* referrer,
- uint32_t proto_idx) {
+ dex::ProtoIndex proto_idx) {
Thread::PoisonObjectPointersIfDebug();
ObjPtr<mirror::MethodType> method_type =
referrer->GetDexCache()->GetResolvedMethodType(proto_idx);
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 0a3b5dfc93..e33de9c45a 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -158,7 +158,7 @@ ObjPtr<mirror::MethodHandle> ResolveMethodHandleFromCode(ArtMethod* referrer,
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
-ObjPtr<mirror::MethodType> ResolveMethodTypeFromCode(ArtMethod* referrer, uint32_t proto_idx)
+ObjPtr<mirror::MethodType> ResolveMethodTypeFromCode(ArtMethod* referrer, dex::ProtoIndex proto_idx)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index cf9ddd8aa8..fa536c77a9 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -200,7 +200,7 @@ extern "C" mirror::MethodType* artResolveMethodTypeFromCode(uint32_t proto_idx,
auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self,
CalleeSaveType::kSaveEverything);
ArtMethod* caller = caller_and_outer.caller;
- ObjPtr<mirror::MethodType> result = ResolveMethodTypeFromCode(caller, proto_idx);
+ ObjPtr<mirror::MethodType> result = ResolveMethodTypeFromCode(caller, dex::ProtoIndex(proto_idx));
return result.Ptr();
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index d0aec0309d..ff85f477ec 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -43,6 +43,7 @@
#include "mirror/method_handle_impl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/var_handle.h"
#include "oat_file.h"
#include "oat_quick_method_header.h"
#include "quick_exception_handler.h"
@@ -50,6 +51,7 @@
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "thread-inl.h"
+#include "var_handles.h"
#include "well_known_classes.h"
namespace art {
@@ -337,16 +339,14 @@ class QuickArgumentVisitor {
uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
if (current_code->IsOptimized()) {
- CodeInfo code_info = current_code->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
+ CodeInfo code_info(current_code);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- return inline_info.GetDexPcAtDepth(encoding.inline_info.encoding,
- inline_info.GetDepth(encoding.inline_info.encoding)-1);
+ if (stack_map.HasInlineInfo()) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ return inline_info.GetDexPcAtDepth(inline_info.GetDepth()-1);
} else {
- return stack_map.GetDexPc(encoding.stack_map.encoding);
+ return stack_map.GetDexPc();
}
} else {
return current_code->ToDexPc(*caller_sp, outer_pc);
@@ -366,13 +366,12 @@ class QuickArgumentVisitor {
return false;
}
uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
- CodeInfo code_info = current_code->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(current_code);
MethodInfo method_info = current_code->GetOptimizedMethodInfo();
- InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding));
+ InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset));
if (invoke.IsValid()) {
- *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding));
- *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info);
+ *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType());
+ *dex_method_index = invoke.GetMethodIndex(method_info);
return true;
}
return false;
@@ -1212,12 +1211,11 @@ static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutato
CHECK(current_code != nullptr);
CHECK(current_code->IsOptimized());
uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
- CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+ CodeInfo code_info(current_code);
MethodInfo method_info = current_code->GetOptimizedMethodInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
CHECK(stack_map.IsValid());
- uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map.encoding);
+ uint32_t dex_pc = stack_map.GetDexPc();
// Log the outer method and its associated dex file and class table pointer which can be used
// to find out if the inlined methods were defined by other dex file(s) or class loader(s).
@@ -1231,20 +1229,17 @@ static void DumpB74410240DebugData(ArtMethod** sp) REQUIRES_SHARED(Locks::mutato
LOG(FATAL_WITHOUT_ABORT) << " instruction: " << DumpInstruction(outer_method, dex_pc);
ArtMethod* caller = outer_method;
- if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
- const InlineInfoEncoding& inline_info_encoding = encoding.inline_info.encoding;
- size_t depth = inline_info.GetDepth(inline_info_encoding);
+ if (stack_map.HasInlineInfo()) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ size_t depth = inline_info.GetDepth();
for (size_t d = 0; d < depth; ++d) {
const char* tag = "";
- dex_pc = inline_info.GetDexPcAtDepth(inline_info_encoding, d);
- if (inline_info.EncodesArtMethodAtDepth(inline_info_encoding, d)) {
+ dex_pc = inline_info.GetDexPcAtDepth(d);
+ if (inline_info.EncodesArtMethodAtDepth(d)) {
tag = "encoded ";
- caller = inline_info.GetArtMethodAtDepth(inline_info_encoding, d);
+ caller = inline_info.GetArtMethodAtDepth(d);
} else {
- uint32_t method_index = inline_info.GetMethodIndexAtDepth(inline_info_encoding,
- method_info,
- d);
+ uint32_t method_index = inline_info.GetMethodIndexAtDepth(method_info, d);
if (dex_pc == static_cast<uint32_t>(-1)) {
tag = "special ";
CHECK_EQ(d + 1u, depth);
@@ -2750,7 +2745,7 @@ extern "C" uintptr_t artInvokePolymorphic(
const Instruction& inst = caller_method->DexInstructions().InstructionAt(dex_pc);
DCHECK(inst.Opcode() == Instruction::INVOKE_POLYMORPHIC ||
inst.Opcode() == Instruction::INVOKE_POLYMORPHIC_RANGE);
- const uint32_t proto_idx = inst.VRegH();
+ const dex::ProtoIndex proto_idx(inst.VRegH());
const char* shorty = caller_method->GetDexFile()->GetShorty(proto_idx);
const size_t shorty_length = strlen(shorty);
static const bool kMethodIsStatic = false; // invoke() and invokeExact() are not static.
@@ -2773,13 +2768,6 @@ extern "C" uintptr_t artInvokePolymorphic(
return static_cast<uintptr_t>('V');
}
- // TODO(oth): Ensure this path isn't taken for VarHandle accessors (b/65872996).
- DCHECK_EQ(resolved_method->GetDeclaringClass(),
- WellKnownClasses::ToClass(WellKnownClasses::java_lang_invoke_MethodHandle));
-
- Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
- ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(receiver_handle.Get()))));
-
Handle<mirror::MethodType> method_type(
hs.NewHandle(linker->ResolveMethodType(self, proto_idx, caller_method)));
@@ -2819,24 +2807,43 @@ extern "C" uintptr_t artInvokePolymorphic(
// Call DoInvokePolymorphic with |is_range| = true, as shadow frame has argument registers in
// consecutive order.
RangeInstructionOperands operands(first_arg + 1, num_vregs - 1);
- bool isExact = (jni::EncodeArtMethod(resolved_method) ==
- WellKnownClasses::java_lang_invoke_MethodHandle_invokeExact);
+ Intrinsics intrinsic = static_cast<Intrinsics>(resolved_method->GetIntrinsic());
bool success = false;
- if (isExact) {
- success = MethodHandleInvokeExact(self,
+ if (resolved_method->GetDeclaringClass() == mirror::MethodHandle::StaticClass()) {
+ Handle<mirror::MethodHandle> method_handle(hs.NewHandle(
+ ObjPtr<mirror::MethodHandle>::DownCast(MakeObjPtr(receiver_handle.Get()))));
+ if (intrinsic == Intrinsics::kMethodHandleInvokeExact) {
+ success = MethodHandleInvokeExact(self,
+ *shadow_frame,
+ method_handle,
+ method_type,
+ &operands,
+ result);
+ } else {
+ DCHECK_EQ(static_cast<uint32_t>(intrinsic),
+ static_cast<uint32_t>(Intrinsics::kMethodHandleInvoke));
+ success = MethodHandleInvoke(self,
+ *shadow_frame,
+ method_handle,
+ method_type,
+ &operands,
+ result);
+ }
+ } else {
+ DCHECK_EQ(mirror::VarHandle::StaticClass(), resolved_method->GetDeclaringClass());
+ Handle<mirror::VarHandle> var_handle(hs.NewHandle(
+ ObjPtr<mirror::VarHandle>::DownCast(MakeObjPtr(receiver_handle.Get()))));
+ mirror::VarHandle::AccessMode access_mode =
+ mirror::VarHandle::GetAccessModeByIntrinsic(intrinsic);
+ success = VarHandleInvokeAccessor(self,
*shadow_frame,
- method_handle,
+ var_handle,
method_type,
+ access_mode,
&operands,
result);
- } else {
- success = MethodHandleInvoke(self,
- *shadow_frame,
- method_handle,
- method_type,
- &operands,
- result);
}
+
DCHECK(success || self->IsExceptionPending());
// Pop transition record.
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 46630dbeef..464c2b749f 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -90,16 +90,24 @@ DEFINE_CHECK_EQ(static_cast<size_t>(CARD_TABLE_CARD_SHIFT), (static_cast<size_t>
DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
#define LOCK_WORD_STATE_SHIFT 30
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kStateShift)))
-#define LOCK_WORD_STATE_MASK 0xc0000000
-DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
+#define LOCK_WORD_STATE_MASK_SHIFTED 0xc0000000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kStateMaskShifted)))
#define LOCK_WORD_READ_BARRIER_STATE_SHIFT 28
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_READ_BARRIER_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kReadBarrierStateShift)))
#define LOCK_WORD_READ_BARRIER_STATE_MASK 0x10000000
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShifted)))
#define LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED 0xefffffff
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_READ_BARRIER_STATE_MASK_TOGGLED), (static_cast<uint32_t>(art::LockWord::kReadBarrierStateMaskShiftedToggled)))
-#define LOCK_WORD_THIN_LOCK_COUNT_ONE 65536
-DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<int32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_THIN_LOCK_COUNT_SIZE 12
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SIZE), (static_cast<int32_t>(art::LockWord::kThinLockCountSize)))
+#define LOCK_WORD_THIN_LOCK_COUNT_SHIFT 16
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_THIN_LOCK_COUNT_SHIFT), (static_cast<int32_t>(art::LockWord::kThinLockCountShift)))
+#define LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED 0xfff0000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockCountMaskShifted)))
+#define LOCK_WORD_THIN_LOCK_COUNT_ONE 0x10000
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_COUNT_ONE), (static_cast<uint32_t>(art::LockWord::kThinLockCountOne)))
+#define LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED 0xffff
+DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kThinLockOwnerMaskShifted)))
#define LOCK_WORD_STATE_FORWARDING_ADDRESS 0x3
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS), (static_cast<uint32_t>(art::LockWord::kStateForwardingAddress)))
#define LOCK_WORD_STATE_FORWARDING_ADDRESS_OVERFLOW 0x40000000
@@ -110,6 +118,8 @@ DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT),
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShifted)))
#define LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED 0xcfffffff
DEFINE_CHECK_EQ(static_cast<uint32_t>(LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED), (static_cast<uint32_t>(art::LockWord::kGCStateMaskShiftedToggled)))
+#define LOCK_WORD_GC_STATE_SIZE 2
+DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SIZE), (static_cast<int32_t>(art::LockWord::kGCStateSize)))
#define LOCK_WORD_GC_STATE_SHIFT 28
DEFINE_CHECK_EQ(static_cast<int32_t>(LOCK_WORD_GC_STATE_SHIFT), (static_cast<int32_t>(art::LockWord::kGCStateShift)))
#define LOCK_WORD_MARK_BIT_SHIFT 29
diff --git a/runtime/hidden_api.cc b/runtime/hidden_api.cc
index 9445ae0c8e..e41d1d3eb9 100644
--- a/runtime/hidden_api.cc
+++ b/runtime/hidden_api.cc
@@ -14,8 +14,6 @@
* limitations under the License.
*/
-#include <metricslogger/metrics_logger.h>
-
#include "hidden_api.h"
#include <nativehelper/scoped_local_ref.h>
@@ -24,11 +22,14 @@
#include "thread-current-inl.h"
#include "well_known_classes.h"
+#ifdef ART_TARGET_ANDROID
+#include <metricslogger/metrics_logger.h>
using android::metricslogger::ComplexEventLogger;
using android::metricslogger::ACTION_HIDDEN_API_ACCESSED;
using android::metricslogger::FIELD_HIDDEN_API_ACCESS_METHOD;
using android::metricslogger::FIELD_HIDDEN_API_ACCESS_DENIED;
using android::metricslogger::FIELD_HIDDEN_API_SIGNATURE;
+#endif
namespace art {
namespace hiddenapi {
@@ -39,7 +40,7 @@ namespace hiddenapi {
// Note that when flipping this flag, you must also update the expectations of test 674-hiddenapi
// as it affects whether or not we warn for light grey APIs that have been added to the exemptions
// list.
-static constexpr bool kLogAllAccesses = true;
+static constexpr bool kLogAllAccesses = false;
static inline std::ostream& operator<<(std::ostream& os, AccessMethod value) {
switch (value) {
@@ -137,6 +138,7 @@ void MemberSignature::WarnAboutAccess(AccessMethod access_method,
LOG(WARNING) << "Accessing hidden " << (type_ == kField ? "field " : "method ")
<< Dumpable<MemberSignature>(*this) << " (" << list << ", " << access_method << ")";
}
+#ifdef ART_TARGET_ANDROID
// Convert an AccessMethod enum to a value for logging from the proto enum.
// This method may look odd (the enum values are current the same), but it
// prevents coupling the internal enum to the proto enum (which should never
@@ -156,12 +158,15 @@ inline static int32_t GetEnumValueForLog(AccessMethod access_method) {
DCHECK(false);
}
}
+#endif
void MemberSignature::LogAccessToEventLog(AccessMethod access_method, Action action_taken) {
- if (access_method == kLinking) {
+#ifdef ART_TARGET_ANDROID
+ if (access_method == kLinking || access_method == kNone) {
// Linking warnings come from static analysis/compilation of the bytecode
// and can contain false positives (i.e. code that is never run). We choose
// not to log these in the event log.
+ // None does not correspond to actual access, so should also be ignored.
return;
}
ComplexEventLogger log_maker(ACTION_HIDDEN_API_ACCESSED);
@@ -173,6 +178,10 @@ void MemberSignature::LogAccessToEventLog(AccessMethod access_method, Action act
Dump(signature_str);
log_maker.AddTaggedData(FIELD_HIDDEN_API_SIGNATURE, signature_str.str());
log_maker.Record();
+#else
+ UNUSED(access_method);
+ UNUSED(action_taken);
+#endif
}
static ALWAYS_INLINE bool CanUpdateMemberAccessFlags(ArtField*) {
@@ -210,7 +219,8 @@ Action GetMemberActionImpl(T* member,
// - for non-debuggable apps, there is no distinction between light grey & whitelisted APIs.
// - we want to avoid the overhead of checking for exemptions for light greylisted APIs whenever
// possible.
- if (kLogAllAccesses || action == kDeny || runtime->IsJavaDebuggable()) {
+ const bool shouldWarn = kLogAllAccesses || runtime->IsJavaDebuggable();
+ if (shouldWarn || action == kDeny) {
if (member_signature.IsExempted(runtime->GetHiddenApiExemptions())) {
action = kAllow;
// Avoid re-examining the exemption list next time.
@@ -227,7 +237,7 @@ Action GetMemberActionImpl(T* member,
}
}
- if (kIsTargetBuild) {
+ if (kIsTargetBuild && !kIsTargetLinux) {
uint32_t eventLogSampleRate = runtime->GetHiddenApiEventLogSampleRate();
// Assert that RAND_MAX is big enough, to ensure sampling below works as expected.
static_assert(RAND_MAX >= 0xffff, "RAND_MAX too small");
@@ -251,7 +261,8 @@ Action GetMemberActionImpl(T* member,
MaybeWhitelistMember(runtime, member);
// If this action requires a UI warning, set the appropriate flag.
- if (action == kAllowButWarnAndToast || runtime->ShouldAlwaysSetHiddenApiWarningFlag()) {
+ if (shouldWarn &&
+ (action == kAllowButWarnAndToast || runtime->ShouldAlwaysSetHiddenApiWarningFlag())) {
runtime->SetPendingHiddenApiWarning(true);
}
}
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index 8e21fd3b8f..580224e439 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -95,6 +95,22 @@ inline Action GetActionFromAccessFlags(HiddenApiAccessFlags::ApiList api_list) {
}
}
+class ScopedHiddenApiEnforcementPolicySetting {
+ public:
+ explicit ScopedHiddenApiEnforcementPolicySetting(EnforcementPolicy new_policy)
+ : initial_policy_(Runtime::Current()->GetHiddenApiEnforcementPolicy()) {
+ Runtime::Current()->SetHiddenApiEnforcementPolicy(new_policy);
+ }
+
+ ~ScopedHiddenApiEnforcementPolicySetting() {
+ Runtime::Current()->SetHiddenApiEnforcementPolicy(initial_policy_);
+ }
+
+ private:
+ const EnforcementPolicy initial_policy_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedHiddenApiEnforcementPolicySetting);
+};
+
// Implementation details. DO NOT ACCESS DIRECTLY.
namespace detail {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 8a85ee41f8..5a50ec5586 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -24,7 +24,7 @@
#include "entrypoints/runtime_asm_entrypoints.h"
#include "intrinsics_enum.h"
#include "jit/jit.h"
-#include "jvalue.h"
+#include "jvalue-inl.h"
#include "method_handles-inl.h"
#include "method_handles.h"
#include "mirror/array-inl.h"
@@ -37,6 +37,7 @@
#include "stack.h"
#include "thread-inl.h"
#include "transaction.h"
+#include "var_handles.h"
#include "well_known_classes.h"
namespace art {
@@ -626,7 +627,8 @@ static bool DoMethodHandleInvokeCommon(Thread* self,
// The vRegH value gives the index of the proto_id associated with this
// signature polymorphic call site.
- const uint32_t callsite_proto_id = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
+ const uint16_t vRegH = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
+ const dex::ProtoIndex callsite_proto_id(vRegH);
// Call through to the classlinker and ask it to resolve the static type associated
// with the callsite. This information is stored in the dex cache so it's
@@ -724,38 +726,6 @@ bool DoMethodHandleInvoke(Thread* self,
}
}
-static bool DoVarHandleInvokeChecked(Thread* self,
- Handle<mirror::VarHandle> var_handle,
- Handle<mirror::MethodType> callsite_type,
- mirror::VarHandle::AccessMode access_mode,
- ShadowFrame& shadow_frame,
- InstructionOperands* operands,
- JValue* result)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // TODO(oth): GetMethodTypeForAccessMode() allocates a MethodType()
- // which is only required if we need to convert argument and/or
- // return types.
- StackHandleScope<1> hs(self);
- Handle<mirror::MethodType> accessor_type(hs.NewHandle(
- var_handle->GetMethodTypeForAccessMode(self, access_mode)));
- const size_t num_vregs = accessor_type->NumberOfVRegs();
- const int num_params = accessor_type->GetPTypes()->GetLength();
- ShadowFrameAllocaUniquePtr accessor_frame =
- CREATE_SHADOW_FRAME(num_vregs, nullptr, shadow_frame.GetMethod(), shadow_frame.GetDexPC());
- ShadowFrameGetter getter(shadow_frame, operands);
- static const uint32_t kFirstDestinationReg = 0;
- ShadowFrameSetter setter(accessor_frame.get(), kFirstDestinationReg);
- if (!PerformConversions(self, callsite_type, accessor_type, &getter, &setter, num_params)) {
- return false;
- }
- RangeInstructionOperands accessor_operands(kFirstDestinationReg,
- kFirstDestinationReg + num_vregs);
- if (!var_handle->Access(access_mode, accessor_frame.get(), &accessor_operands, result)) {
- return false;
- }
- return ConvertReturnValue(callsite_type, accessor_type, result);
-}
-
static bool DoVarHandleInvokeCommon(Thread* self,
ShadowFrame& shadow_frame,
const Instruction* inst,
@@ -768,59 +738,43 @@ static bool DoVarHandleInvokeCommon(Thread* self,
return false;
}
- bool is_var_args = inst->HasVarArgs();
- const uint32_t vRegC = is_var_args ? inst->VRegC_45cc() : inst->VRegC_4rcc();
- ObjPtr<mirror::Object> receiver(shadow_frame.GetVRegReference(vRegC));
- if (receiver.IsNull()) {
- ThrowNullPointerExceptionFromDexPC();
- return false;
- }
-
StackHandleScope<2> hs(self);
- Handle<mirror::VarHandle> var_handle(hs.NewHandle(down_cast<mirror::VarHandle*>(receiver.Ptr())));
- if (!var_handle->IsAccessModeSupported(access_mode)) {
- ThrowUnsupportedOperationException();
- return false;
- }
-
- const uint32_t vRegH = is_var_args ? inst->VRegH_45cc() : inst->VRegH_4rcc();
+ bool is_var_args = inst->HasVarArgs();
+ const uint16_t vRegH = is_var_args ? inst->VRegH_45cc() : inst->VRegH_4rcc();
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
Handle<mirror::MethodType> callsite_type(hs.NewHandle(
- class_linker->ResolveMethodType(self, vRegH, shadow_frame.GetMethod())));
+ class_linker->ResolveMethodType(self, dex::ProtoIndex(vRegH), shadow_frame.GetMethod())));
// This implies we couldn't resolve one or more types in this VarHandle.
if (UNLIKELY(callsite_type == nullptr)) {
CHECK(self->IsExceptionPending());
return false;
}
- if (!var_handle->IsMethodTypeCompatible(access_mode, callsite_type.Get())) {
- ThrowWrongMethodTypeException(var_handle->GetMethodTypeForAccessMode(self, access_mode),
- callsite_type.Get());
- return false;
- }
-
+ const uint32_t vRegC = is_var_args ? inst->VRegC_45cc() : inst->VRegC_4rcc();
+ ObjPtr<mirror::Object> receiver(shadow_frame.GetVRegReference(vRegC));
+ Handle<mirror::VarHandle> var_handle(hs.NewHandle(down_cast<mirror::VarHandle*>(receiver.Ptr())));
if (is_var_args) {
uint32_t args[Instruction::kMaxVarArgRegs];
inst->GetVarArgs(args, inst_data);
VarArgsInstructionOperands all_operands(args, inst->VRegA_45cc());
NoReceiverInstructionOperands operands(&all_operands);
- return DoVarHandleInvokeChecked(self,
- var_handle,
- callsite_type,
- access_mode,
- shadow_frame,
- &operands,
- result);
+ return VarHandleInvokeAccessor(self,
+ shadow_frame,
+ var_handle,
+ callsite_type,
+ access_mode,
+ &operands,
+ result);
} else {
RangeInstructionOperands all_operands(inst->VRegC_4rcc(), inst->VRegA_4rcc());
NoReceiverInstructionOperands operands(&all_operands);
- return DoVarHandleInvokeChecked(self,
- var_handle,
- callsite_type,
- access_mode,
- shadow_frame,
- &operands,
- result);
+ return VarHandleInvokeAccessor(self,
+ shadow_frame,
+ var_handle,
+ callsite_type,
+ access_mode,
+ &operands,
+ result);
}
}
@@ -965,9 +919,10 @@ static bool GetArgumentForBootstrapMethod(Thread* self,
StackHandleScope<2> hs(self);
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
- uint32_t index = static_cast<uint32_t>(encoded_value->GetI());
+ dex::ProtoIndex proto_idx(encoded_value->GetC());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- ObjPtr<mirror::MethodType> o = cl->ResolveMethodType(self, index, dex_cache, class_loader);
+ ObjPtr<mirror::MethodType> o =
+ cl->ResolveMethodType(self, proto_idx, dex_cache, class_loader);
if (UNLIKELY(o.IsNull())) {
DCHECK(self->IsExceptionPending());
return false;
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 0818e06675..67a0349d7a 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -217,7 +217,7 @@ static inline ObjPtr<mirror::MethodHandle> ResolveMethodHandle(Thread* self,
}
static inline ObjPtr<mirror::MethodType> ResolveMethodType(Thread* self,
- uint32_t method_type_index,
+ dex::ProtoIndex method_type_index,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 283885e522..5c7838cd66 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -565,7 +565,7 @@ void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
PREAMBLE();
ClassLinker* cl = Runtime::Current()->GetClassLinker();
ObjPtr<mirror::MethodType> mt = cl->ResolveMethodType(self,
- inst->VRegB_21c(),
+ dex::ProtoIndex(inst->VRegB_21c()),
shadow_frame.GetMethod());
if (UNLIKELY(mt == nullptr)) {
HANDLE_PENDING_EXCEPTION();
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 2a9ef2ce98..1b39a7422d 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -408,7 +408,8 @@ extern "C" size_t MterpConstMethodType(uint32_t index,
ShadowFrame* shadow_frame,
Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::MethodType> mt = ResolveMethodType(self, index, shadow_frame->GetMethod());
+ ObjPtr<mirror::MethodType> mt =
+ ResolveMethodType(self, dex::ProtoIndex(index), shadow_frame->GetMethod());
if (UNLIKELY(mt == nullptr)) {
return true;
}
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 791ebf09b7..0e429a63f6 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -510,7 +510,7 @@ void UnstartedRuntime::UnstartedClassIsAnonymousClass(
result->SetZ(false);
return;
}
- mirror::String* class_name = nullptr;
+ ObjPtr<mirror::String> class_name = nullptr;
if (!annotations::GetInnerClass(klass, &class_name)) {
result->SetZ(false);
return;
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 0684b461ae..5d4b9e8cc9 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -46,8 +46,6 @@ namespace art {
namespace jit {
static constexpr bool kEnableOnStackReplacement = true;
-// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
-static constexpr int kJitPoolThreadPthreadPriority = 9;
// Different compilation threshold constants. These can be overridden on the command line.
static constexpr size_t kJitDefaultCompileThreshold = 10000; // Non-debug default.
@@ -80,6 +78,8 @@ JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& opt
options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
jit_options->profile_saver_options_ =
options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);
+ jit_options->thread_pool_pthread_priority_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITPoolThreadPthreadPriority);
if (options.Exists(RuntimeArgumentMap::JITCompileThreshold)) {
jit_options->compile_threshold_ = *options.Get(RuntimeArgumentMap::JITCompileThreshold);
@@ -167,21 +167,14 @@ void Jit::AddTimingLogger(const TimingLogger& logger) {
cumulative_timings_.AddLogger(logger);
}
-Jit::Jit() : dump_info_on_shutdown_(false),
- cumulative_timings_("JIT timings"),
- memory_use_("Memory used for compilation", 16),
- lock_("JIT memory use lock"),
- use_jit_compilation_(true),
- hot_method_threshold_(0),
- warm_method_threshold_(0),
- osr_method_threshold_(0),
- priority_thread_weight_(0),
- invoke_transition_weight_(0) {}
+Jit::Jit(JitOptions* options) : options_(options),
+ cumulative_timings_("JIT timings"),
+ memory_use_("Memory used for compilation", 16),
+ lock_("JIT memory use lock") {}
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
DCHECK(options->UseJitCompilation() || options->GetProfileSaverOptions().IsEnabled());
- std::unique_ptr<Jit> jit(new Jit);
- jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown();
+ std::unique_ptr<Jit> jit(new Jit(options));
if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) {
return nullptr;
}
@@ -195,8 +188,6 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
if (jit->GetCodeCache() == nullptr) {
return nullptr;
}
- jit->use_jit_compilation_ = options->UseJitCompilation();
- jit->profile_saver_options_ = options->GetProfileSaverOptions();
VLOG(jit) << "JIT created with initial_capacity="
<< PrettySize(options->GetCodeCacheInitialCapacity())
<< ", max_capacity=" << PrettySize(options->GetCodeCacheMaxCapacity())
@@ -204,12 +195,6 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
<< ", profile_saver_options=" << options->GetProfileSaverOptions();
- jit->hot_method_threshold_ = options->GetCompileThreshold();
- jit->warm_method_threshold_ = options->GetWarmupThreshold();
- jit->osr_method_threshold_ = options->GetOsrThreshold();
- jit->priority_thread_weight_ = options->GetPriorityThreadWeight();
- jit->invoke_transition_weight_ = options->GetInvokeTransitionWeight();
-
jit->CreateThreadPool();
// Notify native debugger about the classes already loaded before the creation of the jit.
@@ -330,7 +315,7 @@ void Jit::CreateThreadPool() {
constexpr bool kJitPoolNeedsPeers = true;
thread_pool_.reset(new ThreadPool("Jit thread pool", 1, kJitPoolNeedsPeers));
- thread_pool_->SetPthreadPriority(kJitPoolThreadPthreadPriority);
+ thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
Start();
}
@@ -360,8 +345,8 @@ void Jit::DeleteThreadPool() {
void Jit::StartProfileSaver(const std::string& filename,
const std::vector<std::string>& code_paths) {
- if (profile_saver_options_.IsEnabled()) {
- ProfileSaver::Start(profile_saver_options_,
+ if (options_->GetSaveProfilingInfo()) {
+ ProfileSaver::Start(options_->GetProfileSaverOptions(),
filename,
code_cache_.get(),
code_paths);
@@ -369,8 +354,8 @@ void Jit::StartProfileSaver(const std::string& filename,
}
void Jit::StopProfileSaver() {
- if (profile_saver_options_.IsEnabled() && ProfileSaver::IsStarted()) {
- ProfileSaver::Stop(dump_info_on_shutdown_);
+ if (options_->GetSaveProfilingInfo() && ProfileSaver::IsStarted()) {
+ ProfileSaver::Stop(options_->DumpJitInfoOnShutdown());
}
}
@@ -383,8 +368,8 @@ bool Jit::CanInvokeCompiledCode(ArtMethod* method) {
}
Jit::~Jit() {
- DCHECK(!profile_saver_options_.IsEnabled() || !ProfileSaver::IsStarted());
- if (dump_info_on_shutdown_) {
+ DCHECK(!options_->GetSaveProfilingInfo() || !ProfileSaver::IsStarted());
+ if (options_->DumpJitInfoOnShutdown()) {
DumpInfo(LOG_STREAM(INFO));
Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
}
@@ -488,11 +473,10 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
return false;
}
- CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(osr_method);
// Find stack map starting at the target dex_pc.
- StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
+ StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset);
if (!stack_map.IsValid()) {
// There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
// hope that the next branch has one.
@@ -509,7 +493,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
// We found a stack map, now fill the frame with dex register values from the interpreter's
// shadow frame.
DexRegisterMap vreg_map =
- code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+ code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
frame_size = osr_method->GetFrameSizeInBytes();
@@ -531,7 +515,7 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
} else {
for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
DexRegisterLocation::Kind location =
- vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+ vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
if (location == DexRegisterLocation::Kind::kNone) {
// Dex register is dead or uninitialized.
continue;
@@ -547,15 +531,14 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
int32_t vreg_value = shadow_frame->GetVReg(vreg);
int32_t slot_offset = vreg_map.GetStackOffsetInBytes(vreg,
number_of_vregs,
- code_info,
- encoding);
+ code_info);
DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
DCHECK_GT(slot_offset, 0);
(reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
}
}
- native_pc = stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) +
+ native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
osr_method->GetEntryPoint();
VLOG(jit) << "Jumping to "
<< method_name
@@ -671,25 +654,25 @@ void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_
if (IgnoreSamplesForMethod(method)) {
return;
}
- if (hot_method_threshold_ == 0) {
+ if (HotMethodThreshold() == 0) {
// Tests might request JIT on first use (compiled synchronously in the interpreter).
return;
}
DCHECK(thread_pool_ != nullptr);
- DCHECK_GT(warm_method_threshold_, 0);
- DCHECK_GT(hot_method_threshold_, warm_method_threshold_);
- DCHECK_GT(osr_method_threshold_, hot_method_threshold_);
- DCHECK_GE(priority_thread_weight_, 1);
- DCHECK_LE(priority_thread_weight_, hot_method_threshold_);
+ DCHECK_GT(WarmMethodThreshold(), 0);
+ DCHECK_GT(HotMethodThreshold(), WarmMethodThreshold());
+ DCHECK_GT(OSRMethodThreshold(), HotMethodThreshold());
+ DCHECK_GE(PriorityThreadWeight(), 1);
+ DCHECK_LE(PriorityThreadWeight(), HotMethodThreshold());
- int32_t starting_count = method->GetCounter();
+ uint16_t starting_count = method->GetCounter();
if (Jit::ShouldUsePriorityThreadWeight(self)) {
- count *= priority_thread_weight_;
+ count *= PriorityThreadWeight();
}
- int32_t new_count = starting_count + count; // int32 here to avoid wrap-around;
+ uint32_t new_count = starting_count + count;
// Note: Native method have no "warm" state or profiling info.
- if (LIKELY(!method->IsNative()) && starting_count < warm_method_threshold_) {
- if ((new_count >= warm_method_threshold_) &&
+ if (LIKELY(!method->IsNative()) && starting_count < WarmMethodThreshold()) {
+ if ((new_count >= WarmMethodThreshold()) &&
(method->GetProfilingInfo(kRuntimePointerSize) == nullptr)) {
bool success = ProfilingInfo::Create(self, method, /* retry_allocation */ false);
if (success) {
@@ -710,23 +693,23 @@ void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_
}
}
// Avoid jumping more than one state at a time.
- new_count = std::min(new_count, hot_method_threshold_ - 1);
- } else if (use_jit_compilation_) {
- if (starting_count < hot_method_threshold_) {
- if ((new_count >= hot_method_threshold_) &&
+ new_count = std::min(new_count, static_cast<uint32_t>(HotMethodThreshold() - 1));
+ } else if (UseJitCompilation()) {
+ if (starting_count < HotMethodThreshold()) {
+ if ((new_count >= HotMethodThreshold()) &&
!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
DCHECK(thread_pool_ != nullptr);
thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile));
}
// Avoid jumping more than one state at a time.
- new_count = std::min(new_count, osr_method_threshold_ - 1);
- } else if (starting_count < osr_method_threshold_) {
+ new_count = std::min(new_count, static_cast<uint32_t>(OSRMethodThreshold() - 1));
+ } else if (starting_count < OSRMethodThreshold()) {
if (!with_backedges) {
// If the samples don't contain any back edge, we don't increment the hotness.
return;
}
DCHECK(!method->IsNative()); // No back edges reported for native methods.
- if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) {
+ if ((new_count >= OSRMethodThreshold()) && !code_cache_->IsOsrCompiled(method)) {
DCHECK(thread_pool_ != nullptr);
thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr));
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 4b8b8919d1..edaf348cc4 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -44,6 +44,110 @@ class JitOptions;
static constexpr int16_t kJitCheckForOSR = -1;
static constexpr int16_t kJitHotnessDisabled = -2;
+// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
+// See android/os/Process.java.
+static constexpr int kJitPoolThreadPthreadDefaultPriority = 9;
+
+class JitOptions {
+ public:
+ static JitOptions* CreateFromRuntimeArguments(const RuntimeArgumentMap& options);
+
+ uint16_t GetCompileThreshold() const {
+ return compile_threshold_;
+ }
+
+ uint16_t GetWarmupThreshold() const {
+ return warmup_threshold_;
+ }
+
+ uint16_t GetOsrThreshold() const {
+ return osr_threshold_;
+ }
+
+ uint16_t GetPriorityThreadWeight() const {
+ return priority_thread_weight_;
+ }
+
+ uint16_t GetInvokeTransitionWeight() const {
+ return invoke_transition_weight_;
+ }
+
+ size_t GetCodeCacheInitialCapacity() const {
+ return code_cache_initial_capacity_;
+ }
+
+ size_t GetCodeCacheMaxCapacity() const {
+ return code_cache_max_capacity_;
+ }
+
+ bool DumpJitInfoOnShutdown() const {
+ return dump_info_on_shutdown_;
+ }
+
+ const ProfileSaverOptions& GetProfileSaverOptions() const {
+ return profile_saver_options_;
+ }
+
+ bool GetSaveProfilingInfo() const {
+ return profile_saver_options_.IsEnabled();
+ }
+
+ int GetThreadPoolPthreadPriority() const {
+ return thread_pool_pthread_priority_;
+ }
+
+ bool UseJitCompilation() const {
+ return use_jit_compilation_;
+ }
+
+ void SetUseJitCompilation(bool b) {
+ use_jit_compilation_ = b;
+ }
+
+ void SetSaveProfilingInfo(bool save_profiling_info) {
+ profile_saver_options_.SetEnabled(save_profiling_info);
+ }
+
+ void SetWaitForJitNotificationsToSaveProfile(bool value) {
+ profile_saver_options_.SetWaitForJitNotificationsToSave(value);
+ }
+
+ void SetProfileAOTCode(bool value) {
+ profile_saver_options_.SetProfileAOTCode(value);
+ }
+
+ void SetJitAtFirstUse() {
+ use_jit_compilation_ = true;
+ compile_threshold_ = 0;
+ }
+
+ private:
+ bool use_jit_compilation_;
+ size_t code_cache_initial_capacity_;
+ size_t code_cache_max_capacity_;
+ uint16_t compile_threshold_;
+ uint16_t warmup_threshold_;
+ uint16_t osr_threshold_;
+ uint16_t priority_thread_weight_;
+ uint16_t invoke_transition_weight_;
+ bool dump_info_on_shutdown_;
+ int thread_pool_pthread_priority_;
+ ProfileSaverOptions profile_saver_options_;
+
+ JitOptions()
+ : use_jit_compilation_(false),
+ code_cache_initial_capacity_(0),
+ code_cache_max_capacity_(0),
+ compile_threshold_(0),
+ warmup_threshold_(0),
+ osr_threshold_(0),
+ priority_thread_weight_(0),
+ invoke_transition_weight_(0),
+ dump_info_on_shutdown_(false),
+ thread_pool_pthread_priority_(kJitPoolThreadPthreadDefaultPriority) {}
+
+ DISALLOW_COPY_AND_ASSIGN(JitOptions);
+};
class Jit {
public:
@@ -77,29 +181,29 @@ class Jit {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- size_t OSRMethodThreshold() const {
- return osr_method_threshold_;
+ uint16_t OSRMethodThreshold() const {
+ return options_->GetOsrThreshold();
}
- size_t HotMethodThreshold() const {
- return hot_method_threshold_;
+ uint16_t HotMethodThreshold() const {
+ return options_->GetCompileThreshold();
}
- size_t WarmMethodThreshold() const {
- return warm_method_threshold_;
+ uint16_t WarmMethodThreshold() const {
+ return options_->GetWarmupThreshold();
}
uint16_t PriorityThreadWeight() const {
- return priority_thread_weight_;
+ return options_->GetPriorityThreadWeight();
}
// Returns false if we only need to save profile information and not compile methods.
bool UseJitCompilation() const {
- return use_jit_compilation_;
+ return options_->UseJitCompilation();
}
bool GetSaveProfilingInfo() const {
- return profile_saver_options_.IsEnabled();
+ return options_->GetSaveProfilingInfo();
}
// Wait until there is no more pending compilation tasks.
@@ -120,12 +224,12 @@ class Jit {
void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller)
REQUIRES_SHARED(Locks::mutator_lock_) {
- AddSamples(self, caller, invoke_transition_weight_, false);
+ AddSamples(self, caller, options_->GetInvokeTransitionWeight(), false);
}
void NotifyCompiledCodeToInterpreterTransition(Thread* self, ArtMethod* callee)
REQUIRES_SHARED(Locks::mutator_lock_) {
- AddSamples(self, callee, invoke_transition_weight_, false);
+ AddSamples(self, callee, options_->GetInvokeTransitionWeight(), false);
}
// Starts the profile saver if the config options allow profile recording.
@@ -177,7 +281,7 @@ class Jit {
void Start();
private:
- Jit();
+ explicit Jit(JitOptions* options);
static bool LoadCompiler(std::string* error_msg);
@@ -189,107 +293,22 @@ class Jit {
static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool);
static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
+ // We make this static to simplify the interaction with libart-compiler.so.
+ static bool generate_debug_info_;
+
+ const JitOptions* const options_;
+
+ std::unique_ptr<jit::JitCodeCache> code_cache_;
+ std::unique_ptr<ThreadPool> thread_pool_;
+
// Performance monitoring.
- bool dump_info_on_shutdown_;
CumulativeLogger cumulative_timings_;
Histogram<uint64_t> memory_use_ GUARDED_BY(lock_);
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::unique_ptr<jit::JitCodeCache> code_cache_;
-
- bool use_jit_compilation_;
- ProfileSaverOptions profile_saver_options_;
- static bool generate_debug_info_;
- uint16_t hot_method_threshold_;
- uint16_t warm_method_threshold_;
- uint16_t osr_method_threshold_;
- uint16_t priority_thread_weight_;
- uint16_t invoke_transition_weight_;
- std::unique_ptr<ThreadPool> thread_pool_;
-
DISALLOW_COPY_AND_ASSIGN(Jit);
};
-class JitOptions {
- public:
- static JitOptions* CreateFromRuntimeArguments(const RuntimeArgumentMap& options);
- size_t GetCompileThreshold() const {
- return compile_threshold_;
- }
- size_t GetWarmupThreshold() const {
- return warmup_threshold_;
- }
- size_t GetOsrThreshold() const {
- return osr_threshold_;
- }
- uint16_t GetPriorityThreadWeight() const {
- return priority_thread_weight_;
- }
- size_t GetInvokeTransitionWeight() const {
- return invoke_transition_weight_;
- }
- size_t GetCodeCacheInitialCapacity() const {
- return code_cache_initial_capacity_;
- }
- size_t GetCodeCacheMaxCapacity() const {
- return code_cache_max_capacity_;
- }
- bool DumpJitInfoOnShutdown() const {
- return dump_info_on_shutdown_;
- }
- const ProfileSaverOptions& GetProfileSaverOptions() const {
- return profile_saver_options_;
- }
- bool GetSaveProfilingInfo() const {
- return profile_saver_options_.IsEnabled();
- }
- bool UseJitCompilation() const {
- return use_jit_compilation_;
- }
- void SetUseJitCompilation(bool b) {
- use_jit_compilation_ = b;
- }
- void SetSaveProfilingInfo(bool save_profiling_info) {
- profile_saver_options_.SetEnabled(save_profiling_info);
- }
- void SetWaitForJitNotificationsToSaveProfile(bool value) {
- profile_saver_options_.SetWaitForJitNotificationsToSave(value);
- }
- void SetProfileAOTCode(bool value) {
- profile_saver_options_.SetProfileAOTCode(value);
- }
-
- void SetJitAtFirstUse() {
- use_jit_compilation_ = true;
- compile_threshold_ = 0;
- }
-
- private:
- bool use_jit_compilation_;
- size_t code_cache_initial_capacity_;
- size_t code_cache_max_capacity_;
- size_t compile_threshold_;
- size_t warmup_threshold_;
- size_t osr_threshold_;
- uint16_t priority_thread_weight_;
- size_t invoke_transition_weight_;
- bool dump_info_on_shutdown_;
- ProfileSaverOptions profile_saver_options_;
-
- JitOptions()
- : use_jit_compilation_(false),
- code_cache_initial_capacity_(0),
- code_cache_max_capacity_(0),
- compile_threshold_(0),
- warmup_threshold_(0),
- osr_threshold_(0),
- priority_thread_weight_(0),
- invoke_transition_weight_(0),
- dump_info_on_shutdown_(false) {}
-
- DISALLOW_COPY_AND_ASSIGN(JitOptions);
-};
-
// Helper class to stop the JIT for a given scope. This will wait for the JIT to quiesce.
class ScopedJitSuspend {
public:
diff --git a/runtime/jvalue-inl.h b/runtime/jvalue-inl.h
index 25e34b2a74..5bd4f176f6 100644
--- a/runtime/jvalue-inl.h
+++ b/runtime/jvalue-inl.h
@@ -19,7 +19,7 @@
#include "jvalue.h"
-#include "obj_ptr.h"
+#include "obj_ptr-inl.h"
namespace art {
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index 266abcf399..b42d995d5c 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -33,7 +33,7 @@ union PACKED(alignof(mirror::Object*)) JValue {
// We default initialize JValue instances to all-zeros.
JValue() : j(0) {}
- template<typename T> static JValue FromPrimitive(T v);
+ template<typename T> ALWAYS_INLINE static JValue FromPrimitive(T v);
int8_t GetB() const { return b; }
void SetB(int8_t new_b) {
@@ -62,6 +62,7 @@ union PACKED(alignof(mirror::Object*)) JValue {
mirror::Object* GetL() const REQUIRES_SHARED(Locks::mutator_lock_) {
return l;
}
+ ALWAYS_INLINE
void SetL(ObjPtr<mirror::Object> new_l) REQUIRES_SHARED(Locks::mutator_lock_);
int16_t GetS() const { return s; }
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 09d856f89b..ce7fe34b22 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -75,16 +75,18 @@ class LockWord {
// Remaining bits are the recursive lock count.
kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize - kReadBarrierStateSize -
kMarkBitStateSize,
- // Thin lock bits. Owner in lowest bits.
+ // Thin lock bits. Owner in lowest bits.
kThinLockOwnerShift = 0,
kThinLockOwnerMask = (1 << kThinLockOwnerSize) - 1,
+ kThinLockOwnerMaskShifted = kThinLockOwnerMask << kThinLockOwnerShift,
kThinLockMaxOwner = kThinLockOwnerMask,
// Count in higher bits.
kThinLockCountShift = kThinLockOwnerSize + kThinLockOwnerShift,
kThinLockCountMask = (1 << kThinLockCountSize) - 1,
kThinLockMaxCount = kThinLockCountMask,
kThinLockCountOne = 1 << kThinLockCountShift, // == 65536 (0x10000)
+ kThinLockCountMaskShifted = kThinLockCountMask << kThinLockCountShift,
// State in the highest bits.
kStateShift = kReadBarrierStateSize + kThinLockCountSize + kThinLockCountShift +
diff --git a/runtime/method_handles-inl.h b/runtime/method_handles-inl.h
index 41c8384e0d..00a8c00880 100644
--- a/runtime/method_handles-inl.h
+++ b/runtime/method_handles-inl.h
@@ -22,7 +22,7 @@
#include "common_throws.h"
#include "dex/dex_instruction.h"
#include "interpreter/interpreter_common.h"
-#include "jvalue.h"
+#include "jvalue-inl.h"
#include "mirror/class.h"
#include "mirror/method_type.h"
#include "mirror/object.h"
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 64ab78997f..1d45aaeb2e 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -20,7 +20,6 @@
#include "common_dex_operations.h"
#include "jvalue-inl.h"
-#include "jvalue.h"
#include "mirror/emulated_stack_frame.h"
#include "mirror/method_handle_impl-inl.h"
#include "mirror/method_type.h"
diff --git a/runtime/method_info.h b/runtime/method_info.h
index b00ddc660f..6f74678e42 100644
--- a/runtime/method_info.h
+++ b/runtime/method_info.h
@@ -21,7 +21,7 @@
#include "base/leb128.h"
#include "base/macros.h"
-#include "base/memory_region.h"
+#include "base/bit_memory_region.h"
namespace art {
@@ -35,8 +35,8 @@ class MethodInfo {
explicit MethodInfo(const uint8_t* ptr) {
if (ptr != nullptr) {
num_method_indices_ = DecodeUnsignedLeb128(&ptr);
- region_ = MemoryRegion(const_cast<uint8_t*>(ptr),
- num_method_indices_ * sizeof(MethodIndexType));
+ region_ = BitMemoryRegion(
+ MemoryRegion(const_cast<uint8_t*>(ptr), num_method_indices_ * sizeof(MethodIndexType)));
}
}
@@ -44,7 +44,7 @@ class MethodInfo {
MethodInfo(uint8_t* ptr, size_t num_method_indices) : num_method_indices_(num_method_indices) {
DCHECK(ptr != nullptr);
ptr = EncodeUnsignedLeb128(ptr, num_method_indices_);
- region_ = MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType));
+ region_ = BitMemoryRegion(MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType)));
}
static size_t ComputeSize(size_t num_method_indices) {
@@ -71,7 +71,7 @@ class MethodInfo {
private:
size_t num_method_indices_ = 0u;
- MemoryRegion region_;
+ BitMemoryRegion region_;
};
} // namespace art
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 7a4876c412..72f1443dfa 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -127,23 +127,23 @@ inline void DexCache::ClearResolvedType(dex::TypeIndex type_idx) {
}
}
-inline uint32_t DexCache::MethodTypeSlotIndex(uint32_t proto_idx) {
+inline uint32_t DexCache::MethodTypeSlotIndex(dex::ProtoIndex proto_idx) {
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds());
- const uint32_t slot_idx = proto_idx % kDexCacheMethodTypeCacheSize;
+ DCHECK_LT(proto_idx.index_, GetDexFile()->NumProtoIds());
+ const uint32_t slot_idx = proto_idx.index_ % kDexCacheMethodTypeCacheSize;
DCHECK_LT(slot_idx, NumResolvedMethodTypes());
return slot_idx;
}
-inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) {
+inline MethodType* DexCache::GetResolvedMethodType(dex::ProtoIndex proto_idx) {
return GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].load(
- std::memory_order_relaxed).GetObjectForIndex(proto_idx);
+ std::memory_order_relaxed).GetObjectForIndex(proto_idx.index_);
}
-inline void DexCache::SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) {
+inline void DexCache::SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved) {
DCHECK(resolved != nullptr);
GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].store(
- MethodTypeDexCachePair(resolved, proto_idx), std::memory_order_relaxed);
+ MethodTypeDexCachePair(resolved, proto_idx.index_), std::memory_order_relaxed);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index d940964edb..9aff9ec49a 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -307,9 +307,9 @@ class MANAGED DexCache FINAL : public Object {
ALWAYS_INLINE void ClearResolvedField(uint32_t idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- MethodType* GetResolvedMethodType(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ MethodType* GetResolvedMethodType(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved)
+ void SetResolvedMethodType(dex::ProtoIndex proto_idx, MethodType* resolved)
REQUIRES_SHARED(Locks::mutator_lock_);
CallSite* GetResolvedCallSite(uint32_t call_site_idx) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -432,7 +432,7 @@ class MANAGED DexCache FINAL : public Object {
uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t MethodSlotIndex(uint32_t method_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t MethodTypeSlotIndex(dex::ProtoIndex proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
private:
void Init(const DexFile* dex_file,
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index d2bff2c19a..97e0ce6684 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -169,9 +169,9 @@ TEST_F(DexCacheMethodHandlesTest, TestResolvedMethodTypes) {
for (size_t i = 0; i < dex_file.NumProtoIds(); ++i) {
const MethodTypeDexCachePair pair = method_types_cache[i].load(std::memory_order_relaxed);
- if (pair.index == method1_id.proto_idx_) {
+ if (dex::ProtoIndex(pair.index) == method1_id.proto_idx_) {
ASSERT_EQ(method1_type.Get(), pair.object.Read());
- } else if (pair.index == method2_id.proto_idx_) {
+ } else if (dex::ProtoIndex(pair.index) == method2_id.proto_idx_) {
ASSERT_EQ(method2_type.Get(), pair.object.Read());
} else {
ASSERT_TRUE(false);
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index b309f596fd..44c819aaf7 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -1425,21 +1425,24 @@ int32_t VarHandle::GetAccessModesBitMask() {
return GetField32(AccessModesBitMaskOffset());
}
-bool VarHandle::IsMethodTypeCompatible(AccessMode access_mode, MethodType* method_type) {
- StackHandleScope<3> hs(Thread::Current());
- Handle<Class> mt_rtype(hs.NewHandle(method_type->GetRType()));
- Handle<VarHandle> vh(hs.NewHandle(this));
- Handle<Class> var_type(hs.NewHandle(vh->GetVarType()));
+VarHandle::MatchKind VarHandle::GetMethodTypeMatchForAccessMode(AccessMode access_mode,
+ MethodType* method_type) {
+ MatchKind match = MatchKind::kExact;
+
+ ObjPtr<VarHandle> vh = this;
+ ObjPtr<Class> var_type = vh->GetVarType();
+ ObjPtr<Class> mt_rtype = method_type->GetRType();
AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode);
- // Check return type first.
- if (mt_rtype->GetPrimitiveType() == Primitive::Type::kPrimVoid) {
- // The result of the operation will be discarded. The return type
- // of the VarHandle is immaterial.
- } else {
- ObjPtr<Class> vh_rtype(GetReturnType(access_mode_template, var_type.Get()));
- if (!IsReturnTypeConvertible(vh_rtype, mt_rtype.Get())) {
- return false;
+ // Check return type first. If the method type's return type is void, the result
+ // of the operation will be discarded and the VarHandle's return type is immaterial.
+ if (mt_rtype->GetPrimitiveType() != Primitive::Type::kPrimVoid) {
+ ObjPtr<Class> vh_rtype = GetReturnType(access_mode_template, var_type.Ptr());
+ if (vh_rtype != mt_rtype) {
+ if (!IsReturnTypeConvertible(vh_rtype, mt_rtype)) {
+ return MatchKind::kNone;
+ }
+ match = MatchKind::kWithConversions;
}
}
@@ -1447,21 +1450,25 @@ bool VarHandle::IsMethodTypeCompatible(AccessMode access_mode, MethodType* metho
ObjPtr<Class> vh_ptypes[VarHandle::kMaxAccessorParameters];
const int32_t vh_ptypes_count = BuildParameterArray(vh_ptypes,
access_mode_template,
- var_type.Get(),
+ var_type,
GetCoordinateType0(),
GetCoordinateType1());
if (vh_ptypes_count != method_type->GetPTypes()->GetLength()) {
- return false;
+ return MatchKind::kNone;
}
// Check the parameter types are compatible.
ObjPtr<ObjectArray<Class>> mt_ptypes = method_type->GetPTypes();
for (int32_t i = 0; i < vh_ptypes_count; ++i) {
+ if (mt_ptypes->Get(i) == vh_ptypes[i]) {
+ continue;
+ }
if (!IsParameterTypeConvertible(mt_ptypes->Get(i), vh_ptypes[i])) {
- return false;
+ return MatchKind::kNone;
}
+ match = MatchKind::kWithConversions;
}
- return true;
+ return match;
}
bool VarHandle::IsInvokerMethodTypeCompatible(AccessMode access_mode,
@@ -1508,7 +1515,7 @@ bool VarHandle::IsInvokerMethodTypeCompatible(AccessMode access_mode,
MethodType* VarHandle::GetMethodTypeForAccessMode(Thread* self,
ObjPtr<VarHandle> var_handle,
AccessMode access_mode) {
- // This is a static as the var_handle might be moved by the GC during it's execution.
+ // This is a static method as the var_handle might be moved by the GC during its execution.
AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode);
StackHandleScope<3> hs(self);
@@ -1538,9 +1545,40 @@ MethodType* VarHandle::GetMethodTypeForAccessMode(Thread* self, AccessMode acces
return GetMethodTypeForAccessMode(self, this, access_mode);
}
+std::string VarHandle::PrettyDescriptorForAccessMode(AccessMode access_mode) {
+ // Effect MethodType::PrettyDescriptor() without first creating a method type.
+ std::ostringstream oss;
+ oss << '(';
+
+ AccessModeTemplate access_mode_template = GetAccessModeTemplate(access_mode);
+ ObjPtr<Class> var_type = GetVarType();
+ ObjPtr<Class> ctypes[2] = { GetCoordinateType0(), GetCoordinateType1() };
+ const int32_t ptypes_count = GetNumberOfParameters(access_mode_template, ctypes[0], ctypes[1]);
+ int32_t ptypes_done = 0;
+ for (ObjPtr<Class> ctype : ctypes) {
+ if (!ctype.IsNull()) {
+ if (ptypes_done != 0) {
+ oss << ", ";
+ }
+ oss << ctype->PrettyDescriptor();
+ ptypes_done++;
+ }
+ }
+ while (ptypes_done != ptypes_count) {
+ if (ptypes_done != 0) {
+ oss << ", ";
+ }
+ oss << var_type->PrettyDescriptor();
+ ptypes_done++;
+ }
+ ObjPtr<Class> rtype = GetReturnType(access_mode_template, var_type);
+ oss << ')' << rtype->PrettyDescriptor();
+ return oss.str();
+}
+
bool VarHandle::Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
- InstructionOperands* operands,
+ const InstructionOperands* const operands,
JValue* result) {
Class* klass = GetClass();
if (klass == FieldVarHandle::StaticClass()) {
@@ -1671,7 +1709,7 @@ ArtField* FieldVarHandle::GetField() {
bool FieldVarHandle::Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
- InstructionOperands* operands,
+ const InstructionOperands* const operands,
JValue* result) {
ShadowFrameGetter getter(*shadow_frame, operands);
ArtField* field = GetField();
@@ -1743,7 +1781,7 @@ GcRoot<Class> FieldVarHandle::static_class_;
bool ArrayElementVarHandle::Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
- InstructionOperands* operands,
+ const InstructionOperands* const operands,
JValue* result) {
ShadowFrameGetter getter(*shadow_frame, operands);
@@ -1856,7 +1894,7 @@ bool ByteArrayViewVarHandle::GetNativeByteOrder() {
bool ByteArrayViewVarHandle::Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
- InstructionOperands* operands,
+ const InstructionOperands* const operands,
JValue* result) {
ShadowFrameGetter getter(*shadow_frame, operands);
@@ -1965,7 +2003,7 @@ bool ByteBufferViewVarHandle::GetNativeByteOrder() {
bool ByteBufferViewVarHandle::Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
- InstructionOperands* operands,
+ const InstructionOperands* const operands,
JValue* result) {
ShadowFrameGetter getter(*shadow_frame, operands);
diff --git a/runtime/mirror/var_handle.h b/runtime/mirror/var_handle.h
index d46d900a8d..5186d43830 100644
--- a/runtime/mirror/var_handle.h
+++ b/runtime/mirror/var_handle.h
@@ -99,14 +99,16 @@ class MANAGED VarHandle : public Object {
return (GetAccessModesBitMask() & (1u << static_cast<uint32_t>(accessMode))) != 0;
}
- // Returns true if the MethodType specified is compatible with the
- // method type associated with the specified AccessMode. The
- // supplied MethodType is assumed to be from the point of invocation
- // so it is valid for the supplied MethodType to have a void return
- // value when the return value for the AccessMode is non-void. This
- // corresponds to the result of the accessor being discarded.
- bool IsMethodTypeCompatible(AccessMode access_mode, MethodType* method_type)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ enum MatchKind : uint8_t {
+ kNone,
+ kWithConversions,
+ kExact
+ };
+
+ // Returns match information on the compatibility between the exact method type for
+ // 'access_mode' and the provided 'method_type'.
+ MatchKind GetMethodTypeMatchForAccessMode(AccessMode access_mode, MethodType* method_type)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the MethodType specified is compatible with the
// specified access_mode if the first parameter of method_type is
@@ -122,9 +124,14 @@ class MANAGED VarHandle : public Object {
MethodType* GetMethodTypeForAccessMode(Thread* self, AccessMode accessMode)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns a string representing the descriptor of the MethodType associated with
+ // this AccessMode.
+ std::string PrettyDescriptorForAccessMode(AccessMode access_mode)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
bool Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
- InstructionOperands* operands,
+ const InstructionOperands* const operands,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -192,7 +199,7 @@ class MANAGED FieldVarHandle : public VarHandle {
public:
bool Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
- InstructionOperands* operands,
+ const InstructionOperands* const operands,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -225,7 +232,7 @@ class MANAGED ArrayElementVarHandle : public VarHandle {
public:
bool Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
- InstructionOperands* operands,
+ const InstructionOperands* const operands,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -248,7 +255,7 @@ class MANAGED ByteArrayViewVarHandle : public VarHandle {
public:
bool Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
- InstructionOperands* operands,
+ const InstructionOperands* const operands,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -281,7 +288,7 @@ class MANAGED ByteBufferViewVarHandle : public VarHandle {
public:
bool Access(AccessMode access_mode,
ShadowFrame* shadow_frame,
- InstructionOperands* operands,
+ const InstructionOperands* const operands,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/var_handle_test.cc b/runtime/mirror/var_handle_test.cc
index d9fa07f207..005aba3edd 100644
--- a/runtime/mirror/var_handle_test.cc
+++ b/runtime/mirror/var_handle_test.cc
@@ -246,6 +246,47 @@ static MethodType* MethodTypeOf(const std::string& method_descriptor) {
return MethodType::Create(self, rtype, ptypes);
}
+static bool AccessModeMatch(VarHandle* vh,
+ VarHandle::AccessMode access_mode,
+ MethodType* method_type,
+ VarHandle::MatchKind expected_match)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return vh->GetMethodTypeMatchForAccessMode(access_mode, method_type) == expected_match;
+}
+
+template <typename VH>
+static bool AccessModeExactMatch(Handle<VH> vh,
+ VarHandle::AccessMode access_mode,
+ const char* descriptor)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return AccessModeMatch(vh.Get(),
+ access_mode,
+ MethodTypeOf(descriptor),
+ VarHandle::MatchKind::kExact);
+}
+
+template <typename VH>
+static bool AccessModeWithConversionsMatch(Handle<VH> vh,
+ VarHandle::AccessMode access_mode,
+ const char* descriptor)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return AccessModeMatch(vh.Get(),
+ access_mode,
+ MethodTypeOf(descriptor),
+ VarHandle::MatchKind::kWithConversions);
+}
+
+template <typename VH>
+static bool AccessModeNoMatch(Handle<VH> vh,
+ VarHandle::AccessMode access_mode,
+ const char* descriptor)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return AccessModeMatch(vh.Get(),
+ access_mode,
+ MethodTypeOf(descriptor),
+ VarHandle::MatchKind::kNone);
+}
+
TEST_F(VarHandleTest, InstanceFieldVarHandle) {
Thread * const self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -296,47 +337,53 @@ TEST_F(VarHandleTest, InstanceFieldVarHandle) {
// Check compatibility - "Get" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)I")));
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)V")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;)I"));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;)V"));
+ EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;)D"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Z)Z"));
}
// Check compatibility - "Set" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)V")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)V")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;I)V"));
+ EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;S)V"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;)V"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Z)V"));
}
// Check compatibility - "CompareAndSet" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;II)Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode,
- MethodTypeOf("(Ljava/lang/Integer;II)I")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;)Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;II)Z"));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;II)V"));
+ EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;II)Ljava/lang/Boolean;"));
+ EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;IB)V"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;II)I"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Z)V"));
}
// Check compatibility - "CompareAndExchange" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;II)I")));
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;II)V")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(IIII)V")));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;II)I"));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;II)V"));
+ EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;II)J"));
+ EXPECT_TRUE(AccessModeWithConversionsMatch(fvh, access_mode, "(Ljava/lang/Integer;BS)F"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;I)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(IIII)V"));
}
// Check compatibility - "GetAndUpdate" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)I")));
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)V")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/lang/Integer;I)Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)S")));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;I)I"));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(Ljava/lang/Integer;I)V"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Ljava/lang/Integer;I)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(II)S"));
}
// Check synthesized method types match expected forms.
@@ -430,48 +477,47 @@ TEST_F(VarHandleTest, StaticFieldVarHandle) {
// Check compatibility - "Get" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()I")));
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()V")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "()I"));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "()V"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "()Z"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Z)Z"));
}
// Check compatibility - "Set" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)V")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()V")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(F)V")));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(I)V"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "()V"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "()Z"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(F)V"));
}
// Check compatibility - "CompareAndSet" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode,
- MethodTypeOf("(II)Ljava/lang/String;")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("()Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(II)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(II)Ljava/lang/String;"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "()Z"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(Z)V"));
}
// Check compatibility - "CompareAndExchange" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)I")));
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(ID)I")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)S")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(IIJ)V")));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(II)I"));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(II)V"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(ID)I"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(II)S"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(IIJ)V"));
}
// Check compatibility - "GetAndUpdate" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)I")));
- EXPECT_TRUE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)V")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(I)Z")));
- EXPECT_FALSE(fvh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(I)I"));
+ EXPECT_TRUE(AccessModeExactMatch(fvh, access_mode, "(I)V"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(I)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(fvh, access_mode, "(II)V"));
}
// Check synthesized method types match expected forms.
@@ -594,50 +640,46 @@ TEST_F(VarHandleTest, ArrayElementVarHandle) {
// Check compatibility - "Get" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)Ljava/lang/String;")));
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;Ljava/lang/String;)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;I)Ljava/lang/String;"));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;I)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;Ljava/lang/String;)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)Z"));
}
// Check compatibility - "Set" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;I)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;I)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
}
// Check compatibility - "CompareAndSet" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
- EXPECT_TRUE(
- vh->IsMethodTypeCompatible(
- access_mode,
- MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode,
- MethodTypeOf("([Ljava/lang/String;III)I")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;I)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;III)I"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;I)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
}
// Check compatibility - "CompareAndExchange" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Ljava/lang/String;")));
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;II)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)Ljava/lang/String;"));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;Ljava/lang/String;)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;II)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(III)V"));
}
// Check compatibility - "GetAndUpdate" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)Ljava/lang/String;")));
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([Ljava/lang/String;ILjava/lang/String;)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;)Ljava/lang/String;"));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([Ljava/lang/String;ILjava/lang/String;)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(II)V"));
}
// Check synthesized method types match expected forms.
@@ -747,50 +789,46 @@ TEST_F(VarHandleTest, ByteArrayViewVarHandle) {
// Check compatibility - "Get" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)C")));
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BC)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BI)C"));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BI)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BC)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)Z"));
}
// Check compatibility - "Set" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BIC)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BI)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BI)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
}
// Check compatibility - "CompareAndSet" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
- EXPECT_TRUE(
- vh->IsMethodTypeCompatible(
- access_mode,
- MethodTypeOf("([BICC)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode,
- MethodTypeOf("([BIII)I")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BI)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BICC)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BIII)I"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BI)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
}
// Check compatibility - "CompareAndExchange" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BICC)C")));
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BICC)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BII)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BICC)C"));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BICC)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BII)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(III)V"));
}
// Check compatibility - "GetAndUpdate" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)C")));
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("([BIC)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BIC)C"));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "([BIC)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "([BIC)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(II)V"));
}
// Check synthesized method types match expected forms.
@@ -900,50 +938,46 @@ TEST_F(VarHandleTest, ByteBufferViewVarHandle) {
// Check compatibility - "Get" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGet;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)D")));
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;D)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)Z")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;I)D"));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;I)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;D)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)Z"));
}
// Check compatibility - "Set" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kSet;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;ID)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;I)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;I)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
}
// Check compatibility - "CompareAndSet" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndSet;
- EXPECT_TRUE(
- vh->IsMethodTypeCompatible(
- access_mode,
- MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode,
- MethodTypeOf("(Ljava/nio/ByteBuffer;IDI)D")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;I)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Z)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;IDD)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;IDI)D"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;I)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Z)V"));
}
// Check compatibility - "CompareAndExchange" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kCompareAndExchange;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)D")));
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;IDD)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;II)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(III)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;IDD)D"));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;IDD)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;II)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(III)V"));
}
// Check compatibility - "GetAndUpdate" pattern
{
const VarHandle::AccessMode access_mode = VarHandle::AccessMode::kGetAndAdd;
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)D")));
- EXPECT_TRUE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)V")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(Ljava/nio/ByteBuffer;ID)Z")));
- EXPECT_FALSE(vh->IsMethodTypeCompatible(access_mode, MethodTypeOf("(II)V")));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;ID)D"));
+ EXPECT_TRUE(AccessModeExactMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;ID)V"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(Ljava/nio/ByteBuffer;ID)Z"));
+ EXPECT_TRUE(AccessModeNoMatch(vh, access_mode, "(II)V"));
}
// Check synthesized method types match expected forms.
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 7f7b524227..6c820190b4 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -185,6 +185,10 @@ static jboolean VMRuntime_isNativeDebuggable(JNIEnv*, jobject) {
return Runtime::Current()->IsNativeDebuggable();
}
+static jboolean VMRuntime_isJavaDebuggable(JNIEnv*, jobject) {
+ return Runtime::Current()->IsJavaDebuggable();
+}
+
static jobjectArray VMRuntime_properties(JNIEnv* env, jobject) {
DCHECK(WellKnownClasses::java_lang_String != nullptr);
@@ -702,6 +706,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMRuntime, getTargetHeapUtilization, "()F"),
FAST_NATIVE_METHOD(VMRuntime, isDebuggerActive, "()Z"),
FAST_NATIVE_METHOD(VMRuntime, isNativeDebuggable, "()Z"),
+ NATIVE_METHOD(VMRuntime, isJavaDebuggable, "()Z"),
NATIVE_METHOD(VMRuntime, nativeSetTargetHeapUtilization, "(F)V"),
FAST_NATIVE_METHOD(VMRuntime, newNonMovableArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
FAST_NATIVE_METHOD(VMRuntime, newUnpaddedArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 68024cd1c2..9f595b1c29 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -648,7 +648,7 @@ static jobjectArray Class_getDeclaredAnnotations(JNIEnv* env, jobject javaThis)
// Return an empty array instead of a null pointer.
ObjPtr<mirror::Class> annotation_array_class =
soa.Decode<mirror::Class>(WellKnownClasses::java_lang_annotation_Annotation__array);
- mirror::ObjectArray<mirror::Object>* empty_array =
+ ObjPtr<mirror::ObjectArray<mirror::Object>> empty_array =
mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
annotation_array_class.Ptr(),
0);
@@ -661,7 +661,7 @@ static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
- mirror::ObjectArray<mirror::Class>* classes = nullptr;
+ ObjPtr<mirror::ObjectArray<mirror::Class>> classes = nullptr;
if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) {
classes = annotations::GetDeclaredClasses(klass);
}
@@ -738,7 +738,7 @@ static jstring Class_getInnerClassName(JNIEnv* env, jobject javaThis) {
if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
return nullptr;
}
- mirror::String* class_name = nullptr;
+ ObjPtr<mirror::String> class_name = nullptr;
if (!annotations::GetInnerClass(klass, &class_name)) {
return nullptr;
}
@@ -763,7 +763,7 @@ static jboolean Class_isAnonymousClass(JNIEnv* env, jobject javaThis) {
if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
return false;
}
- mirror::String* class_name = nullptr;
+ ObjPtr<mirror::String> class_name = nullptr;
if (!annotations::GetInnerClass(klass, &class_name)) {
return false;
}
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index a5d6c9704d..13a8d28267 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -38,7 +38,7 @@ static jobjectArray Constructor_getExceptionTypes(JNIEnv* env, jobject javaMetho
ScopedFastNativeObjectAccess soa(env);
ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod)
->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- mirror::ObjectArray<mirror::Class>* result_array =
+ ObjPtr<mirror::ObjectArray<mirror::Class>> result_array =
annotations::GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
// Return an empty array instead of a null pointer.
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index e0afbee845..25599843e9 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -27,6 +27,7 @@
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
#include "jni/jni_internal.h"
+#include "jvalue-inl.h"
#include "mirror/class-inl.h"
#include "mirror/field-inl.h"
#include "native_util.h"
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 2503b3cb44..52e04941c6 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -62,7 +62,7 @@ static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
klass->GetProxyThrows()->Get(throws_index);
return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self()));
} else {
- mirror::ObjectArray<mirror::Class>* result_array =
+ ObjPtr<mirror::ObjectArray<mirror::Class>> result_array =
annotations::GetExceptionTypesForMethod(method);
if (result_array == nullptr) {
// Return an empty array instead of a null pointer
diff --git a/runtime/oat.h b/runtime/oat.h
index 6c683f1541..7b8f71a3f3 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: compiler support const-method-handle
- static constexpr uint8_t kOatVersion[] = { '1', '4', '3', '\0' };
+ // Last oat version changed reason: Refactor stackmap encoding.
+ static constexpr uint8_t kOatVersion[] = { '1', '4', '4', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 371678d4d9..ffbc26c647 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -211,12 +211,12 @@ OatFileBase* OatFileBase::OpenOatFile(int zip_fd,
return nullptr;
}
+ ret->PreSetup(elf_filename);
+
if (!ret->LoadVdex(vdex_filename, writable, low_4gb, error_msg)) {
return nullptr;
}
- ret->PreSetup(elf_filename);
-
if (!ret->Setup(zip_fd, abs_dex_location, error_msg)) {
return nullptr;
}
@@ -252,12 +252,12 @@ OatFileBase* OatFileBase::OpenOatFile(int zip_fd,
return nullptr;
}
+ ret->PreSetup(oat_location);
+
if (!ret->LoadVdex(vdex_fd, vdex_location, writable, low_4gb, error_msg)) {
return nullptr;
}
- ret->PreSetup(oat_location);
-
if (!ret->Setup(zip_fd, abs_dex_location, error_msg)) {
return nullptr;
}
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 7d69927ffb..6c869cada5 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -36,6 +36,7 @@
#include "exec_utils.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
+#include "hidden_api.h"
#include "image.h"
#include "oat.h"
#include "runtime.h"
@@ -823,6 +824,11 @@ bool OatFileAssistant::Dex2Oat(const std::vector<std::string>& args,
argv.push_back("--compiler-filter=verify-none");
}
+ if (runtime->GetHiddenApiEnforcementPolicy() != hiddenapi::EnforcementPolicy::kNoChecks) {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xhidden-api-checks");
+ }
+
if (runtime->MustRelocateIfPossible()) {
argv.push_back("--runtime-arg");
argv.push_back("-Xrelocate");
@@ -860,6 +866,13 @@ bool OatFileAssistant::DexLocationToOatFilename(const std::string& location,
CHECK(oat_filename != nullptr);
CHECK(error_msg != nullptr);
+ // If ANDROID_DATA is not set, return false instead of aborting.
+ // This can occur for preopt when using a class loader context.
+ if (GetAndroidDataSafe(error_msg) == nullptr) {
+ *error_msg = "GetAndroidDataSafe failed: " + *error_msg;
+ return false;
+ }
+
std::string cache_dir = GetDalvikCache(GetInstructionSetString(isa));
if (cache_dir.empty()) {
*error_msg = "Dalvik cache directory does not exist";
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 99bc0b2c6e..0b3c61d474 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -33,6 +33,7 @@
#include "class_loader_context.h"
#include "common_runtime_test.h"
#include "dexopt_test.h"
+#include "hidden_api.h"
#include "oat_file.h"
#include "oat_file_manager.h"
#include "scoped_thread_state_change-inl.h"
@@ -43,6 +44,8 @@ namespace art {
static const std::string kSpecialSharedLibrary = "&"; // NOLINT [runtime/string] [4]
static ClassLoaderContext* kSpecialSharedLibraryContext = nullptr;
+static constexpr char kDex2oatCmdLineHiddenApiArg[] = " --runtime-arg -Xhidden-api-checks";
+
class OatFileAssistantTest : public DexoptTest {
public:
void VerifyOptimizationStatus(const std::string& file,
@@ -1413,6 +1416,46 @@ TEST_F(OatFileAssistantTest, MakeUpToDateWithContext) {
oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
}
+TEST_F(OatFileAssistantTest, MakeUpToDateWithHiddenApiDisabled) {
+ hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
+ hiddenapi::EnforcementPolicy::kNoChecks);
+
+ std::string dex_location = GetScratchDir() + "/TestDexHiddenApiDisabled.jar";
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+ std::string error_msg;
+ int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ EXPECT_NE(nullptr, oat_file.get());
+
+ const char* cmd_line = oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
+ EXPECT_NE(nullptr, cmd_line);
+ EXPECT_EQ(nullptr, strstr(cmd_line, kDex2oatCmdLineHiddenApiArg));
+}
+
+TEST_F(OatFileAssistantTest, MakeUpToDateWithHiddenApiEnabled) {
+ hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
+ hiddenapi::EnforcementPolicy::kBlacklistOnly);
+
+ std::string dex_location = GetScratchDir() + "/TestDexHiddenApiEnabled.jar";
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+ std::string error_msg;
+ int status = oat_file_assistant.MakeUpToDate(false, kSpecialSharedLibraryContext, &error_msg);
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded, status) << error_msg;
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ EXPECT_NE(nullptr, oat_file.get());
+
+ const char* cmd_line = oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kDex2OatCmdLineKey);
+ EXPECT_NE(nullptr, cmd_line);
+ EXPECT_NE(nullptr, strstr(cmd_line, kDex2oatCmdLineHiddenApiArg));
+}
+
TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
std::string dex_location = GetScratchDir() + "/TestDex.jar";
std::string context_location = GetScratchDir() + "/ContextDex.jar";
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 98238e5600..aed6bc57b3 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -19,6 +19,7 @@
#include "art_method.h"
#include "dex/dex_file_types.h"
#include "scoped_thread_state_change-inl.h"
+#include "stack_map.h"
#include "thread.h"
namespace art {
@@ -42,11 +43,10 @@ uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
const void* entry_point = GetEntryPoint();
uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
if (IsOptimized()) {
- CodeInfo code_info = GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
+ CodeInfo code_info(this);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset);
if (stack_map.IsValid()) {
- return stack_map.GetDexPc(encoding.stack_map.encoding);
+ return stack_map.GetDexPc();
}
} else {
DCHECK(method->IsNative());
@@ -71,18 +71,17 @@ uintptr_t OatQuickMethodHeader::ToNativeQuickPc(ArtMethod* method,
DCHECK(!method->IsNative());
DCHECK(IsOptimized());
// Search for the dex-to-pc mapping in stack maps.
- CodeInfo code_info = GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(this);
// All stack maps are stored in the same CodeItem section, safepoint stack
// maps first, then catch stack maps. We use `is_for_catch_handler` to select
// the order of iteration.
StackMap stack_map =
- LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
- : code_info.GetStackMapForDexPc(dex_pc, encoding);
+ LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc)
+ : code_info.GetStackMapForDexPc(dex_pc);
if (stack_map.IsValid()) {
return reinterpret_cast<uintptr_t>(entry_point) +
- stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA);
+ stack_map.GetNativePcOffset(kRuntimeISA);
}
if (abort_on_failure) {
ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index f0966b7bfa..d6762d6bc6 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -22,7 +22,6 @@
#include "base/utils.h"
#include "method_info.h"
#include "quick/quick_method_frame_info.h"
-#include "stack_map.h"
namespace art {
@@ -75,10 +74,6 @@ class PACKED(4) OatQuickMethodHeader {
return code_ - vmap_table_offset_;
}
- CodeInfo GetOptimizedCodeInfo() const {
- return CodeInfo(GetOptimizedCodeInfoPtr());
- }
-
const void* GetOptimizedMethodInfoPtr() const {
DCHECK(IsOptimized());
return reinterpret_cast<const void*>(code_ - method_info_offset_);
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 3aa481af8c..7383d477bb 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -194,6 +194,9 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define("-Xjittransitionweight:_")
.WithType<unsigned int>()
.IntoKey(M::JITInvokeTransitionWeight)
+ .Define("-Xjitpthreadpriority:_")
+ .WithType<int>()
+ .IntoKey(M::JITPoolThreadPthreadPriority)
.Define("-Xjitsaveprofilinginfo")
.WithType<ProfileSaverOptions>()
.AppendValues()
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 077aa33925..c555fca23c 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -224,30 +224,29 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
CodeItemDataAccessor accessor(handler_method_->DexInstructionData());
const size_t number_of_vregs = accessor.RegistersSize();
- CodeInfo code_info = handler_method_header_->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(handler_method_header_);
// Find stack map of the catch block.
- StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc(), encoding);
+ StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc());
DCHECK(catch_stack_map.IsValid());
DexRegisterMap catch_vreg_map =
- code_info.GetDexRegisterMapOf(catch_stack_map, encoding, number_of_vregs);
+ code_info.GetDexRegisterMapOf(catch_stack_map, number_of_vregs);
if (!catch_vreg_map.IsValid()) {
return;
}
// Find stack map of the throwing instruction.
StackMap throw_stack_map =
- code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset(), encoding);
+ code_info.GetStackMapForNativePcOffset(stack_visitor->GetNativePcOffset());
DCHECK(throw_stack_map.IsValid());
DexRegisterMap throw_vreg_map =
- code_info.GetDexRegisterMapOf(throw_stack_map, encoding, number_of_vregs);
+ code_info.GetDexRegisterMapOf(throw_stack_map, number_of_vregs);
DCHECK(throw_vreg_map.IsValid());
// Copy values between them.
for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
DexRegisterLocation::Kind catch_location =
- catch_vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+ catch_vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
if (catch_location == DexRegisterLocation::Kind::kNone) {
continue;
}
@@ -257,8 +256,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
uint32_t vreg_value;
VRegKind vreg_kind = ToVRegKind(throw_vreg_map.GetLocationKind(vreg,
number_of_vregs,
- code_info,
- encoding));
+ code_info));
bool get_vreg_success = stack_visitor->GetVReg(stack_visitor->GetMethod(),
vreg,
vreg_kind,
@@ -271,8 +269,7 @@ void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor*
// Copy value to the catch phi's stack slot.
int32_t slot_offset = catch_vreg_map.GetStackOffsetInBytes(vreg,
number_of_vregs,
- code_info,
- encoding);
+ code_info);
ArtMethod** frame_top = stack_visitor->GetCurrentQuickFrame();
uint8_t* slot_address = reinterpret_cast<uint8_t*>(frame_top) + slot_offset;
uint32_t* slot_ptr = reinterpret_cast<uint32_t*>(slot_address);
@@ -404,20 +401,18 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
const bool* updated_vregs)
REQUIRES_SHARED(Locks::mutator_lock_) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- CodeInfo code_info = method_header->GetOptimizedCodeInfo();
+ CodeInfo code_info(method_header);
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
CodeItemDataAccessor accessor(m->DexInstructionData());
const size_t number_of_vregs = accessor.RegistersSize();
- uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, stack_map);
- BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, stack_map);
+ uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
DexRegisterMap vreg_map = IsInInlinedFrame()
? code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1,
- code_info.GetInlineInfoOf(stack_map, encoding),
- encoding,
+ code_info.GetInlineInfoOf(stack_map),
number_of_vregs)
- : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_vregs);
+ : code_info.GetDexRegisterMapOf(stack_map, number_of_vregs);
if (!vreg_map.IsValid()) {
return;
@@ -430,7 +425,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
DexRegisterLocation::Kind location =
- vreg_map.GetLocationKind(vreg, number_of_vregs, code_info, encoding);
+ vreg_map.GetLocationKind(vreg, number_of_vregs, code_info);
static constexpr uint32_t kDeadValue = 0xEBADDE09;
uint32_t value = kDeadValue;
bool is_reference = false;
@@ -439,12 +434,11 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
case DexRegisterLocation::Kind::kInStack: {
const int32_t offset = vreg_map.GetStackOffsetInBytes(vreg,
number_of_vregs,
- code_info,
- encoding);
+ code_info);
const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
value = *reinterpret_cast<const uint32_t*>(addr);
uint32_t bit = (offset >> 2);
- if (bit < encoding.stack_mask.encoding.BitSize() && stack_mask.LoadBit(bit)) {
+ if (bit < code_info.GetNumberOfStackMaskBits() && stack_mask.LoadBit(bit)) {
is_reference = true;
}
break;
@@ -453,7 +447,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
case DexRegisterLocation::Kind::kInRegisterHigh:
case DexRegisterLocation::Kind::kInFpuRegister:
case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
- uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info, encoding);
+ uint32_t reg = vreg_map.GetMachineRegister(vreg, number_of_vregs, code_info);
bool result = GetRegisterIfAccessible(reg, ToVRegKind(location), &value);
CHECK(result);
if (location == DexRegisterLocation::Kind::kInRegister) {
@@ -464,7 +458,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
break;
}
case DexRegisterLocation::Kind::kConstant: {
- value = vreg_map.GetConstant(vreg, number_of_vregs, code_info, encoding);
+ value = vreg_map.GetConstant(vreg, number_of_vregs, code_info);
if (value == 0) {
// Make it a reference for extra safety.
is_reference = true;
@@ -479,8 +473,7 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
<< "Unexpected location kind "
<< vreg_map.GetLocationInternalKind(vreg,
number_of_vregs,
- code_info,
- encoding);
+ code_info);
UNREACHABLE();
}
}
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index dfa4b3daab..66eba1e1d4 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -25,6 +25,7 @@
#include "indirect_reference_table-inl.h"
#include "jni/java_vm_ext.h"
#include "jni/jni_internal.h"
+#include "jvalue-inl.h"
#include "mirror/class-inl.h"
#include "mirror/executable.h"
#include "mirror/object_array-inl.h"
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 5d974246ff..14027493d8 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1192,7 +1192,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
// As is, we're encoding some logic here about which specific policy to use, which would be better
// controlled by the framework.
hidden_api_policy_ = do_hidden_api_checks
- ? hiddenapi::EnforcementPolicy::kBlacklistOnly
+ ? hiddenapi::EnforcementPolicy::kDarkGreyAndBlackList
: hiddenapi::EnforcementPolicy::kNoChecks;
no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 427385d914..e647423b9c 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -77,6 +77,7 @@ RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITOsrThreshold)
RUNTIME_OPTIONS_KEY (unsigned int, JITPriorityThreadWeight)
RUNTIME_OPTIONS_KEY (unsigned int, JITInvokeTransitionWeight)
+RUNTIME_OPTIONS_KEY (int, JITPoolThreadPthreadPriority, jit::kJitPoolThreadPthreadDefaultPriority)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity)
RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheMaxCapacity, jit::JitCodeCache::kMaxCapacity)
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
diff --git a/runtime/stack.cc b/runtime/stack.cc
index c58380dee2..7d1cb5cc4b 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -76,15 +76,14 @@ StackVisitor::StackVisitor(Thread* thread,
}
}
-static InlineInfo GetCurrentInlineInfo(const OatQuickMethodHeader* method_header,
+static InlineInfo GetCurrentInlineInfo(CodeInfo& code_info,
+ const OatQuickMethodHeader* method_header,
uintptr_t cur_quick_frame_pc)
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc);
- CodeInfo code_info = method_header->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(stack_map.IsValid());
- return code_info.GetInlineInfoOf(stack_map, encoding);
+ return code_info.GetInlineInfoOf(stack_map);
}
ArtMethod* StackVisitor::GetMethod() const {
@@ -93,16 +92,16 @@ ArtMethod* StackVisitor::GetMethod() const {
} else if (cur_quick_frame_ != nullptr) {
if (IsInInlinedFrame()) {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
- InlineInfo inline_info = GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(),
- cur_quick_frame_pc_);
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
+ CodeInfo code_info(method_header);
+ InlineInfo inline_info = GetCurrentInlineInfo(code_info,
+ method_header,
+ cur_quick_frame_pc_);
MethodInfo method_info = method_header->GetOptimizedMethodInfo();
DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
return GetResolvedMethod(*GetCurrentQuickFrame(),
method_info,
inline_info,
- encoding.inline_info.encoding,
depth_in_stack_map);
} else {
return *cur_quick_frame_;
@@ -116,11 +115,11 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
return cur_shadow_frame_->GetDexPC();
} else if (cur_quick_frame_ != nullptr) {
if (IsInInlinedFrame()) {
- size_t depth_in_stack_map = current_inlining_depth_ - 1;
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
- return GetCurrentInlineInfo(GetCurrentOatQuickMethodHeader(), cur_quick_frame_pc_).
- GetDexPcAtDepth(encoding.inline_info.encoding, depth_in_stack_map);
+ CodeInfo code_info(method_header);
+ size_t depth_in_stack_map = current_inlining_depth_ - 1;
+ return GetCurrentInlineInfo(code_info, method_header, cur_quick_frame_pc_).
+ GetDexPcAtDepth(depth_in_stack_map);
} else if (cur_oat_quick_method_header_ == nullptr) {
return dex::kDexNoIndex;
} else {
@@ -230,32 +229,29 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
uint16_t number_of_dex_registers = accessor.RegistersSize();
DCHECK_LT(vreg, number_of_dex_registers);
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
- CodeInfo code_info = method_header->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(method_header);
uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(stack_map.IsValid());
size_t depth_in_stack_map = current_inlining_depth_ - 1;
DexRegisterMap dex_register_map = IsInInlinedFrame()
? code_info.GetDexRegisterMapAtDepth(depth_in_stack_map,
- code_info.GetInlineInfoOf(stack_map, encoding),
- encoding,
+ code_info.GetInlineInfoOf(stack_map),
number_of_dex_registers)
- : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
+ : code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
if (!dex_register_map.IsValid()) {
return false;
}
DexRegisterLocation::Kind location_kind =
- dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info, encoding);
+ dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
switch (location_kind) {
case DexRegisterLocation::Kind::kInStack: {
const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg,
number_of_dex_registers,
- code_info,
- encoding);
+ code_info);
const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
*val = *reinterpret_cast<const uint32_t*>(addr);
return true;
@@ -265,11 +261,11 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
case DexRegisterLocation::Kind::kInFpuRegister:
case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
uint32_t reg =
- dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info, encoding);
+ dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info);
return GetRegisterIfAccessible(reg, kind, val);
}
case DexRegisterLocation::Kind::kConstant:
- *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info, encoding);
+ *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info);
return true;
case DexRegisterLocation::Kind::kNone:
return false;
@@ -278,8 +274,7 @@ bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKin
<< "Unexpected location kind "
<< dex_register_map.GetLocationInternalKind(vreg,
number_of_dex_registers,
- code_info,
- encoding);
+ code_info);
UNREACHABLE();
}
}
@@ -831,15 +826,14 @@ void StackVisitor::WalkStack(bool include_transitions) {
if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
&& (cur_oat_quick_method_header_ != nullptr)
&& cur_oat_quick_method_header_->IsOptimized()) {
- CodeInfo code_info = cur_oat_quick_method_header_->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ CodeInfo code_info(cur_oat_quick_method_header_);
uint32_t native_pc_offset =
cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
- if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
DCHECK_EQ(current_inlining_depth_, 0u);
- for (current_inlining_depth_ = inline_info.GetDepth(encoding.inline_info.encoding);
+ for (current_inlining_depth_ = inline_info.GetDepth();
current_inlining_depth_ != 0;
--current_inlining_depth_) {
bool should_continue = VisitFrame();
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 9c7b6875cf..2b7e8dd748 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -25,8 +25,6 @@
namespace art {
constexpr size_t DexRegisterLocationCatalog::kNoLocationEntryIndex;
-constexpr uint32_t StackMap::kNoDexRegisterMap;
-constexpr uint32_t StackMap::kNoInlineInfo;
std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation::Kind& kind) {
using Kind = DexRegisterLocation::Kind;
@@ -56,27 +54,25 @@ std::ostream& operator<<(std::ostream& stream, const DexRegisterLocation::Kind&
DexRegisterLocation::Kind DexRegisterMap::GetLocationInternalKind(
uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
DexRegisterLocationCatalog dex_register_location_catalog =
- code_info.GetDexRegisterLocationCatalog(enc);
+ code_info.GetDexRegisterLocationCatalog();
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
dex_register_number,
number_of_dex_registers,
- code_info.GetNumberOfLocationCatalogEntries(enc));
+ code_info.GetNumberOfLocationCatalogEntries());
return dex_register_location_catalog.GetLocationInternalKind(location_catalog_entry_index);
}
DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
DexRegisterLocationCatalog dex_register_location_catalog =
- code_info.GetDexRegisterLocationCatalog(enc);
+ code_info.GetDexRegisterLocationCatalog();
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
dex_register_number,
number_of_dex_registers,
- code_info.GetNumberOfLocationCatalogEntries(enc));
+ code_info.GetNumberOfLocationCatalogEntries());
return dex_register_location_catalog.GetDexRegisterLocation(location_catalog_entry_index);
}
@@ -90,27 +86,28 @@ static void DumpRegisterMapping(std::ostream& os,
<< " (" << location.GetValue() << ")" << suffix << '\n';
}
-void StackMapEncoding::Dump(VariableIndentationOutputStream* vios) const {
+void StackMap::DumpEncoding(const BitTable<6>& table,
+ VariableIndentationOutputStream* vios) {
vios->Stream()
<< "StackMapEncoding"
- << " (native_pc_bit_offset=" << static_cast<uint32_t>(kNativePcBitOffset)
- << ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
- << ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
- << ", inline_info_bit_offset=" << static_cast<uint32_t>(inline_info_bit_offset_)
- << ", register_mask_bit_offset=" << static_cast<uint32_t>(register_mask_index_bit_offset_)
- << ", stack_mask_index_bit_offset=" << static_cast<uint32_t>(stack_mask_index_bit_offset_)
- << ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_)
+ << " (NativePcOffsetBits=" << table.NumColumnBits(kNativePcOffset)
+ << ", DexPcBits=" << table.NumColumnBits(kDexPc)
+ << ", DexRegisterMapOffsetBits=" << table.NumColumnBits(kDexRegisterMapOffset)
+ << ", InlineInfoIndexBits=" << table.NumColumnBits(kInlineInfoIndex)
+ << ", RegisterMaskIndexBits=" << table.NumColumnBits(kRegisterMaskIndex)
+ << ", StackMaskIndexBits=" << table.NumColumnBits(kStackMaskIndex)
<< ")\n";
}
-void InlineInfoEncoding::Dump(VariableIndentationOutputStream* vios) const {
+void InlineInfo::DumpEncoding(const BitTable<5>& table,
+ VariableIndentationOutputStream* vios) {
vios->Stream()
<< "InlineInfoEncoding"
- << " (method_index_bit_offset=" << static_cast<uint32_t>(kMethodIndexBitOffset)
- << ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
- << ", extra_data_bit_offset=" << static_cast<uint32_t>(extra_data_bit_offset_)
- << ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
- << ", total_bit_size=" << static_cast<uint32_t>(total_bit_size_)
+ << " (IsLastBits=" << table.NumColumnBits(kIsLast)
+ << ", MethodIndexIdxBits=" << table.NumColumnBits(kMethodIndexIdx)
+ << ", DexPcBits=" << table.NumColumnBits(kDexPc)
+ << ", ExtraDataBits=" << table.NumColumnBits(kExtraData)
+ << ", DexRegisterMapOffsetBits=" << table.NumColumnBits(kDexRegisterMapOffset)
<< ")\n";
}
@@ -120,26 +117,24 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios,
bool dump_stack_maps,
InstructionSet instruction_set,
const MethodInfo& method_info) const {
- CodeInfoEncoding encoding = ExtractEncoding();
- size_t number_of_stack_maps = GetNumberOfStackMaps(encoding);
+ size_t number_of_stack_maps = GetNumberOfStackMaps();
vios->Stream()
<< "Optimized CodeInfo (number_of_dex_registers=" << number_of_dex_registers
<< ", number_of_stack_maps=" << number_of_stack_maps
<< ")\n";
ScopedIndentation indent1(vios);
- encoding.stack_map.encoding.Dump(vios);
- if (HasInlineInfo(encoding)) {
- encoding.inline_info.encoding.Dump(vios);
+ StackMap::DumpEncoding(stack_maps_, vios);
+ if (HasInlineInfo()) {
+ InlineInfo::DumpEncoding(inline_infos_, vios);
}
// Display the Dex register location catalog.
- GetDexRegisterLocationCatalog(encoding).Dump(vios, *this);
+ GetDexRegisterLocationCatalog().Dump(vios, *this);
// Display stack maps along with (live) Dex register maps.
if (dump_stack_maps) {
for (size_t i = 0; i < number_of_stack_maps; ++i) {
- StackMap stack_map = GetStackMapAt(i, encoding);
+ StackMap stack_map = GetStackMapAt(i);
stack_map.Dump(vios,
*this,
- encoding,
method_info,
code_offset,
number_of_dex_registers,
@@ -153,9 +148,8 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios,
void DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info) {
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
- size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize(encoding);
+ size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
+ size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize();
vios->Stream()
<< "DexRegisterLocationCatalog (number_of_entries=" << number_of_location_catalog_entries
<< ", size_in_bytes=" << location_catalog_size_in_bytes << ")\n";
@@ -169,8 +163,7 @@ void DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios,
void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
uint16_t number_of_dex_registers) const {
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+ size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
// TODO: Display the bit mask of live Dex registers.
for (size_t j = 0; j < number_of_dex_registers; ++j) {
if (IsDexRegisterLive(j)) {
@@ -178,8 +171,7 @@ void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
j, number_of_dex_registers, number_of_location_catalog_entries);
DexRegisterLocation location = GetDexRegisterLocation(j,
number_of_dex_registers,
- code_info,
- encoding);
+ code_info);
ScopedIndentation indent1(vios);
DumpRegisterMapping(
vios->Stream(), j, location, "v",
@@ -190,38 +182,35 @@ void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
void StackMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
- const CodeInfoEncoding& encoding,
const MethodInfo& method_info,
uint32_t code_offset,
uint16_t number_of_dex_registers,
InstructionSet instruction_set,
const std::string& header_suffix) const {
- StackMapEncoding stack_map_encoding = encoding.stack_map.encoding;
- const uint32_t pc_offset = GetNativePcOffset(stack_map_encoding, instruction_set);
+ const uint32_t pc_offset = GetNativePcOffset(instruction_set);
vios->Stream()
<< "StackMap" << header_suffix
<< std::hex
<< " [native_pc=0x" << code_offset + pc_offset << "]"
- << " [entry_size=0x" << encoding.stack_map.encoding.BitSize() << " bits]"
- << " (dex_pc=0x" << GetDexPc(stack_map_encoding)
+ << " (dex_pc=0x" << GetDexPc()
<< ", native_pc_offset=0x" << pc_offset
- << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(stack_map_encoding)
- << ", inline_info_offset=0x" << GetInlineInfoIndex(stack_map_encoding)
- << ", register_mask=0x" << code_info.GetRegisterMaskOf(encoding, *this)
+ << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset()
+ << ", inline_info_offset=0x" << GetInlineInfoIndex()
+ << ", register_mask=0x" << code_info.GetRegisterMaskOf(*this)
<< std::dec
<< ", stack_mask=0b";
- BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, *this);
- for (size_t i = 0, e = encoding.stack_mask.encoding.BitSize(); i < e; ++i) {
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(*this);
+ for (size_t i = 0, e = code_info.GetNumberOfStackMaskBits(); i < e; ++i) {
vios->Stream() << stack_mask.LoadBit(e - i - 1);
}
vios->Stream() << ")\n";
- if (HasDexRegisterMap(stack_map_encoding)) {
+ if (HasDexRegisterMap()) {
DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
- *this, encoding, number_of_dex_registers);
+ *this, number_of_dex_registers);
dex_register_map.Dump(vios, code_info, number_of_dex_registers);
}
- if (HasInlineInfo(stack_map_encoding)) {
- InlineInfo inline_info = code_info.GetInlineInfoOf(*this, encoding);
+ if (HasInlineInfo()) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(*this);
// We do not know the length of the dex register maps of inlined frames
// at this level, so we just pass null to `InlineInfo::Dump` to tell
// it not to look at these maps.
@@ -233,29 +222,27 @@ void InlineInfo::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
const MethodInfo& method_info,
uint16_t number_of_dex_registers[]) const {
- InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info.encoding;
vios->Stream() << "InlineInfo with depth "
- << static_cast<uint32_t>(GetDepth(inline_info_encoding))
+ << static_cast<uint32_t>(GetDepth())
<< "\n";
- for (size_t i = 0; i < GetDepth(inline_info_encoding); ++i) {
+ for (size_t i = 0; i < GetDepth(); ++i) {
vios->Stream()
<< " At depth " << i
<< std::hex
- << " (dex_pc=0x" << GetDexPcAtDepth(inline_info_encoding, i);
- if (EncodesArtMethodAtDepth(inline_info_encoding, i)) {
+ << " (dex_pc=0x" << GetDexPcAtDepth(i);
+ if (EncodesArtMethodAtDepth(i)) {
ScopedObjectAccess soa(Thread::Current());
- vios->Stream() << ", method=" << GetArtMethodAtDepth(inline_info_encoding, i)->PrettyMethod();
+ vios->Stream() << ", method=" << GetArtMethodAtDepth(i)->PrettyMethod();
} else {
vios->Stream()
<< std::dec
- << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, method_info, i);
+ << ", method_index=" << GetMethodIndexAtDepth(method_info, i);
}
vios->Stream() << ")\n";
- if (HasDexRegisterMapAtDepth(inline_info_encoding, i) && (number_of_dex_registers != nullptr)) {
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ if (HasDexRegisterMapAtDepth(i) && (number_of_dex_registers != nullptr)) {
DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapAtDepth(i, *this, encoding, number_of_dex_registers[i]);
+ code_info.GetDexRegisterMapAtDepth(i, *this, number_of_dex_registers[i]);
ScopedIndentation indent1(vios);
dex_register_map.Dump(vios, code_info, number_of_dex_registers[i]);
}
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 38397643b6..91cecf0690 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -21,12 +21,14 @@
#include "arch/code_offset.h"
#include "base/bit_memory_region.h"
+#include "base/bit_table.h"
#include "base/bit_utils.h"
#include "base/bit_vector.h"
#include "base/leb128.h"
#include "base/memory_region.h"
#include "dex/dex_file_types.h"
#include "method_info.h"
+#include "oat_quick_method_header.h"
namespace art {
@@ -37,13 +39,8 @@ class VariableIndentationOutputStream;
// (signed) values.
static constexpr ssize_t kFrameSlotSize = 4;
-// Size of Dex virtual registers.
-static constexpr size_t kVRegSize = 4;
-
class ArtMethod;
class CodeInfo;
-class StackMapEncoding;
-struct CodeInfoEncoding;
/**
* Classes in the following file are wrapper on stack map information backed
@@ -452,35 +449,31 @@ class DexRegisterMap {
explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
DexRegisterMap() {}
- bool IsValid() const { return region_.pointer() != nullptr; }
+ bool IsValid() const { return region_.IsValid(); }
// Get the surface kind of Dex register `dex_register_number`.
DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
return DexRegisterLocation::ConvertToSurfaceKind(
- GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info, enc));
+ GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info));
}
// Get the internal kind of Dex register `dex_register_number`.
DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const;
+ const CodeInfo& code_info) const;
// Get the Dex register location `dex_register_number`.
DexRegisterLocation GetDexRegisterLocation(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const;
+ const CodeInfo& code_info) const;
int32_t GetStackOffsetInBytes(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
DexRegisterLocation location =
- GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
// GetDexRegisterLocation returns the offset in bytes.
return location.GetValue();
@@ -488,20 +481,18 @@ class DexRegisterMap {
int32_t GetConstant(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
DexRegisterLocation location =
- GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
DCHECK_EQ(location.GetKind(), DexRegisterLocation::Kind::kConstant);
return location.GetValue();
}
int32_t GetMachineRegister(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
- const CodeInfo& code_info,
- const CodeInfoEncoding& enc) const {
+ const CodeInfo& code_info) const {
DexRegisterLocation location =
- GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
+ GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info);
DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister ||
location.GetInternalKind() == DexRegisterLocation::Kind::kInRegisterHigh ||
location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister ||
@@ -627,7 +618,7 @@ class DexRegisterMap {
// Return the size of the DexRegisterMap object, in bytes.
size_t Size() const {
- return region_.size();
+ return BitsToBytesRoundUp(region_.size_in_bits());
}
void Dump(VariableIndentationOutputStream* vios,
@@ -650,143 +641,12 @@ class DexRegisterMap {
static constexpr int kFixedSize = 0;
- MemoryRegion region_;
+ BitMemoryRegion region_;
friend class CodeInfo;
friend class StackMapStream;
};
-// Represents bit range of bit-packed integer field.
-// We reuse the idea from ULEB128p1 to support encoding of -1 (aka 0xFFFFFFFF).
-// If min_value is set to -1, we implicitly subtract one from any loaded value,
-// and add one to any stored value. This is generalized to any negative values.
-// In other words, min_value acts as a base and the stored value is added to it.
-struct FieldEncoding {
- FieldEncoding(size_t start_offset, size_t end_offset, int32_t min_value = 0)
- : start_offset_(start_offset), end_offset_(end_offset), min_value_(min_value) {
- DCHECK_LE(start_offset_, end_offset_);
- DCHECK_LE(BitSize(), 32u);
- }
-
- ALWAYS_INLINE size_t BitSize() const { return end_offset_ - start_offset_; }
-
- template <typename Region>
- ALWAYS_INLINE int32_t Load(const Region& region) const {
- DCHECK_LE(end_offset_, region.size_in_bits());
- return static_cast<int32_t>(region.LoadBits(start_offset_, BitSize())) + min_value_;
- }
-
- template <typename Region>
- ALWAYS_INLINE void Store(Region region, int32_t value) const {
- region.StoreBits(start_offset_, value - min_value_, BitSize());
- DCHECK_EQ(Load(region), value);
- }
-
- private:
- size_t start_offset_;
- size_t end_offset_;
- int32_t min_value_;
-};
-
-class StackMapEncoding {
- public:
- StackMapEncoding()
- : dex_pc_bit_offset_(0),
- dex_register_map_bit_offset_(0),
- inline_info_bit_offset_(0),
- register_mask_index_bit_offset_(0),
- stack_mask_index_bit_offset_(0),
- total_bit_size_(0) {}
-
- // Set stack map bit layout based on given sizes.
- // Returns the size of stack map in bits.
- size_t SetFromSizes(size_t native_pc_max,
- size_t dex_pc_max,
- size_t dex_register_map_size,
- size_t number_of_inline_info,
- size_t number_of_register_masks,
- size_t number_of_stack_masks) {
- total_bit_size_ = 0;
- DCHECK_EQ(kNativePcBitOffset, total_bit_size_);
- total_bit_size_ += MinimumBitsToStore(native_pc_max);
-
- dex_pc_bit_offset_ = total_bit_size_;
- // Note: We're not encoding the dex pc if there is none. That's the case
- // for an intrinsified native method, such as String.charAt().
- if (dex_pc_max != dex::kDexNoIndex) {
- total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
- }
-
- // We also need +1 for kNoDexRegisterMap, but since the size is strictly
- // greater than any offset we might try to encode, we already implicitly have it.
- dex_register_map_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
-
- // We also need +1 for kNoInlineInfo, but since the inline_info_size is strictly
- // greater than the offset we might try to encode, we already implicitly have it.
- // If inline_info_size is zero, we can encode only kNoInlineInfo (in zero bits).
- inline_info_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(number_of_inline_info);
-
- register_mask_index_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(number_of_register_masks);
-
- stack_mask_index_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(number_of_stack_masks);
-
- return total_bit_size_;
- }
-
- ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const {
- return FieldEncoding(kNativePcBitOffset, dex_pc_bit_offset_);
- }
- ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
- return FieldEncoding(dex_pc_bit_offset_, dex_register_map_bit_offset_, -1 /* min_value */);
- }
- ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
- return FieldEncoding(dex_register_map_bit_offset_, inline_info_bit_offset_, -1 /* min_value */);
- }
- ALWAYS_INLINE FieldEncoding GetInlineInfoEncoding() const {
- return FieldEncoding(inline_info_bit_offset_,
- register_mask_index_bit_offset_,
- -1 /* min_value */);
- }
- ALWAYS_INLINE FieldEncoding GetRegisterMaskIndexEncoding() const {
- return FieldEncoding(register_mask_index_bit_offset_, stack_mask_index_bit_offset_);
- }
- ALWAYS_INLINE FieldEncoding GetStackMaskIndexEncoding() const {
- return FieldEncoding(stack_mask_index_bit_offset_, total_bit_size_);
- }
- ALWAYS_INLINE size_t BitSize() const {
- return total_bit_size_;
- }
-
- // Encode the encoding into the vector.
- template<typename Vector>
- void Encode(Vector* dest) const {
- static_assert(alignof(StackMapEncoding) == 1, "Should not require alignment");
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- dest->insert(dest->end(), ptr, ptr + sizeof(*this));
- }
-
- // Decode the encoding from a pointer, updates the pointer.
- void Decode(const uint8_t** ptr) {
- *this = *reinterpret_cast<const StackMapEncoding*>(*ptr);
- *ptr += sizeof(*this);
- }
-
- void Dump(VariableIndentationOutputStream* vios) const;
-
- private:
- static constexpr size_t kNativePcBitOffset = 0;
- uint8_t dex_pc_bit_offset_;
- uint8_t dex_register_map_bit_offset_;
- uint8_t inline_info_bit_offset_;
- uint8_t register_mask_index_bit_offset_;
- uint8_t stack_mask_index_bit_offset_;
- uint8_t total_bit_size_;
-};
-
/**
* A Stack Map holds compilation information for a specific PC necessary for:
* - Mapping it to a dex PC,
@@ -794,248 +654,101 @@ class StackMapEncoding {
* - Knowing which registers hold objects,
* - Knowing the inlining information,
* - Knowing the values of dex registers.
- *
- * The information is of the form:
- *
- * [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_index, register_mask_index,
- * stack_mask_index].
*/
-class StackMap {
+class StackMap : public BitTable<6>::Accessor {
public:
- StackMap() {}
- explicit StackMap(BitMemoryRegion region) : region_(region) {}
-
- ALWAYS_INLINE bool IsValid() const { return region_.pointer() != nullptr; }
-
- ALWAYS_INLINE uint32_t GetDexPc(const StackMapEncoding& encoding) const {
- return encoding.GetDexPcEncoding().Load(region_);
- }
+ enum Field {
+ kNativePcOffset,
+ kDexPc,
+ kDexRegisterMapOffset,
+ kInlineInfoIndex,
+ kRegisterMaskIndex,
+ kStackMaskIndex,
+ kCount,
+ };
- ALWAYS_INLINE void SetDexPc(const StackMapEncoding& encoding, uint32_t dex_pc) {
- encoding.GetDexPcEncoding().Store(region_, dex_pc);
- }
+ StackMap() : BitTable<kCount>::Accessor(nullptr, -1) {}
+ StackMap(const BitTable<kCount>* table, uint32_t row)
+ : BitTable<kCount>::Accessor(table, row) {}
- ALWAYS_INLINE uint32_t GetNativePcOffset(const StackMapEncoding& encoding,
- InstructionSet instruction_set) const {
- CodeOffset offset(
- CodeOffset::FromCompressedOffset(encoding.GetNativePcEncoding().Load(region_)));
+ ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
+ CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>()));
return offset.Uint32Value(instruction_set);
}
- ALWAYS_INLINE void SetNativePcCodeOffset(const StackMapEncoding& encoding,
- CodeOffset native_pc_offset) {
- encoding.GetNativePcEncoding().Store(region_, native_pc_offset.CompressedValue());
- }
-
- ALWAYS_INLINE uint32_t GetDexRegisterMapOffset(const StackMapEncoding& encoding) const {
- return encoding.GetDexRegisterMapEncoding().Load(region_);
- }
-
- ALWAYS_INLINE void SetDexRegisterMapOffset(const StackMapEncoding& encoding, uint32_t offset) {
- encoding.GetDexRegisterMapEncoding().Store(region_, offset);
- }
-
- ALWAYS_INLINE uint32_t GetInlineInfoIndex(const StackMapEncoding& encoding) const {
- return encoding.GetInlineInfoEncoding().Load(region_);
- }
-
- ALWAYS_INLINE void SetInlineInfoIndex(const StackMapEncoding& encoding, uint32_t index) {
- encoding.GetInlineInfoEncoding().Store(region_, index);
- }
-
- ALWAYS_INLINE uint32_t GetRegisterMaskIndex(const StackMapEncoding& encoding) const {
- return encoding.GetRegisterMaskIndexEncoding().Load(region_);
- }
-
- ALWAYS_INLINE void SetRegisterMaskIndex(const StackMapEncoding& encoding, uint32_t mask) {
- encoding.GetRegisterMaskIndexEncoding().Store(region_, mask);
- }
+ uint32_t GetDexPc() const { return Get<kDexPc>(); }
- ALWAYS_INLINE uint32_t GetStackMaskIndex(const StackMapEncoding& encoding) const {
- return encoding.GetStackMaskIndexEncoding().Load(region_);
- }
+ uint32_t GetDexRegisterMapOffset() const { return Get<kDexRegisterMapOffset>(); }
+ bool HasDexRegisterMap() const { return GetDexRegisterMapOffset() != kNoValue; }
- ALWAYS_INLINE void SetStackMaskIndex(const StackMapEncoding& encoding, uint32_t mask) {
- encoding.GetStackMaskIndexEncoding().Store(region_, mask);
- }
+ uint32_t GetInlineInfoIndex() const { return Get<kInlineInfoIndex>(); }
+ bool HasInlineInfo() const { return GetInlineInfoIndex() != kNoValue; }
- ALWAYS_INLINE bool HasDexRegisterMap(const StackMapEncoding& encoding) const {
- return GetDexRegisterMapOffset(encoding) != kNoDexRegisterMap;
- }
+ uint32_t GetRegisterMaskIndex() const { return Get<kRegisterMaskIndex>(); }
- ALWAYS_INLINE bool HasInlineInfo(const StackMapEncoding& encoding) const {
- return GetInlineInfoIndex(encoding) != kNoInlineInfo;
- }
-
- ALWAYS_INLINE bool Equals(const StackMap& other) const {
- return region_.pointer() == other.region_.pointer() &&
- region_.size() == other.region_.size() &&
- region_.BitOffset() == other.region_.BitOffset();
- }
+ uint32_t GetStackMaskIndex() const { return Get<kStackMaskIndex>(); }
+ static void DumpEncoding(const BitTable<6>& table, VariableIndentationOutputStream* vios);
void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
- const CodeInfoEncoding& encoding,
const MethodInfo& method_info,
uint32_t code_offset,
uint16_t number_of_dex_registers,
InstructionSet instruction_set,
const std::string& header_suffix = "") const;
-
- // Special (invalid) offset for the DexRegisterMapOffset field meaning
- // that there is no Dex register map for this stack map.
- static constexpr uint32_t kNoDexRegisterMap = -1;
-
- // Special (invalid) offset for the InlineDescriptorOffset field meaning
- // that there is no inline info for this stack map.
- static constexpr uint32_t kNoInlineInfo = -1;
-
- private:
- static constexpr int kFixedSize = 0;
-
- BitMemoryRegion region_;
-
- friend class StackMapStream;
-};
-
-class InlineInfoEncoding {
- public:
- void SetFromSizes(size_t method_index_idx_max,
- size_t dex_pc_max,
- size_t extra_data_max,
- size_t dex_register_map_size) {
- total_bit_size_ = kMethodIndexBitOffset;
- total_bit_size_ += MinimumBitsToStore(method_index_idx_max);
-
- dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
- // Note: We're not encoding the dex pc if there is none. That's the case
- // for an intrinsified native method, such as String.charAt().
- if (dex_pc_max != dex::kDexNoIndex) {
- total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
- }
-
- extra_data_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
- total_bit_size_ += MinimumBitsToStore(extra_data_max);
-
- // We also need +1 for kNoDexRegisterMap, but since the size is strictly
- // greater than any offset we might try to encode, we already implicitly have it.
- dex_register_map_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
- total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
- }
-
- ALWAYS_INLINE FieldEncoding GetMethodIndexIdxEncoding() const {
- return FieldEncoding(kMethodIndexBitOffset, dex_pc_bit_offset_);
- }
- ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
- return FieldEncoding(dex_pc_bit_offset_, extra_data_bit_offset_, -1 /* min_value */);
- }
- ALWAYS_INLINE FieldEncoding GetExtraDataEncoding() const {
- return FieldEncoding(extra_data_bit_offset_, dex_register_map_bit_offset_);
- }
- ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
- return FieldEncoding(dex_register_map_bit_offset_, total_bit_size_, -1 /* min_value */);
- }
- ALWAYS_INLINE size_t BitSize() const {
- return total_bit_size_;
- }
-
- void Dump(VariableIndentationOutputStream* vios) const;
-
- // Encode the encoding into the vector.
- template<typename Vector>
- void Encode(Vector* dest) const {
- static_assert(alignof(InlineInfoEncoding) == 1, "Should not require alignment");
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- dest->insert(dest->end(), ptr, ptr + sizeof(*this));
- }
-
- // Decode the encoding from a pointer, updates the pointer.
- void Decode(const uint8_t** ptr) {
- *this = *reinterpret_cast<const InlineInfoEncoding*>(*ptr);
- *ptr += sizeof(*this);
- }
-
- private:
- static constexpr uint8_t kIsLastBitOffset = 0;
- static constexpr uint8_t kMethodIndexBitOffset = 1;
- uint8_t dex_pc_bit_offset_;
- uint8_t extra_data_bit_offset_;
- uint8_t dex_register_map_bit_offset_;
- uint8_t total_bit_size_;
};
/**
- * Inline information for a specific PC. The information is of the form:
- *
- * [is_last,
- * method_index (or ArtMethod high bits),
- * dex_pc,
- * extra_data (ArtMethod low bits or 1),
- * dex_register_map_offset]+.
+ * Inline information for a specific PC.
+ * The row referenced from the StackMap holds information at depth 0.
+ * Following rows hold information for further depths.
*/
-class InlineInfo {
+class InlineInfo : public BitTable<5>::Accessor {
public:
- explicit InlineInfo(BitMemoryRegion region) : region_(region) {}
-
- ALWAYS_INLINE uint32_t GetDepth(const InlineInfoEncoding& encoding) const {
- size_t depth = 0;
- while (!GetRegionAtDepth(encoding, depth++).LoadBit(0)) { } // Check is_last bit.
- return depth;
- }
-
- ALWAYS_INLINE void SetDepth(const InlineInfoEncoding& encoding, uint32_t depth) {
- DCHECK_GT(depth, 0u);
- for (size_t d = 0; d < depth; ++d) {
- GetRegionAtDepth(encoding, d).StoreBit(0, d == depth - 1); // Set is_last bit.
- }
- }
+ enum Field {
+ kIsLast, // Determines if there are further rows for further depths.
+ kMethodIndexIdx, // Method index or ArtMethod high bits.
+ kDexPc,
+ kExtraData, // ArtMethod low bits or 1.
+ kDexRegisterMapOffset,
+ kCount,
+ };
+ static constexpr uint32_t kLast = -1;
+ static constexpr uint32_t kMore = 0;
- ALWAYS_INLINE uint32_t GetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- DCHECK(!EncodesArtMethodAtDepth(encoding, depth));
- return encoding.GetMethodIndexIdxEncoding().Load(GetRegionAtDepth(encoding, depth));
- }
+ InlineInfo(const BitTable<kCount>* table, uint32_t row)
+ : BitTable<kCount>::Accessor(table, row) {}
- ALWAYS_INLINE void SetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth,
- uint32_t index) {
- encoding.GetMethodIndexIdxEncoding().Store(GetRegionAtDepth(encoding, depth), index);
+ ALWAYS_INLINE InlineInfo AtDepth(uint32_t depth) const {
+ return InlineInfo(table_, this->row_ + depth);
}
-
- ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
- const MethodInfo& method_info,
- uint32_t depth) const {
- return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(encoding, depth));
+ uint32_t GetDepth() const {
+ size_t depth = 0;
+ while (AtDepth(depth++).Get<kIsLast>() == kMore) { }
+ return depth;
}
- ALWAYS_INLINE uint32_t GetDexPcAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- return encoding.GetDexPcEncoding().Load(GetRegionAtDepth(encoding, depth));
+ uint32_t GetMethodIndexIdxAtDepth(uint32_t depth) const {
+ DCHECK(!EncodesArtMethodAtDepth(depth));
+ return AtDepth(depth).Get<kMethodIndexIdx>();
}
- ALWAYS_INLINE void SetDexPcAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth,
- uint32_t dex_pc) {
- encoding.GetDexPcEncoding().Store(GetRegionAtDepth(encoding, depth), dex_pc);
+ uint32_t GetMethodIndexAtDepth(const MethodInfo& method_info, uint32_t depth) const {
+ return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(depth));
}
- ALWAYS_INLINE bool EncodesArtMethodAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- return (encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth)) & 1) == 0;
+ uint32_t GetDexPcAtDepth(uint32_t depth) const {
+ return AtDepth(depth).Get<kDexPc>();
}
- ALWAYS_INLINE void SetExtraDataAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth,
- uint32_t extra_data) {
- encoding.GetExtraDataEncoding().Store(GetRegionAtDepth(encoding, depth), extra_data);
+ bool EncodesArtMethodAtDepth(uint32_t depth) const {
+ return (AtDepth(depth).Get<kExtraData>() & 1) == 0;
}
- ALWAYS_INLINE ArtMethod* GetArtMethodAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- uint32_t low_bits = encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth));
- uint32_t high_bits = encoding.GetMethodIndexIdxEncoding().Load(
- GetRegionAtDepth(encoding, depth));
+ ArtMethod* GetArtMethodAtDepth(uint32_t depth) const {
+ uint32_t low_bits = AtDepth(depth).Get<kExtraData>();
+ uint32_t high_bits = AtDepth(depth).Get<kMethodIndexIdx>();
if (high_bits == 0) {
return reinterpret_cast<ArtMethod*>(low_bits);
} else {
@@ -1045,411 +758,132 @@ class InlineInfo {
}
}
- ALWAYS_INLINE uint32_t GetDexRegisterMapOffsetAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- return encoding.GetDexRegisterMapEncoding().Load(GetRegionAtDepth(encoding, depth));
- }
-
- ALWAYS_INLINE void SetDexRegisterMapOffsetAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth,
- uint32_t offset) {
- encoding.GetDexRegisterMapEncoding().Store(GetRegionAtDepth(encoding, depth), offset);
+ uint32_t GetDexRegisterMapOffsetAtDepth(uint32_t depth) const {
+ return AtDepth(depth).Get<kDexRegisterMapOffset>();
}
- ALWAYS_INLINE bool HasDexRegisterMapAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- return GetDexRegisterMapOffsetAtDepth(encoding, depth) != StackMap::kNoDexRegisterMap;
+ bool HasDexRegisterMapAtDepth(uint32_t depth) const {
+ return GetDexRegisterMapOffsetAtDepth(depth) != StackMap::kNoValue;
}
+ static void DumpEncoding(const BitTable<5>& table, VariableIndentationOutputStream* vios);
void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& info,
const MethodInfo& method_info,
uint16_t* number_of_dex_registers) const;
-
- private:
- ALWAYS_INLINE BitMemoryRegion GetRegionAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
- size_t entry_size = encoding.BitSize();
- DCHECK_GT(entry_size, 0u);
- return region_.Subregion(depth * entry_size, entry_size);
- }
-
- BitMemoryRegion region_;
};
-// Bit sized region encoding, may be more than 255 bits.
-class BitRegionEncoding {
+class InvokeInfo : public BitTable<3>::Accessor {
public:
- uint32_t num_bits = 0;
-
- ALWAYS_INLINE size_t BitSize() const {
- return num_bits;
- }
-
- template<typename Vector>
- void Encode(Vector* dest) const {
- EncodeUnsignedLeb128(dest, num_bits); // Use leb in case num_bits is greater than 255.
- }
-
- void Decode(const uint8_t** ptr) {
- num_bits = DecodeUnsignedLeb128(ptr);
- }
-};
-
-// A table of bit sized encodings.
-template <typename Encoding>
-struct BitEncodingTable {
- static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
- // How the encoding is laid out (serialized).
- Encoding encoding;
-
- // Number of entries in the table (serialized).
- size_t num_entries;
-
- // Bit offset for the base of the table (computed).
- size_t bit_offset = kInvalidOffset;
-
- template<typename Vector>
- void Encode(Vector* dest) const {
- EncodeUnsignedLeb128(dest, num_entries);
- encoding.Encode(dest);
- }
-
- ALWAYS_INLINE void Decode(const uint8_t** ptr) {
- num_entries = DecodeUnsignedLeb128(ptr);
- encoding.Decode(ptr);
- }
-
- // Set the bit offset in the table and adds the space used by the table to offset.
- void UpdateBitOffset(size_t* offset) {
- DCHECK(offset != nullptr);
- bit_offset = *offset;
- *offset += encoding.BitSize() * num_entries;
- }
-
- // Return the bit region for the map at index i.
- ALWAYS_INLINE BitMemoryRegion BitRegion(MemoryRegion region, size_t index) const {
- DCHECK_NE(bit_offset, kInvalidOffset) << "Invalid table offset";
- DCHECK_LT(index, num_entries);
- const size_t map_size = encoding.BitSize();
- return BitMemoryRegion(region, bit_offset + index * map_size, map_size);
- }
-};
-
-// A byte sized table of possible variable sized encodings.
-struct ByteSizedTable {
- static constexpr size_t kInvalidOffset = static_cast<size_t>(-1);
-
- // Number of entries in the table (serialized).
- size_t num_entries = 0;
-
- // Number of bytes of the table (serialized).
- size_t num_bytes;
-
- // Bit offset for the base of the table (computed).
- size_t byte_offset = kInvalidOffset;
-
- template<typename Vector>
- void Encode(Vector* dest) const {
- EncodeUnsignedLeb128(dest, num_entries);
- EncodeUnsignedLeb128(dest, num_bytes);
- }
-
- ALWAYS_INLINE void Decode(const uint8_t** ptr) {
- num_entries = DecodeUnsignedLeb128(ptr);
- num_bytes = DecodeUnsignedLeb128(ptr);
- }
-
- // Set the bit offset of the table. Adds the total bit size of the table to offset.
- void UpdateBitOffset(size_t* offset) {
- DCHECK(offset != nullptr);
- DCHECK_ALIGNED(*offset, kBitsPerByte);
- byte_offset = *offset / kBitsPerByte;
- *offset += num_bytes * kBitsPerByte;
- }
-};
-
-// Format is [native pc, invoke type, method index].
-class InvokeInfoEncoding {
- public:
- void SetFromSizes(size_t native_pc_max,
- size_t invoke_type_max,
- size_t method_index_max) {
- total_bit_size_ = 0;
- DCHECK_EQ(kNativePcBitOffset, total_bit_size_);
- total_bit_size_ += MinimumBitsToStore(native_pc_max);
- invoke_type_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(invoke_type_max);
- method_index_bit_offset_ = total_bit_size_;
- total_bit_size_ += MinimumBitsToStore(method_index_max);
- }
-
- ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const {
- return FieldEncoding(kNativePcBitOffset, invoke_type_bit_offset_);
- }
-
- ALWAYS_INLINE FieldEncoding GetInvokeTypeEncoding() const {
- return FieldEncoding(invoke_type_bit_offset_, method_index_bit_offset_);
- }
-
- ALWAYS_INLINE FieldEncoding GetMethodIndexEncoding() const {
- return FieldEncoding(method_index_bit_offset_, total_bit_size_);
- }
-
- ALWAYS_INLINE size_t BitSize() const {
- return total_bit_size_;
- }
-
- template<typename Vector>
- void Encode(Vector* dest) const {
- static_assert(alignof(InvokeInfoEncoding) == 1, "Should not require alignment");
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(this);
- dest->insert(dest->end(), ptr, ptr + sizeof(*this));
- }
-
- void Decode(const uint8_t** ptr) {
- *this = *reinterpret_cast<const InvokeInfoEncoding*>(*ptr);
- *ptr += sizeof(*this);
- }
-
- private:
- static constexpr uint8_t kNativePcBitOffset = 0;
- uint8_t invoke_type_bit_offset_;
- uint8_t method_index_bit_offset_;
- uint8_t total_bit_size_;
-};
+ enum Field {
+ kNativePcOffset,
+ kInvokeType,
+ kMethodIndexIdx,
+ kCount,
+ };
-class InvokeInfo {
- public:
- explicit InvokeInfo(BitMemoryRegion region) : region_(region) {}
+ InvokeInfo(const BitTable<kCount>* table, uint32_t row)
+ : BitTable<kCount>::Accessor(table, row) {}
- ALWAYS_INLINE uint32_t GetNativePcOffset(const InvokeInfoEncoding& encoding,
- InstructionSet instruction_set) const {
- CodeOffset offset(
- CodeOffset::FromCompressedOffset(encoding.GetNativePcEncoding().Load(region_)));
+ ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
+ CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>()));
return offset.Uint32Value(instruction_set);
}
- ALWAYS_INLINE void SetNativePcCodeOffset(const InvokeInfoEncoding& encoding,
- CodeOffset native_pc_offset) {
- encoding.GetNativePcEncoding().Store(region_, native_pc_offset.CompressedValue());
- }
-
- ALWAYS_INLINE uint32_t GetInvokeType(const InvokeInfoEncoding& encoding) const {
- return encoding.GetInvokeTypeEncoding().Load(region_);
- }
-
- ALWAYS_INLINE void SetInvokeType(const InvokeInfoEncoding& encoding, uint32_t invoke_type) {
- encoding.GetInvokeTypeEncoding().Store(region_, invoke_type);
- }
-
- ALWAYS_INLINE uint32_t GetMethodIndexIdx(const InvokeInfoEncoding& encoding) const {
- return encoding.GetMethodIndexEncoding().Load(region_);
- }
-
- ALWAYS_INLINE void SetMethodIndexIdx(const InvokeInfoEncoding& encoding,
- uint32_t method_index_idx) {
- encoding.GetMethodIndexEncoding().Store(region_, method_index_idx);
- }
-
- ALWAYS_INLINE uint32_t GetMethodIndex(const InvokeInfoEncoding& encoding,
- MethodInfo method_info) const {
- return method_info.GetMethodIndex(GetMethodIndexIdx(encoding));
- }
-
- bool IsValid() const { return region_.pointer() != nullptr; }
-
- private:
- BitMemoryRegion region_;
-};
-
-// Most of the fields are encoded as ULEB128 to save space.
-struct CodeInfoEncoding {
- using SizeType = uint32_t;
-
- static constexpr SizeType kInvalidSize = std::numeric_limits<SizeType>::max();
-
- // Byte sized tables go first to avoid unnecessary alignment bits.
- ByteSizedTable dex_register_map;
- ByteSizedTable location_catalog;
- BitEncodingTable<StackMapEncoding> stack_map;
- BitEncodingTable<BitRegionEncoding> register_mask;
- BitEncodingTable<BitRegionEncoding> stack_mask;
- BitEncodingTable<InvokeInfoEncoding> invoke_info;
- BitEncodingTable<InlineInfoEncoding> inline_info;
-
- CodeInfoEncoding() {}
-
- explicit CodeInfoEncoding(const void* data) {
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(data);
- dex_register_map.Decode(&ptr);
- location_catalog.Decode(&ptr);
- stack_map.Decode(&ptr);
- register_mask.Decode(&ptr);
- stack_mask.Decode(&ptr);
- invoke_info.Decode(&ptr);
- if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
- inline_info.Decode(&ptr);
- } else {
- inline_info = BitEncodingTable<InlineInfoEncoding>();
- }
- cache_header_size =
- dchecked_integral_cast<SizeType>(ptr - reinterpret_cast<const uint8_t*>(data));
- ComputeTableOffsets();
- }
-
- // Compress is not const since it calculates cache_header_size. This is used by PrepareForFillIn.
- template<typename Vector>
- void Compress(Vector* dest) {
- dex_register_map.Encode(dest);
- location_catalog.Encode(dest);
- stack_map.Encode(dest);
- register_mask.Encode(dest);
- stack_mask.Encode(dest);
- invoke_info.Encode(dest);
- if (stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0) {
- inline_info.Encode(dest);
- }
- cache_header_size = dest->size();
- }
-
- ALWAYS_INLINE void ComputeTableOffsets() {
- // Skip the header.
- size_t bit_offset = HeaderSize() * kBitsPerByte;
- // The byte tables must be aligned so they must go first.
- dex_register_map.UpdateBitOffset(&bit_offset);
- location_catalog.UpdateBitOffset(&bit_offset);
- // Other tables don't require alignment.
- stack_map.UpdateBitOffset(&bit_offset);
- register_mask.UpdateBitOffset(&bit_offset);
- stack_mask.UpdateBitOffset(&bit_offset);
- invoke_info.UpdateBitOffset(&bit_offset);
- inline_info.UpdateBitOffset(&bit_offset);
- cache_non_header_size = RoundUp(bit_offset, kBitsPerByte) / kBitsPerByte - HeaderSize();
- }
+ uint32_t GetInvokeType() const { return Get<kInvokeType>(); }
- ALWAYS_INLINE size_t HeaderSize() const {
- DCHECK_NE(cache_header_size, kInvalidSize) << "Uninitialized";
- return cache_header_size;
- }
+ uint32_t GetMethodIndexIdx() const { return Get<kMethodIndexIdx>(); }
- ALWAYS_INLINE size_t NonHeaderSize() const {
- DCHECK_NE(cache_non_header_size, kInvalidSize) << "Uninitialized";
- return cache_non_header_size;
+ uint32_t GetMethodIndex(MethodInfo method_info) const {
+ return method_info.GetMethodIndex(GetMethodIndexIdx());
}
-
- private:
- // Computed fields (not serialized).
- // Header size in bytes, cached to avoid needing to re-decoding the encoding in HeaderSize.
- SizeType cache_header_size = kInvalidSize;
- // Non header size in bytes, cached to avoid needing to re-decoding the encoding in NonHeaderSize.
- SizeType cache_non_header_size = kInvalidSize;
};
/**
* Wrapper around all compiler information collected for a method.
* The information is of the form:
*
- * [CodeInfoEncoding, DexRegisterMap+, DexLocationCatalog+, StackMap+, RegisterMask+, StackMask+,
- * InlineInfo*]
- *
- * where CodeInfoEncoding is of the form:
+ * [BitTable<Header>, BitTable<StackMap>, BitTable<RegisterMask>, BitTable<InlineInfo>,
+ * BitTable<InvokeInfo>, BitTable<StackMask>, DexRegisterMap, DexLocationCatalog]
*
- * [ByteSizedTable(dex_register_map), ByteSizedTable(location_catalog),
- * BitEncodingTable<StackMapEncoding>, BitEncodingTable<BitRegionEncoding>,
- * BitEncodingTable<BitRegionEncoding>, BitEncodingTable<InlineInfoEncoding>]
*/
class CodeInfo {
public:
- explicit CodeInfo(MemoryRegion region) : region_(region) {
- }
-
explicit CodeInfo(const void* data) {
- CodeInfoEncoding encoding = CodeInfoEncoding(data);
- region_ = MemoryRegion(const_cast<void*>(data),
- encoding.HeaderSize() + encoding.NonHeaderSize());
+ Decode(reinterpret_cast<const uint8_t*>(data));
}
- CodeInfoEncoding ExtractEncoding() const {
- CodeInfoEncoding encoding(region_.begin());
- AssertValidStackMap(encoding);
- return encoding;
+ explicit CodeInfo(MemoryRegion region) : CodeInfo(region.begin()) {
+ DCHECK_EQ(size_, region.size());
}
- bool HasInlineInfo(const CodeInfoEncoding& encoding) const {
- return encoding.stack_map.encoding.GetInlineInfoEncoding().BitSize() > 0;
+ explicit CodeInfo(const OatQuickMethodHeader* header)
+ : CodeInfo(header->GetOptimizedCodeInfoPtr()) {
}
- DexRegisterLocationCatalog GetDexRegisterLocationCatalog(const CodeInfoEncoding& encoding) const {
- return DexRegisterLocationCatalog(region_.Subregion(encoding.location_catalog.byte_offset,
- encoding.location_catalog.num_bytes));
+ size_t Size() const {
+ return size_;
}
- ALWAYS_INLINE size_t GetNumberOfStackMaskBits(const CodeInfoEncoding& encoding) const {
- return encoding.stack_mask.encoding.BitSize();
+ bool HasInlineInfo() const {
+ return stack_maps_.NumColumnBits(StackMap::kInlineInfoIndex) != 0;
}
- ALWAYS_INLINE StackMap GetStackMapAt(size_t index, const CodeInfoEncoding& encoding) const {
- return StackMap(encoding.stack_map.BitRegion(region_, index));
+ DexRegisterLocationCatalog GetDexRegisterLocationCatalog() const {
+ return DexRegisterLocationCatalog(location_catalog_);
}
- BitMemoryRegion GetStackMask(size_t index, const CodeInfoEncoding& encoding) const {
- return encoding.stack_mask.BitRegion(region_, index);
+ ALWAYS_INLINE size_t GetNumberOfStackMaskBits() const {
+ return stack_mask_bits_;
}
- BitMemoryRegion GetStackMaskOf(const CodeInfoEncoding& encoding,
- const StackMap& stack_map) const {
- return GetStackMask(stack_map.GetStackMaskIndex(encoding.stack_map.encoding), encoding);
+ ALWAYS_INLINE StackMap GetStackMapAt(size_t index) const {
+ return StackMap(&stack_maps_, index);
}
- BitMemoryRegion GetRegisterMask(size_t index, const CodeInfoEncoding& encoding) const {
- return encoding.register_mask.BitRegion(region_, index);
+ BitMemoryRegion GetStackMask(size_t index) const {
+ return stack_masks_.Subregion(index * stack_mask_bits_, stack_mask_bits_);
}
- uint32_t GetRegisterMaskOf(const CodeInfoEncoding& encoding, const StackMap& stack_map) const {
- size_t index = stack_map.GetRegisterMaskIndex(encoding.stack_map.encoding);
- return GetRegisterMask(index, encoding).LoadBits(0u, encoding.register_mask.encoding.BitSize());
+ BitMemoryRegion GetStackMaskOf(const StackMap& stack_map) const {
+ return GetStackMask(stack_map.GetStackMaskIndex());
}
- uint32_t GetNumberOfLocationCatalogEntries(const CodeInfoEncoding& encoding) const {
- return encoding.location_catalog.num_entries;
+ uint32_t GetRegisterMaskOf(const StackMap& stack_map) const {
+ return register_masks_.Get(stack_map.GetRegisterMaskIndex());
}
- uint32_t GetDexRegisterLocationCatalogSize(const CodeInfoEncoding& encoding) const {
- return encoding.location_catalog.num_bytes;
+ uint32_t GetNumberOfLocationCatalogEntries() const {
+ return location_catalog_entries_;
}
- uint32_t GetNumberOfStackMaps(const CodeInfoEncoding& encoding) const {
- return encoding.stack_map.num_entries;
+ uint32_t GetDexRegisterLocationCatalogSize() const {
+ return location_catalog_.size();
}
- // Get the size of all the stack maps of this CodeInfo object, in bits. Not byte aligned.
- ALWAYS_INLINE size_t GetStackMapsSizeInBits(const CodeInfoEncoding& encoding) const {
- return encoding.stack_map.encoding.BitSize() * GetNumberOfStackMaps(encoding);
+ uint32_t GetNumberOfStackMaps() const {
+ return stack_maps_.NumRows();
}
- InvokeInfo GetInvokeInfo(const CodeInfoEncoding& encoding, size_t index) const {
- return InvokeInfo(encoding.invoke_info.BitRegion(region_, index));
+ InvokeInfo GetInvokeInfo(size_t index) const {
+ return InvokeInfo(&invoke_infos_, index);
}
DexRegisterMap GetDexRegisterMapOf(StackMap stack_map,
- const CodeInfoEncoding& encoding,
size_t number_of_dex_registers) const {
- if (!stack_map.HasDexRegisterMap(encoding.stack_map.encoding)) {
+ if (!stack_map.HasDexRegisterMap()) {
return DexRegisterMap();
}
- const uint32_t offset = encoding.dex_register_map.byte_offset +
- stack_map.GetDexRegisterMapOffset(encoding.stack_map.encoding);
- size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
- return DexRegisterMap(region_.Subregion(offset, size));
+ const uint32_t offset = stack_map.GetDexRegisterMapOffset();
+ size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+ return DexRegisterMap(dex_register_maps_.Subregion(offset, size));
}
- size_t GetDexRegisterMapsSize(const CodeInfoEncoding& encoding,
- uint32_t number_of_dex_registers) const {
+ size_t GetDexRegisterMapsSize(uint32_t number_of_dex_registers) const {
size_t total = 0;
- for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
- StackMap stack_map = GetStackMapAt(i, encoding);
- DexRegisterMap map(GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers));
+ for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap stack_map = GetStackMapAt(i);
+ DexRegisterMap map(GetDexRegisterMapOf(stack_map, number_of_dex_registers));
total += map.Size();
}
return total;
@@ -1458,38 +892,30 @@ class CodeInfo {
// Return the `DexRegisterMap` pointed by `inline_info` at depth `depth`.
DexRegisterMap GetDexRegisterMapAtDepth(uint8_t depth,
InlineInfo inline_info,
- const CodeInfoEncoding& encoding,
uint32_t number_of_dex_registers) const {
- if (!inline_info.HasDexRegisterMapAtDepth(encoding.inline_info.encoding, depth)) {
+ if (!inline_info.HasDexRegisterMapAtDepth(depth)) {
return DexRegisterMap();
} else {
- uint32_t offset = encoding.dex_register_map.byte_offset +
- inline_info.GetDexRegisterMapOffsetAtDepth(encoding.inline_info.encoding, depth);
- size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
- return DexRegisterMap(region_.Subregion(offset, size));
+ uint32_t offset = inline_info.GetDexRegisterMapOffsetAtDepth(depth);
+ size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+ return DexRegisterMap(dex_register_maps_.Subregion(offset, size));
}
}
- InlineInfo GetInlineInfo(size_t index, const CodeInfoEncoding& encoding) const {
- // Since we do not know the depth, we just return the whole remaining map. The caller may
- // access the inline info for arbitrary depths. To return the precise inline info we would need
- // to count the depth before returning.
- // TODO: Clean this up.
- const size_t bit_offset = encoding.inline_info.bit_offset +
- index * encoding.inline_info.encoding.BitSize();
- return InlineInfo(BitMemoryRegion(region_, bit_offset, region_.size_in_bits() - bit_offset));
+ InlineInfo GetInlineInfo(size_t index) const {
+ return InlineInfo(&inline_infos_, index);
}
- InlineInfo GetInlineInfoOf(StackMap stack_map, const CodeInfoEncoding& encoding) const {
- DCHECK(stack_map.HasInlineInfo(encoding.stack_map.encoding));
- uint32_t index = stack_map.GetInlineInfoIndex(encoding.stack_map.encoding);
- return GetInlineInfo(index, encoding);
+ InlineInfo GetInlineInfoOf(StackMap stack_map) const {
+ DCHECK(stack_map.HasInlineInfo());
+ uint32_t index = stack_map.GetInlineInfoIndex();
+ return GetInlineInfo(index);
}
- StackMap GetStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
- for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
- StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
+ StackMap GetStackMapForDexPc(uint32_t dex_pc) const {
+ for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap stack_map = GetStackMapAt(i);
+ if (stack_map.GetDexPc() == dex_pc) {
return stack_map;
}
}
@@ -1498,40 +924,39 @@ class CodeInfo {
// Searches the stack map list backwards because catch stack maps are stored
// at the end.
- StackMap GetCatchStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
- for (size_t i = GetNumberOfStackMaps(encoding); i > 0; --i) {
- StackMap stack_map = GetStackMapAt(i - 1, encoding);
- if (stack_map.GetDexPc(encoding.stack_map.encoding) == dex_pc) {
+ StackMap GetCatchStackMapForDexPc(uint32_t dex_pc) const {
+ for (size_t i = GetNumberOfStackMaps(); i > 0; --i) {
+ StackMap stack_map = GetStackMapAt(i - 1);
+ if (stack_map.GetDexPc() == dex_pc) {
return stack_map;
}
}
return StackMap();
}
- StackMap GetOsrStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
- size_t e = GetNumberOfStackMaps(encoding);
+ StackMap GetOsrStackMapForDexPc(uint32_t dex_pc) const {
+ size_t e = GetNumberOfStackMaps();
if (e == 0) {
// There cannot be OSR stack map if there is no stack map.
return StackMap();
}
// Walk over all stack maps. If two consecutive stack maps are identical, then we
// have found a stack map suitable for OSR.
- const StackMapEncoding& stack_map_encoding = encoding.stack_map.encoding;
for (size_t i = 0; i < e - 1; ++i) {
- StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetDexPc(stack_map_encoding) == dex_pc) {
- StackMap other = GetStackMapAt(i + 1, encoding);
- if (other.GetDexPc(stack_map_encoding) == dex_pc &&
- other.GetNativePcOffset(stack_map_encoding, kRuntimeISA) ==
- stack_map.GetNativePcOffset(stack_map_encoding, kRuntimeISA)) {
- DCHECK_EQ(other.GetDexRegisterMapOffset(stack_map_encoding),
- stack_map.GetDexRegisterMapOffset(stack_map_encoding));
- DCHECK(!stack_map.HasInlineInfo(stack_map_encoding));
+ StackMap stack_map = GetStackMapAt(i);
+ if (stack_map.GetDexPc() == dex_pc) {
+ StackMap other = GetStackMapAt(i + 1);
+ if (other.GetDexPc() == dex_pc &&
+ other.GetNativePcOffset(kRuntimeISA) ==
+ stack_map.GetNativePcOffset(kRuntimeISA)) {
+ DCHECK_EQ(other.GetDexRegisterMapOffset(),
+ stack_map.GetDexRegisterMapOffset());
+ DCHECK(!stack_map.HasInlineInfo());
if (i < e - 2) {
// Make sure there are not three identical stack maps following each other.
DCHECK_NE(
- stack_map.GetNativePcOffset(stack_map_encoding, kRuntimeISA),
- GetStackMapAt(i + 2, encoding).GetNativePcOffset(stack_map_encoding, kRuntimeISA));
+ stack_map.GetNativePcOffset(kRuntimeISA),
+ GetStackMapAt(i + 2).GetNativePcOffset(kRuntimeISA));
}
return stack_map;
}
@@ -1540,30 +965,27 @@ class CodeInfo {
return StackMap();
}
- StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset,
- const CodeInfoEncoding& encoding) const {
+ StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset) const {
// TODO: Safepoint stack maps are sorted by native_pc_offset but catch stack
// maps are not. If we knew that the method does not have try/catch,
// we could do binary search.
- for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
- StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetNativePcOffset(encoding.stack_map.encoding, kRuntimeISA) ==
- native_pc_offset) {
+ for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap stack_map = GetStackMapAt(i);
+ if (stack_map.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
return stack_map;
}
}
return StackMap();
}
- InvokeInfo GetInvokeInfoForNativePcOffset(uint32_t native_pc_offset,
- const CodeInfoEncoding& encoding) {
- for (size_t index = 0; index < encoding.invoke_info.num_entries; index++) {
- InvokeInfo item = GetInvokeInfo(encoding, index);
- if (item.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA) == native_pc_offset) {
+ InvokeInfo GetInvokeInfoForNativePcOffset(uint32_t native_pc_offset) {
+ for (size_t index = 0; index < invoke_infos_.NumRows(); index++) {
+ InvokeInfo item = GetInvokeInfo(index);
+ if (item.GetNativePcOffset(kRuntimeISA) == native_pc_offset) {
return item;
}
}
- return InvokeInfo(BitMemoryRegion());
+ return InvokeInfo(&invoke_infos_, -1);
}
// Dump this CodeInfo object on `os`. `code_offset` is the (absolute)
@@ -1578,23 +1000,10 @@ class CodeInfo {
InstructionSet instruction_set,
const MethodInfo& method_info) const;
- // Check that the code info has valid stack map and abort if it does not.
- void AssertValidStackMap(const CodeInfoEncoding& encoding) const {
- if (region_.size() != 0 && region_.size_in_bits() < GetStackMapsSizeInBits(encoding)) {
- LOG(FATAL) << region_.size() << "\n"
- << encoding.HeaderSize() << "\n"
- << encoding.NonHeaderSize() << "\n"
- << encoding.location_catalog.num_entries << "\n"
- << encoding.stack_map.num_entries << "\n"
- << encoding.stack_map.encoding.BitSize();
- }
- }
-
private:
// Compute the size of the Dex register map associated to the stack map at
// `dex_register_map_offset_in_code_info`.
- size_t ComputeDexRegisterMapSizeOf(const CodeInfoEncoding& encoding,
- uint32_t dex_register_map_offset_in_code_info,
+ size_t ComputeDexRegisterMapSizeOf(uint32_t dex_register_map_offset,
uint16_t number_of_dex_registers) const {
// Offset where the actual mapping data starts within art::DexRegisterMap.
size_t location_mapping_data_offset_in_dex_register_map =
@@ -1602,12 +1011,12 @@ class CodeInfo {
// Create a temporary art::DexRegisterMap to be able to call
// art::DexRegisterMap::GetNumberOfLiveDexRegisters and
DexRegisterMap dex_register_map_without_locations(
- MemoryRegion(region_.Subregion(dex_register_map_offset_in_code_info,
- location_mapping_data_offset_in_dex_register_map)));
+ MemoryRegion(dex_register_maps_.Subregion(dex_register_map_offset,
+ location_mapping_data_offset_in_dex_register_map)));
size_t number_of_live_dex_registers =
dex_register_map_without_locations.GetNumberOfLiveDexRegisters(number_of_dex_registers);
size_t location_mapping_data_size_in_bits =
- DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries(encoding))
+ DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries())
* number_of_live_dex_registers;
size_t location_mapping_data_size_in_bytes =
RoundUp(location_mapping_data_size_in_bits, kBitsPerByte) / kBitsPerByte;
@@ -1616,37 +1025,42 @@ class CodeInfo {
return dex_register_map_size;
}
- // Compute the size of a Dex register location catalog starting at offset `origin`
- // in `region_` and containing `number_of_dex_locations` entries.
- size_t ComputeDexRegisterLocationCatalogSize(uint32_t origin,
- uint32_t number_of_dex_locations) const {
- // TODO: Ideally, we would like to use art::DexRegisterLocationCatalog::Size or
- // art::DexRegisterLocationCatalog::FindLocationOffset, but the
- // DexRegisterLocationCatalog is not yet built. Try to factor common code.
- size_t offset = origin + DexRegisterLocationCatalog::kFixedSize;
-
- // Skip the first `number_of_dex_locations - 1` entries.
- for (uint16_t i = 0; i < number_of_dex_locations; ++i) {
- // Read the first next byte and inspect its first 3 bits to decide
- // whether it is a short or a large location.
- DexRegisterLocationCatalog::ShortLocation first_byte =
- region_.LoadUnaligned<DexRegisterLocationCatalog::ShortLocation>(offset);
- DexRegisterLocation::Kind kind =
- DexRegisterLocationCatalog::ExtractKindFromShortLocation(first_byte);
- if (DexRegisterLocation::IsShortLocationKind(kind)) {
- // Short location. Skip the current byte.
- offset += DexRegisterLocationCatalog::SingleShortEntrySize();
- } else {
- // Large location. Skip the 5 next bytes.
- offset += DexRegisterLocationCatalog::SingleLargeEntrySize();
- }
- }
- size_t size = offset - origin;
- return size;
- }
-
- MemoryRegion region_;
- friend class StackMapStream;
+ MemoryRegion DecodeMemoryRegion(MemoryRegion& region, size_t* bit_offset) {
+ size_t length = DecodeVarintBits(BitMemoryRegion(region), bit_offset);
+ size_t offset = BitsToBytesRoundUp(*bit_offset);
+ *bit_offset = (offset + length) * kBitsPerByte;
+ return region.Subregion(offset, length);
+ }
+
+ void Decode(const uint8_t* data) {
+ size_t non_header_size = DecodeUnsignedLeb128(&data);
+ MemoryRegion region(const_cast<uint8_t*>(data), non_header_size);
+ BitMemoryRegion bit_region(region);
+ size_t bit_offset = 0;
+ size_ = UnsignedLeb128Size(non_header_size) + non_header_size;
+ dex_register_maps_ = DecodeMemoryRegion(region, &bit_offset);
+ location_catalog_entries_ = DecodeVarintBits(bit_region, &bit_offset);
+ location_catalog_ = DecodeMemoryRegion(region, &bit_offset);
+ stack_maps_.Decode(bit_region, &bit_offset);
+ invoke_infos_.Decode(bit_region, &bit_offset);
+ inline_infos_.Decode(bit_region, &bit_offset);
+ register_masks_.Decode(bit_region, &bit_offset);
+ stack_mask_bits_ = DecodeVarintBits(bit_region, &bit_offset);
+ stack_masks_ = bit_region.Subregion(bit_offset, non_header_size * kBitsPerByte - bit_offset);
+ }
+
+ size_t size_;
+ MemoryRegion dex_register_maps_;
+ uint32_t location_catalog_entries_;
+ MemoryRegion location_catalog_;
+ BitTable<StackMap::Field::kCount> stack_maps_;
+ BitTable<InvokeInfo::Field::kCount> invoke_infos_;
+ BitTable<InlineInfo::Field::kCount> inline_infos_;
+ BitTable<1> register_masks_;
+ uint32_t stack_mask_bits_ = 0;
+ BitMemoryRegion stack_masks_;
+
+ friend class OatDumper;
};
#undef ELEMENT_BYTE_OFFSET_AFTER
diff --git a/runtime/thread.cc b/runtime/thread.cc
index eada24d257..81ed722fcc 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -3559,16 +3559,15 @@ class ReferenceMapVisitor : public StackVisitor {
StackReference<mirror::Object>* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
reinterpret_cast<uintptr_t>(cur_quick_frame));
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
- CodeInfo code_info = method_header->GetOptimizedCodeInfo();
- CodeInfoEncoding encoding = code_info.ExtractEncoding();
- StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
+ CodeInfo code_info(method_header);
+ StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(map.IsValid());
- T vreg_info(m, code_info, encoding, map, visitor_);
+ T vreg_info(m, code_info, map, visitor_);
// Visit stack entries that hold pointers.
- const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding);
- BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, map);
+ const size_t number_of_bits = code_info.GetNumberOfStackMaskBits();
+ BitMemoryRegion stack_mask = code_info.GetStackMaskOf(map);
for (size_t i = 0; i < number_of_bits; ++i) {
if (stack_mask.LoadBit(i)) {
StackReference<mirror::Object>* ref_addr = vreg_base + i;
@@ -3583,7 +3582,7 @@ class ReferenceMapVisitor : public StackVisitor {
}
}
// Visit callee-save registers that hold pointers.
- uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, map);
+ uint32_t register_mask = code_info.GetRegisterMaskOf(map);
for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
if (register_mask & (1 << i)) {
mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
@@ -3631,7 +3630,6 @@ class ReferenceMapVisitor : public StackVisitor {
struct UndefinedVRegInfo {
UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED,
const CodeInfo& code_info ATTRIBUTE_UNUSED,
- const CodeInfoEncoding& encoding ATTRIBUTE_UNUSED,
const StackMap& map ATTRIBUTE_UNUSED,
RootVisitor& _visitor)
: visitor(_visitor) {
@@ -3662,14 +3660,11 @@ class ReferenceMapVisitor : public StackVisitor {
struct StackMapVRegInfo {
StackMapVRegInfo(ArtMethod* method,
const CodeInfo& _code_info,
- const CodeInfoEncoding& _encoding,
const StackMap& map,
RootVisitor& _visitor)
: number_of_dex_registers(method->DexInstructionData().RegistersSize()),
code_info(_code_info),
- encoding(_encoding),
dex_register_map(code_info.GetDexRegisterMapOf(map,
- encoding,
number_of_dex_registers)),
visitor(_visitor) {
}
@@ -3684,7 +3679,7 @@ class ReferenceMapVisitor : public StackVisitor {
bool found = false;
for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
- dex_reg, number_of_dex_registers, code_info, encoding);
+ dex_reg, number_of_dex_registers, code_info);
if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
visitor(ref, dex_reg, stack_visitor);
found = true;
@@ -3718,7 +3713,6 @@ class ReferenceMapVisitor : public StackVisitor {
size_t number_of_dex_registers;
const CodeInfo& code_info;
- const CodeInfoEncoding& encoding;
DexRegisterMap dex_register_map;
RootVisitor& visitor;
};
diff --git a/runtime/var_handles.cc b/runtime/var_handles.cc
new file mode 100644
index 0000000000..f08742fcd7
--- /dev/null
+++ b/runtime/var_handles.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "var_handles.h"
+
+#include "common_throws.h"
+#include "dex/dex_instruction.h"
+#include "handle.h"
+#include "method_handles-inl.h"
+#include "mirror/method_type.h"
+#include "mirror/var_handle.h"
+
+namespace art {
+
+namespace {
+
+bool VarHandleInvokeAccessorWithConversions(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::VarHandle> var_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const mirror::VarHandle::AccessMode access_mode,
+ const InstructionOperands* const operands,
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::MethodType> accessor_type(hs.NewHandle(
+ var_handle->GetMethodTypeForAccessMode(self, access_mode)));
+ const size_t num_vregs = accessor_type->NumberOfVRegs();
+ const int num_params = accessor_type->GetPTypes()->GetLength();
+ ShadowFrameAllocaUniquePtr accessor_frame =
+ CREATE_SHADOW_FRAME(num_vregs, nullptr, shadow_frame.GetMethod(), shadow_frame.GetDexPC());
+ ShadowFrameGetter getter(shadow_frame, operands);
+ static const uint32_t kFirstDestinationReg = 0;
+ ShadowFrameSetter setter(accessor_frame.get(), kFirstDestinationReg);
+ if (!PerformConversions(self, callsite_type, accessor_type, &getter, &setter, num_params)) {
+ return false;
+ }
+ RangeInstructionOperands accessor_operands(kFirstDestinationReg,
+ kFirstDestinationReg + num_vregs);
+ if (!var_handle->Access(access_mode, accessor_frame.get(), &accessor_operands, result)) {
+ return false;
+ }
+ return ConvertReturnValue(callsite_type, accessor_type, result);
+}
+
+} // namespace
+
+bool VarHandleInvokeAccessor(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::VarHandle> var_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const mirror::VarHandle::AccessMode access_mode,
+ const InstructionOperands* const operands,
+ JValue* result) {
+ if (var_handle.IsNull()) {
+ ThrowNullPointerExceptionFromDexPC();
+ return false;
+ }
+
+ if (!var_handle->IsAccessModeSupported(access_mode)) {
+ ThrowUnsupportedOperationException();
+ return false;
+ }
+
+ mirror::VarHandle::MatchKind match_kind =
+ var_handle->GetMethodTypeMatchForAccessMode(access_mode, callsite_type.Get());
+ if (LIKELY(match_kind == mirror::VarHandle::MatchKind::kExact)) {
+ return var_handle->Access(access_mode, &shadow_frame, operands, result);
+ } else if (match_kind == mirror::VarHandle::MatchKind::kWithConversions) {
+ return VarHandleInvokeAccessorWithConversions(self,
+ shadow_frame,
+ var_handle,
+ callsite_type,
+ access_mode,
+ operands,
+ result);
+ } else {
+ DCHECK_EQ(match_kind, mirror::VarHandle::MatchKind::kNone);
+ ThrowWrongMethodTypeException(var_handle->PrettyDescriptorForAccessMode(access_mode),
+ callsite_type->PrettyDescriptor());
+ return false;
+ }
+}
+
+} // namespace art
diff --git a/runtime/var_handles.h b/runtime/var_handles.h
new file mode 100644
index 0000000000..2ff8405f03
--- /dev/null
+++ b/runtime/var_handles.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_VAR_HANDLES_H_
+#define ART_RUNTIME_VAR_HANDLES_H_
+
+#include "mirror/var_handle.h"
+
+namespace art {
+
+bool VarHandleInvokeAccessor(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::VarHandle> var_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const mirror::VarHandle::AccessMode access_mode,
+ const InstructionOperands* const operands,
+ JValue* result)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+} // namespace art
+
+#endif // ART_RUNTIME_VAR_HANDLES_H_
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 3518d2facd..91cec23dd5 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3086,7 +3086,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
DCHECK(HasFailures());
break;
}
- const uint32_t proto_idx = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
+ const uint16_t vRegH = (is_range) ? inst->VRegH_4rcc() : inst->VRegH_45cc();
+ const dex::ProtoIndex proto_idx(vRegH);
const char* return_descriptor =
dex_file_->GetReturnTypeDescriptor(dex_file_->GetProtoId(proto_idx));
const RegType& return_type =
@@ -3117,7 +3118,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
CallSiteArrayValueIterator it(*dex_file_, dex_file_->GetCallSiteId(call_site_idx));
it.Next(); // Skip to name.
it.Next(); // Skip to method type of the method handle
- const uint32_t proto_idx = static_cast<uint32_t>(it.GetJavaValue().i);
+ const dex::ProtoIndex proto_idx(it.GetJavaValue().c);
const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(proto_idx);
DexFileParameterIterator param_it(*dex_file_, proto_id);
// Treat method as static as it has yet to be determined.
@@ -4190,7 +4191,8 @@ ArtMethod* MethodVerifier::VerifyInvocationArgs(
if (UNLIKELY(method_type == METHOD_POLYMORPHIC)) {
// Process the signature of the calling site that is invoking the method handle.
- DexFileParameterIterator it(*dex_file_, dex_file_->GetProtoId(inst->VRegH()));
+ dex::ProtoIndex proto_idx(inst->VRegH());
+ DexFileParameterIterator it(*dex_file_, dex_file_->GetProtoId(proto_idx));
return VerifyInvocationArgsFromIterator(&it, inst, method_type, is_range, res_method);
} else {
// Process the target method's signature.
@@ -4208,8 +4210,6 @@ bool MethodVerifier::CheckSignaturePolymorphicMethod(ArtMethod* method) {
expected_return_descriptor = mirror::MethodHandle::GetReturnTypeDescriptor(method_name);
} else if (klass == mirror::VarHandle::StaticClass()) {
expected_return_descriptor = mirror::VarHandle::GetReturnTypeDescriptor(method_name);
- // TODO: add compiler support for VarHandle accessor methods (b/71781600)
- Fail(VERIFY_ERROR_FORCE_INTERPRETER);
} else {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "Signature polymorphic method in unsuppported class: " << klass->PrettyDescriptor();
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index b79334ac7f..f7cdf3920a 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -23,6 +23,8 @@
#include <android-base/logging.h>
#include <android-base/stringprintf.h>
+#include "base/enums.h"
+#include "class_linker.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "hidden_api.h"
#include "jni/jni_internal.h"
@@ -30,6 +32,7 @@
#include "mirror/throwable.h"
#include "nativehelper/scoped_local_ref.h"
#include "obj_ptr-inl.h"
+#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-current-inl.h"
@@ -231,19 +234,28 @@ static jmethodID CachePrimitiveBoxingMethod(JNIEnv* env, char prim_name, const c
V(java_lang_String_init_StringBuilder, "(Ljava/lang/StringBuilder;)V", newStringFromStringBuilder, "newStringFromStringBuilder", "(Ljava/lang/StringBuilder;)Ljava/lang/String;", NewStringFromStringBuilder) \
#define STATIC_STRING_INIT(init_runtime_name, init_signature, new_runtime_name, ...) \
- static ArtMethod* init_runtime_name; \
- static ArtMethod* new_runtime_name;
+ static ArtMethod* init_runtime_name = nullptr; \
+ static ArtMethod* new_runtime_name = nullptr;
STRING_INIT_LIST(STATIC_STRING_INIT)
#undef STATIC_STRING_INIT
-void WellKnownClasses::InitStringInit(JNIEnv* env) {
- ScopedObjectAccess soa(Thread::Current());
- #define LOAD_STRING_INIT(init_runtime_name, init_signature, new_runtime_name, \
- new_java_name, new_signature, ...) \
- init_runtime_name = jni::DecodeArtMethod( \
- CacheMethod(env, java_lang_String, false, "<init>", init_signature)); \
- new_runtime_name = jni::DecodeArtMethod( \
- CacheMethod(env, java_lang_StringFactory, true, new_java_name, new_signature));
+void WellKnownClasses::InitStringInit(ObjPtr<mirror::Class> string_class,
+ ObjPtr<mirror::Class> string_builder_class) {
+ PointerSize p_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ auto find_method = [p_size](ObjPtr<mirror::Class> klass,
+ const char* name,
+ const char* sig,
+ bool expect_static) REQUIRES_SHARED(Locks::mutator_lock_) {
+ ArtMethod* ret = klass->FindClassMethod(name, sig, p_size);
+ CHECK(ret != nullptr);
+ CHECK_EQ(expect_static, ret->IsStatic());
+ return ret;
+ };
+
+ #define LOAD_STRING_INIT(init_runtime_name, init_signature, new_runtime_name, \
+ new_java_name, new_signature, ...) \
+ init_runtime_name = find_method(string_class, "<init>", init_signature, false); \
+ new_runtime_name = find_method(string_builder_class, new_java_name, new_signature, true);
STRING_INIT_LIST(LOAD_STRING_INIT)
#undef LOAD_STRING_INIT
}
@@ -252,6 +264,7 @@ void Thread::InitStringEntryPoints() {
QuickEntryPoints* qpoints = &tlsPtr_.quick_entrypoints;
#define SET_ENTRY_POINT(init_runtime_name, init_signature, new_runtime_name, \
new_java_name, new_signature, entry_point_name) \
+ DCHECK(!Runtime::Current()->IsStarted() || (new_runtime_name) != nullptr); \
qpoints->p ## entry_point_name = reinterpret_cast<void(*)()>(new_runtime_name);
STRING_INIT_LIST(SET_ENTRY_POINT)
#undef SET_ENTRY_POINT
@@ -260,7 +273,9 @@ void Thread::InitStringEntryPoints() {
ArtMethod* WellKnownClasses::StringInitToStringFactory(ArtMethod* string_init) {
#define TO_STRING_FACTORY(init_runtime_name, init_signature, new_runtime_name, \
new_java_name, new_signature, entry_point_name) \
+ DCHECK((init_runtime_name) != nullptr); \
if (string_init == (init_runtime_name)) { \
+ DCHECK((new_runtime_name) != nullptr); \
return (new_runtime_name); \
}
STRING_INIT_LIST(TO_STRING_FACTORY)
@@ -282,26 +297,9 @@ uint32_t WellKnownClasses::StringInitToEntryPoint(ArtMethod* string_init) {
}
#undef STRING_INIT_LIST
-class ScopedHiddenApiExemption {
- public:
- explicit ScopedHiddenApiExemption(Runtime* runtime)
- : runtime_(runtime),
- initial_policy_(runtime_->GetHiddenApiEnforcementPolicy()) {
- runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kNoChecks);
- }
-
- ~ScopedHiddenApiExemption() {
- runtime_->SetHiddenApiEnforcementPolicy(initial_policy_);
- }
-
- private:
- Runtime* runtime_;
- const hiddenapi::EnforcementPolicy initial_policy_;
- DISALLOW_COPY_AND_ASSIGN(ScopedHiddenApiExemption);
-};
-
void WellKnownClasses::Init(JNIEnv* env) {
- ScopedHiddenApiExemption hiddenapi_exemption(Runtime::Current());
+ hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
+ hiddenapi::EnforcementPolicy::kNoChecks);
dalvik_annotation_optimization_CriticalNative =
CacheClass(env, "dalvik/annotation/optimization/CriticalNative");
@@ -427,9 +425,6 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_Integer_valueOf = CachePrimitiveBoxingMethod(env, 'I', "java/lang/Integer");
java_lang_Long_valueOf = CachePrimitiveBoxingMethod(env, 'J', "java/lang/Long");
java_lang_Short_valueOf = CachePrimitiveBoxingMethod(env, 'S', "java/lang/Short");
-
- InitStringInit(env);
- Thread::Current()->InitStringEntryPoints();
}
void WellKnownClasses::LateInit(JNIEnv* env) {
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 25c07b27de..c06e4a71ce 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -40,6 +40,9 @@ struct WellKnownClasses {
static void Clear();
+ static void InitStringInit(ObjPtr<mirror::Class> string_class,
+ ObjPtr<mirror::Class> string_builder_class)
+ REQUIRES_SHARED(Locks::mutator_lock_);
static ArtMethod* StringInitToStringFactory(ArtMethod* method);
static uint32_t StringInitToEntryPoint(ArtMethod* method);
@@ -168,9 +171,6 @@ struct WellKnownClasses {
static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_length;
static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_offset;
static jfieldID org_apache_harmony_dalvik_ddmc_Chunk_type;
-
- private:
- static void InitStringInit(JNIEnv* env);
};
} // namespace art