Diffstat (limited to 'runtime')
-rw-r--r--  runtime/arch/arm/asm_support_arm.S  25
-rw-r--r--  runtime/arch/arm/instruction_set_features_arm.cc  1
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S  122
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S  6
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc  2
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S  12
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S  13
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S  9
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S  6
-rw-r--r--  runtime/art_field-inl.h  3
-rw-r--r--  runtime/art_field.h  1
-rw-r--r--  runtime/art_method-inl.h  19
-rw-r--r--  runtime/art_method.h  3
-rw-r--r--  runtime/asm_support.h  18
-rw-r--r--  runtime/class_linker-inl.h  28
-rw-r--r--  runtime/class_linker.cc  27
-rw-r--r--  runtime/class_linker.h  1
-rw-r--r--  runtime/common_runtime_test.cc  27
-rw-r--r--  runtime/common_throws.cc  7
-rw-r--r--  runtime/common_throws.h  5
-rw-r--r--  runtime/dex_file-inl.h  78
-rw-r--r--  runtime/dex_file.cc  55
-rw-r--r--  runtime/dex_file.h  107
-rw-r--r--  runtime/dex_file_test.cc  3
-rw-r--r--  runtime/dex_file_verifier.cc  20
-rw-r--r--  runtime/dex_file_verifier.h  25
-rw-r--r--  runtime/dex_file_verifier_test.cc  52
-rw-r--r--  runtime/entrypoints/quick/quick_default_externs.h  1
-rw-r--r--  runtime/entrypoints/quick/quick_default_init_entrypoints.h  1
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h  1
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc  16
-rw-r--r--  runtime/entrypoints_order_test.cc  15
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  68
-rw-r--r--  runtime/gc/collector/concurrent_copying.h  25
-rw-r--r--  runtime/gc/collector/mark_compact.cc  92
-rw-r--r--  runtime/gc/collector/mark_compact.h  11
-rw-r--r--  runtime/gc/collector/mark_sweep.cc  63
-rw-r--r--  runtime/gc/collector/mark_sweep.h  26
-rw-r--r--  runtime/gc/collector/semi_space-inl.h  23
-rw-r--r--  runtime/gc/collector/semi_space.cc  58
-rw-r--r--  runtime/gc/collector/semi_space.h  4
-rw-r--r--  runtime/gc/space/image_space.cc  179
-rw-r--r--  runtime/java_vm_ext.cc  41
-rw-r--r--  runtime/jit/offline_profiling_info.cc  57
-rw-r--r--  runtime/jit/offline_profiling_info.h  5
-rw-r--r--  runtime/mem_map.cc  7
-rw-r--r--  runtime/mirror/class-inl.h  13
-rw-r--r--  runtime/mirror/class.h  2
-rw-r--r--  runtime/mirror/string-inl.h  7
-rw-r--r--  runtime/oat.h  2
-rw-r--r--  runtime/oat_file.cc  5
-rw-r--r--  runtime/oat_file_assistant_test.cc  155
-rw-r--r--  runtime/oat_file_manager.cc  4
-rw-r--r--  runtime/runtime.cc  3
-rw-r--r--  runtime/signal_set.h  4
-rw-r--r--  runtime/stack_map.h  7
-rw-r--r--  runtime/thread.cc  101
-rw-r--r--  runtime/thread.h  18
-rw-r--r--  runtime/thread_list.cc  53
-rw-r--r--  runtime/thread_list.h  4
60 files changed, 982 insertions, 764 deletions
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index 44c7649dea..38ca76a6a9 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -30,18 +30,17 @@
.arch armv7-a
.thumb
-// Macro to generate the value of Runtime::Current into rDest clobbering rTemp. As it uses labels
+// Macro to generate the value of Runtime::Current into rDest. As it uses labels
// then the labels need to be unique. We bind these to the function name in the ENTRY macros.
-.macro RUNTIME_CURRENT name, num, rDest, rTemp
+.macro RUNTIME_CURRENT name, num, rDest
.if .Lruntime_current\num\()_used
.error
.endif
.set .Lruntime_current\num\()_used, 1
- ldr \rDest, .Lgot_\name\()_\num @ Load offset of the GOT.
- ldr \rTemp, .Lruntime_instance_\name\()_\num @ Load GOT offset of Runtime::instance_.
+ ldr \rDest, .Lruntime_instance_\name\()_\num @ Load GOT_PREL offset of Runtime::instance_.
.Lload_got_\name\()_\num\():
- add \rDest, pc @ Fixup GOT address.
- ldr \rDest, [\rDest, \rTemp] @ Load address of Runtime::instance_.
+ add \rDest, pc @ Fixup GOT_PREL address.
+ ldr \rDest, [\rDest] @ Load address of Runtime::instance_.
ldr \rDest, [\rDest] @ Load Runtime::instance_.
.endm
@@ -90,26 +89,20 @@
DEF_ENTRY .arm, \name
.endm
-// Terminate an ENTRY and generate GOT references.
+// Terminate an ENTRY and generate GOT_PREL references.
.macro END name
// Generate offsets of GOT and Runtime::instance_ used in RUNTIME_CURRENT.
.if .Lruntime_current1_used
- .Lgot_\name\()_1:
- .word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_1+4)
.Lruntime_instance_\name\()_1:
- .word _ZN3art7Runtime9instance_E(GOT)
+ .word _ZN3art7Runtime9instance_E(GOT_PREL)-(.Lload_got_\name\()_1+4)
.endif
.if .Lruntime_current2_used
- .Lgot_\name\()_2:
- .word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_2+4)
.Lruntime_instance_\name\()_2:
- .word _ZN3art7Runtime9instance_E(GOT)
+ .word _ZN3art7Runtime9instance_E(GOT_PREL)-(.Lload_got_\name\()_2+4)
.endif
.if .Lruntime_current3_used
- .Lgot_\name\()_3:
- .word _GLOBAL_OFFSET_TABLE_-(.Lload_got_\name\()_3+4)
.Lruntime_instance_\name\()_3:
- .word _ZN3art7Runtime9instance_E(GOT)
+ .word _ZN3art7Runtime9instance_E(GOT_PREL)-(.Lload_got_\name\()_3+4)
.endif
// Remove the RUNTIME_CURRENTx macros so they get rebound in the next function entry.
.purgem RUNTIME_CURRENT1
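
The switch from paired GOT literals to a single GOT_PREL literal above drops one literal word, one load, and the temp register per use. The arithmetic is easier to follow outside assembly; below is a minimal C++ sketch with illustrative addresses (real values come from the linker, and the `+4` in the literal expressions accounts for the Thumb pc reading as the add's address plus 4):

#include <cassert>
#include <cstdint>

int main() {
  // Illustrative addresses only, not real relocation output.
  uint32_t pc = 0x1000;        // pc as read by the "add rDest, pc" fixup
  uint32_t got_base = 0x7f00;  // _GLOBAL_OFFSET_TABLE_
  uint32_t got_slot = 0x8000;  // GOT entry holding &Runtime::instance_

  // Old scheme: two literals, an add, and two dependent loads.
  uint32_t word_got = got_base - pc;        // _GLOBAL_OFFSET_TABLE_-(.Lload_got+4)
  uint32_t word_off = got_slot - got_base;  // _ZN3art7Runtime9instance_E(GOT)
  assert(pc + word_got + word_off == got_slot);

  // New scheme: one GOT_PREL literal, pc-relative to the slot itself.
  uint32_t word_prel = got_slot - pc;       // ..._E(GOT_PREL)-(.Lload_got+4)
  assert(pc + word_prel == got_slot);       // same slot, no rTemp needed
  return 0;
}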
diff --git a/runtime/arch/arm/instruction_set_features_arm.cc b/runtime/arch/arm/instruction_set_features_arm.cc
index ffac0307b7..c3a5829979 100644
--- a/runtime/arch/arm/instruction_set_features_arm.cc
+++ b/runtime/arch/arm/instruction_set_features_arm.cc
@@ -206,6 +206,7 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromAssembly() {
struct sigaction sa, osa;
sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
sa.sa_sigaction = bad_divide_inst_handle;
+ sigemptyset(&sa.sa_mask);
sigaction(SIGILL, &sa, &osa);
bool has_div = false;
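
The one-line fix above closes an undefined-behavior hole: sa_mask is read by sigaction(), so leaving it uninitialized can block an arbitrary set of signals while the SIGILL probe handler runs. A self-contained sketch of the corrected setup (not the full FromAssembly() probe):

#include <signal.h>
#include <string.h>

static void handler(int sig, siginfo_t* info, void* ctx) {
  (void)sig; (void)info; (void)ctx;
}

int main() {
  struct sigaction sa, osa;
  memset(&sa, 0, sizeof(sa));   // belt and braces: zero the whole struct
  sa.sa_flags = SA_ONSTACK | SA_RESTART | SA_SIGINFO;
  sa.sa_sigaction = handler;
  sigemptyset(&sa.sa_mask);     // the fix: sa_mask was previously garbage
  sigaction(SIGILL, &sa, &osa);
  // ... probe for the instruction, then restore the original handler:
  sigaction(SIGILL, &osa, nullptr);
  return 0;
}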
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 5209bb6ab6..27a41f09ad 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -44,15 +44,15 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
*/
-.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp1, rTemp2
+.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME rTemp
SPILL_ALL_CALLEE_SAVE_GPRS @ 9 words (36 bytes) of callee saves.
vpush {s16-s31} @ 16 words (64 bytes) of floats.
.cfi_adjust_cfa_offset 64
sub sp, #12 @ 3 words of space, bottom word will hold Method*
.cfi_adjust_cfa_offset 12
- RUNTIME_CURRENT1 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- ldr \rTemp1, [\rTemp1, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kSaveAll Method*.
- str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
+ RUNTIME_CURRENT1 \rTemp @ Load Runtime::Current into rTemp.
+ ldr \rTemp, [\rTemp, #RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET] @ rTemp is kSaveAll Method*.
+ str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
@@ -65,7 +65,7 @@
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly).
*/
-.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME rTemp1, rTemp2
+.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME rTemp
push {r5-r8, r10-r11, lr} @ 7 words of callee saves
.cfi_adjust_cfa_offset 28
.cfi_rel_offset r5, 0
@@ -77,9 +77,9 @@
.cfi_rel_offset lr, 24
sub sp, #4 @ bottom word will hold Method*
.cfi_adjust_cfa_offset 4
- RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*.
- str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
+ RUNTIME_CURRENT2 \rTemp @ Load Runtime::Current into rTemp.
+ ldr \rTemp, [\rTemp, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp is kRefsOnly Method*.
+ str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
// Ugly compile-time check, but we only have the preprocessor.
@@ -88,30 +88,6 @@
#endif
.endm
- /*
- * Macro that sets up the callee save frame to conform with
- * Runtime::CreateCalleeSaveMethod(kRefsOnly)
- * and preserves the value of rTemp2 at entry.
- */
-.macro SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_RTEMP2 rTemp1, rTemp2
- push {r5-r8, r10-r11, lr} @ 7 words of callee saves
- .cfi_adjust_cfa_offset 28
- .cfi_rel_offset r5, 0
- .cfi_rel_offset r6, 4
- .cfi_rel_offset r7, 8
- .cfi_rel_offset r8, 12
- .cfi_rel_offset r10, 16
- .cfi_rel_offset r11, 20
- .cfi_rel_offset lr, 24
- sub sp, #4 @ bottom word will hold Method*
- .cfi_adjust_cfa_offset 4
- str \rTemp2, [sp, #0] @ save rTemp2
- RUNTIME_CURRENT2 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET] @ rTemp1 is kRefsOnly Method*.
- ldr \rTemp2, [sp, #0] @ restore rTemp2
- str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
- str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
-
// Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 28 + 4)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM) size not as expected."
@@ -164,12 +140,12 @@
#endif
.endm
-.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp1, rTemp2
+.macro SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME rTemp
SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_REGISTERS_ONLY
- RUNTIME_CURRENT3 \rTemp1, \rTemp2 @ Load Runtime::Current into rTemp1.
- @ rTemp1 is kRefsAndArgs Method*.
- ldr \rTemp1, [\rTemp1, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]
- str \rTemp1, [sp, #0] @ Place Method* at bottom of stack.
+ RUNTIME_CURRENT3 \rTemp @ Load Runtime::Current into rTemp.
+ @ rTemp is kRefsAndArgs Method*.
+ ldr \rTemp, [\rTemp, #RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]
+ str \rTemp, [sp, #0] @ Place Method* at bottom of stack.
str sp, [r9, #THREAD_TOP_QUICK_FRAME_OFFSET] @ Place sp in Thread::Current()->top_quick_frame.
.endm
@@ -217,7 +193,7 @@
.macro DELIVER_PENDING_EXCEPTION
.fnend
.fnstart
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1 @ save callee saves for throw
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0 @ save callee saves for throw
mov r0, r9 @ pass Thread::Current
b artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*)
.endm
@@ -225,7 +201,7 @@
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1 // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0 @ save all registers as basis for long jump context
mov r0, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
@@ -234,7 +210,7 @@ END \c_name
.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r1, r2 // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r1 @ save all registers as basis for long jump context
mov r1, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
@@ -243,7 +219,7 @@ END \c_name
.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2, r3 // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
b \cxx_name @ \cxx_name(Thread*)
END \c_name
@@ -275,7 +251,7 @@ END \c_name
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case of GC
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r2, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t field_idx, const Method* referrer, Thread*)
@@ -287,7 +263,7 @@ END \name
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
ldr r2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r3, r9 @ pass Thread::Current
bl \entrypoint @ (field_idx, Object*, referrer, Thread*)
@@ -299,7 +275,7 @@ END \name
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3 @ save callee saves in case of GC
ldr r3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
@@ -334,6 +310,12 @@ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
+ * Called by managed code to create and deliver a StringIndexOutOfBoundsException
+ * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
+ */
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode
+
+ /*
* Called by managed code to create and deliver a StackOverflowError.
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
@@ -360,7 +342,7 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr
*/
.macro INVOKE_TRAMPOLINE_BODY cxx_name
.extern \cxx_name
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case allocation triggers GC
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2 @ save callee saves in case allocation triggers GC
mov r2, r9 @ pass Thread::Current
mov r3, sp
bl \cxx_name @ (method_idx, this, Thread*, SP)
@@ -566,7 +548,7 @@ ENTRY art_quick_lock_object
.Llock_strex_fail:
b .Lretry_lock @ retry
.Lslow_lock:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case we block
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case we block
mov r1, r9 @ pass Thread::Current
bl artLockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -575,7 +557,7 @@ ENTRY art_quick_lock_object
END art_quick_lock_object
ENTRY art_quick_lock_object_no_inline
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case we block
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case we block
mov r1, r9 @ pass Thread::Current
bl artLockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -632,7 +614,7 @@ ENTRY art_quick_unlock_object
b .Lretry_unlock @ retry
.Lslow_unlock:
@ save callee saves in case exception allocation triggers GC
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1
mov r1, r9 @ pass Thread::Current
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -642,7 +624,7 @@ END art_quick_unlock_object
ENTRY art_quick_unlock_object_no_inline
@ save callee saves in case exception allocation triggers GC
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1
mov r1, r9 @ pass Thread::Current
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -677,7 +659,7 @@ ENTRY art_quick_check_cast
.cfi_restore r0
.cfi_restore r1
.cfi_restore lr
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2, r3 // save all registers as basis for long jump context
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
b artThrowClassCastException @ (Class*, Class*, Thread*)
bkpt
@@ -813,7 +795,7 @@ ENTRY art_quick_aput_obj
.Lthrow_array_store_exception:
pop {r0-r2, lr}
/* No need to repeat restore cfi directives, the ones above apply here. */
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r3, ip
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r3
mov r1, r2
mov r2, r9 @ pass Thread::Current
b artThrowArrayStoreException @ (Class*, Class*, Thread*)
@@ -824,7 +806,7 @@ END art_quick_aput_obj
.macro ONE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case of GC
mov r1, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -836,7 +818,7 @@ END \name
.macro TWO_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
bl \entrypoint @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -848,7 +830,7 @@ END \name
.macro THREE_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3, r12 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r3 @ save callee saves in case of GC
mov r3, r9 @ pass Thread::Current
@ (uint32_t type_idx, Method* method, int32_t component_count, Thread*)
bl \entrypoint
@@ -861,7 +843,7 @@ END \name
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_RTEMP2 r12, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12 @ save callee saves in case of GC
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
.cfi_adjust_cfa_offset 16
bl \entrypoint
@@ -890,7 +872,7 @@ ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_O
*/
.extern artGet64StaticFromCode
ENTRY art_quick_get64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r2, r9 @ pass Thread::Current
bl artGet64StaticFromCode @ (uint32_t field_idx, const Method* referrer, Thread*)
@@ -916,7 +898,7 @@ TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETU
*/
.extern artGet64InstanceFromCode
ENTRY art_quick_get64_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
ldr r2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
mov r3, r9 @ pass Thread::Current
bl artGet64InstanceFromCode @ (field_idx, Object*, referrer, Thread*)
@@ -941,7 +923,7 @@ TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_I
*/
.extern artSet64StaticFromCode
ENTRY art_quick_set64_static
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r12 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
ldr r1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-16]! @ expand the frame and pass Thread::Current
@@ -966,7 +948,7 @@ THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RE
*/
.extern artSet64InstanceFromCode
ENTRY art_quick_set64_instance
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12, lr @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r12 @ save callee saves in case of GC
@ r2:r3 contain the wide argument
ldr r12, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE] @ pass referrer
str r9, [sp, #-12]! @ expand the frame and pass Thread::Current
@@ -1087,7 +1069,7 @@ ENTRY art_quick_alloc_object_rosalloc
bx lr
.Lart_quick_alloc_object_rosalloc_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ save callee saves in case of GC
mov r2, r9 @ pass Thread::Current
bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1172,7 +1154,7 @@ ENTRY art_quick_alloc_object_tlab
ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
ALLOC_OBJECT_TLAB_FAST_PATH .Lart_quick_alloc_object_tlab_slow_path
.Lart_quick_alloc_object_tlab_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 // Save callee saves in case of GC.
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 // Save callee saves in case of GC.
mov r2, r9 // Pass Thread::Current.
bl artAllocObjectFromCodeTLAB // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1207,7 +1189,7 @@ ENTRY art_quick_alloc_object_region_tlab
pop {r0, r1, r3, lr}
b .Lart_quick_alloc_object_region_tlab_class_load_read_barrier_slow_path_exit
.Lart_quick_alloc_object_region_tlab_slow_path:
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 // Save callee saves in case of GC.
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 // Save callee saves in case of GC.
mov r2, r9 // Pass Thread::Current.
bl artAllocObjectFromCodeRegionTLAB // (uint32_t type_idx, Method* method, Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
@@ -1227,7 +1209,7 @@ ENTRY art_quick_test_suspend
1:
#endif
mov r0, rSELF
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves for GC stack crawl
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves for GC stack crawl
@ TODO: save FPRs to enable access in the debugger?
bl artTestSuspendFromCode @ (Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
@@ -1235,7 +1217,7 @@ END art_quick_test_suspend
ENTRY art_quick_implicit_suspend
mov r0, rSELF
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves for stack crawl
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1 @ save callee saves for stack crawl
bl artTestSuspendFromCode @ (Thread*)
RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_implicit_suspend
@@ -1298,7 +1280,7 @@ END art_quick_imt_conflict_trampoline
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2
mov r2, r9 @ pass Thread::Current
mov r3, sp @ pass SP
blx artQuickResolutionTrampoline @ (Method* called, receiver, Thread*, SP)
@@ -1403,7 +1385,7 @@ END art_quick_generic_jni_trampoline
.extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r1, r2
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r1
mov r1, r9 @ pass Thread::Current
mov r2, sp @ pass SP
blx artQuickToInterpreterBridge @ (Method* method, Thread*, SP)
@@ -1426,7 +1408,7 @@ END art_quick_to_interpreter_bridge
.extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
@ Make stack crawlable and clobber r2 and r3 (post saving)
- SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2, r3
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME r2
@ preserve r0 (not normally an arg) knowing there is a spare slot in kRefsAndArgs.
str r0, [sp, #4]
mov r2, r9 @ pass Thread::Current
@@ -1441,7 +1423,7 @@ ENTRY art_quick_instrumentation_entry
.global art_quick_instrumentation_exit
art_quick_instrumentation_exit:
mov lr, #0 @ link register is to here, so clobber with 0 for later checks
- SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ set up frame knowing r2 and r3 must be dead on exit
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2 @ set up frame knowing r2 and r3 must be dead on exit
mov r12, sp @ remember bottom of caller's frame
push {r0-r1} @ save return value
.cfi_adjust_cfa_offset 8
@@ -1480,7 +1462,7 @@ END art_quick_instrumentation_entry
*/
.extern artDeoptimize
ENTRY art_quick_deoptimize
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0
mov r0, r9 @ Set up args.
blx artDeoptimize @ artDeoptimize(Thread*)
END art_quick_deoptimize
@@ -1491,7 +1473,7 @@ END art_quick_deoptimize
*/
.extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
- SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0, r1
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME r0
mov r0, r9 @ Set up args.
blx artDeoptimizeFromCompiledCode @ artDeoptimizeFromCompiledCode(Thread*)
END art_quick_deoptimize_from_compiled_code
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 1fba09bae3..a6490aed33 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -417,6 +417,12 @@ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
+ * Called by managed code to create and deliver a StringIndexOutOfBoundsException
+ * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
+ */
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode
+
+ /*
* Called by managed code to create and deliver a StackOverflowError.
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
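
On every architecture the new art_quick_throw_string_bounds stub has the same contract: compiled code performs a charAt-style bounds check and, on failure, jumps here with the index in arg1 and the string length (limit) in arg2. A hedged C++ sketch of that contract, with a stand-in exception type (the real helper, artThrowStringBoundsFromCode, raises java.lang.StringIndexOutOfBoundsException):

#include <cstdint>
#include <stdexcept>
#include <string>

[[noreturn]] void ThrowStringBounds(int32_t index, int32_t length) {
  // The runtime helper builds the "length=...; index=..." message; see the
  // common_throws.cc hunk below.
  throw std::out_of_range("length=" + std::to_string(length) +
                          "; index=" + std::to_string(index));
}

char16_t CharAt(const std::u16string& s, int32_t index) {
  // One unsigned compare folds index < 0 and index >= length together.
  if (static_cast<uint32_t>(index) >= s.size()) {
    ThrowStringBounds(index, static_cast<int32_t>(s.size()));
  }
  return s[index];
}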
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 45e33a8500..833ba1b612 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -268,6 +268,8 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
static_assert(!IsDirectEntrypoint(kQuickThrowNullPointer), "Non-direct C stub marked direct.");
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
static_assert(!IsDirectEntrypoint(kQuickThrowStackOverflow), "Non-direct C stub marked direct.");
+ qpoints->pThrowStringBounds = art_quick_throw_string_bounds;
+ static_assert(!IsDirectEntrypoint(kQuickThrowStringBounds), "Non-direct C stub marked direct.");
// Deoptimization from compiled code.
qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 3ee26afc4f..bb89674caf 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -529,6 +529,18 @@ ENTRY art_quick_throw_array_bounds
END art_quick_throw_array_bounds
/*
+ * Called by managed code to create and deliver a StringIndexOutOfBoundsException
+ * as if thrown from a call to String.charAt().
+ */
+ .extern artThrowStringBoundsFromCode
+ENTRY art_quick_throw_string_bounds
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ la $t9, artThrowStringBoundsFromCode
+ jalr $zero, $t9 # artThrowStringBoundsFromCode(index, limit, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_throw_string_bounds
+
+ /*
* Called by managed code to create and deliver a StackOverflowError.
*/
.extern artThrowStackOverflowFromCode
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 8f1a35a693..78ac748e32 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -617,6 +617,19 @@ ENTRY art_quick_throw_array_bounds
END art_quick_throw_array_bounds
/*
+ * Called by managed code to create and deliver a StringIndexOutOfBoundsException
+ * as if thrown from a call to String.charAt().
+ */
+ .extern artThrowStringBoundsFromCode
+ENTRY art_quick_throw_string_bounds
+.Lart_quick_throw_string_bounds_gp_set:
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowStringBoundsFromCode
+ jalr $zero, $t9 # artThrowStringBoundsFromCode(index, limit, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_throw_string_bounds
+
+ /*
* Called by managed code to create and deliver a StackOverflowError.
*/
.extern artThrowStackOverflowFromCode
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 2d7f664809..b3dd4545f4 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -82,7 +82,7 @@ MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_GOT_REG, got_reg, temp_reg)
PUSH edi // Save callee saves (ebx is saved/restored by the upcall)
PUSH esi
PUSH ebp
- pushl REG_VAR(got_reg) // Save got_reg
+ PUSH RAW_VAR(got_reg) // Save got_reg
subl MACRO_LITERAL(8), %esp // Grow stack by 2 words.
CFI_ADJUST_CFA_OFFSET(8)
@@ -97,6 +97,7 @@ MACRO2(SETUP_REFS_ONLY_CALLEE_SAVE_FRAME_PRESERVE_GOT_REG, got_reg, temp_reg)
movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
// Restore got_reg.
movl 12(%esp), REG_VAR(got_reg)
+ CFI_RESTORE(RAW_VAR(got_reg))
// Ugly compile-time check, but we only have the preprocessor.
// Last +4: implicit return address pushed on stack when caller made call.
@@ -310,6 +311,12 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
+ * Called by managed code to create and deliver a StringIndexOutOfBoundsException
+ * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
+ */
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode
+
+ /*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
* the method_idx. This wrapper will save arg1-arg3 and call the appropriate C helper.
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 8064ed696f..205307ce67 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -336,6 +336,12 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
+ * Called by managed code to create and deliver a StringIndexOutOfBoundsException
+ * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
+ */
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_string_bounds, artThrowStringBoundsFromCode
+
+ /*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/rdi with the target Method*, arg0/rdi will contain
* the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 98d33453e2..a102858acc 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -33,9 +33,10 @@
namespace art {
+template<ReadBarrierOption kReadBarrierOption>
inline mirror::Class* ArtField::GetDeclaringClass() {
GcRootSource gc_root_source(this);
- mirror::Class* result = declaring_class_.Read(&gc_root_source);
+ mirror::Class* result = declaring_class_.Read<kReadBarrierOption>(&gc_root_source);
DCHECK(result != nullptr);
DCHECK(result->IsLoaded() || result->IsErroneous()) << result->GetStatus();
return result;
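
This is the recurring pattern of the whole change: accessors grow a ReadBarrierOption template parameter, defaulted to kWithReadBarrier, so the concurrent copying collector can read GC roots without re-entering its own barrier. A reduced sketch with stand-in types:

enum ReadBarrierOption { kWithReadBarrier, kWithoutReadBarrier };

template <typename T>
struct GcRoot {
  template <ReadBarrierOption kOption = kWithReadBarrier>
  T* Read() const {
    // The real Read consults the collector when kOption == kWithReadBarrier;
    // GC-internal callers pass kWithoutReadBarrier to avoid recursion.
    return root_;
  }
  T* root_;
};

struct Class {};

struct ArtFieldSketch {
  template <ReadBarrierOption kOption = kWithReadBarrier>
  Class* GetDeclaringClass() {
    return declaring_class_.Read<kOption>();
  }
  GcRoot<Class> declaring_class_;
};

// Usage: mutator code keeps the default, the collector opts out.
//   field->GetDeclaringClass();                       // with read barrier
//   field->GetDeclaringClass<kWithoutReadBarrier>();  // raw read for GC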
diff --git a/runtime/art_field.h b/runtime/art_field.h
index b64b70fa8d..aaccbf3699 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -41,6 +41,7 @@ class ArtField FINAL {
public:
ArtField();
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
void SetDeclaringClass(mirror::Class *new_declaring_class)
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 7647ad6e57..26450c41c7 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -395,8 +395,9 @@ inline mirror::DexCache* ArtMethod::GetDexCache() {
return GetDeclaringClass()->GetDexCache();
}
+template<ReadBarrierOption kReadBarrierOption>
inline bool ArtMethod::IsProxyMethod() {
- return GetDeclaringClass()->IsProxyClass();
+ return GetDeclaringClass<kReadBarrierOption>()->IsProxyClass();
}
inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(size_t pointer_size) {
@@ -438,24 +439,24 @@ inline mirror::Class* ArtMethod::GetReturnType(bool resolve, size_t ptr_size) {
return type;
}
-template<typename RootVisitorType>
+template<ReadBarrierOption kReadBarrierOption, typename RootVisitorType>
void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
- ArtMethod* interface_method = nullptr;
- mirror::Class* klass = declaring_class_.Read();
- if (LIKELY(klass != nullptr)) {
+ if (LIKELY(!declaring_class_.IsNull())) {
+ visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
+ mirror::Class* klass = declaring_class_.Read<kReadBarrierOption>();
if (UNLIKELY(klass->IsProxyClass())) {
// For normal methods, dex cache shortcuts will be visited through the declaring class.
// However, for proxies we need to keep the interface method alive, so we visit its roots.
- interface_method = mirror::DexCache::GetElementPtrSize(
+ ArtMethod* interface_method = mirror::DexCache::GetElementPtrSize(
GetDexCacheResolvedMethods(pointer_size),
GetDexMethodIndex(),
pointer_size);
DCHECK(interface_method != nullptr);
DCHECK_EQ(interface_method,
- Runtime::Current()->GetClassLinker()->FindMethodForProxy(klass, this));
+ Runtime::Current()->GetClassLinker()->FindMethodForProxy<kReadBarrierOption>(
+ klass, this));
interface_method->VisitRoots(visitor, pointer_size);
}
- visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
// We know we don't have profiling information if the class hasn't been verified. Note
// that this check also ensures the IsNative call can be made, as IsNative expects a fully
// created class (and not a retired one).
@@ -463,7 +464,7 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
// Runtime methods and native methods use the same field as the profiling info for
// storing their own data (jni entrypoint for native methods, and ImtConflictTable for
// some runtime methods).
- if (!IsNative() && !IsRuntimeMethod()) {
+ if (!IsNative<kReadBarrierOption>() && !IsRuntimeMethod()) {
ProfilingInfo* profiling_info = GetProfilingInfo(pointer_size);
if (profiling_info != nullptr) {
profiling_info->VisitRoots(visitor);
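
Note the reordering in VisitRoots above: the slot address is handed to the visitor before the class is read, so a moving collector can update the root in place and the following Read<kReadBarrierOption>() observes the forwarded value. A toy illustration of why that order matters (types are stand-ins, not the ART declarations):

struct Obj { Obj* forwarded = nullptr; };

struct MovingVisitor {
  void VisitRoot(Obj** slot) {
    if ((*slot)->forwarded != nullptr) {
      *slot = (*slot)->forwarded;  // collector fixes the slot in place
    }
  }
};

void VisitDeclaringClass(Obj** declaring_class_slot, MovingVisitor& visitor) {
  if (*declaring_class_slot != nullptr) {
    visitor.VisitRoot(declaring_class_slot);  // 1) report the address first
    Obj* klass = *declaring_class_slot;       // 2) then read the updated value
    (void)klass;  // ... proxy handling, profiling info, etc.
  }
}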
diff --git a/runtime/art_method.h b/runtime/art_method.h
index b65cb23516..90b2406a1d 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -356,6 +356,7 @@ class ArtMethod FINAL {
return (GetAccessFlags() & kAccSynthetic) != 0;
}
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
bool SkipAccessChecks() {
@@ -580,7 +581,7 @@ class ArtMethod FINAL {
SHARED_REQUIRES(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
- template<typename RootVisitorType>
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename RootVisitorType>
void VisitRoots(RootVisitorType& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS;
const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 8eb3742b61..2d702f6af7 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -127,32 +127,32 @@ ADD_TEST_EQ(THREAD_TOP_QUICK_FRAME_OFFSET,
ADD_TEST_EQ(THREAD_SELF_OFFSET,
art::Thread::SelfOffset<__SIZEOF_POINTER__>().Int32Value())
-// Offset of field Thread::tlsPtr_.thread_local_objects.
-#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_CARD_TABLE_OFFSET + 168 * __SIZEOF_POINTER__)
-ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
- art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_pos.
-#define THREAD_LOCAL_POS_OFFSET (THREAD_LOCAL_OBJECTS_OFFSET + 2 * __SIZEOF_POINTER__)
+#define THREAD_LOCAL_POS_OFFSET (THREAD_CARD_TABLE_OFFSET + 168 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
art::Thread::ThreadLocalPosOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_end.
#define THREAD_LOCAL_END_OFFSET (THREAD_LOCAL_POS_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
art::Thread::ThreadLocalEndOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.thread_local_objects.
+#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
+ art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
-#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
+#define THREAD_CURRENT_IBASE_OFFSET (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
art::Thread::MterpCurrentIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_default_ibase.
-#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+#define THREAD_DEFAULT_IBASE_OFFSET (THREAD_CURRENT_IBASE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_DEFAULT_IBASE_OFFSET,
art::Thread::MterpDefaultIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_alt_ibase.
-#define THREAD_ALT_IBASE_OFFSET (THREAD_LOCAL_POS_OFFSET + 4 * __SIZEOF_POINTER__)
+#define THREAD_ALT_IBASE_OFFSET (THREAD_DEFAULT_IBASE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_ALT_IBASE_OFFSET,
art::Thread::MterpAltIBaseOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.rosalloc_runs.
-#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 5 * __SIZEOF_POINTER__)
+#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_ALT_IBASE_OFFSET + __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
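
The reshuffle above keeps the allocation fast-path fields (thread_local_pos/thread_local_end) adjacent and re-derives each later offset from its predecessor instead of from a fixed multiple past THREAD_LOCAL_POS_OFFSET, so a single insertion no longer ripples through hard-coded multipliers. The ADD_TEST_EQ lines pin each macro to the real C++ layout; here is a self-contained miniature of the same pattern, with a toy struct standing in for Thread::tlsPtr_:

#include <cstddef>

struct TlsPtr {
  void* thread_local_pos;
  void* thread_local_end;
  size_t thread_local_objects;  // now grouped after pos/end, per the diff
  void* mterp_current_ibase;
};

#define TLS_POS_OFFSET 0
#define TLS_END_OFFSET (TLS_POS_OFFSET + sizeof(void*))
#define TLS_OBJECTS_OFFSET (TLS_END_OFFSET + sizeof(void*))
#define TLS_CURRENT_IBASE_OFFSET (TLS_OBJECTS_OFFSET + sizeof(void*))

static_assert(offsetof(TlsPtr, thread_local_pos) == TLS_POS_OFFSET, "");
static_assert(offsetof(TlsPtr, thread_local_end) == TLS_END_OFFSET, "");
static_assert(offsetof(TlsPtr, thread_local_objects) == TLS_OBJECTS_OFFSET, "");
static_assert(offsetof(TlsPtr, mterp_current_ibase) == TLS_CURRENT_IBASE_OFFSET, "");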
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index f3e260be56..f2575f702f 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -225,6 +225,34 @@ inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
return klass;
}
+template<ReadBarrierOption kReadBarrierOption>
+ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) {
+ DCHECK(proxy_class->IsProxyClass());
+ DCHECK(proxy_method->IsProxyMethod<kReadBarrierOption>());
+ {
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, dex_lock_);
+ // Locate the dex cache of the original interface/Object
+ for (const DexCacheData& data : dex_caches_) {
+ if (!self->IsJWeakCleared(data.weak_root) &&
+ proxy_method->HasSameDexCacheResolvedTypes(data.resolved_types,
+ image_pointer_size_)) {
+ mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
+ self->DecodeJObject(data.weak_root));
+ if (dex_cache != nullptr) {
+ ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
+ proxy_method->GetDexMethodIndex(), image_pointer_size_);
+ CHECK(resolved_method != nullptr);
+ return resolved_method;
+ }
+ }
+ }
+ }
+ LOG(FATAL) << "Didn't find dex cache for " << PrettyClass(proxy_class) << " "
+ << PrettyMethod(proxy_method);
+ UNREACHABLE();
+}
+
} // namespace art
#endif // ART_RUNTIME_CLASS_LINKER_INL_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4406c0aee4..cb34d8a121 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -4277,33 +4277,6 @@ std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) {
return DotToDescriptor(name->ToModifiedUtf8().c_str());
}
-ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) {
- DCHECK(proxy_class->IsProxyClass());
- DCHECK(proxy_method->IsProxyMethod());
- {
- Thread* const self = Thread::Current();
- ReaderMutexLock mu(self, dex_lock_);
- // Locate the dex cache of the original interface/Object
- for (const DexCacheData& data : dex_caches_) {
- if (!self->IsJWeakCleared(data.weak_root) &&
- proxy_method->HasSameDexCacheResolvedTypes(data.resolved_types,
- image_pointer_size_)) {
- mirror::DexCache* dex_cache = down_cast<mirror::DexCache*>(
- self->DecodeJObject(data.weak_root));
- if (dex_cache != nullptr) {
- ArtMethod* resolved_method = dex_cache->GetResolvedMethod(
- proxy_method->GetDexMethodIndex(), image_pointer_size_);
- CHECK(resolved_method != nullptr);
- return resolved_method;
- }
- }
- }
- }
- LOG(FATAL) << "Didn't find dex cache for " << PrettyClass(proxy_class) << " "
- << PrettyMethod(proxy_method);
- UNREACHABLE();
-}
-
void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) {
// Create constructor for Proxy that must initialize the method.
CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 18u);
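
The body is deleted here rather than edited because templating FindMethodForProxy on ReadBarrierOption forces its definition out of the .cc file: implicit instantiation needs the body visible in every translation unit that uses it, hence the move to class_linker-inl.h above. A minimal sketch of that constraint, with hypothetical names:

// Header analogue: a declaration alone no longer suffices for a template.
template <int kOption>
int Lookup();

// -inl.h analogue: the definition every caller must be able to see.
template <int kOption>
int Lookup() { return kOption; }

int Caller() {
  return Lookup<1>();  // instantiated here, so the body must be visible
}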
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 4832d32184..d6822c5225 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -483,6 +483,7 @@ class ClassLinker {
SHARED_REQUIRES(Locks::mutator_lock_);
std::string GetDescriptorForProxy(mirror::Class* proxy_class)
SHARED_REQUIRES(Locks::mutator_lock_);
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method)
REQUIRES(!dex_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index f58af5a8da..3509d9aef9 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -284,7 +284,8 @@ std::unique_ptr<const DexFile> CommonRuntimeTestImpl::LoadExpectSingleDexFile(
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
MemMap::Init();
- if (!DexFile::Open(location, location, &error_msg, &dex_files)) {
+ static constexpr bool kVerifyChecksum = true;
+ if (!DexFile::Open(location, location, kVerifyChecksum, &error_msg, &dex_files)) {
LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
UNREACHABLE();
} else {
@@ -418,26 +419,6 @@ void CommonRuntimeTestImpl::TearDown() {
(*icu_cleanup_fn)();
Runtime::Current()->GetHeap()->VerifyHeap(); // Check for heap corruption after the test
-
- // Manually closing the JNI libraries.
- // Runtime does not support repeatedly doing JNI->CreateVM, thus we need to manually clean up the
- // dynamic linking loader so that gtests would not fail.
- // Bug: 25785594
- if (runtime_->IsStarted()) {
- {
- // We retrieve the handle by calling dlopen on the library. To close it, we need to call
- // dlclose twice, the first time to undo our dlopen and the second time to actually unload it.
- // See man dlopen.
- void* handle = dlopen("libjavacore.so", RTLD_LAZY);
- dlclose(handle);
- CHECK_EQ(0, dlclose(handle));
- }
- {
- void* handle = dlopen("libopenjdkd.so", RTLD_LAZY);
- dlclose(handle);
- CHECK_EQ(0, dlclose(handle));
- }
- }
}
static std::string GetDexFileName(const std::string& jar_prefix, bool host) {
@@ -500,9 +481,11 @@ std::string CommonRuntimeTestImpl::GetTestDexFileName(const char* name) {
std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTestImpl::OpenTestDexFiles(
const char* name) {
std::string filename = GetTestDexFileName(name);
+ static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- bool success = DexFile::Open(filename.c_str(), filename.c_str(), &error_msg, &dex_files);
+ bool success = DexFile::Open(
+ filename.c_str(), filename.c_str(), kVerifyChecksum, &error_msg, &dex_files);
CHECK(success) << "Failed to open '" << filename << "': " << error_msg;
for (auto& dex_file : dex_files) {
CHECK_EQ(PROT_READ, dex_file->GetPermissions());
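
After this change every DexFile::Open caller states its checksum policy explicitly, as the test code above does. A hedged, self-contained sketch of the call-site pattern, using a stand-in DexFile so it compiles on its own (the real class lives in runtime/dex_file.h):

#include <memory>
#include <string>
#include <vector>

struct DexFile {
  static bool Open(const char* filename, const char* location,
                   bool verify_checksum, std::string* error_msg,
                   std::vector<std::unique_ptr<const DexFile>>* dex_files) {
    // Stand-in body; the real implementation probes the file magic and
    // dispatches to OpenZip or OpenFile, as shown in dex_file.cc below.
    (void)filename; (void)location; (void)verify_checksum;
    (void)error_msg; (void)dex_files;
    return false;
  }
};

int main() {
  static constexpr bool kVerifyChecksum = true;  // callers opt in explicitly
  std::string error_msg;
  std::vector<std::unique_ptr<const DexFile>> dex_files;
  DexFile::Open("classes.dex", "classes.dex", kVerifyChecksum,
                &error_msg, &dex_files);
  return 0;
}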
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 75cce424e9..4f705f2056 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -654,6 +654,13 @@ void ThrowStackOverflowError(Thread* self) {
}
}
+// StringIndexOutOfBoundsException
+
+void ThrowStringIndexOutOfBoundsException(int index, int length) {
+ ThrowException("Ljava/lang/StringIndexOutOfBoundsException;", nullptr,
+ StringPrintf("length=%d; index=%d", length, index).c_str());
+}
+
// VerifyError
void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...) {
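
The new helper reports length before index in its message even though the parameters arrive the other way around. A quick self-contained check of the produced text, with snprintf standing in for StringPrintf:

#include <cstdio>

int main() {
  int index = 5, length = 3;
  char msg[64];
  std::snprintf(msg, sizeof(msg), "length=%d; index=%d", length, index);
  std::printf("java.lang.StringIndexOutOfBoundsException: %s\n", msg);
  return 0;
}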
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index c3a1f09db3..7a335859e5 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -211,6 +211,11 @@ void ThrowRuntimeException(const char* fmt, ...)
void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+// StringIndexOutOfBoundsException
+
+void ThrowStringIndexOutOfBoundsException(int index, int length)
+ SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+
// VerifyError
void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...)
diff --git a/runtime/dex_file-inl.h b/runtime/dex_file-inl.h
index 4e6c3ca279..108a5af908 100644
--- a/runtime/dex_file-inl.h
+++ b/runtime/dex_file-inl.h
@@ -38,10 +38,88 @@ inline const char* DexFile::GetStringDataAndUtf16Length(const StringId& string_i
return reinterpret_cast<const char*>(ptr);
}
+inline const char* DexFile::GetStringData(const StringId& string_id) const {
+ uint32_t ignored;
+ return GetStringDataAndUtf16Length(string_id, &ignored);
+}
+
+inline const char* DexFile::StringDataAndUtf16LengthByIdx(uint32_t idx,
+ uint32_t* utf16_length) const {
+ if (idx == kDexNoIndex) {
+ *utf16_length = 0;
+ return nullptr;
+ }
+ const StringId& string_id = GetStringId(idx);
+ return GetStringDataAndUtf16Length(string_id, utf16_length);
+}
+
+inline const char* DexFile::StringDataByIdx(uint32_t idx) const {
+ uint32_t unicode_length;
+ return StringDataAndUtf16LengthByIdx(idx, &unicode_length);
+}
+
+inline const char* DexFile::StringByTypeIdx(uint32_t idx, uint32_t* unicode_length) const {
+ const TypeId& type_id = GetTypeId(idx);
+ return StringDataAndUtf16LengthByIdx(type_id.descriptor_idx_, unicode_length);
+}
+
+inline const char* DexFile::StringByTypeIdx(uint32_t idx) const {
+ const TypeId& type_id = GetTypeId(idx);
+ return StringDataByIdx(type_id.descriptor_idx_);
+}
+
+inline const char* DexFile::GetTypeDescriptor(const TypeId& type_id) const {
+ return StringDataByIdx(type_id.descriptor_idx_);
+}
+
+inline const char* DexFile::GetFieldTypeDescriptor(const FieldId& field_id) const {
+ const DexFile::TypeId& type_id = GetTypeId(field_id.type_idx_);
+ return GetTypeDescriptor(type_id);
+}
+
+inline const char* DexFile::GetFieldName(const FieldId& field_id) const {
+ return StringDataByIdx(field_id.name_idx_);
+}
+
+inline const char* DexFile::GetMethodDeclaringClassDescriptor(const MethodId& method_id) const {
+ const DexFile::TypeId& type_id = GetTypeId(method_id.class_idx_);
+ return GetTypeDescriptor(type_id);
+}
+
inline const Signature DexFile::GetMethodSignature(const MethodId& method_id) const {
return Signature(this, GetProtoId(method_id.proto_idx_));
}
+inline const char* DexFile::GetMethodName(const MethodId& method_id) const {
+ return StringDataByIdx(method_id.name_idx_);
+}
+
+inline const char* DexFile::GetMethodShorty(uint32_t idx) const {
+ return StringDataByIdx(GetProtoId(GetMethodId(idx).proto_idx_).shorty_idx_);
+}
+
+inline const char* DexFile::GetMethodShorty(const MethodId& method_id) const {
+ return StringDataByIdx(GetProtoId(method_id.proto_idx_).shorty_idx_);
+}
+
+inline const char* DexFile::GetMethodShorty(const MethodId& method_id, uint32_t* length) const {
+ // Using the UTF16 length is safe here as shorties are guaranteed to be ASCII characters.
+ return StringDataAndUtf16LengthByIdx(GetProtoId(method_id.proto_idx_).shorty_idx_, length);
+}
+
+inline const char* DexFile::GetClassDescriptor(const ClassDef& class_def) const {
+ return StringByTypeIdx(class_def.class_idx_);
+}
+
+inline const char* DexFile::GetReturnTypeDescriptor(const ProtoId& proto_id) const {
+ return StringByTypeIdx(proto_id.return_type_idx_);
+}
+
+inline const char* DexFile::GetShorty(uint32_t proto_idx) const {
+ const ProtoId& proto_id = GetProtoId(proto_idx);
+ return StringDataByIdx(proto_id.shorty_idx_);
+}
+
inline const DexFile::TryItem* DexFile::GetTryItems(const CodeItem& code_item, uint32_t offset) {
const uint16_t* insns_end_ = &code_item.insns_[code_item.insns_size_in_code_units_];
return reinterpret_cast<const TryItem*>
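
Moving these bodies out of dex_file.h makes the layering easy to read in one place: every typed accessor bottoms out in StringDataByIdx, which resolves a string_ids index to UTF-8 data. A toy model of that chain (real dex tables store file offsets, not std::string):

#include <cstdint>
#include <string>
#include <vector>

struct StringId { std::string data; };   // real dex: offset to string data
struct MethodId { uint32_t name_idx_; };

struct ToyDexFile {
  std::vector<StringId> string_ids_;
  std::vector<MethodId> method_ids_;

  const char* StringDataByIdx(uint32_t idx) const {
    return string_ids_[idx].data.c_str();
  }
  const char* GetMethodName(const MethodId& method_id) const {
    return StringDataByIdx(method_id.name_idx_);  // same shape as the diff
  }
};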
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 05c95e069e..5a203afd1a 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -109,7 +109,7 @@ bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string*
}
if (IsDexMagic(magic)) {
std::unique_ptr<const DexFile> dex_file(
- DexFile::OpenFile(fd.release(), filename, false, error_msg));
+ DexFile::OpenFile(fd.release(), filename, false, false, error_msg));
if (dex_file.get() == nullptr) {
return false;
}
@@ -120,7 +120,10 @@ bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string*
return false;
}
-bool DexFile::Open(const char* filename, const char* location, std::string* error_msg,
+bool DexFile::Open(const char* filename,
+ const char* location,
+ bool verify_checksum,
+ std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
ScopedTrace trace(std::string("Open dex file ") + location);
DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
@@ -131,10 +134,13 @@ bool DexFile::Open(const char* filename, const char* location, std::string* erro
return false;
}
if (IsZipMagic(magic)) {
- return DexFile::OpenZip(fd.release(), location, error_msg, dex_files);
+ return DexFile::OpenZip(fd.release(), location, verify_checksum, error_msg, dex_files);
}
if (IsDexMagic(magic)) {
- std::unique_ptr<const DexFile> dex_file(DexFile::OpenFile(fd.release(), location, true,
+ std::unique_ptr<const DexFile> dex_file(DexFile::OpenFile(fd.release(),
+ location,
+ /* verify */ true,
+ verify_checksum,
error_msg));
if (dex_file.get() != nullptr) {
dex_files->push_back(std::move(dex_file));
@@ -207,6 +213,7 @@ std::unique_ptr<const DexFile> DexFile::Open(const uint8_t* base, size_t size,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
bool verify,
+ bool verify_checksum,
std::string* error_msg) {
ScopedTrace trace(std::string("Open dex file from RAM ") + location);
std::unique_ptr<const DexFile> dex_file = OpenMemory(base,
@@ -220,6 +227,7 @@ std::unique_ptr<const DexFile> DexFile::Open(const uint8_t* base, size_t size,
dex_file->Begin(),
dex_file->Size(),
location.c_str(),
+ verify_checksum,
error_msg)) {
return nullptr;
}
@@ -227,7 +235,10 @@ std::unique_ptr<const DexFile> DexFile::Open(const uint8_t* base, size_t size,
return dex_file;
}
-std::unique_ptr<const DexFile> DexFile::OpenFile(int fd, const char* location, bool verify,
+std::unique_ptr<const DexFile> DexFile::OpenFile(int fd,
+ const char* location,
+ bool verify,
+ bool verify_checksum,
std::string* error_msg) {
ScopedTrace trace(std::string("Open dex file ") + location);
CHECK(location != nullptr);
@@ -276,7 +287,9 @@ std::unique_ptr<const DexFile> DexFile::OpenFile(int fd, const char* location, b
}
if (verify && !DexFileVerifier::Verify(dex_file.get(), dex_file->Begin(), dex_file->Size(),
- location, error_msg)) {
+ location,
+ verify_checksum,
+ error_msg)) {
return nullptr;
}
@@ -285,7 +298,10 @@ std::unique_ptr<const DexFile> DexFile::OpenFile(int fd, const char* location, b
const char* DexFile::kClassesDex = "classes.dex";
-bool DexFile::OpenZip(int fd, const std::string& location, std::string* error_msg,
+bool DexFile::OpenZip(int fd,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
ScopedTrace trace("Dex file open Zip " + std::string(location));
DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
@@ -294,7 +310,7 @@ bool DexFile::OpenZip(int fd, const std::string& location, std::string* error_ms
DCHECK(!error_msg->empty());
return false;
}
- return DexFile::OpenFromZip(*zip_archive, location, error_msg, dex_files);
+ return DexFile::OpenFromZip(*zip_archive, location, verify_checksum, error_msg, dex_files);
}
std::unique_ptr<const DexFile> DexFile::OpenMemory(const std::string& location,
@@ -310,8 +326,11 @@ std::unique_ptr<const DexFile> DexFile::OpenMemory(const std::string& location,
error_msg);
}
-std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive, const char* entry_name,
- const std::string& location, std::string* error_msg,
+std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
ZipOpenErrorCode* error_code) {
ScopedTrace trace("Dex file open from Zip Archive " + std::string(location));
CHECK(!location.empty());
@@ -342,7 +361,9 @@ std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive, cons
}
CHECK(dex_file->IsReadOnly()) << location;
if (!DexFileVerifier::Verify(dex_file.get(), dex_file->Begin(), dex_file->Size(),
- location.c_str(), error_msg)) {
+ location.c_str(),
+ verify_checksum,
+ error_msg)) {
*error_code = ZipOpenErrorCode::kVerifyError;
return nullptr;
}
@@ -356,14 +377,16 @@ std::unique_ptr<const DexFile> DexFile::Open(const ZipArchive& zip_archive, cons
// seems an excessive number.
static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
-bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& location,
+bool DexFile::OpenFromZip(const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
ScopedTrace trace("Dex file open from Zip " + std::string(location));
DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
ZipOpenErrorCode error_code;
- std::unique_ptr<const DexFile> dex_file(Open(zip_archive, kClassesDex, location, error_msg,
- &error_code));
+ std::unique_ptr<const DexFile> dex_file(
+ Open(zip_archive, kClassesDex, location, verify_checksum, error_msg, &error_code));
if (dex_file.get() == nullptr) {
return false;
} else {
@@ -378,8 +401,8 @@ bool DexFile::OpenFromZip(const ZipArchive& zip_archive, const std::string& loca
for (size_t i = 1; ; ++i) {
std::string name = GetMultiDexClassesDexName(i);
std::string fake_location = GetMultiDexLocation(i, location.c_str());
- std::unique_ptr<const DexFile> next_dex_file(Open(zip_archive, name.c_str(), fake_location,
- error_msg, &error_code));
+ std::unique_ptr<const DexFile> next_dex_file(
+ Open(zip_archive, name.c_str(), fake_location, verify_checksum, error_msg, &error_code));
if (next_dex_file.get() == nullptr) {
if (error_code != ZipOpenErrorCode::kEntryNotFound) {
LOG(WARNING) << error_msg;
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 638821bfb7..3dffe4b6f1 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -416,7 +416,10 @@ class DexFile {
static bool GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg);
// Opens .dex files found in the container, guessing the container format based on file extension.
- static bool Open(const char* filename, const char* location, std::string* error_msg,
+ static bool Open(const char* filename,
+ const char* location,
+ bool verify_checksum,
+ std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
// Checks whether the given file has the dex magic, or is a zip file with a classes.dex entry.
@@ -429,10 +432,13 @@ class DexFile {
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
bool verify,
+ bool verify_checksum,
std::string* error_msg);
// Open all classesXXX.dex files from a zip archive.
- static bool OpenFromZip(const ZipArchive& zip_archive, const std::string& location,
+ static bool OpenFromZip(const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify_checksum,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
@@ -522,25 +528,12 @@ class DexFile {
// as the string length of the string data.
const char* GetStringDataAndUtf16Length(const StringId& string_id, uint32_t* utf16_length) const;
- const char* GetStringData(const StringId& string_id) const {
- uint32_t ignored;
- return GetStringDataAndUtf16Length(string_id, &ignored);
- }
+ const char* GetStringData(const StringId& string_id) const;
// Index version of GetStringDataAndUtf16Length.
- const char* StringDataAndUtf16LengthByIdx(uint32_t idx, uint32_t* utf16_length) const {
- if (idx == kDexNoIndex) {
- *utf16_length = 0;
- return nullptr;
- }
- const StringId& string_id = GetStringId(idx);
- return GetStringDataAndUtf16Length(string_id, utf16_length);
- }
+ const char* StringDataAndUtf16LengthByIdx(uint32_t idx, uint32_t* utf16_length) const;
- const char* StringDataByIdx(uint32_t idx) const {
- uint32_t unicode_length;
- return StringDataAndUtf16LengthByIdx(idx, &unicode_length);
- }
+ const char* StringDataByIdx(uint32_t idx) const;
// Looks up a string id for a given modified utf8 string.
const StringId* FindStringId(const char* string) const;
@@ -571,20 +564,12 @@ class DexFile {
}
// Get the descriptor string associated with a given type index.
- const char* StringByTypeIdx(uint32_t idx, uint32_t* unicode_length) const {
- const TypeId& type_id = GetTypeId(idx);
- return StringDataAndUtf16LengthByIdx(type_id.descriptor_idx_, unicode_length);
- }
+ const char* StringByTypeIdx(uint32_t idx, uint32_t* unicode_length) const;
- const char* StringByTypeIdx(uint32_t idx) const {
- const TypeId& type_id = GetTypeId(idx);
- return StringDataByIdx(type_id.descriptor_idx_);
- }
+ const char* StringByTypeIdx(uint32_t idx) const;
// Returns the type descriptor string of a type id.
- const char* GetTypeDescriptor(const TypeId& type_id) const {
- return StringDataByIdx(type_id.descriptor_idx_);
- }
+ const char* GetTypeDescriptor(const TypeId& type_id) const;
// Looks up a type for the given string index
const TypeId* FindTypeId(uint32_t string_idx) const;
@@ -619,15 +604,10 @@ class DexFile {
}
// Returns the class descriptor string of a field id.
- const char* GetFieldTypeDescriptor(const FieldId& field_id) const {
- const DexFile::TypeId& type_id = GetTypeId(field_id.type_idx_);
- return GetTypeDescriptor(type_id);
- }
+ const char* GetFieldTypeDescriptor(const FieldId& field_id) const;
// Returns the name of a field id.
- const char* GetFieldName(const FieldId& field_id) const {
- return StringDataByIdx(field_id.name_idx_);
- }
+ const char* GetFieldName(const FieldId& field_id) const;
// Returns the number of method identifiers in the .dex file.
size_t NumMethodIds() const {
@@ -653,10 +633,7 @@ class DexFile {
const DexFile::ProtoId& signature) const;
// Returns the declaring class descriptor string of a method id.
- const char* GetMethodDeclaringClassDescriptor(const MethodId& method_id) const {
- const DexFile::TypeId& type_id = GetTypeId(method_id.class_idx_);
- return GetTypeDescriptor(type_id);
- }
+ const char* GetMethodDeclaringClassDescriptor(const MethodId& method_id) const;
// Returns the prototype of a method id.
const ProtoId& GetMethodPrototype(const MethodId& method_id) const {
@@ -667,23 +644,15 @@ class DexFile {
const Signature GetMethodSignature(const MethodId& method_id) const;
// Returns the name of a method id.
- const char* GetMethodName(const MethodId& method_id) const {
- return StringDataByIdx(method_id.name_idx_);
- }
+ const char* GetMethodName(const MethodId& method_id) const;
// Returns the shorty of a method by its index.
- const char* GetMethodShorty(uint32_t idx) const {
- return StringDataByIdx(GetProtoId(GetMethodId(idx).proto_idx_).shorty_idx_);
- }
+ const char* GetMethodShorty(uint32_t idx) const;
// Returns the shorty of a method id.
- const char* GetMethodShorty(const MethodId& method_id) const {
- return StringDataByIdx(GetProtoId(method_id.proto_idx_).shorty_idx_);
- }
- const char* GetMethodShorty(const MethodId& method_id, uint32_t* length) const {
- // Using the UTF16 length is safe here as shorties are guaranteed to be ASCII characters.
- return StringDataAndUtf16LengthByIdx(GetProtoId(method_id.proto_idx_).shorty_idx_, length);
- }
+ const char* GetMethodShorty(const MethodId& method_id) const;
+ const char* GetMethodShorty(const MethodId& method_id, uint32_t* length) const;
+
// Returns the number of class definitions in the .dex file.
uint32_t NumClassDefs() const {
DCHECK(header_ != nullptr) << GetLocation();
@@ -703,9 +672,7 @@ class DexFile {
}
// Returns the class descriptor string of a class definition.
- const char* GetClassDescriptor(const ClassDef& class_def) const {
- return StringByTypeIdx(class_def.class_idx_);
- }
+ const char* GetClassDescriptor(const ClassDef& class_def) const;
// Looks up a class definition by its class descriptor. Hash must be
// ComputeModifiedUtf8Hash(descriptor).
@@ -743,9 +710,7 @@ class DexFile {
}
}
- const char* GetReturnTypeDescriptor(const ProtoId& proto_id) const {
- return StringByTypeIdx(proto_id.return_type_idx_);
- }
+ const char* GetReturnTypeDescriptor(const ProtoId& proto_id) const;
// Returns the number of prototype identifiers in the .dex file.
size_t NumProtoIds() const {
@@ -782,10 +747,7 @@ class DexFile {
const Signature CreateSignature(const StringPiece& signature) const;
// Returns the short form method descriptor for the given prototype.
- const char* GetShorty(uint32_t proto_idx) const {
- const ProtoId& proto_id = GetProtoId(proto_idx);
- return StringDataByIdx(proto_id.shorty_idx_);
- }
+ const char* GetShorty(uint32_t proto_idx) const;
const TypeList* GetProtoParameters(const ProtoId& proto_id) const {
if (proto_id.parameters_off_ == 0) {
@@ -1177,11 +1139,17 @@ class DexFile {
private:
// Opens a .dex file
- static std::unique_ptr<const DexFile> OpenFile(int fd, const char* location,
- bool verify, std::string* error_msg);
+ static std::unique_ptr<const DexFile> OpenFile(int fd,
+ const char* location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg);
// Opens dex files from within a .jar, .zip, or .apk file
- static bool OpenZip(int fd, const std::string& location, std::string* error_msg,
+ static bool OpenZip(int fd,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
enum class ZipOpenErrorCode { // private
@@ -1195,8 +1163,11 @@ class DexFile {
  // Opens the .dex file at entry_name in a zip archive. error_code is undefined on a non-null
  // return.
- static std::unique_ptr<const DexFile> Open(const ZipArchive& zip_archive, const char* entry_name,
- const std::string& location, std::string* error_msg,
+ static std::unique_ptr<const DexFile> Open(const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify_checksum,
+ std::string* error_msg,
ZipOpenErrorCode* error_code);
// Opens a .dex file at the given address backed by a MemMap
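
A caller-side sketch of the reworked public API; the jar path below is illustrative, not taken from this change:

    std::string error_msg;
    std::vector<std::unique_ptr<const DexFile>> dex_files;
    // verify_checksum is the flag threaded through every overload above.
    if (!DexFile::Open("/tmp/example.jar", "/tmp/example.jar",
                       /*verify_checksum*/ true, &error_msg, &dex_files)) {
      LOG(ERROR) << "Failed to open dex files: " << error_msg;
    }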
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 796701d86b..4f8e6f1fc0 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -154,9 +154,10 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
// read dex file
ScopedObjectAccess soa(Thread::Current());
+ static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFile::Open(location, location, &error_msg, &tmp);
+ bool success = DexFile::Open(location, location, kVerifyChecksum, &error_msg, &tmp);
CHECK(success) << error_msg;
EXPECT_EQ(1U, tmp.size());
std::unique_ptr<const DexFile> dex_file = std::move(tmp[0]);
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 1d243490ab..5132efc03c 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -128,9 +128,14 @@ const DexFile::MethodId* DexFileVerifier::CheckLoadMethodId(uint32_t idx, const
error_stmt; \
}
-bool DexFileVerifier::Verify(const DexFile* dex_file, const uint8_t* begin, size_t size,
- const char* location, std::string* error_msg) {
- std::unique_ptr<DexFileVerifier> verifier(new DexFileVerifier(dex_file, begin, size, location));
+bool DexFileVerifier::Verify(const DexFile* dex_file,
+ const uint8_t* begin,
+ size_t size,
+ const char* location,
+ bool verify_checksum,
+ std::string* error_msg) {
+ std::unique_ptr<DexFileVerifier> verifier(
+ new DexFileVerifier(dex_file, begin, size, location, verify_checksum));
if (!verifier->Verify()) {
*error_msg = verifier->FailureReason();
return false;
@@ -273,8 +278,13 @@ bool DexFileVerifier::CheckHeader() {
const uint8_t* non_sum_ptr = reinterpret_cast<const uint8_t*>(header_) + non_sum;
adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
if (adler_checksum != header_->checksum_) {
- ErrorStringPrintf("Bad checksum (%08x, expected %08x)", adler_checksum, header_->checksum_);
- return false;
+ if (verify_checksum_) {
+ ErrorStringPrintf("Bad checksum (%08x, expected %08x)", adler_checksum, header_->checksum_);
+ return false;
+ } else {
+ LOG(WARNING) << StringPrintf(
+ "Ignoring bad checksum (%08x, expected %08x)", adler_checksum, header_->checksum_);
+ }
}
// Check the contents of the header.
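
For reference, a sketch of the checksum CheckHeader recomputes: zlib's adler32 over everything past the magic and checksum fields (the 12-byte skip is an assumption from the header layout, 8 bytes of magic plus a 4-byte checksum):

    #include <zlib.h>
    uint32_t ComputeDexChecksum(const uint8_t* begin, size_t size) {
      static constexpr size_t kNonSum = 8 + 4;   // magic_ + checksum_ fields.
      uint32_t adler = adler32(0L, Z_NULL, 0);   // Standard adler32 seed.
      return static_cast<uint32_t>(adler32(adler, begin + kNonSum, size - kNonSum));
    }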
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index 90409db44b..133e4326bc 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -26,17 +26,31 @@ namespace art {
class DexFileVerifier {
public:
- static bool Verify(const DexFile* dex_file, const uint8_t* begin, size_t size,
- const char* location, std::string* error_msg);
+ static bool Verify(const DexFile* dex_file,
+ const uint8_t* begin,
+ size_t size,
+ const char* location,
+ bool verify_checksum,
+ std::string* error_msg);
const std::string& FailureReason() const {
return failure_reason_;
}
private:
- DexFileVerifier(const DexFile* dex_file, const uint8_t* begin, size_t size, const char* location)
- : dex_file_(dex_file), begin_(begin), size_(size), location_(location),
- header_(&dex_file->GetHeader()), ptr_(nullptr), previous_item_(nullptr) {
+ DexFileVerifier(const DexFile* dex_file,
+ const uint8_t* begin,
+ size_t size,
+ const char* location,
+ bool verify_checksum)
+ : dex_file_(dex_file),
+ begin_(begin),
+ size_(size),
+ location_(location),
+ verify_checksum_(verify_checksum),
+ header_(&dex_file->GetHeader()),
+ ptr_(nullptr),
+ previous_item_(nullptr) {
}
bool Verify();
@@ -176,6 +190,7 @@ class DexFileVerifier {
const uint8_t* const begin_;
const size_t size_;
const char* const location_;
+ const bool verify_checksum_;
const DexFile::Header* const header_;
struct OffsetTypeMapEmptyFn {
diff --git a/runtime/dex_file_verifier_test.cc b/runtime/dex_file_verifier_test.cc
index 4e53914374..71c0ad9295 100644
--- a/runtime/dex_file_verifier_test.cc
+++ b/runtime/dex_file_verifier_test.cc
@@ -122,6 +122,10 @@ static void FixUpChecksum(uint8_t* dex_file) {
class DexFileVerifierTest : public CommonRuntimeTest {
protected:
+ DexFile* GetDexFile(const uint8_t* dex_bytes, size_t length) {
+ return new DexFile(dex_bytes, length, "tmp", 0, nullptr, nullptr);
+ }
+
void VerifyModification(const char* dex_file_base64_content,
const char* location,
std::function<void(DexFile*)> f,
@@ -130,16 +134,17 @@ class DexFileVerifierTest : public CommonRuntimeTest {
std::unique_ptr<uint8_t[]> dex_bytes = DecodeBase64(dex_file_base64_content, &length);
CHECK(dex_bytes != nullptr);
// Note: `dex_file` will be destroyed before `dex_bytes`.
- std::unique_ptr<DexFile> dex_file(
- new DexFile(dex_bytes.get(), length, "tmp", 0, nullptr, nullptr));
+ std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
f(dex_file.get());
FixUpChecksum(const_cast<uint8_t*>(dex_file->Begin()));
+ static constexpr bool kVerifyChecksum = true;
std::string error_msg;
bool success = DexFileVerifier::Verify(dex_file.get(),
dex_file->Begin(),
dex_file->Size(),
location,
+ kVerifyChecksum,
&error_msg);
if (expected_error == nullptr) {
EXPECT_TRUE(success) << error_msg;
@@ -175,7 +180,7 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
// read dex file
ScopedObjectAccess soa(Thread::Current());
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFile::Open(location, location, error_msg, &tmp);
+ bool success = DexFile::Open(location, location, true, error_msg, &tmp);
CHECK(success) << error_msg;
EXPECT_EQ(1U, tmp.size());
std::unique_ptr<const DexFile> dex_file = std::move(tmp[0]);
@@ -1697,4 +1702,45 @@ TEST_F(DexFileVerifierTest, CircularInterfaceImplementation) {
" implemented interface with type idx: '0'");
}
+TEST_F(DexFileVerifierTest, Checksum) {
+ size_t length;
+ std::unique_ptr<uint8_t[]> dex_bytes = DecodeBase64(kGoodTestDex, &length);
+ CHECK(dex_bytes != nullptr);
+ // Note: `dex_file` will be destroyed before `dex_bytes`.
+ std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
+ std::string error_msg;
+
+ // Good checksum: all pass.
+ EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
+ dex_file->Begin(),
+ dex_file->Size(),
+ "good checksum, no verify",
+ /*verify_checksum*/ false,
+ &error_msg));
+ EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
+ dex_file->Begin(),
+ dex_file->Size(),
+ "good checksum, verify",
+ /*verify_checksum*/ true,
+ &error_msg));
+
+  // Bad checksum: !verify_checksum passes, verify_checksum fails.
+ DexFile::Header* header = reinterpret_cast<DexFile::Header*>(
+ const_cast<uint8_t*>(dex_file->Begin()));
+ header->checksum_ = 0;
+ EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
+ dex_file->Begin(),
+ dex_file->Size(),
+ "bad checksum, no verify",
+ /*verify_checksum*/ false,
+ &error_msg));
+ EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
+ dex_file->Begin(),
+ dex_file->Size(),
+ "bad checksum, verify",
+ /*verify_checksum*/ true,
+ &error_msg));
+ EXPECT_NE(error_msg.find("Bad checksum"), std::string::npos) << error_msg;
+}
+
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index f3a0d2f3ef..771e14396e 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -121,5 +121,6 @@ extern "C" void art_quick_throw_div_zero();
extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
extern "C" void art_quick_throw_null_pointer_exception();
extern "C" void art_quick_throw_stack_overflow(void*);
+extern "C" void art_quick_throw_string_bounds(int32_t index, int32_t limit);
#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_EXTERNS_H_
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 5dafa8b599..f98de95fcb 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -114,6 +114,7 @@ void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints)
qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+ qpoints->pThrowStringBounds = art_quick_throw_string_bounds;
// Deoptimize
qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 79d1c1377b..30b639eaec 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -140,6 +140,7 @@
V(ThrowNoSuchMethod, void, int32_t) \
V(ThrowNullPointer, void, void) \
V(ThrowStackOverflow, void, void*) \
+ V(ThrowStringBounds, void, int32_t, int32_t) \
V(Deoptimize, void, void) \
\
V(A64Load, int64_t, volatile const int64_t *) \
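
The new V() row is expanded by an X-macro into a function-pointer slot of QuickEntryPoints; a hypothetical sketch of that expansion (the real macro lives elsewhere in the runtime):

    // V(ThrowStringBounds, void, int32_t, int32_t) expands to roughly:
    #define ENTRYPOINT_SLOT(name, rettype, ...) rettype (*p##name)(__VA_ARGS__);
    struct QuickEntryPointsSketch {
      ENTRYPOINT_SLOT(ThrowStringBounds, void, int32_t, int32_t)
      // ... one slot per V() row in the list above ...
    };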
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 5256feae2b..2778e32ece 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -29,7 +29,7 @@ extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
self->QuickDeliverException();
}
-// Called by generated call to throw an exception.
+// Called by generated code to throw an exception.
extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
/*
@@ -48,7 +48,7 @@ extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* excepti
self->QuickDeliverException();
}
-// Called by generated call to throw a NPE exception.
+// Called by generated code to throw a NPE exception.
extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -58,7 +58,7 @@ extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
self->QuickDeliverException();
}
-// Called by generated call to throw an arithmetic divide by zero exception.
+// Called by generated code to throw an arithmetic divide by zero exception.
extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -66,7 +66,7 @@ extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self)
self->QuickDeliverException();
}
-// Called by generated call to throw an array index out of bounds exception.
+// Called by generated code to throw an array index out of bounds exception.
extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
@@ -74,6 +74,14 @@ extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thr
self->QuickDeliverException();
}
+// Called by generated code to throw a string index out of bounds exception.
+extern "C" NO_RETURN void artThrowStringBoundsFromCode(int index, int length, Thread* self)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ ScopedQuickEntrypointChecks sqec(self);
+ ThrowStringIndexOutOfBoundsException(index, length);
+ self->QuickDeliverException();
+}
+
extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
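
The ThrowStringIndexOutOfBoundsException helper used above appears to be added alongside this change, but its body is not shown in this section; the following is only a sketch modeled on the existing array-bounds helper:

    // Assumed shape, mirroring ThrowArrayIndexOutOfBoundsException.
    void ThrowStringIndexOutOfBoundsException(int index, int length) {
      ThrowException("Ljava/lang/StringIndexOutOfBoundsException;", nullptr,
                     StringPrintf("length=%d; index=%d", length, index).c_str());
    }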
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index c621672ae7..7a624b211c 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -112,17 +112,19 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, name, pthread_self, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, pthread_self, last_no_thread_suspension_cause,
sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, last_no_thread_suspension_cause, checkpoint_functions,
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, last_no_thread_suspension_cause, checkpoint_function,
sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_functions, jni_entrypoints,
- sizeof(void*) * 6);
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, checkpoint_function, active_suspend_barriers,
+ sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, active_suspend_barriers, jni_entrypoints,
+ sizeof(Thread::tls_ptr_sized_values::active_suspend_barriers));
// Skip across the entrypoints structures.
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, thread_local_start, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, mterp_current_ibase, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_alt_ibase, rosalloc_runs, sizeof(void*));
@@ -285,7 +287,8 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNoSuchMethod, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNoSuchMethod, pThrowNullPointer, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNullPointer, pThrowStackOverflow, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pDeoptimize, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pThrowStringBounds, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStringBounds, pDeoptimize, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pDeoptimize, pA64Load, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Load, pA64Store, sizeof(void*));
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 3f8f6284c0..dd750060b8 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -192,7 +192,7 @@ void ConcurrentCopying::InitializePhase() {
}
// Used to switch the thread roots of a thread from from-space refs to to-space refs.
-class ThreadFlipVisitor : public Closure {
+class ConcurrentCopying::ThreadFlipVisitor : public Closure {
public:
ThreadFlipVisitor(ConcurrentCopying* concurrent_copying, bool use_tlab)
: concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
@@ -229,7 +229,7 @@ class ThreadFlipVisitor : public Closure {
};
// Called back from Runtime::FlipThreadRoots() during a pause.
-class FlipCallback : public Closure {
+class ConcurrentCopying::FlipCallback : public Closure {
public:
explicit FlipCallback(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {
@@ -304,10 +304,9 @@ void ConcurrentCopying::RecordLiveStackFreezeSize(Thread* self) {
}
// Used to visit objects in the immune spaces.
-class ConcurrentCopyingImmuneSpaceObjVisitor {
+class ConcurrentCopying::ImmuneSpaceObjVisitor {
public:
- explicit ConcurrentCopyingImmuneSpaceObjVisitor(ConcurrentCopying* cc)
- : collector_(cc) {}
+ explicit ImmuneSpaceObjVisitor(ConcurrentCopying* cc) : collector_(cc) {}
void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_)
SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
@@ -388,7 +387,7 @@ void ConcurrentCopying::MarkingPhase() {
for (auto& space : immune_spaces_.GetSpaces()) {
DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
- ConcurrentCopyingImmuneSpaceObjVisitor visitor(this);
+ ImmuneSpaceObjVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->Limit()),
visitor);
@@ -487,7 +486,7 @@ void ConcurrentCopying::ReenableWeakRefAccess(Thread* self) {
Runtime::Current()->BroadcastForNewSystemWeaks();
}
-class DisableMarkingCheckpoint : public Closure {
+class ConcurrentCopying::DisableMarkingCheckpoint : public Closure {
public:
explicit DisableMarkingCheckpoint(ConcurrentCopying* concurrent_copying)
: concurrent_copying_(concurrent_copying) {
@@ -683,9 +682,9 @@ accounting::ObjectStack* ConcurrentCopying::GetLiveStack() {
// The following visitors are used to verify that there's no references to the from-space left after
// marking.
-class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
+class ConcurrentCopying::VerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
public:
- explicit ConcurrentCopyingVerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
+ explicit VerifyNoFromSpaceRefsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* ref) const
@@ -712,16 +711,16 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor : public SingleRootVisitor {
ConcurrentCopying* const collector_;
};
-class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
+class ConcurrentCopying::VerifyNoFromSpaceRefsFieldVisitor {
public:
- explicit ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
+ explicit VerifyNoFromSpaceRefsFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
- ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
+ VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(ref);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
@@ -739,7 +738,7 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- ConcurrentCopyingVerifyNoFromSpaceRefsVisitor visitor(collector_);
+ VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(root->AsMirrorPtr());
}
@@ -747,9 +746,9 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
ConcurrentCopying* const collector_;
};
-class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
+class ConcurrentCopying::VerifyNoFromSpaceRefsObjectVisitor {
public:
- explicit ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
+ explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj) const
SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -761,7 +760,7 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
space::RegionSpace* region_space = collector->RegionSpace();
CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
- ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor visitor(collector);
+ VerifyNoFromSpaceRefsFieldVisitor visitor(collector);
obj->VisitReferences(visitor, visitor);
if (kUseBakerReadBarrier) {
CHECK(obj->GetReadBarrierPointer() == ReadBarrier::WhitePtr())
@@ -785,16 +784,15 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
CHECK(!thread->GetIsGcMarking());
}
}
- ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor visitor(this);
+ VerifyNoFromSpaceRefsObjectVisitor visitor(this);
// Roots.
{
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
+ VerifyNoFromSpaceRefsVisitor ref_visitor(this);
Runtime::Current()->VisitRoots(&ref_visitor);
}
// The to-space.
- region_space_->WalkToSpace(ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor::ObjectCallback,
- this);
+ region_space_->WalkToSpace(VerifyNoFromSpaceRefsObjectVisitor::ObjectCallback, this);
// Non-moving spaces.
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -802,7 +800,7 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
}
// The alloc stack.
{
- ConcurrentCopyingVerifyNoFromSpaceRefsVisitor ref_visitor(this);
+ VerifyNoFromSpaceRefsVisitor ref_visitor(this);
for (auto* it = heap_->allocation_stack_->Begin(), *end = heap_->allocation_stack_->End();
it < end; ++it) {
mirror::Object* const obj = it->AsMirrorPtr();
@@ -817,9 +815,9 @@ void ConcurrentCopying::VerifyNoFromSpaceReferences() {
}
// The following visitors are used to assert the to-space invariant.
-class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
+class ConcurrentCopying::AssertToSpaceInvariantRefsVisitor {
public:
- explicit ConcurrentCopyingAssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
+ explicit AssertToSpaceInvariantRefsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* ref) const
@@ -835,16 +833,16 @@ class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
ConcurrentCopying* const collector_;
};
-class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
+class ConcurrentCopying::AssertToSpaceInvariantFieldVisitor {
public:
- explicit ConcurrentCopyingAssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
+ explicit AssertToSpaceInvariantFieldVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
- ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
+ AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(ref);
}
void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
@@ -861,7 +859,7 @@ class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector_);
+ AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(root->AsMirrorPtr());
}
@@ -869,9 +867,9 @@ class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
ConcurrentCopying* const collector_;
};
-class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
+class ConcurrentCopying::AssertToSpaceInvariantObjectVisitor {
public:
- explicit ConcurrentCopyingAssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
+ explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj) const
SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -884,7 +882,7 @@ class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
space::RegionSpace* region_space = collector->RegionSpace();
CHECK(!region_space->IsInFromSpace(obj)) << "Scanning object " << obj << " in from space";
collector->AssertToSpaceInvariant(nullptr, MemberOffset(0), obj);
- ConcurrentCopyingAssertToSpaceInvariantFieldVisitor visitor(collector);
+ AssertToSpaceInvariantFieldVisitor visitor(collector);
obj->VisitReferences(visitor, visitor);
}
@@ -892,7 +890,7 @@ class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
ConcurrentCopying* const collector_;
};
-class RevokeThreadLocalMarkStackCheckpoint : public Closure {
+class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
public:
RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
bool disable_weak_ref_access)
@@ -1112,7 +1110,7 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
region_space_->AddLiveBytes(to_ref, alloc_size);
}
if (ReadBarrier::kEnableToSpaceInvariantChecks || kIsDebugBuild) {
- ConcurrentCopyingAssertToSpaceInvariantObjectVisitor visitor(this);
+ AssertToSpaceInvariantObjectVisitor visitor(this);
visitor(to_ref);
}
}
@@ -1484,9 +1482,9 @@ void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* o
}
// Used to scan ref fields of an object.
-class ConcurrentCopyingRefFieldsVisitor {
+class ConcurrentCopying::RefFieldsVisitor {
public:
- explicit ConcurrentCopyingRefFieldsVisitor(ConcurrentCopying* collector)
+ explicit RefFieldsVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
@@ -1522,7 +1520,7 @@ class ConcurrentCopyingRefFieldsVisitor {
// Scan ref fields of an object.
inline void ConcurrentCopying::Scan(mirror::Object* to_ref) {
DCHECK(!region_space_->IsInFromSpace(to_ref));
- ConcurrentCopyingRefFieldsVisitor visitor(this);
+ RefFieldsVisitor visitor(this);
// Disable the read barrier for a performance reason.
to_ref->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index afdc0f1f98..a986a7a1db 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -243,16 +243,21 @@ class ConcurrentCopying : public GarbageCollector {
accounting::ReadBarrierTable* rb_table_;
bool force_evacuate_all_; // True if all regions are evacuated.
- friend class ConcurrentCopyingRefFieldsVisitor;
- friend class ConcurrentCopyingImmuneSpaceObjVisitor;
- friend class ConcurrentCopyingVerifyNoFromSpaceRefsVisitor;
- friend class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor;
- friend class ConcurrentCopyingClearBlackPtrsVisitor;
- friend class ConcurrentCopyingLostCopyVisitor;
- friend class ThreadFlipVisitor;
- friend class FlipCallback;
- friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor;
- friend class RevokeThreadLocalMarkStackCheckpoint;
+ class AssertToSpaceInvariantFieldVisitor;
+ class AssertToSpaceInvariantObjectVisitor;
+ class AssertToSpaceInvariantRefsVisitor;
+ class ClearBlackPtrsVisitor;
+ class ComputeUnevacFromSpaceLiveRatioVisitor;
+ class DisableMarkingCheckpoint;
+ class FlipCallback;
+ class ImmuneSpaceObjVisitor;
+ class LostCopyVisitor;
+ class RefFieldsVisitor;
+ class RevokeThreadLocalMarkStackCheckpoint;
+ class VerifyNoFromSpaceRefsFieldVisitor;
+ class VerifyNoFromSpaceRefsObjectVisitor;
+ class VerifyNoFromSpaceRefsVisitor;
+ class ThreadFlipVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};
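
The friend declarations become private nested classes because a nested class already has access to the enclosing class's private members; a self-contained sketch of the pattern:

    class Outer {
     private:
      class Helper;            // Declared here, defined out of line (in the .cc).
      int private_state_ = 0;
    };

    class Outer::Helper {
     public:
      explicit Helper(Outer* outer) : outer_(outer) {}
      void Bump() const { ++outer_->private_state_; }  // OK: nested-class access.
     private:
      Outer* const outer_;
    };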
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 6beb60608c..43482eb7cc 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -52,8 +52,9 @@ void MarkCompact::BindBitmaps() {
MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
: GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
- space_(nullptr), collector_name_(name_), updating_references_(false) {
-}
+ space_(nullptr),
+ collector_name_(name_),
+ updating_references_(false) {}
void MarkCompact::RunPhases() {
Thread* self = Thread::Current();
@@ -85,30 +86,20 @@ void MarkCompact::ForwardObject(mirror::Object* obj) {
++live_objects_in_space_;
}
-class CalculateObjectForwardingAddressVisitor {
- public:
- explicit CalculateObjectForwardingAddressVisitor(MarkCompact* collector)
- : collector_(collector) {}
- void operator()(mirror::Object* obj) const REQUIRES(Locks::mutator_lock_,
- Locks::heap_bitmap_lock_) {
- DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
- DCHECK(collector_->IsMarked(obj) != nullptr);
- collector_->ForwardObject(obj);
- }
-
- private:
- MarkCompact* const collector_;
-};
void MarkCompact::CalculateObjectForwardingAddresses() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// The bump pointer in the space where the next forwarding address will be.
bump_pointer_ = reinterpret_cast<uint8_t*>(space_->Begin());
// Visit all the marked objects in the bitmap.
- CalculateObjectForwardingAddressVisitor visitor(this);
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
reinterpret_cast<uintptr_t>(space_->End()),
- visitor);
+ [this](mirror::Object* obj)
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
+ DCHECK(IsMarked(obj) != nullptr);
+ ForwardObject(obj);
+ });
}
void MarkCompact::InitializePhase() {
@@ -129,17 +120,6 @@ void MarkCompact::ProcessReferences(Thread* self) {
false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
-class BitmapSetSlowPathVisitor {
- public:
- void operator()(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
- // Marking a large object, make sure its aligned as a sanity check.
- if (!IsAligned<kPageSize>(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
- LOG(FATAL) << obj;
- }
- }
-};
-
inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
if (obj == nullptr) {
return nullptr;
@@ -155,8 +135,15 @@ inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
}
} else {
DCHECK(!space_->HasAddress(obj));
- BitmapSetSlowPathVisitor visitor;
- if (!mark_bitmap_->Set(obj, visitor)) {
+ auto slow_path = [this](const mirror::Object* ref)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+      // Marking a large object, make sure it's aligned as a sanity check.
+ if (!IsAligned<kPageSize>(ref)) {
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ LOG(FATAL) << ref;
+ }
+ };
+ if (!mark_bitmap_->Set(obj, slow_path)) {
// This object was not previously marked.
MarkStackPush(obj);
}
@@ -296,10 +283,9 @@ void MarkCompact::VisitRoots(
}
}
-class UpdateRootVisitor : public RootVisitor {
+class MarkCompact::UpdateRootVisitor : public RootVisitor {
public:
- explicit UpdateRootVisitor(MarkCompact* collector) : collector_(collector) {
- }
+ explicit UpdateRootVisitor(MarkCompact* collector) : collector_(collector) {}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES(Locks::mutator_lock_)
@@ -332,10 +318,10 @@ class UpdateRootVisitor : public RootVisitor {
MarkCompact* const collector_;
};
-class UpdateObjectReferencesVisitor {
+class MarkCompact::UpdateObjectReferencesVisitor {
public:
- explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {
- }
+ explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {}
+
void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
collector_->UpdateObjectReferences(obj);
@@ -423,10 +409,9 @@ inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Objec
}
}
-class UpdateReferenceVisitor {
+class MarkCompact::UpdateReferenceVisitor {
public:
- explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {
- }
+ explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
ALWAYS_INLINE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -501,19 +486,6 @@ bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
return space != space_ && !immune_spaces_.ContainsSpace(space);
}
-class MoveObjectVisitor {
- public:
- explicit MoveObjectVisitor(MarkCompact* collector) : collector_(collector) {
- }
- void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_)
- REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
- collector_->MoveObject(obj, obj->SizeOf());
- }
-
- private:
- MarkCompact* const collector_;
-};
-
void MarkCompact::MoveObject(mirror::Object* obj, size_t len) {
// Look at the forwarding address stored in the lock word to know where to copy.
DCHECK(space_->HasAddress(obj)) << obj;
@@ -534,10 +506,13 @@ void MarkCompact::MoveObject(mirror::Object* obj, size_t len) {
void MarkCompact::MoveObjects() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Move the objects in the before-forwarding bitmap.
- MoveObjectVisitor visitor(this);
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
reinterpret_cast<uintptr_t>(space_->End()),
- visitor);
+ [this](mirror::Object* obj)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ MoveObject(obj, obj->SizeOf());
+ });
CHECK(lock_words_to_restore_.empty());
}
@@ -572,10 +547,9 @@ void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
-class MarkCompactMarkObjectVisitor {
+class MarkCompact::MarkObjectVisitor {
public:
- explicit MarkCompactMarkObjectVisitor(MarkCompact* collector) : collector_(collector) {
- }
+ explicit MarkObjectVisitor(MarkCompact* collector) : collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -608,7 +582,7 @@ class MarkCompactMarkObjectVisitor {
// Visit all of the references of an object and update.
void MarkCompact::ScanObject(mirror::Object* obj) {
- MarkCompactMarkObjectVisitor visitor(this);
+ MarkObjectVisitor visitor(this);
obj->VisitReferences(visitor, visitor);
}
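
The one-shot visitor classes above become capturing lambdas passed straight to VisitMarkedRange; a self-contained sketch of the pattern (the lock annotations from the diff are omitted):

    #include <cstdio>
    #include <vector>

    template <typename Visitor>
    void VisitAll(const std::vector<int>& objs, Visitor&& visit) {
      for (int obj : objs) visit(obj);   // Stand-in for VisitMarkedRange.
    }

    int main() {
      int forwarded = 0;
      // The lambda replaces CalculateObjectForwardingAddressVisitor et al.
      VisitAll({1, 2, 3}, [&forwarded](int) { ++forwarded; });
      std::printf("forwarded=%d\n", forwarded);
    }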
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 48311570b5..16abfb73b8 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -222,13 +222,10 @@ class MarkCompact : public GarbageCollector {
bool updating_references_;
private:
- friend class BitmapSetSlowPathVisitor;
- friend class CalculateObjectForwardingAddressVisitor;
- friend class MarkCompactMarkObjectVisitor;
- friend class MoveObjectVisitor;
- friend class UpdateObjectReferencesVisitor;
- friend class UpdateReferenceVisitor;
- friend class UpdateRootVisitor;
+ class MarkObjectVisitor;
+ class UpdateObjectReferencesVisitor;
+ class UpdateReferenceVisitor;
+ class UpdateRootVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(MarkCompact);
};
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 894ceba216..9f54f1cdd4 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -266,7 +266,7 @@ void MarkSweep::MarkingPhase() {
PreCleanCards();
}
-class ScanObjectVisitor {
+class MarkSweep::ScanObjectVisitor {
public:
explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
: mark_sweep_(mark_sweep) {}
@@ -393,12 +393,14 @@ bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref
return IsMarked(ref->AsMirrorPtr());
}
-class MarkSweepMarkObjectSlowPath {
+class MarkSweep::MarkObjectSlowPath {
public:
- explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep,
- mirror::Object* holder = nullptr,
- MemberOffset offset = MemberOffset(0))
- : mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {}
+ explicit MarkObjectSlowPath(MarkSweep* mark_sweep,
+ mirror::Object* holder = nullptr,
+ MemberOffset offset = MemberOffset(0))
+ : mark_sweep_(mark_sweep),
+ holder_(holder),
+ offset_(offset) {}
void operator()(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
@@ -444,27 +446,8 @@ class MarkSweepMarkObjectSlowPath {
}
PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
- {
- LOG(INTERNAL_FATAL) << "Attempting see if it's a bad root";
- Thread* self = Thread::Current();
- if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
- mark_sweep_->VerifyRoots();
- } else {
- const bool heap_bitmap_exclusive_locked =
- Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
- if (heap_bitmap_exclusive_locked) {
- Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
- }
- {
- ScopedThreadSuspension(self, kSuspended);
- ScopedSuspendAll ssa(__FUNCTION__);
- mark_sweep_->VerifyRoots();
- }
- if (heap_bitmap_exclusive_locked) {
- Locks::heap_bitmap_lock_->ExclusiveLock(self);
- }
- }
- }
+    LOG(INTERNAL_FATAL) << "Attempting to see if it's a bad thread root\n";
+ mark_sweep_->VerifySuspendedThreadRoots();
LOG(FATAL) << "Can't mark invalid object";
}
}
@@ -499,7 +482,7 @@ inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj,
if (kCountMarkedObjects) {
++mark_slowpath_count_;
}
- MarkSweepMarkObjectSlowPath visitor(this, holder, offset);
+ MarkObjectSlowPath visitor(this, holder, offset);
// TODO: We already know that the object is not in the current_space_bitmap_ but MarkBitmap::Set
// will check again.
if (!mark_bitmap_->Set(obj, visitor)) {
@@ -534,7 +517,7 @@ inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
if (LIKELY(object_bitmap->HasAddress(obj))) {
return !object_bitmap->AtomicTestAndSet(obj);
}
- MarkSweepMarkObjectSlowPath visitor(this);
+ MarkObjectSlowPath visitor(this);
return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}
@@ -553,7 +536,7 @@ inline void MarkSweep::MarkObject(mirror::Object* obj,
}
}
-class VerifyRootMarkedVisitor : public SingleRootVisitor {
+class MarkSweep::VerifyRootMarkedVisitor : public SingleRootVisitor {
public:
explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
@@ -582,7 +565,7 @@ void MarkSweep::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
}
}
-class VerifyRootVisitor : public SingleRootVisitor {
+class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -591,15 +574,15 @@ class VerifyRootVisitor : public SingleRootVisitor {
if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
space::LargeObjectSpace* large_object_space = heap->GetLargeObjectsSpace();
if (large_object_space != nullptr && !large_object_space->Contains(root)) {
- LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info;
+ LOG(INTERNAL_FATAL) << "Found invalid root: " << root << " " << info << "\n";
}
}
}
};
-void MarkSweep::VerifyRoots() {
+void MarkSweep::VerifySuspendedThreadRoots() {
VerifyRootVisitor visitor;
- Runtime::Current()->GetThreadList()->VisitRoots(&visitor);
+ Runtime::Current()->GetThreadList()->VisitRootsForSuspendedThreads(&visitor);
}
void MarkSweep::MarkRoots(Thread* self) {
@@ -629,7 +612,7 @@ void MarkSweep::MarkConcurrentRoots(VisitRootFlags flags) {
this, static_cast<VisitRootFlags>(flags | kVisitRootFlagNonMoving));
}
-class DelayReferenceReferentVisitor {
+class MarkSweep::DelayReferenceReferentVisitor {
public:
explicit DelayReferenceReferentVisitor(MarkSweep* collector) : collector_(collector) {}
@@ -644,7 +627,7 @@ class DelayReferenceReferentVisitor {
};
template <bool kUseFinger = false>
-class MarkStackTask : public Task {
+class MarkSweep::MarkStackTask : public Task {
public:
MarkStackTask(ThreadPool* thread_pool,
MarkSweep* mark_sweep,
@@ -802,7 +785,7 @@ class MarkStackTask : public Task {
}
};
-class CardScanTask : public MarkStackTask<false> {
+class MarkSweep::CardScanTask : public MarkStackTask<false> {
public:
CardScanTask(ThreadPool* thread_pool,
MarkSweep* mark_sweep,
@@ -967,7 +950,7 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
}
}
-class RecursiveMarkTask : public MarkStackTask<false> {
+class MarkSweep::RecursiveMarkTask : public MarkStackTask<false> {
public:
RecursiveMarkTask(ThreadPool* thread_pool,
MarkSweep* mark_sweep,
@@ -1080,7 +1063,7 @@ void MarkSweep::SweepSystemWeaks(Thread* self) {
Runtime::Current()->SweepSystemWeaks(this);
}
-class VerifySystemWeakVisitor : public IsMarkedVisitor {
+class MarkSweep::VerifySystemWeakVisitor : public IsMarkedVisitor {
public:
explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
@@ -1109,7 +1092,7 @@ void MarkSweep::VerifySystemWeaks() {
Runtime::Current()->SweepSystemWeaks(&visitor);
}
-class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
+class MarkSweep::CheckpointMarkThreadRoots : public Closure, public RootVisitor {
public:
CheckpointMarkThreadRoots(MarkSweep* mark_sweep,
bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index c19107a626..9747031152 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -250,8 +250,8 @@ class MarkSweep : public GarbageCollector {
// Verify the roots of the heap and print out information related to any invalid roots.
  // Called in MarkObject, so we may or may not hold the mutator lock.
- void VerifyRoots()
- NO_THREAD_SAFETY_ANALYSIS;
+ void VerifySuspendedThreadRoots()
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
void ExpandMarkStack()
@@ -353,17 +353,17 @@ class MarkSweep : public GarbageCollector {
std::unique_ptr<MemMap> sweep_array_free_buffer_mem_map_;
private:
- friend class CardScanTask;
- friend class CheckBitmapVisitor;
- friend class CheckReferenceVisitor;
- friend class CheckpointMarkThreadRoots;
- friend class Heap;
- friend class FifoMarkStackChunk;
- friend class MarkObjectVisitor;
- template<bool kUseFinger> friend class MarkStackTask;
- friend class MarkSweepMarkObjectSlowPath;
- friend class VerifyRootMarkedVisitor;
- friend class VerifyRootVisitor;
+ class CardScanTask;
+ class CheckpointMarkThreadRoots;
+ class DelayReferenceReferentVisitor;
+ template<bool kUseFinger> class MarkStackTask;
+ class MarkObjectSlowPath;
+ class RecursiveMarkTask;
+ class ScanObjectParallelVisitor;
+ class ScanObjectVisitor;
+ class VerifyRootMarkedVisitor;
+ class VerifyRootVisitor;
+ class VerifySystemWeakVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(MarkSweep);
};
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index e87b5ff332..78fb2d24ae 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -26,21 +26,6 @@ namespace art {
namespace gc {
namespace collector {
-class BitmapSetSlowPathVisitor {
- public:
- explicit BitmapSetSlowPathVisitor(SemiSpace* semi_space) : semi_space_(semi_space) {
- }
-
- void operator()(const mirror::Object* obj) const {
- CHECK(!semi_space_->to_space_->HasAddress(obj)) << "Marking " << obj << " in to_space_";
- // Marking a large object, make sure its aligned as a sanity check.
- CHECK_ALIGNED(obj, kPageSize);
- }
-
- private:
- SemiSpace* const semi_space_;
-};
-
inline mirror::Object* SemiSpace::GetForwardingAddressInFromSpace(mirror::Object* obj) const {
DCHECK(from_space_->HasAddress(obj));
LockWord lock_word = obj->GetLockWord(false);
@@ -76,8 +61,12 @@ inline void SemiSpace::MarkObject(
obj_ptr->Assign(forward_address);
} else if (!collect_from_space_only_ && !immune_spaces_.IsInImmuneRegion(obj)) {
DCHECK(!to_space_->HasAddress(obj)) << "Tried to mark " << obj << " in to-space";
- BitmapSetSlowPathVisitor visitor(this);
- if (!mark_bitmap_->Set(obj, visitor)) {
+ auto slow_path = [this](const mirror::Object* ref) {
+ CHECK(!to_space_->HasAddress(ref)) << "Marking " << ref << " in to_space_";
+      // Marking a large object, make sure it's aligned as a sanity check.
+ CHECK_ALIGNED(ref, kPageSize);
+ };
+ if (!mark_bitmap_->Set(obj, slow_path)) {
// This object was not previously marked.
MarkStackPush(obj);
}
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index f37daa54e9..7a4c025c30 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -282,22 +282,11 @@ void SemiSpace::MarkingPhase() {
}
}
-class SemiSpaceScanObjectVisitor {
- public:
- explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
- void operator()(Object* obj) const REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- DCHECK(obj != nullptr);
- semi_space_->ScanObject(obj);
- }
- private:
- SemiSpace* const semi_space_;
-};
-
// Used to verify that there's no references to the from-space.
-class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
+class SemiSpace::VerifyNoFromSpaceReferencesVisitor {
public:
- explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space) :
- from_space_(from_space) {}
+ explicit VerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
+ : from_space_(from_space) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
@@ -331,23 +320,10 @@ class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
- SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
+ VerifyNoFromSpaceReferencesVisitor visitor(from_space_);
obj->VisitReferences(visitor, VoidFunctor());
}
-class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
- public:
- explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
- void operator()(Object* obj) const
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- DCHECK(obj != nullptr);
- semi_space_->VerifyNoFromSpaceReferences(obj);
- }
-
- private:
- SemiSpace* const semi_space_;
-};
-
void SemiSpace::MarkReachableObjects() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
{
@@ -390,10 +366,12 @@ void SemiSpace::MarkReachableObjects() {
} else {
TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
- SemiSpaceScanObjectVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->End()),
- visitor);
+ [this](mirror::Object* obj)
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ ScanObject(obj);
+ });
}
if (kIsDebugBuild) {
// Verify that there are no from-space references that
@@ -401,10 +379,13 @@ void SemiSpace::MarkReachableObjects() {
// card table) didn't miss any from-space references in the
// space.
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
- SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->End()),
- visitor);
+ [this](Object* obj)
+ SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ DCHECK(obj != nullptr);
+ VerifyNoFromSpaceReferences(obj);
+ });
}
}
}
@@ -424,10 +405,12 @@ void SemiSpace::MarkReachableObjects() {
// classes (primitive array classes) that could move though they
// don't contain any other references.
accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
- SemiSpaceScanObjectVisitor visitor(this);
large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(los->Begin()),
reinterpret_cast<uintptr_t>(los->End()),
- visitor);
+ [this](mirror::Object* obj)
+ REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ ScanObject(obj);
+ });
}
// Recursively process the mark stack.
ProcessMarkStack();
@@ -697,10 +680,9 @@ void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference*
heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
-class SemiSpaceMarkObjectVisitor {
+class SemiSpace::MarkObjectVisitor {
public:
- explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
- }
+ explicit MarkObjectVisitor(SemiSpace* collector) : collector_(collector) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -739,7 +721,7 @@ class SemiSpaceMarkObjectVisitor {
// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
- SemiSpaceMarkObjectVisitor visitor(this);
+ MarkObjectVisitor visitor(this);
obj->VisitReferences(visitor, visitor);
}
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 0199e1ae56..694e536b7d 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -272,7 +272,9 @@ class SemiSpace : public GarbageCollector {
bool swap_semi_spaces_;
private:
- friend class BitmapSetSlowPathVisitor;
+ class BitmapSetSlowPathVisitor;
+ class MarkObjectVisitor;
+ class VerifyNoFromSpaceReferencesVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(SemiSpace);
};
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 1ebe5cc47b..8cadc2e0fc 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1158,6 +1158,80 @@ static bool RelocateInPlace(ImageHeader& image_header,
return true;
}
+static MemMap* LoadImageFile(const char* image_filename,
+ const char* image_location,
+ const ImageHeader& image_header,
+ uint8_t* address,
+ int fd,
+ TimingLogger& logger,
+ std::string* error_msg) {
+ TimingLogger::ScopedTiming timing("MapImageFile", &logger);
+ const ImageHeader::StorageMode storage_mode = image_header.GetStorageMode();
+ if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+ return MemMap::MapFileAtAddress(address,
+ image_header.GetImageSize(),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ fd,
+ 0,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ image_filename,
+ error_msg);
+ }
+
+ if (storage_mode != ImageHeader::kStorageModeLZ4 &&
+ storage_mode != ImageHeader::kStorageModeLZ4HC) {
+ *error_msg = StringPrintf("Invalid storage mode in image header %d",
+ static_cast<int>(storage_mode));
+ return nullptr;
+ }
+
+ // Reserve output and decompress into it.
+ std::unique_ptr<MemMap> map(MemMap::MapAnonymous(image_location,
+ address,
+ image_header.GetImageSize(),
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/true,
+ /*reuse*/false,
+ error_msg));
+ if (map != nullptr) {
+ const size_t stored_size = image_header.GetDataSize();
+ const size_t decompress_offset = sizeof(ImageHeader); // Skip the header.
+ std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
+ PROT_READ,
+ MAP_PRIVATE,
+ fd,
+ /*offset*/0,
+ /*low_4gb*/false,
+ image_filename,
+ error_msg));
+ if (temp_map == nullptr) {
+ DCHECK(!error_msg->empty());
+ return nullptr;
+ }
+ memcpy(map->Begin(), &image_header, sizeof(ImageHeader));
+ const uint64_t start = NanoTime();
+    // LZ4HC and LZ4 share the same internal format; both use LZ4_decompress.
+ TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
+ const size_t decompressed_size = LZ4_decompress_safe(
+ reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
+ reinterpret_cast<char*>(map->Begin()) + decompress_offset,
+ stored_size,
+ map->Size() - decompress_offset);
+ VLOG(image) << "Decompressing image took " << PrettyDuration(NanoTime() - start);
+ if (decompressed_size + sizeof(ImageHeader) != image_header.GetImageSize()) {
+ *error_msg = StringPrintf(
+ "Decompressed size does not match expected image size %zu vs %zu",
+ decompressed_size + sizeof(ImageHeader),
+ image_header.GetImageSize());
+ return nullptr;
+ }
+ }
+
+ return map.release();
+}
+
ImageSpace* ImageSpace::Init(const char* image_filename,
const char* image_location,
bool validate_oat_file,
@@ -1235,91 +1309,30 @@ ImageSpace* ImageSpace::Init(const char* image_filename,
return nullptr;
}
- // The preferred address to map the image, null specifies any address. If we manage to map the
- // image at the image begin, the amount of fixup work required is minimized.
- std::vector<uint8_t*> addresses(1, image_header->GetImageBegin());
- if (image_header->IsPic()) {
- // Can also map at a random low_4gb address since we can relocate in-place.
- addresses.push_back(nullptr);
- }
-
- // Note: The image header is part of the image due to mmap page alignment required of offset.
std::unique_ptr<MemMap> map;
- std::string temp_error_msg;
- for (uint8_t* address : addresses) {
- TimingLogger::ScopedTiming timing("MapImageFile", &logger);
- // Only care about the error message for the last address in addresses. We want to avoid the
- // overhead of printing the process maps if we can relocate.
- std::string* out_error_msg = (address == addresses.back()) ? &temp_error_msg : nullptr;
- const ImageHeader::StorageMode storage_mode = image_header->GetStorageMode();
- if (storage_mode == ImageHeader::kStorageModeUncompressed) {
- map.reset(MemMap::MapFileAtAddress(address,
- image_header->GetImageSize(),
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- file->Fd(),
- 0,
- /*low_4gb*/true,
- /*reuse*/false,
- image_filename,
- /*out*/out_error_msg));
- } else {
- if (storage_mode != ImageHeader::kStorageModeLZ4 &&
- storage_mode != ImageHeader::kStorageModeLZ4HC) {
- *error_msg = StringPrintf("Invalid storage mode in image header %d",
- static_cast<int>(storage_mode));
- return nullptr;
- }
- // Reserve output and decompress into it.
- map.reset(MemMap::MapAnonymous(image_location,
- address,
- image_header->GetImageSize(),
- PROT_READ | PROT_WRITE,
- /*low_4gb*/true,
- /*reuse*/false,
- /*out*/out_error_msg));
- if (map != nullptr) {
- const size_t stored_size = image_header->GetDataSize();
- const size_t decompress_offset = sizeof(ImageHeader); // Skip the header.
- std::unique_ptr<MemMap> temp_map(MemMap::MapFile(sizeof(ImageHeader) + stored_size,
- PROT_READ,
- MAP_PRIVATE,
- file->Fd(),
- /*offset*/0,
- /*low_4gb*/false,
- image_filename,
- out_error_msg));
- if (temp_map == nullptr) {
- DCHECK(!out_error_msg->empty());
- return nullptr;
- }
- memcpy(map->Begin(), image_header, sizeof(ImageHeader));
- const uint64_t start = NanoTime();
- // LZ4HC and LZ4 have same internal format, both use LZ4_decompress.
- TimingLogger::ScopedTiming timing2("LZ4 decompress image", &logger);
- const size_t decompressed_size = LZ4_decompress_safe(
- reinterpret_cast<char*>(temp_map->Begin()) + sizeof(ImageHeader),
- reinterpret_cast<char*>(map->Begin()) + decompress_offset,
- stored_size,
- map->Size() - decompress_offset);
- VLOG(image) << "Decompressing image took " << PrettyDuration(NanoTime() - start);
- if (decompressed_size + sizeof(ImageHeader) != image_header->GetImageSize()) {
- *error_msg = StringPrintf(
- "Decompressed size does not match expected image size %zu vs %zu",
- decompressed_size + sizeof(ImageHeader),
- image_header->GetImageSize());
- return nullptr;
- }
- }
- }
- if (map != nullptr) {
- break;
- }
- }
-
+ // GetImageBegin is the preferred address to map the image. If we manage to map the
+ // image at the image begin, the amount of fixup work required is minimized.
+ map.reset(LoadImageFile(image_filename,
+ image_location,
+ *image_header,
+ image_header->GetImageBegin(),
+ file->Fd(),
+ logger,
+ error_msg));
+ // If the header specifies PIC mode, we can also map at a random low_4gb address since we can
+ // relocate in-place.
+ if (map == nullptr && image_header->IsPic()) {
+ map.reset(LoadImageFile(image_filename,
+ image_location,
+ *image_header,
+ /* address */ nullptr,
+ file->Fd(),
+ logger,
+ error_msg));
+ }
+ // Bail out if neither mapping attempt succeeded.
if (map == nullptr) {
- DCHECK(!temp_error_msg.empty());
- *error_msg = temp_error_msg;
+ DCHECK(!error_msg->empty());
return nullptr;
}
DCHECK_EQ(0, memcmp(image_header, map->Begin(), sizeof(ImageHeader)));
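
The refactoring above hoists the old per-address loop into LoadImageFile and tries at most two addresses: the image's preferred base first, then (for PIC images only) an arbitrary low-4GB address. A minimal sketch of that ordering, with hypothetical stand-ins for the MemMap machinery:

    #include <cstdint>
    #include <memory>

    struct Image {
      uint8_t* preferred_address;  // address baked into the image header
      bool is_pic;                 // can the image be relocated in place?
    };

    // Stand-in for LoadImageFile; here, pretend fixed-address mapping always fails.
    static std::unique_ptr<int> MapAt(const Image&, uint8_t* address) {
      return address == nullptr ? std::unique_ptr<int>(new int(0)) : nullptr;
    }

    std::unique_ptr<int> Load(const Image& image) {
      // First attempt: the preferred base, which minimizes pointer fixup work.
      std::unique_ptr<int> map = MapAt(image, image.preferred_address);
      // Second attempt: only a PIC image may live at an arbitrary address.
      if (map == nullptr && image.is_pic) {
        map = MapAt(image, /*address=*/nullptr);
      }
      return map;  // Null means both attempts failed.
    }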
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index d983a9fa19..c644cde5db 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -74,6 +74,10 @@ class SharedLibrary {
if (self != nullptr) {
self->GetJniEnv()->DeleteWeakGlobalRef(class_loader_);
}
+
+ if (!needs_native_bridge_) {
+ android::CloseNativeLibrary(handle_);
+ }
}
jweak GetClassLoader() const {
@@ -271,8 +275,7 @@ class Libraries {
REQUIRES(!Locks::jni_libraries_lock_)
SHARED_REQUIRES(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- typedef void (*JNI_OnUnloadFn)(JavaVM*, void*);
- std::vector<JNI_OnUnloadFn> unload_functions;
+ std::vector<SharedLibrary*> unload_libraries;
{
MutexLock mu(soa.Self(), *Locks::jni_libraries_lock_);
for (auto it = libraries_.begin(); it != libraries_.end(); ) {
@@ -283,15 +286,7 @@ class Libraries {
// the native libraries of the boot class loader.
if (class_loader != nullptr &&
soa.Self()->IsJWeakCleared(class_loader)) {
- void* const sym = library->FindSymbol("JNI_OnUnload", nullptr);
- if (sym == nullptr) {
- VLOG(jni) << "[No JNI_OnUnload found in \"" << library->GetPath() << "\"]";
- } else {
- VLOG(jni) << "[JNI_OnUnload found for \"" << library->GetPath() << "\"]";
- JNI_OnUnloadFn jni_on_unload = reinterpret_cast<JNI_OnUnloadFn>(sym);
- unload_functions.push_back(jni_on_unload);
- }
- delete library;
+ unload_libraries.push_back(library);
it = libraries_.erase(it);
} else {
++it;
@@ -299,9 +294,17 @@ class Libraries {
}
}
// Do this without holding the jni libraries lock to prevent possible deadlocks.
- for (JNI_OnUnloadFn fn : unload_functions) {
- VLOG(jni) << "Calling JNI_OnUnload";
- (*fn)(soa.Vm(), nullptr);
+ typedef void (*JNI_OnUnloadFn)(JavaVM*, void*);
+ for (auto library : unload_libraries) {
+ void* const sym = library->FindSymbol("JNI_OnUnload", nullptr);
+ if (sym == nullptr) {
+ VLOG(jni) << "[No JNI_OnUnload found in \"" << library->GetPath() << "\"]";
+ } else {
+ VLOG(jni) << "[JNI_OnUnload found for \"" << library->GetPath() << "\"]: Calling...";
+ JNI_OnUnloadFn jni_on_unload = reinterpret_cast<JNI_OnUnloadFn>(sym);
+ jni_on_unload(soa.Vm(), nullptr);
+ }
+ delete library;
}
}
@@ -739,8 +742,14 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env,
// As the incoming class loader is reachable/alive during the call of this function,
// it's okay to decode it without worrying about unexpectedly marking it alive.
mirror::ClassLoader* loader = soa.Decode<mirror::ClassLoader*>(class_loader);
- class_loader_allocator =
- Runtime::Current()->GetClassLinker()->GetAllocatorForClassLoader(loader);
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (class_linker->IsBootClassLoader(soa, loader)) {
+ loader = nullptr;
+ class_loader = nullptr;
+ }
+
+ class_loader_allocator = class_linker->GetAllocatorForClassLoader(loader);
CHECK(class_loader_allocator != nullptr);
}
if (library != nullptr) {
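
The rewrite above moves both the JNI_OnUnload lookup and the library deletion out from under Locks::jni_libraries_lock_, since JNI_OnUnload can re-enter the runtime and the SharedLibrary destructor now also closes the native handle. A standalone sketch of that collect-under-lock, call-outside-lock shape (Library, the globals, and Unloadable are illustrative, not ART code):

    #include <dlfcn.h>
    #include <mutex>
    #include <vector>

    struct Library {
      void* handle;
      bool Unloadable() const { return true; }  // stand-in for the weak-ref check
    };

    std::mutex g_lock;
    std::vector<Library*> g_libraries;

    void UnloadDeadLibraries(void* vm) {
      std::vector<Library*> to_unload;
      {
        std::lock_guard<std::mutex> mu(g_lock);  // only list surgery under the lock
        for (auto it = g_libraries.begin(); it != g_libraries.end();) {
          if ((*it)->Unloadable()) {
            to_unload.push_back(*it);
            it = g_libraries.erase(it);
          } else {
            ++it;
          }
        }
      }
      // JNI_OnUnload may re-enter the runtime, so call it without holding g_lock.
      using JNI_OnUnloadFn = void (*)(void*, void*);
      for (Library* library : to_unload) {
        if (void* sym = dlsym(library->handle, "JNI_OnUnload")) {
          reinterpret_cast<JNI_OnUnloadFn>(sym)(vm, nullptr);
        }
        delete library;  // destructor also closes the native handle
      }
    }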
diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc
index c99d3636a1..5039d2de07 100644
--- a/runtime/jit/offline_profiling_info.cc
+++ b/runtime/jit/offline_profiling_info.cc
@@ -19,6 +19,7 @@
#include "errno.h"
#include <limits.h>
#include <vector>
+#include <stdlib.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/uio.h>
@@ -40,6 +41,11 @@ const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '1', '\0'
static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
+// Debug flag to ignore checksums when testing if a method or a class is present in the profile.
+// Used to facilitate testing profile-guided compilation across a large number of apps
+// using the same test profile.
+static constexpr bool kDebugIgnoreChecksum = false;
+
// Transform the actual dex location into relative paths.
// Note: this is OK because we don't store profiles of different apps into the same file.
// Apps with split apks don't cause trouble because each split has a different name and will not
@@ -547,10 +553,14 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
return true;
}
+static bool ChecksumMatch(const DexFile& dex_file, uint32_t checksum) {
+ return kDebugIgnoreChecksum || dex_file.GetLocationChecksum() == checksum;
+}
+
bool ProfileCompilationInfo::ContainsMethod(const MethodReference& method_ref) const {
auto info_it = info_.find(GetProfileDexFileKey(method_ref.dex_file->GetLocation()));
if (info_it != info_.end()) {
- if (method_ref.dex_file->GetLocationChecksum() != info_it->second.checksum) {
+ if (!ChecksumMatch(*method_ref.dex_file, info_it->second.checksum)) {
return false;
}
const std::set<uint16_t>& methods = info_it->second.method_set;
@@ -562,7 +572,7 @@ bool ProfileCompilationInfo::ContainsMethod(const MethodReference& method_ref) c
bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, uint16_t class_def_idx) const {
auto info_it = info_.find(GetProfileDexFileKey(dex_file.GetLocation()));
if (info_it != info_.end()) {
- if (dex_file.GetLocationChecksum() != info_it->second.checksum) {
+ if (!ChecksumMatch(dex_file, info_it->second.checksum)) {
return false;
}
const std::set<uint16_t>& classes = info_it->second.class_set;
@@ -659,4 +669,47 @@ void ProfileCompilationInfo::ClearResolvedClasses() {
}
}
+// Naive implementation to generate a random profile file suitable for testing.
+bool ProfileCompilationInfo::GenerateTestProfile(int fd,
+ uint16_t number_of_dex_files,
+ uint16_t method_ratio,
+ uint16_t class_ratio) {
+ const std::string base_dex_location = "base.apk";
+ ProfileCompilationInfo info;
+ // The limits are defined by the dex specification.
+ uint16_t max_method = std::numeric_limits<uint16_t>::max();
+ uint16_t max_classes = std::numeric_limits<uint16_t>::max();
+ uint16_t number_of_methods = max_method * method_ratio / 100;
+ uint16_t number_of_classes = max_classes * class_ratio / 100;
+
+ srand(MicroTime());
+
+ // Make sure we generate more samples with a low index value.
+ // This makes it more likely to hit valid method/class indices in small apps.
+ const uint16_t kFavorFirstN = 10000;
+ const uint16_t kFavorSplit = 2;
+
+ for (uint16_t i = 0; i < number_of_dex_files; i++) {
+ std::string dex_location = DexFile::GetMultiDexLocation(i, base_dex_location.c_str());
+ std::string profile_key = GetProfileDexFileKey(dex_location);
+
+ for (uint16_t m = 0; m < number_of_methods; m++) {
+ uint16_t method_idx = rand() % max_method;
+ if (m < (number_of_methods / kFavorSplit)) {
+ method_idx %= kFavorFirstN;
+ }
+ info.AddMethodIndex(profile_key, 0, method_idx);
+ }
+
+ for (uint16_t c = 0; c < number_of_classes; c++) {
+ uint16_t class_idx = rand() % max_classes;
+ if (c < (number_of_classes / kFavorSplit)) {
+ class_idx %= kFavorFirstN;
+ }
+ info.AddClassIndex(profile_key, 0, class_idx);
+ }
+ }
+ return info.Save(fd);
+}
+
} // namespace art
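
GenerateTestProfile above draws random method and class indices but folds the first half of the draws into the low kFavorFirstN indices, so small apps with few methods still get usable hits. A minimal sketch of that biased draw:

    #include <cstdint>
    #include <cstdlib>
    #include <ctime>
    #include <iostream>

    int main() {
      std::srand(static_cast<unsigned>(std::time(nullptr)));
      const uint16_t kMaxIndex = UINT16_MAX;  // dex index space limit
      const uint16_t kFavorFirstN = 10000;    // favored low-index range
      const int kSamples = 8;
      for (int i = 0; i < kSamples; ++i) {
        uint16_t idx = static_cast<uint16_t>(std::rand() % kMaxIndex);
        if (i < kSamples / 2) {               // kFavorSplit == 2 in the change above
          idx %= kFavorFirstN;                // fold into [0, kFavorFirstN)
        }
        std::cout << idx << "\n";
      }
      return 0;
    }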
diff --git a/runtime/jit/offline_profiling_info.h b/runtime/jit/offline_profiling_info.h
index 5a07da79a1..0b26f9bd0c 100644
--- a/runtime/jit/offline_profiling_info.h
+++ b/runtime/jit/offline_profiling_info.h
@@ -87,6 +87,11 @@ class ProfileCompilationInfo {
// Clears the resolved classes from the current object.
void ClearResolvedClasses();
+ static bool GenerateTestProfile(int fd,
+ uint16_t number_of_dex_files,
+ uint16_t method_ratio,
+ uint16_t class_ratio);
+
private:
enum ProfileLoadSatus {
kProfileLoadIOError,
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 771f8ed290..c047ba20f5 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -157,6 +157,8 @@ bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string*
}
return false;
}
+
+ ScopedBacktraceMapIteratorLock lock(map.get());
for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
if ((begin >= it->start && begin < it->end) // start of new within old
&& (end > it->start && end <= it->end)) { // end of new within old
@@ -180,6 +182,7 @@ static bool CheckNonOverlapping(uintptr_t begin,
*error_msg = StringPrintf("Failed to build process map");
return false;
}
+ ScopedBacktraceMapIteratorLock lock(map.get());
for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
if ((begin >= it->start && begin < it->end) // start of new within old
|| (end > it->start && end < it->end) // end of new within old
@@ -339,7 +342,9 @@ MemMap* MemMap::MapAnonymous(const char* name,
if (actual == MAP_FAILED) {
if (error_msg != nullptr) {
- PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+ if (kIsDebugBuild || VLOG_IS_ON(oat)) {
+ PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
+ }
*error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
"See process maps in the log.",
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 0d95bb175b..b783a019e7 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -521,7 +521,7 @@ template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline MemberOffset Class::GetFirstReferenceInstanceFieldOffset() {
Class* super_class = GetSuperClass<kVerifyFlags, kReadBarrierOption>();
return (super_class != nullptr)
- ? MemberOffset(RoundUp(super_class->GetObjectSize(),
+ ? MemberOffset(RoundUp(super_class->GetObjectSize<kVerifyFlags, kReadBarrierOption>(),
sizeof(mirror::HeapReference<mirror::Object>)))
: ClassOffset();
}
@@ -766,7 +766,8 @@ inline void Class::VisitReferences(mirror::Class* klass, const Visitor& visitor)
}
if (kVisitNativeRoots) {
// Since this class is reachable, we must also visit the associated roots when we scan it.
- VisitNativeRoots(visitor, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+ VisitNativeRoots<kReadBarrierOption>(
+ visitor, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
}
}
@@ -905,24 +906,24 @@ inline GcRoot<String>* Class::GetDexCacheStrings() {
return GetFieldPtr<GcRoot<String>*>(DexCacheStringsOffset());
}
-template<class Visitor>
+template<ReadBarrierOption kReadBarrierOption, class Visitor>
void mirror::Class::VisitNativeRoots(Visitor& visitor, size_t pointer_size) {
for (ArtField& field : GetSFieldsUnchecked()) {
// Visit roots first in case the declaring class gets moved.
field.VisitRoots(visitor);
if (kIsDebugBuild && IsResolved()) {
- CHECK_EQ(field.GetDeclaringClass(), this) << GetStatus();
+ CHECK_EQ(field.GetDeclaringClass<kReadBarrierOption>(), this) << GetStatus();
}
}
for (ArtField& field : GetIFieldsUnchecked()) {
// Visit roots first in case the declaring class gets moved.
field.VisitRoots(visitor);
if (kIsDebugBuild && IsResolved()) {
- CHECK_EQ(field.GetDeclaringClass(), this) << GetStatus();
+ CHECK_EQ(field.GetDeclaringClass<kReadBarrierOption>(), this) << GetStatus();
}
}
for (ArtMethod& method : GetMethods(pointer_size)) {
- method.VisitRoots(visitor, pointer_size);
+ method.VisitRoots<kReadBarrierOption>(visitor, pointer_size);
}
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index b1abf948ab..9670accf56 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1152,7 +1152,7 @@ class MANAGED Class FINAL : public Object {
// Visit native roots visits roots which are keyed off the native pointers such as ArtFields and
// ArtMethods.
- template<class Visitor>
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
void VisitNativeRoots(Visitor& visitor, size_t pointer_size)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 628554225c..96f2098ab0 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -20,6 +20,7 @@
#include "array.h"
#include "base/bit_utils.h"
#include "class.h"
+#include "common_throws.h"
#include "gc/heap-inl.h"
#include "globals.h"
#include "intern_table.h"
@@ -33,7 +34,7 @@ namespace art {
namespace mirror {
inline uint32_t String::ClassSize(size_t pointer_size) {
- uint32_t vtable_entries = Object::kVTableLength + 56;
+ uint32_t vtable_entries = Object::kVTableLength + 57;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 2, pointer_size);
}
@@ -134,9 +135,7 @@ inline String* String::Intern() {
inline uint16_t String::CharAt(int32_t index) {
int32_t count = GetField32(OFFSET_OF_OBJECT_MEMBER(String, count_));
if (UNLIKELY((index < 0) || (index >= count))) {
- Thread* self = Thread::Current();
- self->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
- "length=%i; index=%i", count, index);
+ ThrowStringIndexOutOfBoundsException(index, count);
return 0;
}
return GetValue()[index];
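
CharAt above now delegates exception formatting to the shared ThrowStringIndexOutOfBoundsException helper instead of building the message inline. A sketch of the same bounds-check-and-delegate shape in standalone C++, with std::out_of_range standing in for the Java exception:

    #include <cstdint>
    #include <stdexcept>
    #include <string>

    [[noreturn]] static void ThrowIndexOutOfBounds(int32_t index, int32_t length) {
      // One shared formatting site, mirroring the helper used above.
      throw std::out_of_range("length=" + std::to_string(length) +
                              "; index=" + std::to_string(index));
    }

    uint16_t CharAt(const std::u16string& s, int32_t index) {
      const int32_t count = static_cast<int32_t>(s.size());
      if (index < 0 || index >= count) {
        ThrowIndexOutOfBounds(index, count);
      }
      return s[static_cast<size_t>(index)];
    }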
diff --git a/runtime/oat.h b/runtime/oat.h
index 57675dc738..6243660494 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '7', '9', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '8', '2', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 62c723e76f..61dc287927 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -1167,12 +1167,15 @@ size_t OatFile::OatDexFile::FileSize() const {
std::unique_ptr<const DexFile> OatFile::OatDexFile::OpenDexFile(std::string* error_msg) const {
ScopedTrace trace(__PRETTY_FUNCTION__);
+ static constexpr bool kVerify = false;
+ static constexpr bool kVerifyChecksum = false;
return DexFile::Open(dex_file_pointer_,
FileSize(),
dex_file_location_,
dex_file_location_checksum_,
this,
- false /* verify */,
+ kVerify,
+ kVerifyChecksum,
error_msg);
}
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index c79a9a67b3..e3cc77f2c0 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -56,12 +56,11 @@ class OatFileAssistantTest : public CommonRuntimeTest {
odex_dir_ = odex_oat_dir_ + "/" + std::string(GetInstructionSetString(kRuntimeISA));
ASSERT_EQ(0, mkdir(odex_dir_.c_str(), 0700));
-
// Verify the environment is as we expect
uint32_t checksum;
std::string error_msg;
- ASSERT_TRUE(OS::FileExists(GetImageFile().c_str()))
- << "Expected pre-compiled boot image to be at: " << GetImageFile();
+ ASSERT_TRUE(OS::FileExists(GetSystemImageFile().c_str()))
+ << "Expected pre-compiled boot image to be at: " << GetSystemImageFile();
ASSERT_TRUE(OS::FileExists(GetDexSrc1().c_str()))
<< "Expected dex file to be at: " << GetDexSrc1();
ASSERT_TRUE(OS::FileExists(GetStrippedDexSrc1().c_str()))
@@ -73,20 +72,42 @@ class OatFileAssistantTest : public CommonRuntimeTest {
// GetMultiDexSrc2 should have the same primary dex checksum as
// GetMultiDexSrc1, but a different secondary dex checksum.
+ static constexpr bool kVerifyChecksum = true;
std::vector<std::unique_ptr<const DexFile>> multi1;
ASSERT_TRUE(DexFile::Open(GetMultiDexSrc1().c_str(),
- GetMultiDexSrc1().c_str(), &error_msg, &multi1)) << error_msg;
+ GetMultiDexSrc1().c_str(), kVerifyChecksum, &error_msg, &multi1)) << error_msg;
ASSERT_GT(multi1.size(), 1u);
std::vector<std::unique_ptr<const DexFile>> multi2;
ASSERT_TRUE(DexFile::Open(GetMultiDexSrc2().c_str(),
- GetMultiDexSrc2().c_str(), &error_msg, &multi2)) << error_msg;
+ GetMultiDexSrc2().c_str(), kVerifyChecksum, &error_msg, &multi2)) << error_msg;
ASSERT_GT(multi2.size(), 1u);
ASSERT_EQ(multi1[0]->GetLocationChecksum(), multi2[0]->GetLocationChecksum());
ASSERT_NE(multi1[1]->GetLocationChecksum(), multi2[1]->GetLocationChecksum());
}
+ // Pre-relocate the image to a known non-zero offset so we don't have to
+ // deal with the runtime randomly relocating the image by a delta of zero
+ // and messing up the expected results of the tests.
+ bool PreRelocateImage(std::string* error_msg) {
+ std::string image;
+ if (!GetCachedImageFile(&image, error_msg)) {
+ return false;
+ }
+
+ std::string patchoat = GetAndroidRoot();
+ patchoat += kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat";
+
+ std::vector<std::string> argv;
+ argv.push_back(patchoat);
+ argv.push_back("--input-image-location=" + GetImageLocation());
+ argv.push_back("--output-image-file=" + image);
+ argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(kRuntimeISA)));
+ argv.push_back("--base-offset-delta=0x00008000");
+ return Exec(argv, error_msg);
+ }
+
virtual void SetUpRuntimeOptions(RuntimeOptions* options) {
// options->push_back(std::make_pair("-verbose:oat", nullptr));
@@ -99,6 +120,9 @@ class OatFileAssistantTest : public CommonRuntimeTest {
}
virtual void PreRuntimeCreate() {
+ std::string error_msg;
+ ASSERT_TRUE(PreRelocateImage(&error_msg)) << error_msg;
+
UnreserveImageSpace();
}
@@ -144,11 +168,16 @@ class OatFileAssistantTest : public CommonRuntimeTest {
return GetImageDirectory() + "/core.art";
}
- std::string GetImageFile() {
+ std::string GetSystemImageFile() {
return GetImageDirectory() + "/" + GetInstructionSetString(kRuntimeISA)
+ "/core.art";
}
+ bool GetCachedImageFile(/*out*/std::string* image, std::string* error_msg) {
+ std::string cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA), true);
+ return GetDalvikCacheFilename(GetImageLocation().c_str(), cache.c_str(), image, error_msg);
+ }
+
std::string GetDexSrc1() {
return GetTestDexFileName("Main");
}
@@ -189,34 +218,31 @@ class OatFileAssistantTest : public CommonRuntimeTest {
// The generated odex file will be un-relocated.
void GenerateOdexForTest(const std::string& dex_location,
const std::string& odex_location,
- CompilerFilter::Filter filter) {
- // To generate an un-relocated odex file, we first compile a relocated
- // version of the file, then manually call patchoat to make it look as if
- // it is unrelocated.
- std::string relocated_odex_location = odex_location + ".relocated";
+ CompilerFilter::Filter filter,
+ bool pic = false,
+ bool with_patch_info = true) {
+ // Temporarily redirect the dalvik cache so dex2oat doesn't find the
+ // relocated image file.
+ std::string android_data_tmp = GetScratchDir() + "AndroidDataTmp";
+ setenv("ANDROID_DATA", android_data_tmp.c_str(), 1);
std::vector<std::string> args;
args.push_back("--dex-file=" + dex_location);
- args.push_back("--oat-file=" + relocated_odex_location);
+ args.push_back("--oat-file=" + odex_location);
args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
+ args.push_back("--runtime-arg");
+ args.push_back("-Xnorelocate");
- // We need to use the quick compiler to generate non-PIC code, because
- // the optimizing compiler always generates PIC.
- args.push_back("--compiler-backend=Quick");
- args.push_back("--include-patch-information");
+ if (pic) {
+ args.push_back("--compile-pic");
+ }
+
+ if (with_patch_info) {
+ args.push_back("--include-patch-information");
+ }
std::string error_msg;
ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
-
- // Use patchoat to unrelocate the relocated odex file.
- Runtime* runtime = Runtime::Current();
- std::vector<std::string> argv;
- argv.push_back(runtime->GetPatchoatExecutable());
- argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(kRuntimeISA)));
- argv.push_back("--input-oat-file=" + relocated_odex_location);
- argv.push_back("--output-oat-file=" + odex_location);
- argv.push_back("--base-offset-delta=0x00008000");
- std::string command_line(Join(argv, ' '));
- ASSERT_TRUE(Exec(argv, &error_msg)) << error_msg;
+ setenv("ANDROID_DATA", android_data_.c_str(), 1);
// Verify the odex file was generated as expected and really is
// unrelocated.
@@ -229,13 +255,13 @@ class OatFileAssistantTest : public CommonRuntimeTest {
dex_location.c_str(),
&error_msg));
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
- EXPECT_FALSE(odex_file->IsPic());
- EXPECT_TRUE(odex_file->HasPatchInfo());
+ EXPECT_EQ(pic, odex_file->IsPic());
+ EXPECT_EQ(with_patch_info, odex_file->HasPatchInfo());
EXPECT_EQ(filter, odex_file->GetCompilerFilter());
if (CompilerFilter::IsBytecodeCompilationEnabled(filter)) {
const std::vector<gc::space::ImageSpace*> image_spaces =
- runtime->GetHeap()->GetBootImageSpaces();
+ Runtime::Current()->GetHeap()->GetBootImageSpaces();
ASSERT_TRUE(!image_spaces.empty() && image_spaces[0] != nullptr);
const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
const OatHeader& oat_header = odex_file->GetOatHeader();
@@ -250,71 +276,15 @@ class OatFileAssistantTest : public CommonRuntimeTest {
void GeneratePicOdexForTest(const std::string& dex_location,
const std::string& odex_location,
CompilerFilter::Filter filter) {
- // Temporarily redirect the dalvik cache so dex2oat doesn't find the
- // relocated image file.
- std::string android_data_tmp = GetScratchDir() + "AndroidDataTmp";
- setenv("ANDROID_DATA", android_data_tmp.c_str(), 1);
- std::vector<std::string> args;
- args.push_back("--dex-file=" + dex_location);
- args.push_back("--oat-file=" + odex_location);
- args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
- args.push_back("--compile-pic");
- args.push_back("--runtime-arg");
- args.push_back("-Xnorelocate");
- std::string error_msg;
- ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
- setenv("ANDROID_DATA", android_data_.c_str(), 1);
-
- // Verify the odex file was generated as expected.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
- odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- dex_location.c_str(),
- &error_msg));
- ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
- EXPECT_TRUE(odex_file->IsPic());
- EXPECT_EQ(filter, odex_file->GetCompilerFilter());
+ GenerateOdexForTest(dex_location, odex_location, filter, true, false);
}
// Generate a non-PIC odex file without patch information for the purposes
// of test. The generated odex file will be un-relocated.
- // TODO: This won't work correctly if we depend on the boot image being
- // randomly relocated by a non-zero amount. We should have a better solution
- // for avoiding that flakiness and duplicating code to generate odex and oat
- // files for test.
void GenerateNoPatchOdexForTest(const std::string& dex_location,
const std::string& odex_location,
CompilerFilter::Filter filter) {
- // Temporarily redirect the dalvik cache so dex2oat doesn't find the
- // relocated image file.
- std::string android_data_tmp = GetScratchDir() + "AndroidDataTmp";
- setenv("ANDROID_DATA", android_data_tmp.c_str(), 1);
- std::vector<std::string> args;
- args.push_back("--dex-file=" + dex_location);
- args.push_back("--oat-file=" + odex_location);
- args.push_back("--compiler-filter=" + CompilerFilter::NameOfFilter(filter));
- args.push_back("--runtime-arg");
- args.push_back("-Xnorelocate");
- std::string error_msg;
- ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
- setenv("ANDROID_DATA", android_data_.c_str(), 1);
-
- // Verify the odex file was generated as expected.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
- odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- dex_location.c_str(),
- &error_msg));
- ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
- EXPECT_FALSE(odex_file->IsPic());
- EXPECT_FALSE(odex_file->HasPatchInfo());
- EXPECT_EQ(filter, odex_file->GetCompilerFilter());
+ GenerateOdexForTest(dex_location, odex_location, filter, false, false);
}
private:
@@ -326,11 +296,10 @@ class OatFileAssistantTest : public CommonRuntimeTest {
MemMap::Init();
// Ensure a chunk of memory is reserved for the image space.
- uintptr_t reservation_start = ART_BASE_ADDRESS + ART_BASE_ADDRESS_MIN_DELTA;
- uintptr_t reservation_end = ART_BASE_ADDRESS + ART_BASE_ADDRESS_MAX_DELTA
- // Include the main space that has to come right after the
- // image in case of the GSS collector.
- + 384 * MB;
+ // The reservation_end includes room for the main space that has to come
+ // right after the image in case of the GSS collector.
+ uintptr_t reservation_start = ART_BASE_ADDRESS;
+ uintptr_t reservation_end = ART_BASE_ADDRESS + 384 * MB;
std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
ASSERT_TRUE(map.get() != nullptr) << "Failed to build process map";
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index fbae1daf43..b7e604036e 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -696,7 +696,9 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (dex_files.empty()) {
if (oat_file_assistant.HasOriginalDexFiles()) {
if (Runtime::Current()->IsDexFileFallbackEnabled()) {
- if (!DexFile::Open(dex_location, dex_location, /*out*/ &error_msg, &dex_files)) {
+ static constexpr bool kVerifyChecksum = true;
+ if (!DexFile::Open(
+ dex_location, dex_location, kVerifyChecksum, /*out*/ &error_msg, &dex_files)) {
LOG(WARNING) << error_msg;
error_msgs->push_back("Failed to open dex files from " + std::string(dex_location)
+ " because: " + error_msg);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index caf554550e..40e1b1363a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -883,12 +883,13 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
for (size_t i = 0; i < dex_filenames.size(); i++) {
const char* dex_filename = dex_filenames[i].c_str();
const char* dex_location = dex_locations[i].c_str();
+ static constexpr bool kVerifyChecksum = true;
std::string error_msg;
if (!OS::FileExists(dex_filename)) {
LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
continue;
}
- if (!DexFile::Open(dex_filename, dex_location, &error_msg, dex_files)) {
+ if (!DexFile::Open(dex_filename, dex_location, kVerifyChecksum, &error_msg, dex_files)) {
LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
++failure_count;
}
diff --git a/runtime/signal_set.h b/runtime/signal_set.h
index c272514f61..6f888525cb 100644
--- a/runtime/signal_set.h
+++ b/runtime/signal_set.h
@@ -38,8 +38,8 @@ class SignalSet {
}
void Block() {
- if (sigprocmask(SIG_BLOCK, &set_, nullptr) == -1) {
- PLOG(FATAL) << "sigprocmask failed";
+ if (pthread_sigmask(SIG_BLOCK, &set_, nullptr) != 0) {
+ PLOG(FATAL) << "pthread_sigmask failed";
}
}
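
sigprocmask has unspecified behavior in multithreaded processes; pthread_sigmask is the per-thread equivalent. Unlike sigprocmask, it reports failures through its return value rather than errno, so the PLOG above will print whatever errno happens to hold. A minimal usage sketch:

    #include <csignal>
    #include <cstdio>
    #include <pthread.h>

    int main() {
      sigset_t set;
      sigemptyset(&set);
      sigaddset(&set, SIGUSR1);
      // pthread_sigmask returns an error number directly (it does not set errno).
      const int rc = pthread_sigmask(SIG_BLOCK, &set, /*oldset=*/nullptr);
      if (rc != 0) {
        std::fprintf(stderr, "pthread_sigmask failed: %d\n", rc);
        return 1;
      }
      return 0;
    }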
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 7c50f97d39..4647d67699 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -19,6 +19,7 @@
#include "base/bit_vector.h"
#include "base/bit_utils.h"
+#include "dex_file.h"
#include "memory_region.h"
#include "leb128.h"
@@ -892,7 +893,11 @@ class InlineInfoEncoding {
total_bit_size_ += MinimumBitsToStore(method_index_max);
dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
- total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
+ // Note: We're not encoding the dex pc if there is none. That's the case
+ // for an intrinsified native method, such as String.charAt().
+ if (dex_pc_max != DexFile::kDexNoIndex) {
+ total_bit_size_ += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
+ }
invoke_type_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
total_bit_size_ += MinimumBitsToStore(invoke_type_max);
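
The change above only reserves dex-pc bits when some inlined frame actually carries a dex pc (dex_pc_max != DexFile::kDexNoIndex); an absent field then contributes zero bits to the encoding. A sketch of the width computation this packing relies on (a plausible reading of MinimumBitsToStore, not the ART implementation itself):

    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    static constexpr size_t MinimumBitsToStore(uint32_t max_value) {
      size_t bits = 0;
      while (max_value != 0) {
        ++bits;
        max_value >>= 1;
      }
      return bits;  // 0 for max_value == 0, i.e. an absent field costs nothing.
    }

    int main() {
      std::cout << MinimumBitsToStore(0) << "\n";    // 0 bits
      std::cout << MinimumBitsToStore(1) << "\n";    // 1 bit
      std::cout << MinimumBitsToStore(255) << "\n";  // 8 bits
      return 0;
    }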
diff --git a/runtime/thread.cc b/runtime/thread.cc
index f1f4a122b4..b9ee4421b9 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -924,10 +924,22 @@ bool Thread::InitStackHwm() {
Runtime* runtime = Runtime::Current();
bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
+
+ // Valgrind on arm doesn't give the right values here. Do not install the guard page, and
+ // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
+ // stack_begin to 0.
+ const bool valgrind_on_arm =
+ (kRuntimeISA == kArm || kRuntimeISA == kArm64) &&
+ kMemoryToolIsValgrind &&
+ RUNNING_ON_MEMORY_TOOL != 0;
+ if (valgrind_on_arm) {
+ tlsPtr_.stack_begin = nullptr;
+ }
+
ResetDefaultStackEnd();
// Install the protected region if we are doing implicit overflow checks.
- if (implicit_stack_check) {
+ if (implicit_stack_check && !valgrind_on_arm) {
// The thread might have protected region at the bottom. We need
// to install our own region so we need to move the limits
// of the stack to make room for it.
@@ -1122,32 +1134,36 @@ void Thread::ClearSuspendBarrier(AtomicInteger* target) {
}
void Thread::RunCheckpointFunction() {
- Closure *checkpoints[kMaxCheckpoints];
-
- // Grab the suspend_count lock and copy the current set of
- // checkpoints. Then clear the list and the flag. The RequestCheckpoint
- // function will also grab this lock so we prevent a race between setting
- // the kCheckpointRequest flag and clearing it.
- {
- MutexLock mu(this, *Locks::thread_suspend_count_lock_);
- for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
- checkpoints[i] = tlsPtr_.checkpoint_functions[i];
- tlsPtr_.checkpoint_functions[i] = nullptr;
+ bool done = false;
+ do {
+ // Grab the suspend_count lock and copy the checkpoints one by one. When the last checkpoint is
+ // copied, clear the list and the flag. The RequestCheckpoint function will also grab this lock
+ // to prevent a race between setting the kCheckpointRequest flag and clearing it.
+ Closure* checkpoint = nullptr;
+ {
+ MutexLock mu(this, *Locks::thread_suspend_count_lock_);
+ if (tlsPtr_.checkpoint_function != nullptr) {
+ checkpoint = tlsPtr_.checkpoint_function;
+ if (!checkpoint_overflow_.empty()) {
+ // Overflow list not empty, copy the first one out and continue.
+ tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
+ checkpoint_overflow_.pop_front();
+ } else {
+ // No overflow checkpoints, this means that we are on the last pending checkpoint.
+ tlsPtr_.checkpoint_function = nullptr;
+ AtomicClearFlag(kCheckpointRequest);
+ done = true;
+ }
+ } else {
+ LOG(FATAL) << "Checkpoint flag set without pending checkpoint";
+ }
}
- AtomicClearFlag(kCheckpointRequest);
- }
- // Outside the lock, run all the checkpoint functions that
- // we collected.
- bool found_checkpoint = false;
- for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
- if (checkpoints[i] != nullptr) {
- ScopedTrace trace("Run checkpoint function");
- checkpoints[i]->Run(this);
- found_checkpoint = true;
- }
- }
- CHECK(found_checkpoint);
+ // Outside the lock, run the checkpoint functions that we collected.
+ ScopedTrace trace("Run checkpoint function");
+ DCHECK(checkpoint != nullptr);
+ checkpoint->Run(this);
+ } while (!done);
}
bool Thread::RequestCheckpoint(Closure* function) {
@@ -1157,20 +1173,6 @@ bool Thread::RequestCheckpoint(Closure* function) {
return false; // Fail, thread is suspended and so can't run a checkpoint.
}
- uint32_t available_checkpoint = kMaxCheckpoints;
- for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
- if (tlsPtr_.checkpoint_functions[i] == nullptr) {
- available_checkpoint = i;
- break;
- }
- }
- if (available_checkpoint == kMaxCheckpoints) {
- // No checkpoint functions available, we can't run a checkpoint
- return false;
- }
- tlsPtr_.checkpoint_functions[available_checkpoint] = function;
-
- // Checkpoint function installed now install flag bit.
// We must be runnable to request a checkpoint.
DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
union StateAndFlags new_state_and_flags;
@@ -1178,11 +1180,13 @@ bool Thread::RequestCheckpoint(Closure* function) {
new_state_and_flags.as_struct.flags |= kCheckpointRequest;
bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
old_state_and_flags.as_int, new_state_and_flags.as_int);
- if (UNLIKELY(!success)) {
- // The thread changed state before the checkpoint was installed.
- CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
- tlsPtr_.checkpoint_functions[available_checkpoint] = nullptr;
- } else {
+ if (success) {
+ // Succeeded setting checkpoint flag, now insert the actual checkpoint.
+ if (tlsPtr_.checkpoint_function == nullptr) {
+ tlsPtr_.checkpoint_function = function;
+ } else {
+ checkpoint_overflow_.push_back(function);
+ }
CHECK_EQ(ReadFlag(kCheckpointRequest), true);
TriggerSuspend();
}
@@ -1624,9 +1628,7 @@ Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupte
std::fill(tlsPtr_.rosalloc_runs,
tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
gc::allocator::RosAlloc::GetDedicatedFullRun());
- for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
- tlsPtr_.checkpoint_functions[i] = nullptr;
- }
+ tlsPtr_.checkpoint_function = nullptr;
for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
tlsPtr_.active_suspend_barriers[i] = nullptr;
}
@@ -1767,9 +1769,8 @@ Thread::~Thread() {
}
CHECK_NE(GetState(), kRunnable);
CHECK_NE(ReadFlag(kCheckpointRequest), true);
- CHECK(tlsPtr_.checkpoint_functions[0] == nullptr);
- CHECK(tlsPtr_.checkpoint_functions[1] == nullptr);
- CHECK(tlsPtr_.checkpoint_functions[2] == nullptr);
+ CHECK(tlsPtr_.checkpoint_function == nullptr);
+ CHECK_EQ(checkpoint_overflow_.size(), 0u);
CHECK(tlsPtr_.flip_function == nullptr);
CHECK_EQ(tls32_.suspended_at_suspend_check, false);
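
The checkpoint rework above replaces the fixed three-slot array with one slot plus an unbounded overflow list: RequestCheckpoint can no longer fail for lack of slots, and RunCheckpointFunction drains one closure per lock acquisition, clearing kCheckpointRequest only once the overflow list is empty. A condensed standalone sketch of that queue discipline, with std::mutex and a plain function pointer standing in for ART's locks and Closure:

    #include <atomic>
    #include <list>
    #include <mutex>

    using Closure = void (*)();

    std::mutex suspend_lock;
    Closure checkpoint_function = nullptr;    // guarded by suspend_lock
    std::list<Closure> checkpoint_overflow;   // guarded by suspend_lock
    std::atomic<bool> checkpoint_requested{false};

    void RequestCheckpoint(Closure fn) {
      std::lock_guard<std::mutex> mu(suspend_lock);
      if (checkpoint_function == nullptr) {
        checkpoint_function = fn;             // fast path: the slot was free
      } else {
        checkpoint_overflow.push_back(fn);    // no fixed capacity any more
      }
      checkpoint_requested.store(true);
    }

    // Precondition (as in the code above): at least one checkpoint is pending.
    void RunCheckpointFunctions() {
      bool done = false;
      do {
        Closure checkpoint = nullptr;
        {
          std::lock_guard<std::mutex> mu(suspend_lock);
          checkpoint = checkpoint_function;
          if (!checkpoint_overflow.empty()) {
            checkpoint_function = checkpoint_overflow.front();
            checkpoint_overflow.pop_front();
          } else {
            checkpoint_function = nullptr;    // last one: clear the flag too
            checkpoint_requested.store(false);
            done = true;
          }
        }
        checkpoint();                         // run outside the lock
      } while (!done);
    }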
diff --git a/runtime/thread.h b/runtime/thread.h
index 3c367ee5b6..9b6a20ec59 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1220,9 +1220,6 @@ class Thread {
static void ThreadExitCallback(void* arg);
- // Maximum number of checkpoint functions.
- static constexpr uint32_t kMaxCheckpoints = 3;
-
// Maximum number of suspend barriers.
static constexpr uint32_t kMaxSuspendBarriers = 3;
@@ -1352,8 +1349,8 @@ class Thread {
instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
- last_no_thread_suspension_cause(nullptr), thread_local_objects(0),
- thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
+ last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
+ thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
mterp_current_ibase(nullptr), mterp_default_ibase(nullptr), mterp_alt_ibase(nullptr),
thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr),
@@ -1452,9 +1449,9 @@ class Thread {
// If no_thread_suspension_ is > 0, what is causing that assertion.
const char* last_no_thread_suspension_cause;
- // Pending checkpoint function or null if non-pending. Installation guarding by
- // Locks::thread_suspend_count_lock_.
- Closure* checkpoint_functions[kMaxCheckpoints];
+ // Pending checkpoint function or null if none is pending. If this checkpoint is set and someone
+ // requests another checkpoint, it goes to the checkpoint overflow list.
+ Closure* checkpoint_function GUARDED_BY(Locks::thread_suspend_count_lock_);
// Pending barriers that require passing or NULL if non-pending. Installation guarding by
// Locks::thread_suspend_count_lock_.
@@ -1468,12 +1465,12 @@ class Thread {
QuickEntryPoints quick_entrypoints;
// Thread-local allocation pointer.
- size_t thread_local_objects;
uint8_t* thread_local_start;
// thread_local_pos and thread_local_end must be consecutive for ldrd and are 8 byte aligned for
// potentially better performance.
uint8_t* thread_local_pos;
uint8_t* thread_local_end;
+ size_t thread_local_objects;
// Mterp jump table bases.
void* mterp_current_ibase;
@@ -1517,6 +1514,9 @@ class Thread {
// Debug disable read barrier count, only is checked for debug builds and only in the runtime.
uint8_t debug_disallow_read_barrier_ = 0;
+ // Pending extra checkpoints if checkpoint_function is already used.
+ std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
+
friend class Dbg; // For SetStateUnsafe.
friend class gc::collector::SemiSpace; // For getting stack traces.
friend class Runtime; // For CreatePeer.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 80b99fcd02..97bcb7d406 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1146,9 +1146,10 @@ void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
void ThreadList::SuspendAllDaemonThreadsForShutdown() {
ScopedTrace trace(__PRETTY_FUNCTION__);
Thread* self = Thread::Current();
- MutexLock mu(self, *Locks::thread_list_lock_);
size_t daemons_left = 0;
- { // Tell all the daemons it's time to suspend.
+ {
+ // Tell all the daemons it's time to suspend.
+ MutexLock mu(self, *Locks::thread_list_lock_);
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
for (const auto& thread : list_) {
// This is only run after all non-daemon threads have exited, so the remainder should all be
@@ -1177,13 +1178,16 @@ void ThreadList::SuspendAllDaemonThreadsForShutdown() {
static constexpr size_t kSleepMicroseconds = 1000;
for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
bool all_suspended = true;
- for (const auto& thread : list_) {
- if (thread != self && thread->GetState() == kRunnable) {
- if (!have_complained) {
- LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
- have_complained = true;
+ {
+ MutexLock mu(self, *Locks::thread_list_lock_);
+ for (const auto& thread : list_) {
+ if (thread != self && thread->GetState() == kRunnable) {
+ if (!have_complained) {
+ LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
+ have_complained = true;
+ }
+ all_suspended = false;
}
- all_suspended = false;
}
}
if (all_suspended) {
@@ -1300,6 +1304,39 @@ void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
}
}
+void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
+ Thread* const self = Thread::Current();
+ std::vector<Thread*> threads_to_visit;
+
+ // Tell threads to suspend and copy them into list.
+ {
+ MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+ for (Thread* thread : list_) {
+ thread->ModifySuspendCount(self, +1, nullptr, false);
+ if (thread == self || thread->IsSuspended()) {
+ threads_to_visit.push_back(thread);
+ } else {
+ thread->ModifySuspendCount(self, -1, nullptr, false);
+ }
+ }
+ }
+
+ // Visit roots without holding thread_list_lock_ and thread_suspend_count_lock_ to prevent lock
+ // order violations.
+ for (Thread* thread : threads_to_visit) {
+ thread->VisitRoots(visitor);
+ }
+
+ // Restore suspend counts.
+ {
+ MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+ for (Thread* thread : threads_to_visit) {
+ thread->ModifySuspendCount(self, -1, nullptr, false);
+ }
+ }
+}
+
void ThreadList::VisitRoots(RootVisitor* visitor) const {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
for (const auto& thread : list_) {
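
VisitRootsForSuspendedThreads above pins every thread by raising its suspend count, immediately releases the ones that are still runnable, visits roots with no thread-list locks held, and then restores the counts. A condensed sketch of that suspend/visit/resume shape, with the suspend machinery reduced to plain integers for illustration:

    #include <mutex>
    #include <vector>

    struct Thread {
      int suspend_count = 0;
      bool suspended = false;
      void VisitRoots() {}  // stand-in for the real root visitor
    };

    std::mutex list_lock;
    std::vector<Thread*> all_threads;

    void VisitRootsForSuspendedThreads(Thread* self) {
      std::vector<Thread*> to_visit;
      {
        std::lock_guard<std::mutex> mu(list_lock);
        for (Thread* t : all_threads) {
          ++t->suspend_count;            // pin the thread...
          if (t == self || t->suspended) {
            to_visit.push_back(t);
          } else {
            --t->suspend_count;          // ...unless it is still running
          }
        }
      }
      for (Thread* t : to_visit) {
        t->VisitRoots();                 // no list locks held here
      }
      {
        std::lock_guard<std::mutex> mu(list_lock);
        for (Thread* t : to_visit) {
          --t->suspend_count;            // restore the counts
        }
      }
    }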
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index df81ad1a7b..49f65e16a7 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -144,6 +144,10 @@ class ThreadList {
void VisitRoots(RootVisitor* visitor) const
SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitRootsForSuspendedThreads(RootVisitor* visitor)
+ REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
// Return a copy of the thread list.
std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
return list_;