Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.bp  1
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S  375
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S  351
-rw-r--r--  runtime/art_field-inl.h  15
-rw-r--r--  runtime/art_field.cc  4
-rw-r--r--  runtime/art_field.h  2
-rw-r--r--  runtime/art_method-inl.h  13
-rw-r--r--  runtime/art_method.cc  11
-rw-r--r--  runtime/art_method.h  2
-rw-r--r--  runtime/class_linker-inl.h  45
-rw-r--r--  runtime/class_linker.cc  99
-rw-r--r--  runtime/class_linker.h  8
-rw-r--r--  runtime/class_linker_test.cc  8
-rw-r--r--  runtime/compiler_filter.cc  9
-rw-r--r--  runtime/compiler_filter.h  1
-rw-r--r--  runtime/debugger.cc  39
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h  8
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  22
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc  22
-rw-r--r--  runtime/gc/collector/garbage_collector.cc  20
-rw-r--r--  runtime/gc/collector/garbage_collector.h  4
-rw-r--r--  runtime/gc/heap.cc  4
-rw-r--r--  runtime/gc/space/image_space.cc  4
-rw-r--r--  runtime/image.cc  2
-rw-r--r--  runtime/instrumentation.cc  59
-rw-r--r--  runtime/instrumentation.h  14
-rw-r--r--  runtime/interpreter/interpreter_common.cc  22
-rw-r--r--  runtime/interpreter/interpreter_common.h  10
-rw-r--r--  runtime/jit/jit_code_cache.cc  2
-rw-r--r--  runtime/mirror/class.cc  3
-rw-r--r--  runtime/mirror/dex_cache-inl.h  110
-rw-r--r--  runtime/mirror/dex_cache.cc  20
-rw-r--r--  runtime/mirror/dex_cache.h  77
-rw-r--r--  runtime/mirror/dex_cache_test.cc  3
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc  17
-rw-r--r--  runtime/native/java_lang_DexCache.cc  14
-rw-r--r--  runtime/oat.h  2
-rw-r--r--  runtime/oat_file_assistant.cc  2
-rw-r--r--  runtime/openjdkjvmti/ti_class_loader.cc  9
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.cc  112
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.h  12
-rw-r--r--  runtime/parsed_options.cc  2
-rw-r--r--  runtime/quick_exception_handler.cc  4
-rw-r--r--  runtime/runtime.cc  76
-rw-r--r--  runtime/runtime.h  23
-rw-r--r--  runtime/runtime_android.cc  53
-rw-r--r--  runtime/runtime_common.cc  414
-rw-r--r--  runtime/runtime_common.h  79
-rw-r--r--  runtime/runtime_linux.cc  378
-rw-r--r--  runtime/runtime_options.def  1
-rw-r--r--  runtime/thread.cc  5
-rw-r--r--  runtime/thread_list.cc  10
-rw-r--r--  runtime/thread_pool.cc  11
-rw-r--r--  runtime/thread_pool.h  9
-rw-r--r--  runtime/thread_pool_test.cc  52
-rw-r--r--  runtime/trace.cc  3
-rw-r--r--  runtime/utils/dex_cache_arrays_layout-inl.h  19
-rw-r--r--  runtime/verifier/method_verifier.cc  8
58 files changed, 1258 insertions, 1446 deletions
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 196c65e11a..540df5a554 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -186,6 +186,7 @@ cc_defaults {
"reflection.cc",
"runtime.cc",
"runtime_callbacks.cc",
+ "runtime_common.cc",
"runtime_options.cc",
"signal_catcher.cc",
"stack.cc",
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 76218fb542..2d5eca003d 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1450,316 +1450,83 @@ ENTRY art_quick_aput_obj
move $a2, rSELF # pass Thread::Current
END art_quick_aput_obj
- /*
- * Called by managed code to resolve a static field and load a boolean primitive value.
- */
- .extern artGetBooleanStaticFromCode
-ENTRY art_quick_get_boolean_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetBooleanStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_static
- /*
- * Called by managed code to resolve a static field and load a byte primitive value.
- */
- .extern artGetByteStaticFromCode
-ENTRY art_quick_get_byte_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetByteStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_static
-
- /*
- * Called by managed code to resolve a static field and load a char primitive value.
- */
- .extern artGetCharStaticFromCode
-ENTRY art_quick_get_char_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetCharStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_static
- /*
- * Called by managed code to resolve a static field and load a short primitive value.
- */
- .extern artGetShortStaticFromCode
-ENTRY art_quick_get_short_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetShortStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_static
-
- /*
- * Called by managed code to resolve a static field and load a 32-bit primitive value.
- */
- .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet32StaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_static
-
- /*
- * Called by managed code to resolve a static field and load a 64-bit primitive value.
- */
- .extern artGet64StaticFromCode
-ENTRY art_quick_get64_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet64StaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_static
-
- /*
- * Called by managed code to resolve a static field and load an object reference.
- */
- .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static
- lw $a1, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetObjStaticFromCode
- jalr $t9 # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_static
-
- /*
- * Called by managed code to resolve an instance field and load a boolean primitive value.
- */
- .extern artGetBooleanInstanceFromCode
-ENTRY art_quick_get_boolean_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetBooleanInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_instance
- /*
- * Called by managed code to resolve an instance field and load a byte primitive value.
- */
- .extern artGetByteInstanceFromCode
-ENTRY art_quick_get_byte_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetByteInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_instance
-
- /*
- * Called by managed code to resolve an instance field and load a char primitive value.
- */
- .extern artGetCharInstanceFromCode
-ENTRY art_quick_get_char_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetCharInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_instance
- /*
- * Called by managed code to resolve an instance field and load a short primitive value.
- */
- .extern artGetShortInstanceFromCode
-ENTRY art_quick_get_short_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetShortInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 32-bit primitive value.
- */
- .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet32InstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 64-bit primitive value.
- */
- .extern artGet64InstanceFromCode
-ENTRY art_quick_get64_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGet64InstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_instance
-
- /*
- * Called by managed code to resolve an instance field and load an object reference.
- */
- .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artGetObjInstanceFromCode
- jalr $t9 # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_instance
-
- /*
- * Called by managed code to resolve a static field and store a 8-bit primitive value.
- */
- .extern artSet8StaticFromCode
-ENTRY art_quick_set8_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet8StaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_static
-
- /*
- * Called by managed code to resolve a static field and store a 16-bit primitive value.
- */
- .extern artSet16StaticFromCode
-ENTRY art_quick_set16_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet16StaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*, $sp)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_static
-
- /*
- * Called by managed code to resolve a static field and store a 32-bit primitive value.
- */
- .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet32StaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_static
-
- /*
- * Called by managed code to resolve a static field and store a 64-bit primitive value.
- */
- .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static
- lw $a1, 0($sp) # pass referrer's Method*
- # 64 bit new_val is in a2:a3 pair
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet64StaticFromCode
- jalr $t9 # (field_idx, referrer, new_val, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set64_static
-
- /*
- * Called by managed code to resolve a static field and store an object reference.
- */
- .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static
- lw $a2, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSetObjStaticFromCode
- jalr $t9 # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_static
-
- /*
- * Called by managed code to resolve an instance field and store a 8-bit primitive value.
- */
- .extern artSet8InstanceFromCode
-ENTRY art_quick_set8_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet8InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_instance
+// Macros taking advantage of code similarities for downcalls.
+.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
- /*
- * Called by managed code to resolve an instance field and store a 16-bit primitive value.
- */
- .extern artSet16InstanceFromCode
-ENTRY art_quick_set16_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet16InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_instance
+.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, Thread*) or
+ # (field_idx, new_val, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
- /*
- * Called by managed code to resolve an instance field and store a 32-bit primitive value.
- */
- .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSet32InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_instance
+.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, new_val, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
- /*
- * Called by managed code to resolve an instance field and store a 64-bit primitive value.
- */
- .extern artSet64InstanceFromCode
-ENTRY art_quick_set64_instance
- lw $t1, 0($sp) # load referrer's Method*
- # 64 bit new_val is in a2:a3 pair
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- sw rSELF, 20($sp) # pass Thread::Current
- la $t9, artSet64InstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw $t1, 16($sp) # pass referrer's Method*
- RETURN_IF_ZERO
-END art_quick_set64_instance
+.macro FOUR_ARG_REF_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ la $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, 64-bit new_val, Thread*) or
+ # (field_idx, 64-bit new_val, Thread*)
+ # Note that a 64-bit new_val needs to be aligned with
+ # an even-numbered register, hence A1 may be skipped
+ # for new_val to reside in A2-A3.
+ sw rSELF, 16($sp) # pass Thread::Current
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
/*
- * Called by managed code to resolve an instance field and store an object reference.
+ * Called by managed code to resolve a static/instance field and load/store a value.
*/
- .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance
- lw $a3, 0($sp) # pass referrer's Method*
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- la $t9, artSetObjInstanceFromCode
- jalr $t9 # (field_idx, Object*, new_val, referrer, Thread*)
- sw rSELF, 16($sp) # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_instance
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
+FOUR_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
+FOUR_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
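Note: the macros above collapse two dozen near-identical stubs into four parameterized templates, and the new artGet*/artSet*FromCompiledCode entrypoints no longer take an explicit referrer: the stubs stop reloading the referrer's Method* from the stack, which implies the C++ side now recovers the caller from the SaveRefsOnly frame. A minimal sketch of what such a wrapper plausibly looks like; GetCalleeSaveMethodCaller() and the exact signature are assumptions based on this pattern, not verified against this commit:

    // Hedged sketch (assumes ART runtime headers): a *FromCompiledCode
    // entrypoint recovering the referrer from the SaveRefsOnly frame that
    // SETUP_SAVE_REFS_ONLY_FRAME established, instead of taking it as an
    // argument from the stub.
    extern "C" int32_t artGet32StaticFromCompiledCode(uint32_t field_idx,
                                                      Thread* self)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      ArtMethod* referrer = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
      return artGet32StaticFromCode(field_idx, referrer, self);
    }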
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index b53fd100fa..f3629d90d3 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1416,296 +1416,77 @@ ENTRY art_quick_aput_obj
move $a2, rSELF # pass Thread::Current
END art_quick_aput_obj
- /*
- * Called by managed code to resolve a static field and load a boolean primitive value.
- */
- .extern artGetBooleanStaticFromCode
-ENTRY art_quick_get_boolean_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_static
-
- /*
- * Called by managed code to resolve a static field and load a byte primitive value.
- */
- .extern artGetByteStaticFromCode
-ENTRY art_quick_get_byte_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_static
-
- /*
- * Called by managed code to resolve a static field and load a char primitive value.
- */
- .extern artGetCharStaticFromCode
-ENTRY art_quick_get_char_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_static
-
- /*
- * Called by managed code to resolve a static field and load a short primitive value.
- */
- .extern artGetShortStaticFromCode
-ENTRY art_quick_get_short_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_static
-
- /*
- * Called by managed code to resolve a static field and load a 32-bit primitive value.
- */
- .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- sll $v0, $v0, 0 # sign-extend result
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_static
-
- /*
- * Called by managed code to resolve a static field and load a 64-bit primitive value.
- */
- .extern artGet64StaticFromCode
-ENTRY art_quick_get64_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_static
-
- /*
- * Called by managed code to resolve a static field and load an object reference.
- */
- .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
- move $a2, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_static
-
- /*
- * Called by managed code to resolve an instance field and load a boolean primitive value.
- */
- .extern artGetBooleanInstanceFromCode
-ENTRY art_quick_get_boolean_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_boolean_instance
-
- /*
- * Called by managed code to resolve an instance field and load a byte primitive value.
- */
- .extern artGetByteInstanceFromCode
-ENTRY art_quick_get_byte_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_byte_instance
-
- /*
- * Called by managed code to resolve an instance field and load a char primitive value.
- */
- .extern artGetCharInstanceFromCode
-ENTRY art_quick_get_char_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_char_instance
-
- /*
- * Called by managed code to resolve an instance field and load a short primitive value.
- */
- .extern artGetShortInstanceFromCode
-ENTRY art_quick_get_short_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_short_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 32-bit primitive value.
- */
- .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- sll $v0, $v0, 0 # sign-extend result
- RETURN_IF_NO_EXCEPTION
-END art_quick_get32_instance
-
- /*
- * Called by managed code to resolve an instance field and load a 64-bit primitive value.
- */
- .extern artGet64InstanceFromCode
-ENTRY art_quick_get64_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get64_instance
-
- /*
- * Called by managed code to resolve an instance field and load an object reference.
- */
- .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_instance
-
- /*
- * Called by managed code to resolve a static field and store a 8-bit primitive value.
- */
- .extern artSet8StaticFromCode
-ENTRY art_quick_set8_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_static
-
- /*
- * Called by managed code to resolve a static field and store a 16-bit primitive value.
- */
- .extern artSet16StaticFromCode
-ENTRY art_quick_set16_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_static
-
- /*
- * Called by managed code to resolve a static field and store a 32-bit primitive value.
- */
- .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_static
-
- /*
- * Called by managed code to resolve a static field and store a 64-bit primitive value.
- */
- .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- # a2 contains the new val
- ld $a1, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set64_static
-
- /*
- * Called by managed code to resolve a static field and store an object reference.
- */
- .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*)
- move $a3, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_static
-
- /*
- * Called by managed code to resolve an instance field and store a 8-bit primitive value.
- */
- .extern artSet8InstanceFromCode
-ENTRY art_quick_set8_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set8_instance
-
- /*
- * Called by managed code to resolve an instance field and store a 16-bit primitive value.
- */
- .extern artSet16InstanceFromCode
-ENTRY art_quick_set16_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set16_instance
+// Macros taking advantage of code similarities for downcalls.
+.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ dla $t9, \entrypoint
+ jalr $t9 # (field_idx, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ .if \extend
+ sll $v0, $v0, 0 # sign-extend 32-bit result
+ .endif
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
- /*
- * Called by managed code to resolve an instance field and store a 32-bit primitive value.
- */
- .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set32_instance
+.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ dla $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, Thread*) or
+ # (field_idx, new_val, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ .if \extend
+ sll $v0, $v0, 0 # sign-extend 32-bit result
+ .endif
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
- /*
- * Called by managed code to resolve an instance field and store a 64-bit primitive value.
- */
- .extern artSet64InstanceFromCode
-ENTRY art_quick_set64_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSet64InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set64_instance
+.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
+ dla $t9, \entrypoint
+ jalr $t9 # (field_idx, Object*, new_val, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ .if \extend
+ sll $v0, $v0, 0 # sign-extend 32-bit result
+ .endif
+ \return # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
+END \name
+.endm
/*
- * Called by managed code to resolve an instance field and store an object reference.
+ * Called by managed code to resolve a static/instance field and load/store a value.
*/
- .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance
- SETUP_SAVE_REFS_ONLY_FRAME # save callee saves in case of GC
- ld $a3, FRAME_SIZE_SAVE_REFS_ONLY($sp) # pass referrer's Method*
- jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
- move $a4, rSELF # pass Thread::Current
- RETURN_IF_ZERO
-END art_quick_set_obj_instance
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION, 1
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION, 1
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
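The mips64 variants add an `extend` flag because the MIPS64 ABI expects 32-bit integer results to sit sign-extended in the 64-bit $v0 register, and `sll $v0, $v0, 0` is the canonical one-instruction way to produce that extension. A standalone C++ illustration of the semantics:

    #include <cassert>
    #include <cstdint>

    // Mirrors what `sll $v0, $v0, 0` does on MIPS64: truncate the register to
    // its low 32 bits, then sign-extend the result back to 64 bits.
    int64_t SignExtend32(uint64_t reg) {
      return static_cast<int64_t>(static_cast<int32_t>(reg & 0xffffffffu));
    }

    int main() {
      assert(SignExtend32(0x00000000ffffffffu) == -1);
      assert(SignExtend32(0x000000007fffffffu) == INT64_C(0x7fffffff));
      return 0;
    }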
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 80af8e7bde..16b73c681f 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -311,6 +311,8 @@ inline bool ArtField::IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_) {
template <bool kResolve>
inline ObjPtr<mirror::Class> ArtField::GetType() {
+ // TODO: Refactor this function into two functions, ResolveType() and LookupType()
+ // so that we can properly annotate it with no-suspension possible / suspension possible.
const uint32_t field_index = GetDexFieldIndex();
ObjPtr<mirror::Class> declaring_class = GetDeclaringClass();
if (UNLIKELY(declaring_class->IsProxyClass())) {
@@ -320,9 +322,16 @@ inline ObjPtr<mirror::Class> ArtField::GetType() {
const DexFile* const dex_file = dex_cache->GetDexFile();
const DexFile::FieldId& field_id = dex_file->GetFieldId(field_index);
ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(field_id.type_idx_);
- if (kResolve && UNLIKELY(type == nullptr)) {
- type = ResolveGetType(field_id.type_idx_);
- CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ if (UNLIKELY(type == nullptr)) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (kResolve) {
+ type = class_linker->ResolveType(*dex_file, field_id.type_idx_, declaring_class);
+ CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ } else {
+ type = class_linker->LookupResolvedType(
+ *dex_file, field_id.type_idx_, dex_cache, declaring_class->GetClassLoader());
+ DCHECK(!Thread::Current()->IsExceptionPending());
+ }
}
return type;
}
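With this change GetType<false>() goes through LookupResolvedType() instead of silently returning null for unresolved types, so the two instantiations now have distinct contracts. A hedged usage sketch; the caller code is illustrative, not from this commit:

    // kResolve = false: lookup only; does not resolve, and the DCHECK above
    // asserts no exception is pending afterwards.
    ObjPtr<mirror::Class> type = field->GetType</*kResolve=*/false>();
    if (type == nullptr) {
      // kResolve = true: may resolve the class (which can suspend); on
      // failure it returns null with an exception pending, per the CHECK.
      type = field->GetType</*kResolve=*/true>();
    }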
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index a4a6e5a4fb..7e131040be 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -48,10 +48,6 @@ ObjPtr<mirror::Class> ArtField::ProxyFindSystemClass(const char* descriptor) {
return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), descriptor);
}
-ObjPtr<mirror::Class> ArtField::ResolveGetType(dex::TypeIndex type_idx) {
- return Runtime::Current()->GetClassLinker()->ResolveType(type_idx, this);
-}
-
ObjPtr<mirror::String> ArtField::ResolveGetStringName(Thread* self,
const DexFile& dex_file,
dex::StringIndex string_idx,
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 427e103749..75dd981136 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -217,8 +217,6 @@ class ArtField FINAL {
private:
ObjPtr<mirror::Class> ProxyFindSystemClass(const char* descriptor)
REQUIRES_SHARED(Locks::mutator_lock_);
- ObjPtr<mirror::Class> ResolveGetType(dex::TypeIndex type_idx)
- REQUIRES_SHARED(Locks::mutator_lock_);
ObjPtr<mirror::String> ResolveGetStringName(Thread* self,
const DexFile& dex_file,
dex::StringIndex string_idx,
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 7ec3900aa9..efcdbbff5a 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -175,12 +175,19 @@ inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other, PointerS
}
inline mirror::Class* ArtMethod::GetClassFromTypeIndex(dex::TypeIndex type_idx, bool resolve) {
+ // TODO: Refactor this function into two functions, Resolve...() and Lookup...()
+ // so that we can properly annotate it with no-suspension possible / suspension possible.
ObjPtr<mirror::DexCache> dex_cache = GetDexCache();
ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx);
- if (UNLIKELY(type == nullptr) && resolve) {
+ if (UNLIKELY(type == nullptr)) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- type = class_linker->ResolveType(type_idx, this);
- CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ if (resolve) {
+ type = class_linker->ResolveType(type_idx, this);
+ CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
+ } else {
+ type = class_linker->LookupResolvedType(
+ *dex_cache->GetDexFile(), type_idx, dex_cache, GetClassLoader());
+ }
}
return type.Ptr();
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index ec789f51ef..61ff41742b 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -55,6 +55,17 @@ extern "C" void art_quick_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*,
extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
const char*);
+ArtMethod* ArtMethod::GetNonObsoleteMethod() {
+ DCHECK_EQ(kRuntimePointerSize, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+ if (LIKELY(!IsObsolete())) {
+ return this;
+ } else if (IsDirect()) {
+ return &GetDeclaringClass()->GetDirectMethodsSlice(kRuntimePointerSize)[GetMethodIndex()];
+ } else {
+ return GetDeclaringClass()->GetVTableEntry(GetMethodIndex(), kRuntimePointerSize);
+ }
+}
+
ArtMethod* ArtMethod::GetSingleImplementation(PointerSize pointer_size) {
DCHECK(!IsNative());
if (!IsAbstract()) {
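GetNonObsoleteMethod() supports the JVMTI redefinition machinery touched elsewhere in this change (ti_redefine.cc): frames may still be executing an obsolete copy of a redefined method, and callers that want the live definition re-look it up via the stored method index. A hedged usage sketch, assuming ART's logging macros; not code from this commit:

    // Illustrative caller: report the current, non-obsolete definition of
    // whatever method a frame is executing.
    void ReportMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
      ArtMethod* current = method->GetNonObsoleteMethod();
      // For an obsolete method this re-resolves through the declaring class:
      // the direct-methods slice for direct methods, the vtable otherwise.
      LOG(INFO) << current->PrettyMethod();
    }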
diff --git a/runtime/art_method.h b/runtime/art_method.h
index f145d7c416..e4db2c7324 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -570,6 +570,8 @@ class ArtMethod FINAL {
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ArtMethod* GetNonObsoleteMethod() REQUIRES_SHARED(Locks::mutator_lock_);
+
// May cause thread suspension due to class resolution.
bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 2e17dd85e6..e928344fb6 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -68,22 +68,28 @@ inline mirror::Class* ClassLinker::FindArrayClass(Thread* self,
inline mirror::String* ClassLinker::ResolveString(dex::StringIndex string_idx,
ArtMethod* referrer) {
Thread::PoisonObjectPointersIfDebug();
- ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- // MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx.index_, declaring_class->GetDexFile().NumStringIds());
- ObjPtr<mirror::String> string =
- mirror::StringDexCachePair::Lookup(declaring_class->GetDexCache()->GetStrings(),
- string_idx.index_,
- mirror::DexCache::kDexCacheStringCacheSize).Read();
+ ObjPtr<mirror::String> string = referrer->GetDexCache()->GetResolvedString(string_idx);
if (UNLIKELY(string == nullptr)) {
StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
const DexFile& dex_file = *dex_cache->GetDexFile();
string = ResolveString(dex_file, string_idx, dex_cache);
}
return string.Ptr();
}
+inline ObjPtr<mirror::Class> ClassLinker::LookupResolvedType(
+ dex::TypeIndex type_idx,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader) {
+ ObjPtr<mirror::Class> type = dex_cache->GetResolvedType(type_idx);
+ if (type == nullptr) {
+ type = Runtime::Current()->GetClassLinker()->LookupResolvedType(
+ *dex_cache->GetDexFile(), type_idx, dex_cache, class_loader);
+ }
+ return type;
+}
+
inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtMethod* referrer) {
Thread::PoisonObjectPointersIfDebug();
if (kIsDebugBuild) {
@@ -93,29 +99,10 @@ inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtMetho
if (UNLIKELY(resolved_type == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
- const DexFile& dex_file = *dex_cache->GetDexFile();
- resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- // Note: We cannot check here to see whether we added the type to the cache. The type
- // might be an erroneous class, which results in it being hidden from us.
- }
- return resolved_type.Ptr();
-}
-
-inline mirror::Class* ClassLinker::ResolveType(dex::TypeIndex type_idx, ArtField* referrer) {
- Thread::PoisonObjectPointersIfDebug();
- ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- ObjPtr<mirror::DexCache> dex_cache_ptr = declaring_class->GetDexCache();
- ObjPtr<mirror::Class> resolved_type = dex_cache_ptr->GetResolvedType(type_idx);
- if (UNLIKELY(resolved_type == nullptr)) {
- StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_cache_ptr));
+ Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- // Note: We cannot check here to see whether we added the type to the cache. The type
- // might be an erroneous class, which results in it being hidden from us.
}
return resolved_type.Ptr();
}
@@ -159,7 +146,7 @@ inline ArtMethod* ClassLinker::ResolveMethod(Thread* self,
if (UNLIKELY(resolved_method == nullptr)) {
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
+ Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(referrer->GetDexCache()));
Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile* dex_file = h_dex_cache->GetDexFile();
resolved_method = ResolveMethod<kResolveMode>(*dex_file,
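The new LookupResolvedType() inline is a two-level cache read: check the per-DexCache slot first, then fall back to the wider class-table lookup, without ever triggering resolution. A standalone sketch of the pattern with simplified types (not ART's):

    #include <unordered_map>

    // Simplified stand-in for the cache-then-lookup pattern: the fast path is
    // a per-file cache; the slow path consults a wider table but never
    // "resolves", i.e. never does work that could suspend or throw.
    template <typename K, typename V, typename SlowLookup>
    V* CachedLookup(std::unordered_map<K, V*>& cache, const K& key,
                    SlowLookup slow_lookup) {
      auto it = cache.find(key);
      if (it != cache.end()) {
        return it->second;       // fast path: previously resolved
      }
      return slow_lookup(key);   // may be null; the caller decides to resolve
    }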
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 02b26c6568..866936739a 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1171,6 +1171,23 @@ static void CopyNonNull(const T* src, size_t count, T* dst, const NullPred& pred
}
}
+template <typename T>
+static void CopyDexCachePairs(const std::atomic<mirror::DexCachePair<T>>* src,
+ size_t count,
+ std::atomic<mirror::DexCachePair<T>>* dst) {
+ DCHECK_NE(count, 0u);
+ DCHECK(!src[0].load(std::memory_order_relaxed).object.IsNull() ||
+ src[0].load(std::memory_order_relaxed).index != 0u);
+ for (size_t i = 0; i < count; ++i) {
+ DCHECK_EQ(dst[i].load(std::memory_order_relaxed).index, 0u);
+ DCHECK(dst[i].load(std::memory_order_relaxed).object.IsNull());
+ mirror::DexCachePair<T> source = src[i].load(std::memory_order_relaxed);
+ if (source.index != 0u || !source.object.IsNull()) {
+ dst[i].store(source, std::memory_order_relaxed);
+ }
+ }
+}
+
bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
@@ -1224,7 +1241,10 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
if (dex_file->NumStringIds() < num_strings) {
num_strings = dex_file->NumStringIds();
}
- const size_t num_types = dex_file->NumTypeIds();
+ size_t num_types = mirror::DexCache::kDexCacheTypeCacheSize;
+ if (dex_file->NumTypeIds() < num_types) {
+ num_types = dex_file->NumTypeIds();
+ }
const size_t num_methods = dex_file->NumMethodIds();
const size_t num_fields = dex_file->NumFieldIds();
size_t num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
@@ -1243,28 +1263,14 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
mirror::StringDexCacheType* const image_resolved_strings = dex_cache->GetStrings();
mirror::StringDexCacheType* const strings =
reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
- for (size_t j = 0; j < num_strings; ++j) {
- DCHECK_EQ(strings[j].load(std::memory_order_relaxed).index, 0u);
- DCHECK(strings[j].load(std::memory_order_relaxed).object.IsNull());
- strings[j].store(image_resolved_strings[j].load(std::memory_order_relaxed),
- std::memory_order_relaxed);
- }
- mirror::StringDexCachePair::Initialize(strings);
+ CopyDexCachePairs(image_resolved_strings, num_strings, strings);
dex_cache->SetStrings(strings);
}
if (num_types != 0u) {
- GcRoot<mirror::Class>* const image_resolved_types = dex_cache->GetResolvedTypes();
- GcRoot<mirror::Class>* const types =
- reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset());
- for (size_t j = 0; kIsDebugBuild && j < num_types; ++j) {
- DCHECK(types[j].IsNull());
- }
- CopyNonNull(image_resolved_types,
- num_types,
- types,
- [](const GcRoot<mirror::Class>& elem) {
- return elem.IsNull();
- });
+ mirror::TypeDexCacheType* const image_resolved_types = dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* const types =
+ reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
+ CopyDexCachePairs(image_resolved_types, num_types, types);
dex_cache->SetResolvedTypes(types);
}
if (num_methods != 0u) {
@@ -1305,15 +1311,7 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
mirror::MethodTypeDexCacheType* const method_types =
reinterpret_cast<mirror::MethodTypeDexCacheType*>(
raw_arrays + layout.MethodTypesOffset());
- for (size_t j = 0; j < num_method_types; ++j) {
- DCHECK_EQ(method_types[j].load(std::memory_order_relaxed).index, 0u);
- DCHECK(method_types[j].load(std::memory_order_relaxed).object.IsNull());
- method_types[j].store(
- image_resolved_method_types[j].load(std::memory_order_relaxed),
- std::memory_order_relaxed);
- }
-
- mirror::MethodTypeDexCachePair::Initialize(method_types);
+ CopyDexCachePairs(image_resolved_method_types, num_method_types, method_types);
dex_cache->SetResolvedMethodTypes(method_types);
}
}
@@ -1335,11 +1333,11 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
}
if (kIsDebugBuild) {
CHECK(new_class_set != nullptr);
- GcRoot<mirror::Class>* const types = dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes();
const size_t num_types = dex_cache->NumResolvedTypes();
- for (int32_t j = 0; j < static_cast<int32_t>(num_types); j++) {
+ for (size_t j = 0; j != num_types; ++j) {
// The image space is not yet added to the heap, avoid read barriers.
- ObjPtr<mirror::Class> klass = types[j].Read();
+ ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
if (space->HasAddress(klass.Ptr())) {
DCHECK(!klass->IsErroneous()) << klass->GetStatus();
auto it = new_class_set->Find(ClassTable::TableSlot(klass));
@@ -1700,9 +1698,9 @@ bool ClassLinker::AddImageSpace(
// The current dex file field is bogus, overwrite it so that we can get the dex file in the
// loop below.
h_dex_cache->SetDexFile(dex_file.get());
- GcRoot<mirror::Class>* const types = h_dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* const types = h_dex_cache->GetResolvedTypes();
for (int32_t j = 0, num_types = h_dex_cache->NumResolvedTypes(); j < num_types; j++) {
- ObjPtr<mirror::Class> klass = types[j].Read();
+ ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
if (klass != nullptr) {
DCHECK(!klass->IsErroneous()) << klass->GetStatus();
}
@@ -2862,9 +2860,12 @@ bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void*
return true;
}
- if (runtime->IsFullyDeoptable()) {
- // We need to be able to deoptimize at any time so we should always just ignore precompiled
- // code and go to the interpreter assuming we don't already have jitted code.
+ if (runtime->IsJavaDebuggable()) {
+ // For simplicity, we ignore precompiled code and go to the interpreter
+ // assuming we don't already have jitted code.
+ // We could look at the oat file where `quick_code` is being defined,
+ // and check whether it's been compiled debuggable, but we decided to
+ // only rely on the JIT for debuggable apps.
jit::Jit* jit = Runtime::Current()->GetJit();
return (jit == nullptr) || !jit->GetCodeCache()->ContainsPc(quick_code);
}
@@ -2872,18 +2873,13 @@ bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void*
if (runtime->IsNativeDebuggable()) {
DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse());
// If we are doing native debugging, ignore application's AOT code,
- // since we want to JIT it with extra stackmaps for native debugging.
- // On the other hand, keep all AOT code from the boot image, since the
- // blocking JIT would results in non-negligible performance impact.
+ // since we want to JIT it (at first use) with extra stackmaps for native
+ // debugging. We keep however all AOT code from the boot image,
+ // since the JIT-at-first-use is blocking and would result in non-negligible
+ // startup performance impact.
return !runtime->GetHeap()->IsInBootImageOatFile(quick_code);
}
- if (Dbg::IsDebuggerActive()) {
- // Boot image classes may be AOT-compiled as non-debuggable.
- // This is not suitable for the Java debugger, so ignore the AOT code.
- return runtime->GetHeap()->IsInBootImageOatFile(quick_code);
- }
-
return false;
}
@@ -7700,7 +7696,9 @@ mirror::String* ClassLinker::ResolveString(const DexFile& dex_file,
uint32_t utf16_length;
const char* utf8_data = dex_file.StringDataAndUtf16LengthByIdx(string_idx, &utf16_length);
ObjPtr<mirror::String> string = intern_table_->InternStrong(utf16_length, utf8_data);
- dex_cache->SetResolvedString(string_idx, string);
+ if (string != nullptr) {
+ dex_cache->SetResolvedString(string_idx, string);
+ }
return string.Ptr();
}
@@ -7743,6 +7741,7 @@ ObjPtr<mirror::Class> ClassLinker::LookupResolvedType(const DexFile& dex_file,
}
}
if (type != nullptr && type->IsResolved()) {
+ dex_cache->SetResolvedType(type_idx, type);
return type.Ptr();
}
return nullptr;
@@ -7765,6 +7764,12 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file,
Thread::PoisonObjectPointersIfDebug();
ObjPtr<mirror::Class> resolved = dex_cache->GetResolvedType(type_idx);
if (resolved == nullptr) {
+ // TODO: Avoid this lookup as it duplicates work done in FindClass(). It is here
+ // as a workaround for FastNative JNI to avoid AssertNoPendingException() when
+ // trying to resolve annotations while an exception may be pending. Bug: 34659969
+ resolved = LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get());
+ }
+ if (resolved == nullptr) {
Thread* self = Thread::Current();
const char* descriptor = dex_file.StringByTypeIdx(type_idx);
resolved = FindClass(self, descriptor, class_loader);
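Three related fixes land in class_linker.cc: CopyDexCachePairs() centralizes the relaxed-atomic copy of <index, object> pairs when adopting app-image dex caches; ResolveString() no longer caches a null result; and ResolveType() now tries LookupResolvedType() before FindClass() so FastNative JNI can resolve annotations while an exception may be pending (Bug: 34659969). A standalone sketch of the pair-copy helper's core idea, with a simplified stand-in for DexCachePair<T>:

    #include <atomic>
    #include <cstddef>

    struct Pair { unsigned index; const void* object; };  // stand-in for DexCachePair<T>

    // Copy pairs with relaxed atomics, skipping slots that still hold the
    // default <0, null> value so untouched destination slots stay untouched.
    void CopyPairs(const std::atomic<Pair>* src, size_t count,
                   std::atomic<Pair>* dst) {
      for (size_t i = 0; i != count; ++i) {
        Pair p = src[i].load(std::memory_order_relaxed);
        if (p.index != 0u || p.object != nullptr) {
          dst[i].store(p, std::memory_order_relaxed);
        }
      }
    }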
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 5042fb7609..21edd513ac 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -262,10 +262,6 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
- mirror::Class* ResolveType(dex::TypeIndex type_idx, ArtField* referrer)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
-
// Look up a resolved type with the given ID from the DexFile. The ClassLoader is used to search
// for the type, since it may be referenced from but not contained within the given DexFile.
ObjPtr<mirror::Class> LookupResolvedType(const DexFile& dex_file,
@@ -273,6 +269,10 @@ class ClassLinker {
ObjPtr<mirror::DexCache> dex_cache,
ObjPtr<mirror::ClassLoader> class_loader)
REQUIRES_SHARED(Locks::mutator_lock_);
+ static ObjPtr<mirror::Class> LookupResolvedType(dex::TypeIndex type_idx,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a type with the given ID from the DexFile, storing the
// result in DexCache. The ClassLoader is used to search for the
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 17510bb598..6eee0bd617 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -914,7 +914,7 @@ TEST_F(ClassLinkerTest, LookupResolvedType) {
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()),
klass);
// Zero out the resolved type and make sure LookupResolvedType still finds it.
- dex_cache->SetResolvedType(type_idx, nullptr);
+ dex_cache->ClearResolvedType(type_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache, class_loader.Get()),
@@ -949,7 +949,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeArray) {
class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()),
array_klass);
// Zero out the resolved type and make sure LookupResolvedType() still finds it.
- dex_cache->SetResolvedType(array_idx, nullptr);
+ dex_cache->ClearResolvedType(array_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(array_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, array_idx, dex_cache.Get(), class_loader.Get()),
@@ -972,7 +972,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) {
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
klass.Get());
// Zero out the resolved type and make sure LookupResolvedType still finds it.
- dex_cache->SetResolvedType(type_idx, nullptr);
+ dex_cache->ClearResolvedType(type_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
@@ -990,7 +990,7 @@ TEST_F(ClassLinkerTest, LookupResolvedTypeErroneousInit) {
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
klass.Get());
// Zero out the resolved type and make sure LookupResolvedType() still finds it.
- dex_cache->SetResolvedType(type_idx, nullptr);
+ dex_cache->ClearResolvedType(type_idx);
EXPECT_TRUE(dex_cache->GetResolvedType(type_idx) == nullptr);
EXPECT_OBJ_PTR_EQ(
class_linker_->LookupResolvedType(dex_file, type_idx, dex_cache.Get(), class_loader.Get()),
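The tests switch from SetResolvedType(type_idx, nullptr) to a dedicated ClearResolvedType(type_idx). With the new pair-based type cache a slot holds an <index, object> pair rather than a bare pointer, so clearing means rewriting the slot's canonical empty value rather than storing a null through the setter. A hedged sketch of the distinction, reusing the Pair stand-in from the earlier sketch; ART's actual sentinel handling (slot 0 uses a distinct invalid index) is more subtle than shown:

    #include <atomic>
    #include <cstddef>

    // Storing {type_idx, nullptr} via the setter would leave a stale index in
    // the slot; clearing restores the canonical empty pair instead.
    void ClearSlot(std::atomic<Pair>* slots, size_t num_slots, unsigned type_idx) {
      slots[type_idx % num_slots].store(Pair{0u, nullptr},
                                        std::memory_order_relaxed);
    }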
diff --git a/runtime/compiler_filter.cc b/runtime/compiler_filter.cc
index dc89d32f50..cb8c11db96 100644
--- a/runtime/compiler_filter.cc
+++ b/runtime/compiler_filter.cc
@@ -33,7 +33,6 @@ bool CompilerFilter::IsBytecodeCompilationEnabled(Filter filter) {
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -53,7 +52,6 @@ bool CompilerFilter::IsJniCompilationEnabled(Filter filter) {
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -73,7 +71,6 @@ bool CompilerFilter::IsAnyMethodCompilationEnabled(Filter filter) {
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -93,7 +90,6 @@ bool CompilerFilter::IsVerificationEnabled(Filter filter) {
case CompilerFilter::kTime:
case CompilerFilter::kSpeedProfile:
case CompilerFilter::kSpeed:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile:
case CompilerFilter::kEverything: return true;
}
@@ -120,7 +116,6 @@ bool CompilerFilter::DependsOnProfile(Filter filter) {
case CompilerFilter::kVerifyProfile:
case CompilerFilter::kSpaceProfile:
case CompilerFilter::kSpeedProfile:
- case CompilerFilter::kLayoutProfile:
case CompilerFilter::kEverythingProfile: return true;
}
UNREACHABLE();
@@ -145,7 +140,6 @@ CompilerFilter::Filter CompilerFilter::GetNonProfileDependentFilterFrom(Filter f
return CompilerFilter::kSpace;
case CompilerFilter::kSpeedProfile:
- case CompilerFilter::kLayoutProfile:
return CompilerFilter::kSpeed;
case CompilerFilter::kEverythingProfile:
@@ -171,7 +165,6 @@ std::string CompilerFilter::NameOfFilter(Filter filter) {
case CompilerFilter::kTime: return "time";
case CompilerFilter::kSpeedProfile: return "speed-profile";
case CompilerFilter::kSpeed: return "speed";
- case CompilerFilter::kLayoutProfile: return "layout-profile";
case CompilerFilter::kEverythingProfile: return "everything-profile";
case CompilerFilter::kEverything: return "everything";
}
@@ -199,8 +192,6 @@ bool CompilerFilter::ParseCompilerFilter(const char* option, Filter* filter) {
*filter = kSpeed;
} else if (strcmp(option, "speed-profile") == 0) {
*filter = kSpeedProfile;
- } else if (strcmp(option, "layout-profile") == 0) {
- *filter = kLayoutProfile;
} else if (strcmp(option, "everything") == 0) {
*filter = kEverything;
} else if (strcmp(option, "everything-profile") == 0) {
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 7eb5f9a90f..796f4aad0c 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -39,7 +39,6 @@ class CompilerFilter FINAL {
kSpace, // Maximize space savings.
kBalanced, // Good performance return on compilation investment.
kSpeedProfile, // Maximize runtime performance based on profile.
- kLayoutProfile, // Temporary filter for dexlayout. Will be merged with kSpeedProfile.
kSpeed, // Maximize runtime performance.
kEverythingProfile, // Compile everything capable of being compiled based on profile.
kEverything, // Compile everything capable of being compiled.
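
With kLayoutProfile folded into kSpeedProfile, each switch above loses one case and the profile-to-plain mapping becomes a straight one-to-one table. A minimal standalone sketch (toy enum, not the ART sources) of that mapping after this change:

    #include <cassert>

    enum class Filter { kVerifyProfile, kSpaceProfile, kSpeedProfile, kEverythingProfile,
                        kVerify, kSpace, kSpeed, kEverything };

    // Mirrors GetNonProfileDependentFilterFrom() once kLayoutProfile is gone:
    // each *Profile filter maps to its non-profile counterpart.
    Filter NonProfileVariant(Filter f) {
      switch (f) {
        case Filter::kVerifyProfile:     return Filter::kVerify;
        case Filter::kSpaceProfile:      return Filter::kSpace;
        case Filter::kSpeedProfile:      return Filter::kSpeed;  // previously also kLayoutProfile
        case Filter::kEverythingProfile: return Filter::kEverything;
        default:                         return f;  // non-profile filters are unchanged
      }
    }

    int main() { assert(NonProfileVariant(Filter::kSpeedProfile) == Filter::kSpeed); }
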
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 22a31635a6..1a0cec075c 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -588,29 +588,6 @@ bool Dbg::RequiresDeoptimization() {
return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
}
-// Used to patch boot image method entry point to interpreter bridge.
-class UpdateEntryPointsClassVisitor : public ClassVisitor {
- public:
- explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
- : instrumentation_(instrumentation) {}
-
- bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
- auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- for (auto& m : klass->GetMethods(pointer_size)) {
- const void* code = m.GetEntryPointFromQuickCompiledCode();
- if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
- !m.IsNative() &&
- !m.IsProxyMethod()) {
- instrumentation_->UpdateMethodsCodeFromDebugger(&m, GetQuickToInterpreterBridge());
- }
- }
- return true;
- }
-
- private:
- instrumentation::Instrumentation* const instrumentation_;
-};
-
void Dbg::GoActive() {
// Enable all debugging features, including scans for breakpoints.
// This is a no-op if we're already active.
@@ -639,14 +616,16 @@ void Dbg::GoActive() {
}
Runtime* runtime = Runtime::Current();
- // Since boot image code may be AOT compiled as not debuggable, we need to patch
- // entry points of methods in boot image to interpreter bridge.
- // However, the performance cost of this is non-negligible during native-debugging due to the
+ // Best-effort deoptimization if the runtime is not Java-debuggable. This happens when
+ // ro.debuggable is set but the application is not debuggable, or when a standalone
+ // dalvikvm invocation is not passed the debuggable option (-Xcompiler-option --debuggable).
+ //
+ // The performance cost of this is non-negligible during native-debugging due to the
// forced JIT, so we keep the AOT code in that case in exchange for limited native debugging.
- if (!runtime->GetInstrumentation()->IsForcedInterpretOnly() && !runtime->IsNativeDebuggable()) {
- ScopedObjectAccess soa(self);
- UpdateEntryPointsClassVisitor visitor(runtime->GetInstrumentation());
- runtime->GetClassLinker()->VisitClasses(&visitor);
+ if (!runtime->IsJavaDebuggable() &&
+ !runtime->GetInstrumentation()->IsForcedInterpretOnly() &&
+ !runtime->IsNativeDebuggable()) {
+ runtime->DeoptimizeBootImage();
}
ScopedSuspendAll ssa(__FUNCTION__);
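
The visitor deleted above does not disappear: its logic moves behind the Runtime::DeoptimizeBootImage() call now made here. A hedged standalone sketch of that pattern, with simplified stand-in types (Method, Class, in_boot_image_oat) rather than ART's:

    #include <functional>
    #include <vector>

    struct Method { bool is_native; bool is_proxy; const void* entry_point; };
    struct Class { std::vector<Method*> methods; };

    // Redirect every AOT entry point residing in the boot image to the
    // interpreter bridge, skipping native and proxy methods, as the old
    // UpdateEntryPointsClassVisitor did.
    void DeoptimizeBootImage(const std::vector<Class*>& classes,
                             const std::function<bool(const void*)>& in_boot_image_oat,
                             const void* interpreter_bridge) {
      for (Class* klass : classes) {
        for (Method* m : klass->methods) {
          if (in_boot_image_oat(m->entry_point) && !m->is_native && !m->is_proxy) {
            m->entry_point = interpreter_bridge;
          }
        }
      }
    }
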
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index ac0ce36016..1b267eb991 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -705,10 +705,10 @@ inline ArtMethod* FindMethodFast(uint32_t method_idx,
return resolved_method;
} else if (type == kSuper) {
// TODO This lookup is rather slow.
- dex::TypeIndex method_type_idx =
- referrer->GetDexFile()->GetMethodId(method_idx).class_idx_;
- mirror::Class* method_reference_class =
- referrer->GetDexCache()->GetResolvedType(method_type_idx);
+ ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
+ dex::TypeIndex method_type_idx = dex_cache->GetDexFile()->GetMethodId(method_idx).class_idx_;
+ ObjPtr<mirror::Class> method_reference_class = ClassLinker::LookupResolvedType(
+ method_type_idx, dex_cache, referrer->GetClassLoader());
if (method_reference_class == nullptr) {
// Need to do full type resolution...
return nullptr;
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index eb76fb6b88..bde9009f7b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -781,15 +781,19 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
// If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
// should be done and it knows the real return pc.
if (UNLIKELY(caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
- Dbg::IsForcedInterpreterNeededForUpcall(self, caller) &&
- Runtime::Current()->IsDeoptimizeable(caller_pc))) {
- // Push the context of the deoptimization stack so we can restore the return value and the
- // exception before executing the deoptimized frames.
- self->PushDeoptimizationContext(
- result, shorty[0] == 'L', /* from_code */ false, self->GetException());
-
- // Set special exception to cause deoptimization.
- self->SetException(Thread::GetDeoptimizationException());
+ Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
+ if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << caller->PrettyMethod();
+ } else {
+ // Push the context of the deoptimization stack so we can restore the return value and the
+ // exception before executing the deoptimized frames.
+ self->PushDeoptimizationContext(
+ result, shorty[0] == 'L', /* from_code */ false, self->GetException());
+
+ // Set special exception to cause deoptimization.
+ self->SetException(Thread::GetDeoptimizationException());
+ }
}
// No need to restore the args since the method has already been run by the interpreter.
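
The restructured guard above splits one condition into two outcomes: a request whose return PC is not asynchronously deoptimizable is now logged and dropped, instead of being silently folded into the old deoptimizeable check. A reduced sketch of the shape, with stand-in callables (interpreter_needed, push_deopt_context) rather than the real entrypoint plumbing:

    #include <cstdio>
    #include <functional>

    void MaybeDeoptimizeUpcall(bool interpreter_needed,
                               bool async_deoptimizeable,
                               const std::function<void()>& push_deopt_context) {
      if (!interpreter_needed) {
        return;  // nothing to do; keep the compiled caller frame
      }
      if (!async_deoptimizeable) {
        std::fprintf(stderr, "Got a deoptimization request on un-deoptimizable method\n");
        return;  // drop the request rather than deoptimize an unsupported frame
      }
      push_deopt_context();  // sets the special deoptimization exception
    }
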
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 9dc72f0791..0819ba04f7 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -25,6 +25,7 @@
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/gc_pause_listener.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
@@ -139,7 +140,7 @@ void ConcurrentCopying::RunPhases() {
// Verify no from space refs. This causes a pause.
if (kEnableNoFromSpaceRefsVerification || kIsDebugBuild) {
TimingLogger::ScopedTiming split("(Paused)VerifyNoFromSpaceReferences", GetTimings());
- ScopedPause pause(this);
+ ScopedPause pause(this, false);
CheckEmptyMarkStack();
if (kVerboseMode) {
LOG(INFO) << "Verifying no from-space refs";
@@ -439,8 +440,27 @@ void ConcurrentCopying::FlipThreadRoots() {
gc_barrier_->Init(self, 0);
ThreadFlipVisitor thread_flip_visitor(this, heap_->use_tlab_);
FlipCallback flip_callback(this);
+
+ // This is the point where the concurrent copying collector pauses all threads. We report
+ // the pause here if a listener is registered. This slightly over-reports the pause, as it
+ // also includes the time needed to actually suspend the threads.
+ {
+ GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->StartPause();
+ }
+ }
+
size_t barrier_count = Runtime::Current()->FlipThreadRoots(
&thread_flip_visitor, &flip_callback, this);
+
+ {
+ GcPauseListener* pause_listener = GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->EndPause();
+ }
+ }
+
{
ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
gc_barrier_->Increment(self, barrier_count);
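
StartPause()/EndPause() are open-coded around FlipThreadRoots() because this pause does not go through ScopedPause. An RAII wrapper would make the bracketing exception-safe; a hedged sketch follows, where the GcPauseListener interface mirrors the one used by the patch but the wrapper itself is not part of it:

    class GcPauseListener {
     public:
      virtual ~GcPauseListener() {}
      virtual void StartPause() = 0;
      virtual void EndPause() = 0;
    };

    class ScopedGcPauseReport {
     public:
      explicit ScopedGcPauseReport(GcPauseListener* listener) : listener_(listener) {
        if (listener_ != nullptr) listener_->StartPause();
      }
      ~ScopedGcPauseReport() {
        if (listener_ != nullptr) listener_->EndPause();
      }
     private:
      GcPauseListener* const listener_;  // may be null: reporting becomes a no-op
    };
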
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 01bcb7df19..14fd332b57 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -158,22 +158,26 @@ void GarbageCollector::ResetMeasurements() {
total_freed_bytes_ = 0;
}
-GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector)
- : start_time_(NanoTime()), collector_(collector) {
+GarbageCollector::ScopedPause::ScopedPause(GarbageCollector* collector, bool with_reporting)
+ : start_time_(NanoTime()), collector_(collector), with_reporting_(with_reporting) {
Runtime* runtime = Runtime::Current();
runtime->GetThreadList()->SuspendAll(__FUNCTION__);
- GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
- if (pause_listener != nullptr) {
- pause_listener->StartPause();
+ if (with_reporting) {
+ GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->StartPause();
+ }
}
}
GarbageCollector::ScopedPause::~ScopedPause() {
collector_->RegisterPause(NanoTime() - start_time_);
Runtime* runtime = Runtime::Current();
- GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
- if (pause_listener != nullptr) {
- pause_listener->EndPause();
+ if (with_reporting_) {
+ GcPauseListener* pause_listener = runtime->GetHeap()->GetGcPauseListener();
+ if (pause_listener != nullptr) {
+ pause_listener->EndPause();
+ }
}
runtime->GetThreadList()->ResumeAll();
}
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 0177e2a1ad..95601d736d 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -126,12 +126,14 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
public:
class SCOPED_LOCKABLE ScopedPause {
public:
- explicit ScopedPause(GarbageCollector* collector) EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_);
+ explicit ScopedPause(GarbageCollector* collector, bool with_reporting = true)
+ EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_);
~ScopedPause() UNLOCK_FUNCTION();
private:
const uint64_t start_time_;
GarbageCollector* const collector_;
+ bool with_reporting_;
};
GarbageCollector(Heap* heap, const std::string& name);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 70449797c1..f5bf935323 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3352,7 +3352,7 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
void Heap::PreGcVerification(collector::GarbageCollector* gc) {
if (verify_pre_gc_heap_ || verify_missing_card_marks_ || verify_mod_union_table_) {
- collector::GarbageCollector::ScopedPause pause(gc);
+ collector::GarbageCollector::ScopedPause pause(gc, false);
PreGcVerificationPaused(gc);
}
}
@@ -3420,7 +3420,7 @@ void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) {
void Heap::PostGcVerification(collector::GarbageCollector* gc) {
if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
- collector::GarbageCollector::ScopedPause pause(gc);
+ collector::GarbageCollector::ScopedPause pause(gc, false);
PostGcVerificationPaused(gc);
}
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index e03958d717..e56f0dc613 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -1225,9 +1225,9 @@ class ImageSpaceLoader {
}
dex_cache->FixupStrings<kWithoutReadBarrier>(new_strings, fixup_adapter);
}
- GcRoot<mirror::Class>* types = dex_cache->GetResolvedTypes();
+ mirror::TypeDexCacheType* types = dex_cache->GetResolvedTypes();
if (types != nullptr) {
- GcRoot<mirror::Class>* new_types = fixup_adapter.ForwardObject(types);
+ mirror::TypeDexCacheType* new_types = fixup_adapter.ForwardObject(types);
if (types != new_types) {
dex_cache->SetResolvedTypes(new_types);
}
diff --git a/runtime/image.cc b/runtime/image.cc
index 54b099eb14..87f429568d 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '6', '\0' }; // Erroneous resolved class.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '7', '\0' }; // hash-based DexCache types
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index bbd6d352d3..f11e2cba10 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -105,10 +105,9 @@ static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
method->SetEntryPointFromQuickCompiledCode(quick_code);
}
-bool Instrumentation::NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
+bool Instrumentation::NeedDebugVersionFor(ArtMethod* method) const
+    REQUIRES_SHARED(Locks::mutator_lock_) {
return Dbg::IsDebuggerActive() &&
- Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
+ Runtime::Current()->IsJavaDebuggable() &&
!method->IsNative() &&
!method->IsProxyMethod();
}
@@ -132,9 +131,10 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
if ((forced_interpret_only_ || IsDeoptimized(method)) && !method->IsNative()) {
new_quick_code = GetQuickToInterpreterBridge();
} else if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
- new_quick_code = class_linker->GetQuickOatCodeFor(method);
- if (NeedDebugVersionForBootImageCode(method, new_quick_code)) {
+ if (NeedDebugVersionFor(method)) {
new_quick_code = GetQuickToInterpreterBridge();
+ } else {
+ new_quick_code = class_linker->GetQuickOatCodeFor(method);
}
} else {
new_quick_code = GetQuickResolutionStub();
@@ -148,13 +148,14 @@ void Instrumentation::InstallStubsForMethod(ArtMethod* method) {
// class, all its static methods code will be set to the instrumentation entry point.
// For more details, see ClassLinker::FixupStaticTrampolines.
if (is_class_initialized || !method->IsStatic() || method->IsConstructor()) {
- new_quick_code = class_linker->GetQuickOatCodeFor(method);
- if (NeedDebugVersionForBootImageCode(method, new_quick_code)) {
+ if (NeedDebugVersionFor(method)) {
// Oat code should not be used. Don't install instrumentation stub and
// use interpreter for instrumentation.
new_quick_code = GetQuickToInterpreterBridge();
} else if (entry_exit_stubs_installed_) {
new_quick_code = GetQuickInstrumentationEntryPoint();
+ } else {
+ new_quick_code = class_linker->GetQuickOatCodeFor(method);
}
} else {
new_quick_code = GetQuickResolutionStub();
@@ -557,10 +558,8 @@ void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t
}
Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentationLevel() const {
- if (interpreter_stubs_installed_ && interpret_only_) {
+ if (interpreter_stubs_installed_) {
return InstrumentationLevel::kInstrumentWithInterpreter;
- } else if (interpreter_stubs_installed_) {
- return InstrumentationLevel::kInstrumentWithInterpreterAndJit;
} else if (entry_exit_stubs_installed_) {
return InstrumentationLevel::kInstrumentWithInstrumentationStubs;
} else {
@@ -569,11 +568,8 @@ Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentation
}
bool Instrumentation::RequiresInstrumentationInstallation(InstrumentationLevel new_level) const {
- // We need to reinstall instrumentation if we go to a different level or if the current level is
- // kInstrumentWithInterpreterAndJit since that level does not force all code to always use the
- // interpreter and so we might have started running optimized code again.
- return new_level == InstrumentationLevel::kInstrumentWithInterpreterAndJit ||
- GetCurrentInstrumentationLevel() != new_level;
+ // We need to reinstall instrumentation if we go to a different level.
+ return GetCurrentInstrumentationLevel() != new_level;
}
void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desired_level) {
@@ -604,7 +600,7 @@ void Instrumentation::ConfigureStubs(const char* key, InstrumentationLevel desir
Locks::mutator_lock_->AssertExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
if (requested_level > InstrumentationLevel::kInstrumentNothing) {
- if (requested_level >= InstrumentationLevel::kInstrumentWithInterpreterAndJit) {
+ if (requested_level == InstrumentationLevel::kInstrumentWithInterpreter) {
interpreter_stubs_installed_ = true;
entry_exit_stubs_installed_ = true;
} else {
@@ -731,10 +727,12 @@ void Instrumentation::UpdateMethodsCode(ArtMethod* method, const void* quick_cod
UpdateMethodsCodeImpl(method, quick_code);
}
-void Instrumentation::UpdateMethodsCodeFromDebugger(ArtMethod* method, const void* quick_code) {
- // When debugger attaches, we may update the entry points of all methods of a class
- // to the interpreter bridge. A method's declaring class might not be in resolved
- // state yet in that case.
+void Instrumentation::UpdateMethodsCodeForJavaDebuggable(ArtMethod* method,
+ const void* quick_code) {
+ // When the runtime is set to Java debuggable, we may update the entry points of
+ // all methods of a class to the interpreter bridge. A method's declaring class
+ // might not be in resolved state yet in that case, so we bypass the DCHECK in
+ // UpdateMethodsCode.
UpdateMethodsCodeImpl(method, quick_code);
}
@@ -819,10 +817,9 @@ void Instrumentation::Undeoptimize(ArtMethod* method) {
!method->GetDeclaringClass()->IsInitialized()) {
UpdateEntrypoints(method, GetQuickResolutionStub());
} else {
- const void* quick_code = class_linker->GetQuickOatCodeFor(method);
- if (NeedDebugVersionForBootImageCode(method, quick_code)) {
- quick_code = GetQuickToInterpreterBridge();
- }
+ const void* quick_code = NeedDebugVersionFor(method)
+ ? GetQuickToInterpreterBridge()
+ : class_linker->GetQuickOatCodeFor(method);
UpdateEntrypoints(method, quick_code);
}
@@ -879,14 +876,6 @@ bool Instrumentation::ShouldNotifyMethodEnterExitEvents() const {
return !deoptimization_enabled_ && !interpreter_stubs_installed_;
}
-// TODO we don't check deoptimization_enabled_ because currently there isn't really any support for
-// multiple users of instrumentation. Since this is just a temporary state anyway pending work to
-// ensure that the current_method doesn't get kept across suspend points this should be okay.
-// TODO Remove once b/33630159 is resolved.
-void Instrumentation::ReJitEverything(const char* key) {
- ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreterAndJit);
-}
-
void Instrumentation::DeoptimizeEverything(const char* key) {
CHECK(deoptimization_enabled_);
ConfigureStubs(key, InstrumentationLevel::kInstrumentWithInterpreter);
@@ -1114,7 +1103,7 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt
bool deoptimize = (visitor.caller != nullptr) &&
(interpreter_stubs_installed_ || IsDeoptimized(visitor.caller) ||
Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller));
- if (deoptimize && Runtime::Current()->IsDeoptimizeable(*return_pc)) {
+ if (deoptimize && Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
if (kVerboseInstrumentation) {
LOG(INFO) << "Deoptimizing "
<< visitor.caller->PrettyMethod()
@@ -1132,6 +1121,10 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt
return GetTwoWordSuccessValue(*return_pc,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
} else {
+ if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << method->PrettyMethod()
+ << " at PC " << reinterpret_cast<void*>(*return_pc);
+ }
if (kVerboseInstrumentation) {
LOG(INFO) << "Returning from " << method->PrettyMethod()
<< " to PC " << reinterpret_cast<void*>(*return_pc);
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 05c0aaa081..01071a541f 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -133,9 +133,6 @@ class Instrumentation {
enum class InstrumentationLevel {
kInstrumentNothing, // execute without instrumentation
kInstrumentWithInstrumentationStubs, // execute with instrumentation entry/exit stubs
- kInstrumentWithInterpreterAndJit, // execute with interpreter initially and later the JIT
- // (if it is enabled). This level is special in that it
- // always requires re-instrumentation.
kInstrumentWithInterpreter // execute with interpreter
};
@@ -166,13 +163,6 @@ class Instrumentation {
}
bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);
- // Executes everything with the interpreter/jit (if available).
- void ReJitEverything(const char* key)
- REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
- REQUIRES(!Locks::thread_list_lock_,
- !Locks::classlinker_classes_lock_,
- !deoptimized_methods_lock_);
-
// Executes everything with interpreter.
void DeoptimizeEverything(const char* key)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
@@ -239,7 +229,7 @@ class Instrumentation {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Update the code of a method respecting any installed stubs from debugger.
- void UpdateMethodsCodeFromDebugger(ArtMethod* method, const void* quick_code)
+ void UpdateMethodsCodeForJavaDebuggable(ArtMethod* method, const void* quick_code)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Get the quick code for the given method. More efficient than asking the class linker as it
@@ -264,7 +254,7 @@ class Instrumentation {
// Code is in boot image oat file which isn't compiled as debuggable.
// Need debug version (interpreter or jitted) if that's the case.
- bool NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
+ bool NeedDebugVersionFor(ArtMethod* method) const
REQUIRES_SHARED(Locks::mutator_lock_);
bool AreExitStubsInstalled() const {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 28bcb97105..c235317020 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -438,14 +438,22 @@ void AbortTransactionV(Thread* self, const char* fmt, va_list args) {
// about ALWAYS_INLINE (-Werror, -Wgcc-compat) in definitions.
//
+// b/30419309
+#if defined(__i386__)
+#define IF_X86_OPTNONE_ELSE_ALWAYS_INLINE __attribute__((optnone))
+#else
+#define IF_X86_OPTNONE_ELSE_ALWAYS_INLINE ALWAYS_INLINE
+#endif
+
template <bool is_range, bool do_assignability_check>
-static ALWAYS_INLINE bool DoCallCommon(ArtMethod* called_method,
- Thread* self,
- ShadowFrame& shadow_frame,
- JValue* result,
- uint16_t number_of_inputs,
- uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- uint32_t vregC) REQUIRES_SHARED(Locks::mutator_lock_);
+IF_X86_OPTNONE_ELSE_ALWAYS_INLINE
+static bool DoCallCommon(ArtMethod* called_method,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ JValue* result,
+ uint16_t number_of_inputs,
+ uint32_t (&arg)[Instruction::kMaxVarArgRegs],
+ uint32_t vregC) REQUIRES_SHARED(Locks::mutator_lock_);
template <bool is_range>
ALWAYS_INLINE void CopyRegisters(ShadowFrame& caller_frame,
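
The macro above trades inlining of DoCallCommon for __attribute__((optnone)) on x86 only, sidestepping the compiler issue tracked in b/30419309. The general pattern, reduced to a hedged standalone example with hypothetical names:

    // Select a function attribute per target at preprocessing time.
    #if defined(__i386__)
    #define MAYBE_OPTNONE __attribute__((optnone))  // disable optimization on x86 only
    #else
    #define MAYBE_OPTNONE inline
    #endif

    MAYBE_OPTNONE static int Square(int x) { return x * x; }

    int main() { return Square(3) == 9 ? 0 : 1; }
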
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index aeb438f05f..7ef3508164 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -255,17 +255,11 @@ static inline ObjPtr<mirror::String> ResolveString(Thread* self,
}
}
ArtMethod* method = shadow_frame.GetMethod();
- // MethodVerifier refuses methods with string_idx out of bounds.
- DCHECK_LT(string_idx.index_ % mirror::DexCache::kDexCacheStringCacheSize,
- method->GetDexFile()->NumStringIds());
- ObjPtr<mirror::String> string_ptr =
- mirror::StringDexCachePair::Lookup(method->GetDexCache()->GetStrings(),
- string_idx.index_,
- mirror::DexCache::kDexCacheStringCacheSize).Read();
+ ObjPtr<mirror::String> string_ptr = method->GetDexCache()->GetResolvedString(string_idx);
if (UNLIKELY(string_ptr == nullptr)) {
StackHandleScope<1> hs(self);
Handle<mirror::DexCache> dex_cache(hs.NewHandle(method->GetDexCache()));
- string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*method->GetDexFile(),
+ string_ptr = Runtime::Current()->GetClassLinker()->ResolveString(*dex_cache->GetDexFile(),
string_idx,
dex_cache);
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 6336cddc07..45611a93f7 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -594,7 +594,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
return nullptr;
}
- DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsDebuggable())
+ DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
<< "Should not be using cha on debuggable apps/runs!";
for (ArtMethod* single_impl : cha_single_implementation_list) {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f08d4daf95..85636fb5b1 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -951,7 +951,8 @@ ObjPtr<Class> Class::GetDirectInterface(Thread* self, ObjPtr<Class> klass, uint3
return interfaces->Get(idx);
} else {
dex::TypeIndex type_idx = klass->GetDirectInterfaceTypeIdx(idx);
- ObjPtr<Class> interface = klass->GetDexCache()->GetResolvedType(type_idx);
+ ObjPtr<Class> interface = ClassLinker::LookupResolvedType(
+ type_idx, klass->GetDexCache(), klass->GetClassLoader());
return interface;
}
}
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index a59bb7b880..bef3ad29a3 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -40,14 +40,22 @@ inline uint32_t DexCache::ClassSize(PointerSize pointer_size) {
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
-inline mirror::String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
+inline uint32_t DexCache::StringSlotIndex(dex::StringIndex string_idx) {
DCHECK_LT(string_idx.index_, GetDexFile()->NumStringIds());
- return StringDexCachePair::Lookup(GetStrings(), string_idx.index_, NumStrings()).Read();
+ const uint32_t slot_idx = string_idx.index_ % kDexCacheStringCacheSize;
+ DCHECK_LT(slot_idx, NumStrings());
+ return slot_idx;
}
-inline void DexCache::SetResolvedString(dex::StringIndex string_idx,
- ObjPtr<mirror::String> resolved) {
- StringDexCachePair::Assign(GetStrings(), string_idx.index_, resolved.Ptr(), NumStrings());
+inline String* DexCache::GetResolvedString(dex::StringIndex string_idx) {
+ return GetStrings()[StringSlotIndex(string_idx)].load(
+ std::memory_order_relaxed).GetObjectForIndex(string_idx.index_);
+}
+
+inline void DexCache::SetResolvedString(dex::StringIndex string_idx, ObjPtr<String> resolved) {
+ DCHECK(resolved != nullptr);
+ GetStrings()[StringSlotIndex(string_idx)].store(
+ StringDexCachePair(resolved, string_idx.index_), std::memory_order_relaxed);
Runtime* const runtime = Runtime::Current();
if (UNLIKELY(runtime->IsActiveTransaction())) {
DCHECK(runtime->IsAotCompiler());
@@ -58,50 +66,70 @@ inline void DexCache::SetResolvedString(dex::StringIndex string_idx,
}
inline void DexCache::ClearString(dex::StringIndex string_idx) {
- const uint32_t slot_idx = string_idx.index_ % NumStrings();
DCHECK(Runtime::Current()->IsAotCompiler());
+ uint32_t slot_idx = StringSlotIndex(string_idx);
StringDexCacheType* slot = &GetStrings()[slot_idx];
// This is racy but should only be called from the transactional interpreter.
if (slot->load(std::memory_order_relaxed).index == string_idx.index_) {
- StringDexCachePair cleared(
- nullptr,
- StringDexCachePair::InvalidIndexForSlot(slot_idx));
+ StringDexCachePair cleared(nullptr, StringDexCachePair::InvalidIndexForSlot(slot_idx));
slot->store(cleared, std::memory_order_relaxed);
}
}
+inline uint32_t DexCache::TypeSlotIndex(dex::TypeIndex type_idx) {
+ DCHECK_LT(type_idx.index_, GetDexFile()->NumTypeIds());
+ const uint32_t slot_idx = type_idx.index_ % kDexCacheTypeCacheSize;
+ DCHECK_LT(slot_idx, NumResolvedTypes());
+ return slot_idx;
+}
+
inline Class* DexCache::GetResolvedType(dex::TypeIndex type_idx) {
// It is theorized that a load acquire is not required since obtaining the resolved class will
// always have an address dependency or a lock.
- DCHECK_LT(type_idx.index_, NumResolvedTypes());
- return GetResolvedTypes()[type_idx.index_].Read();
+ return GetResolvedTypes()[TypeSlotIndex(type_idx)].load(
+ std::memory_order_relaxed).GetObjectForIndex(type_idx.index_);
}
inline void DexCache::SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved) {
- DCHECK_LT(type_idx.index_, NumResolvedTypes()); // NOTE: Unchecked, i.e. not throwing AIOOB.
+ DCHECK(resolved != nullptr);
// TODO default transaction support.
// Use a release store for SetResolvedType. This is done to prevent other threads from seeing a
// class but not necessarily seeing the loaded members like the static fields array.
// See b/32075261.
- reinterpret_cast<Atomic<GcRoot<mirror::Class>>&>(GetResolvedTypes()[type_idx.index_]).
- StoreRelease(GcRoot<Class>(resolved));
+ GetResolvedTypes()[TypeSlotIndex(type_idx)].store(
+ TypeDexCachePair(resolved, type_idx.index_), std::memory_order_release);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
-inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) {
- DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
- DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds());
- return MethodTypeDexCachePair::Lookup(
- GetResolvedMethodTypes(), proto_idx, NumResolvedMethodTypes()).Read();
+inline void DexCache::ClearResolvedType(dex::TypeIndex type_idx) {
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ uint32_t slot_idx = TypeSlotIndex(type_idx);
+ TypeDexCacheType* slot = &GetResolvedTypes()[slot_idx];
+ // This is racy but should only be called from the single-threaded ImageWriter and tests.
+ if (slot->load(std::memory_order_relaxed).index == type_idx.index_) {
+ TypeDexCachePair cleared(nullptr, TypeDexCachePair::InvalidIndexForSlot(slot_idx));
+ slot->store(cleared, std::memory_order_relaxed);
+ }
}
-inline void DexCache::SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) {
+inline uint32_t DexCache::MethodTypeSlotIndex(uint32_t proto_idx) {
DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
DCHECK_LT(proto_idx, GetDexFile()->NumProtoIds());
+ const uint32_t slot_idx = proto_idx % kDexCacheMethodTypeCacheSize;
+ DCHECK_LT(slot_idx, NumResolvedMethodTypes());
+ return slot_idx;
+}
- MethodTypeDexCachePair::Assign(GetResolvedMethodTypes(), proto_idx, resolved,
- NumResolvedMethodTypes());
+inline MethodType* DexCache::GetResolvedMethodType(uint32_t proto_idx) {
+ return GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].load(
+ std::memory_order_relaxed).GetObjectForIndex(proto_idx);
+}
+
+inline void DexCache::SetResolvedMethodType(uint32_t proto_idx, MethodType* resolved) {
+ DCHECK(resolved != nullptr);
+ GetResolvedMethodTypes()[MethodTypeSlotIndex(proto_idx)].store(
+ MethodTypeDexCachePair(resolved, proto_idx), std::memory_order_relaxed);
// TODO: Fine-grained marking, so that we don't need to go through all arrays in full.
Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(this);
}
@@ -198,49 +226,49 @@ inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visito
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
// Visit arrays after.
if (kVisitNativeRoots) {
- VisitDexCachePairs<mirror::String, kReadBarrierOption, Visitor>(
+ VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
GetStrings(), NumStrings(), visitor);
- GcRoot<mirror::Class>* resolved_types = GetResolvedTypes();
- for (size_t i = 0, num_types = NumResolvedTypes(); i != num_types; ++i) {
- visitor.VisitRootIfNonNull(resolved_types[i].AddressWithoutBarrier());
- }
+ VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
+ GetResolvedTypes(), NumResolvedTypes(), visitor);
- VisitDexCachePairs<mirror::MethodType, kReadBarrierOption, Visitor>(
+ VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
GetResolvedMethodTypes(), NumResolvedMethodTypes(), visitor);
}
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void DexCache::FixupStrings(mirror::StringDexCacheType* dest, const Visitor& visitor) {
- mirror::StringDexCacheType* src = GetStrings();
+inline void DexCache::FixupStrings(StringDexCacheType* dest, const Visitor& visitor) {
+ StringDexCacheType* src = GetStrings();
for (size_t i = 0, count = NumStrings(); i < count; ++i) {
StringDexCachePair source = src[i].load(std::memory_order_relaxed);
- mirror::String* ptr = source.object.Read<kReadBarrierOption>();
- mirror::String* new_source = visitor(ptr);
+ String* ptr = source.object.Read<kReadBarrierOption>();
+ String* new_source = visitor(ptr);
source.object = GcRoot<String>(new_source);
dest[i].store(source, std::memory_order_relaxed);
}
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void DexCache::FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor) {
- GcRoot<mirror::Class>* src = GetResolvedTypes();
+inline void DexCache::FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor) {
+ TypeDexCacheType* src = GetResolvedTypes();
for (size_t i = 0, count = NumResolvedTypes(); i < count; ++i) {
- mirror::Class* source = src[i].Read<kReadBarrierOption>();
- mirror::Class* new_source = visitor(source);
- dest[i] = GcRoot<mirror::Class>(new_source);
+ TypeDexCachePair source = src[i].load(std::memory_order_relaxed);
+ Class* ptr = source.object.Read<kReadBarrierOption>();
+ Class* new_source = visitor(ptr);
+ source.object = GcRoot<Class>(new_source);
+ dest[i].store(source, std::memory_order_relaxed);
}
}
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
-inline void DexCache::FixupResolvedMethodTypes(mirror::MethodTypeDexCacheType* dest,
+inline void DexCache::FixupResolvedMethodTypes(MethodTypeDexCacheType* dest,
const Visitor& visitor) {
- mirror::MethodTypeDexCacheType* src = GetResolvedMethodTypes();
+ MethodTypeDexCacheType* src = GetResolvedMethodTypes();
for (size_t i = 0, count = NumResolvedMethodTypes(); i < count; ++i) {
MethodTypeDexCachePair source = src[i].load(std::memory_order_relaxed);
- mirror::MethodType* ptr = source.object.Read<kReadBarrierOption>();
- mirror::MethodType* new_source = visitor(ptr);
+ MethodType* ptr = source.object.Read<kReadBarrierOption>();
+ MethodType* new_source = visitor(ptr);
source.object = GcRoot<MethodType>(new_source);
dest[i].store(source, std::memory_order_relaxed);
}
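
All three caches (strings, types, method types) now share one slot scheme: an array of atomically loaded and stored {object, index} pairs, where a lookup only hits when the stored index equals the requested one. A standalone toy model of that scheme (raw pointers instead of GcRoot, no read barriers, not ART code):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kCacheSize = 1024;  // must be a power of two

    template <typename T> struct Pair {
      T* object;
      uint32_t index;
    };

    // Id 0 always maps to slot 0 (power-of-two size), so slot 0 starts with
    // sentinel index 1; every other slot starts with 0, which cannot map there.
    constexpr uint32_t InvalidIndexForSlot(uint32_t slot) { return slot == 0 ? 1u : 0u; }

    template <typename T> class HashedCache {
     public:
      HashedCache() {
        for (uint32_t i = 0; i < kCacheSize; ++i) {
          slots_[i].store(Pair<T>{nullptr, InvalidIndexForSlot(i)}, std::memory_order_relaxed);
        }
      }
      T* Get(uint32_t id) {
        Pair<T> p = slots_[id % kCacheSize].load(std::memory_order_relaxed);
        return p.index == id ? p.object : nullptr;  // miss when another id owns the slot
      }
      void Set(uint32_t id, T* obj) {
        assert(obj != nullptr);  // null is not representable; clearing uses the sentinel
        slots_[id % kCacheSize].store(Pair<T>{obj, id}, std::memory_order_relaxed);
      }
     private:
      std::atomic<Pair<T>> slots_[kCacheSize];
    };

    int main() {
      HashedCache<int> cache;
      int x = 42;
      cache.Set(5u, &x);
      assert(cache.Get(5u) == &x);
      assert(cache.Get(5u + kCacheSize) == nullptr);  // same slot, different id
    }

The toy keeps every access relaxed for brevity; the real type cache uses a release store in SetResolvedType() so that a published class is observed together with its loaded members (b/32075261).
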
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 741cf3bb47..3103a92c83 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -58,8 +58,8 @@ void DexCache::InitializeDexCache(Thread* self,
mirror::StringDexCacheType* strings = (dex_file->NumStringIds() == 0u) ? nullptr :
reinterpret_cast<mirror::StringDexCacheType*>(raw_arrays + layout.StringsOffset());
- GcRoot<mirror::Class>* types = (dex_file->NumTypeIds() == 0u) ? nullptr :
- reinterpret_cast<GcRoot<mirror::Class>*>(raw_arrays + layout.TypesOffset());
+ mirror::TypeDexCacheType* types = (dex_file->NumTypeIds() == 0u) ? nullptr :
+ reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
ArtMethod** methods = (dex_file->NumMethodIds() == 0u) ? nullptr :
reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset());
ArtField** fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
@@ -69,6 +69,10 @@ void DexCache::InitializeDexCache(Thread* self,
if (dex_file->NumStringIds() < num_strings) {
num_strings = dex_file->NumStringIds();
}
+ size_t num_types = mirror::DexCache::kDexCacheTypeCacheSize;
+ if (dex_file->NumTypeIds() < num_types) {
+ num_types = dex_file->NumTypeIds();
+ }
// Note that we allocate the method type dex caches regardless of this flag,
// and we make sure here that they're not used by the runtime. This is in the
@@ -104,8 +108,9 @@ void DexCache::InitializeDexCache(Thread* self,
CHECK_EQ(strings[i].load(std::memory_order_relaxed).index, 0u);
CHECK(strings[i].load(std::memory_order_relaxed).object.IsNull());
}
- for (size_t i = 0; i < dex_file->NumTypeIds(); ++i) {
- CHECK(types[i].IsNull());
+ for (size_t i = 0; i < num_types; ++i) {
+ CHECK_EQ(types[i].load(std::memory_order_relaxed).index, 0u);
+ CHECK(types[i].load(std::memory_order_relaxed).object.IsNull());
}
for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
CHECK(mirror::DexCache::GetElementPtrSize(methods, i, image_pointer_size) == nullptr);
@@ -121,6 +126,9 @@ void DexCache::InitializeDexCache(Thread* self,
if (strings != nullptr) {
mirror::StringDexCachePair::Initialize(strings);
}
+ if (types != nullptr) {
+ mirror::TypeDexCachePair::Initialize(types);
+ }
if (method_types != nullptr) {
mirror::MethodTypeDexCachePair::Initialize(method_types);
}
@@ -129,7 +137,7 @@ void DexCache::InitializeDexCache(Thread* self,
strings,
num_strings,
types,
- dex_file->NumTypeIds(),
+ num_types,
methods,
dex_file->NumMethodIds(),
fields,
@@ -143,7 +151,7 @@ void DexCache::Init(const DexFile* dex_file,
ObjPtr<String> location,
StringDexCacheType* strings,
uint32_t num_strings,
- GcRoot<Class>* resolved_types,
+ TypeDexCacheType* resolved_types,
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
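
As with strings, the resolved-types array is now allocated at min(kDexCacheTypeCacheSize, NumTypeIds()), so small dex files do not pay for 1024 slots. The clamp above, written compactly (an equivalent hedged formulation; the patch spells it out with an if to match the existing strings code):

    #include <algorithm>
    #include <cstddef>

    constexpr size_t kDexCacheTypeCacheSize = 1024;

    // Number of type slots to allocate for a dex file with the given id count.
    size_t NumTypeSlots(size_t num_type_ids) {
      return std::min(kDexCacheTypeCacheSize, num_type_ids);
    }
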
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 6f88cc5df4..e68b0c7219 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -18,14 +18,14 @@
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_
#include "array.h"
-#include "art_field.h"
-#include "class.h"
+#include "base/bit_utils.h"
#include "dex_file_types.h"
#include "object.h"
#include "object_array.h"
namespace art {
+class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
@@ -36,6 +36,7 @@ class Thread;
namespace mirror {
+class Class;
class MethodType;
class String;
@@ -60,7 +61,7 @@ template <typename T> struct PACKED(8) DexCachePair {
// it's always non-null if the id branch succeeds (except for the 0th id).
// Set the initial state for the 0th entry to be {0,1} which is guaranteed to fail
// the lookup id == stored id branch.
- DexCachePair(T* object, uint32_t index)
+ DexCachePair(ObjPtr<T> object, uint32_t index)
: object(object),
index(index) {}
DexCachePair() = default;
@@ -74,39 +75,28 @@ template <typename T> struct PACKED(8) DexCachePair {
dex_cache[0].store(first_elem, std::memory_order_relaxed);
}
- static GcRoot<T> Lookup(std::atomic<DexCachePair<T>>* dex_cache,
- uint32_t idx,
- uint32_t cache_size) {
- DCHECK_NE(cache_size, 0u);
- DexCachePair<T> element = dex_cache[idx % cache_size].load(std::memory_order_relaxed);
- if (idx != element.index) {
- return GcRoot<T>(nullptr);
- }
-
- DCHECK(!element.object.IsNull());
- return element.object;
- }
-
- static void Assign(std::atomic<DexCachePair<T>>* dex_cache,
- uint32_t idx,
- T* object,
- uint32_t cache_size) {
- DCHECK_LT(idx % cache_size, cache_size);
- dex_cache[idx % cache_size].store(
- DexCachePair<T>(object, idx), std::memory_order_relaxed);
- }
-
static uint32_t InvalidIndexForSlot(uint32_t slot) {
// Since the cache size is a power of two, 0 will always map to slot 0.
// Use 1 for slot 0 and 0 for all other slots.
return (slot == 0) ? 1u : 0u;
}
+
+ T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (idx != index) {
+ return nullptr;
+ }
+ DCHECK(!object.IsNull());
+ return object.Read();
+ }
};
-using StringDexCachePair = DexCachePair<mirror::String>;
+using TypeDexCachePair = DexCachePair<Class>;
+using TypeDexCacheType = std::atomic<TypeDexCachePair>;
+
+using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
-using MethodTypeDexCachePair = DexCachePair<mirror::MethodType>;
+using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
// C++ mirror of java.lang.DexCache.
@@ -115,6 +105,11 @@ class MANAGED DexCache FINAL : public Object {
// Size of java.lang.DexCache.class.
static uint32_t ClassSize(PointerSize pointer_size);
+ // Size of type dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
+ static constexpr size_t kDexCacheTypeCacheSize = 1024;
+ static_assert(IsPowerOfTwo(kDexCacheTypeCacheSize),
+ "Type dex cache size is not a power of 2.");
+
// Size of string dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
static constexpr size_t kDexCacheStringCacheSize = 1024;
static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
@@ -126,6 +121,10 @@ class MANAGED DexCache FINAL : public Object {
static_assert(IsPowerOfTwo(kDexCacheMethodTypeCacheSize),
"MethodType dex cache size is not a power of 2.");
+ static constexpr size_t StaticTypeSize() {
+ return kDexCacheTypeCacheSize;
+ }
+
static constexpr size_t StaticStringSize() {
return kDexCacheStringCacheSize;
}
@@ -156,7 +155,7 @@ class MANAGED DexCache FINAL : public Object {
REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
- void FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor)
+ void FixupResolvedTypes(TypeDexCacheType* dest, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
@@ -211,7 +210,7 @@ class MANAGED DexCache FINAL : public Object {
return OFFSET_OF_OBJECT_MEMBER(DexCache, num_resolved_method_types_);
}
- mirror::String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
+ String* GetResolvedString(dex::StringIndex string_idx) ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_);
void SetResolvedString(dex::StringIndex string_idx, ObjPtr<mirror::String> resolved) ALWAYS_INLINE
@@ -226,6 +225,8 @@ class MANAGED DexCache FINAL : public Object {
void SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void ClearResolvedType(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -254,11 +255,11 @@ class MANAGED DexCache FINAL : public Object {
SetFieldPtr<false>(StringsOffset(), strings);
}
- GcRoot<Class>* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr<GcRoot<Class>*>(ResolvedTypesOffset());
+ TypeDexCacheType* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldPtr<TypeDexCacheType*>(ResolvedTypesOffset());
}
- void SetResolvedTypes(GcRoot<Class>* resolved_types)
+ void SetResolvedTypes(TypeDexCacheType* resolved_types)
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
@@ -323,7 +324,7 @@ class MANAGED DexCache FINAL : public Object {
SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
}
- void SetLocation(ObjPtr<mirror::String> location) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetLocation(ObjPtr<String> location) REQUIRES_SHARED(Locks::mutator_lock_);
// NOTE: Get/SetElementPtrSize() are intended for working with ArtMethod** and ArtField**
// provided by GetResolvedMethods/Fields() and ArtMethod::GetDexCacheResolvedMethods(),
@@ -340,7 +341,7 @@ class MANAGED DexCache FINAL : public Object {
ObjPtr<String> location,
StringDexCacheType* strings,
uint32_t num_strings,
- GcRoot<Class>* resolved_types,
+ TypeDexCacheType* resolved_types,
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
@@ -351,12 +352,16 @@ class MANAGED DexCache FINAL : public Object {
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Visit instance fields of the dex cache as well as its associated arrays.
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
- void VisitReferences(ObjPtr<mirror::Class> klass, const Visitor& visitor)
+ void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
HeapReference<Object> dex_;
@@ -366,7 +371,7 @@ class MANAGED DexCache FINAL : public Object {
uint64_t resolved_method_types_; // std::atomic<MethodTypeDexCachePair>* array with
// num_resolved_method_types_ elements.
uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
- uint64_t resolved_types_; // GcRoot<Class>*, array with num_resolved_types_ elements.
+ uint64_t resolved_types_; // TypeDexCacheType*, array with num_resolved_types_ elements.
uint64_t strings_; // std::atomic<StringDexCachePair>*, array with num_strings_
// elements.
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index 8f978e122c..5693f67646 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -51,7 +51,8 @@ TEST_F(DexCacheTest, Open) {
EXPECT_TRUE(dex_cache->StaticStringSize() == dex_cache->NumStrings()
|| java_lang_dex_file_->NumStringIds() == dex_cache->NumStrings());
- EXPECT_EQ(java_lang_dex_file_->NumTypeIds(), dex_cache->NumResolvedTypes());
+ EXPECT_TRUE(dex_cache->StaticTypeSize() == dex_cache->NumResolvedTypes()
+ || java_lang_dex_file_->NumTypeIds() == dex_cache->NumResolvedTypes());
EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
EXPECT_TRUE(dex_cache->StaticMethodTypeSize() == dex_cache->NumResolvedMethodTypes()
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 10fc90bc27..fd22d9e646 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -71,7 +71,7 @@ static void EnableDebugger() {
static void EnableDebugFeatures(uint32_t debug_flags) {
// Must match values in com.android.internal.os.Zygote.
enum {
- DEBUG_ENABLE_DEBUGGER = 1,
+ DEBUG_ENABLE_JDWP = 1,
DEBUG_ENABLE_CHECKJNI = 1 << 1,
DEBUG_ENABLE_ASSERT = 1 << 2,
DEBUG_ENABLE_SAFEMODE = 1 << 3,
@@ -79,6 +79,7 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
DEBUG_GENERATE_DEBUG_INFO = 1 << 5,
DEBUG_ALWAYS_JIT = 1 << 6,
DEBUG_NATIVE_DEBUGGABLE = 1 << 7,
+ DEBUG_JAVA_DEBUGGABLE = 1 << 8,
};
Runtime* const runtime = Runtime::Current();
@@ -100,11 +101,11 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
debug_flags &= ~DEBUG_ENABLE_JNI_LOGGING;
}
- Dbg::SetJdwpAllowed((debug_flags & DEBUG_ENABLE_DEBUGGER) != 0);
- if ((debug_flags & DEBUG_ENABLE_DEBUGGER) != 0) {
+ Dbg::SetJdwpAllowed((debug_flags & DEBUG_ENABLE_JDWP) != 0);
+ if ((debug_flags & DEBUG_ENABLE_JDWP) != 0) {
EnableDebugger();
}
- debug_flags &= ~DEBUG_ENABLE_DEBUGGER;
+ debug_flags &= ~DEBUG_ENABLE_JDWP;
const bool safe_mode = (debug_flags & DEBUG_ENABLE_SAFEMODE) != 0;
if (safe_mode) {
@@ -130,6 +131,14 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
debug_flags &= ~DEBUG_ALWAYS_JIT;
}
+ if ((debug_flags & DEBUG_JAVA_DEBUGGABLE) != 0) {
+ runtime->AddCompilerOption("--debuggable");
+ runtime->SetJavaDebuggable(true);
+ // Deoptimize the boot image as it may be non-debuggable.
+ runtime->DeoptimizeBootImage();
+ debug_flags &= ~DEBUG_JAVA_DEBUGGABLE;
+ }
+
if ((debug_flags & DEBUG_NATIVE_DEBUGGABLE) != 0) {
runtime->AddCompilerOption("--debuggable");
runtime->AddCompilerOption("--generate-debug-info");
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index f1c350f23c..0b667fec45 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -53,7 +53,7 @@ static jobject DexCache_getDexNative(JNIEnv* env, jobject javaDexCache) {
static jobject DexCache_getResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- CHECK_LT(static_cast<size_t>(type_index), dex_cache->NumResolvedTypes());
+ CHECK_LT(static_cast<size_t>(type_index), dex_cache->GetDexFile()->NumTypeIds());
return soa.AddLocalReference<jobject>(dex_cache->GetResolvedType(dex::TypeIndex(type_index)));
}
@@ -69,8 +69,11 @@ static void DexCache_setResolvedType(JNIEnv* env, jobject javaDexCache, jint typ
jobject type) {
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- CHECK_LT(static_cast<size_t>(type_index), dex_cache->NumResolvedTypes());
- dex_cache->SetResolvedType(dex::TypeIndex(type_index), soa.Decode<mirror::Class>(type));
+ CHECK_LT(static_cast<size_t>(type_index), dex_cache->GetDexFile()->NumTypeIds());
+ ObjPtr<mirror::Class> t = soa.Decode<mirror::Class>(type);
+ if (t != nullptr) {
+ dex_cache->SetResolvedType(dex::TypeIndex(type_index), t);
+ }
}
static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index,
@@ -78,7 +81,10 @@ static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint s
ScopedFastNativeObjectAccess soa(env);
ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
- dex_cache->SetResolvedString(dex::StringIndex(string_index), soa.Decode<mirror::String>(string));
+ ObjPtr<mirror::String> s = soa.Decode<mirror::String>(string);
+ if (s != nullptr) {
+ dex_cache->SetResolvedString(dex::StringIndex(string_index), s);
+ }
}
static JNINativeMethod gMethods[] = {
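
The new null checks in both setters follow from the hash-based layout: a slot holds whichever id last claimed it, so "store null for id X" is not representable and would instead clobber an unrelated entry sharing the slot. A tiny standalone check of the collision arithmetic (illustrative only):

    #include <cassert>

    int main() {
      constexpr unsigned kSlots = 1024;  // kDexCacheTypeCacheSize
      assert((5u % kSlots) == ((5u + kSlots) % kSlots));  // ids 5 and 1029 share a slot
    }
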
diff --git a/runtime/oat.h b/runtime/oat.h
index 106bd4096f..62f010ba97 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '1', '0', '5', '\0' }; // Stack map alignment change.
+ static constexpr uint8_t kOatVersion[] = { '1', '0', '6', '\0' }; // hash-based DexCache types
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 8554fa2693..b19ace5464 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -530,7 +530,7 @@ bool OatFileAssistant::Dex2Oat(const std::vector<std::string>& args,
class_path = OatFile::kSpecialSharedLibrary;
}
argv.push_back(class_path);
- if (runtime->IsDebuggable()) {
+ if (runtime->IsJavaDebuggable()) {
argv.push_back("--debuggable");
}
runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
diff --git a/runtime/openjdkjvmti/ti_class_loader.cc b/runtime/openjdkjvmti/ti_class_loader.cc
index b68fc60c6c..c2f17924da 100644
--- a/runtime/openjdkjvmti/ti_class_loader.cc
+++ b/runtime/openjdkjvmti/ti_class_loader.cc
@@ -61,9 +61,14 @@ namespace openjdkjvmti {
bool ClassLoaderHelper::AddToClassLoader(art::Thread* self,
art::Handle<art::mirror::ClassLoader> loader,
const art::DexFile* dex_file) {
+ art::ScopedObjectAccessUnchecked soa(self);
art::StackHandleScope<2> hs(self);
- art::Handle<art::mirror::Object> java_dex_file_obj(hs.NewHandle(FindSourceDexFileObject(self,
- loader)));
+ if (art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
+ art::Runtime::Current()->GetClassLinker()->AppendToBootClassPath(self, *dex_file);
+ return true;
+ }
+ art::Handle<art::mirror::Object> java_dex_file_obj(
+ hs.NewHandle(FindSourceDexFileObject(self, loader)));
if (java_dex_file_obj.IsNull()) {
return false;
}
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index b76d74ae79..4b8108accf 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -75,9 +75,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
StackVisitor::StackWalkKind::kIncludeInlinedFrames),
allocator_(allocator),
obsoleted_methods_(obsoleted_methods),
- obsolete_maps_(obsolete_maps),
- is_runtime_frame_(false) {
- }
+ obsolete_maps_(obsolete_maps) { }
~ObsoleteMethodStackVisitor() OVERRIDE {}
@@ -100,21 +98,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
bool VisitFrame() OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
art::ArtMethod* old_method = GetMethod();
- // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
- // works through runtime methods.
- bool prev_was_runtime_frame_ = is_runtime_frame_;
- is_runtime_frame_ = old_method->IsRuntimeMethod();
if (obsoleted_methods_.find(old_method) != obsoleted_methods_.end()) {
- // The check below works since when we deoptimize we set shadow frames for all frames until a
- // native/runtime transition and for those set the return PC to a function that will complete
- // the deoptimization. This does leave us with the unfortunate side-effect that frames just
- // below runtime frames cannot be deoptimized at the moment.
- // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
- // works through runtime methods.
- // TODO b/33616143
- if (!IsShadowFrame() && prev_was_runtime_frame_) {
- LOG(FATAL) << "Deoptimization failed due to runtime method in stack. See b/33616143";
- }
// We cannot ensure that the right dex file is used in inlined frames so we don't support
// redefining them.
DCHECK(!IsInInlinedFrame()) << "Inlined frames are not supported when using redefinition";
@@ -163,9 +147,6 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
// values in this map must be added to the obsolete_methods_ (and obsolete_dex_caches_) fields of
// the redefined classes ClassExt by the caller.
std::unordered_map<art::ArtMethod*, art::ArtMethod*>* obsolete_maps_;
- // TODO REMOVE once either current_method doesn't stick around through suspend points or deopt
- // works through runtime methods.
- bool is_runtime_frame_;
};
jvmtiError Redefiner::IsModifiableClass(jvmtiEnv* env ATTRIBUTE_UNUSED,
@@ -464,7 +445,8 @@ void Redefiner::ClassRedefinition::FindAndAllocateObsoleteMethods(art::mirror::C
art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking");
art::mirror::ClassExt* ext = art_klass->GetExtData();
CHECK(ext->GetObsoleteMethods() != nullptr);
- CallbackCtx ctx(art_klass->GetClassLoader()->GetAllocator());
+ art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
+ CallbackCtx ctx(linker->GetAllocatorForClassLoader(art_klass->GetClassLoader()));
// Add all the declared methods to the map
for (auto& m : art_klass->GetDeclaredMethods(art::kRuntimePointerSize)) {
ctx.obsolete_methods.insert(&m);
@@ -508,18 +490,6 @@ void Redefiner::ClassRedefinition::FillObsoleteMethodMap(
}
}
-// TODO It should be possible to only deoptimize the specific obsolete methods.
-// TODO ReJitEverything can (sort of) fail. In certain cases it will skip deoptimizing some frames.
-// If one of these frames is an obsolete method we have a problem. b/33616143
-// TODO This shouldn't be necessary once we can ensure that the current method is not kept in
-// registers across suspend points.
-// TODO Pending b/33630159
-void Redefiner::EnsureObsoleteMethodsAreDeoptimized() {
- art::ScopedAssertNoThreadSuspension nts("Deoptimizing everything!");
- art::instrumentation::Instrumentation* i = runtime_->GetInstrumentation();
- i->ReJitEverything("libOpenJkdJvmti - Class Redefinition");
-}
-
bool Redefiner::ClassRedefinition::CheckClass() {
// TODO Might just want to put it in a ObjPtr and NoSuspend assert.
art::StackHandleScope<1> hs(driver_->self_);
@@ -733,33 +703,32 @@ class RedefinitionDataHolder {
bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
int32_t klass_index, /*out*/RedefinitionDataHolder* holder) {
+ art::ScopedObjectAccessUnchecked soa(driver_->self_);
art::StackHandleScope<2> hs(driver_->self_);
holder->SetMirrorClass(klass_index, GetMirrorClass());
// This shouldn't allocate
art::Handle<art::mirror::ClassLoader> loader(hs.NewHandle(GetClassLoader()));
- holder->SetSourceClassLoader(klass_index, loader.Get());
- if (loader.Get() == nullptr) {
- // TODO Better error msg.
- RecordFailure(ERR(INTERNAL), "Unable to find class loader!");
- return false;
- }
- art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(
- ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader)));
- holder->SetJavaDexFile(klass_index, dex_file_obj.Get());
- if (dex_file_obj.Get() == nullptr) {
- // TODO Better error msg.
- RecordFailure(ERR(INTERNAL), "Unable to find class loader!");
- return false;
- }
- holder->SetNewDexFileCookie(klass_index,
- ClassLoaderHelper::AllocateNewDexFileCookie(driver_->self_,
- dex_file_obj,
- dex_file_.get()).Ptr());
- if (holder->GetNewDexFileCookie(klass_index) == nullptr) {
- driver_->self_->AssertPendingOOMException();
- driver_->self_->ClearException();
- RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
- return false;
+ // The bootclasspath is handled specially so it doesn't have a j.l.DexFile.
+ if (!art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
+ holder->SetSourceClassLoader(klass_index, loader.Get());
+ art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(
+ ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader)));
+ holder->SetJavaDexFile(klass_index, dex_file_obj.Get());
+ if (dex_file_obj.Get() == nullptr) {
+ // TODO Better error msg.
+ RecordFailure(ERR(INTERNAL), "Unable to find dex file!");
+ return false;
+ }
+ holder->SetNewDexFileCookie(klass_index,
+ ClassLoaderHelper::AllocateNewDexFileCookie(driver_->self_,
+ dex_file_obj,
+ dex_file_.get()).Ptr());
+ if (holder->GetNewDexFileCookie(klass_index) == nullptr) {
+ driver_->self_->AssertPendingOOMException();
+ driver_->self_->ClearException();
+ RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
+ return false;
+ }
}
holder->SetNewDexCache(klass_index, CreateNewDexCache(loader));
if (holder->GetNewDexCache(klass_index) == nullptr) {
@@ -846,6 +815,13 @@ jvmtiError Redefiner::Run() {
// cleaned up by the GC eventually.
return result_;
}
+ int32_t counter = 0;
+ for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ if (holder.GetSourceClassLoader(counter) == nullptr) {
+ runtime_->GetClassLinker()->AppendToBootClassPath(self_, redef.GetDexFile());
+ }
+ counter++;
+ }
// Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done
// allocating so no deadlocks.
art::gc::Heap* heap = runtime_->GetHeap();
@@ -864,26 +840,20 @@ jvmtiError Redefiner::Run() {
// TODO We need to update all debugger MethodIDs so they note the method they point to is
// obsolete or implement some other well defined semantics.
// TODO We need to decide on & implement semantics for JNI jmethodids when we redefine methods.
- int32_t cnt = 0;
+ counter = 0;
for (Redefiner::ClassRedefinition& redef : redefinitions_) {
art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
- art::mirror::Class* klass = holder.GetMirrorClass(cnt);
- ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(cnt),
- holder.GetNewDexFileCookie(cnt));
+ if (holder.GetSourceClassLoader(counter) != nullptr) {
+ ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(counter),
+ holder.GetNewDexFileCookie(counter));
+ }
+ art::mirror::Class* klass = holder.GetMirrorClass(counter);
// TODO Rewrite so we don't do a stack walk for each and every class.
redef.FindAndAllocateObsoleteMethods(klass);
- redef.UpdateClass(klass, holder.GetNewDexCache(cnt), holder.GetOriginalDexFileBytes(cnt));
- cnt++;
+ redef.UpdateClass(klass, holder.GetNewDexCache(counter),
+ holder.GetOriginalDexFileBytes(counter));
+ counter++;
}
- // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
- // pointers to their ArtMethod's stashed in registers that they then use to attempt to hit the
- // DexCache. (b/33630159)
- // TODO This can fail (leave some methods optimized) near runtime methods (including
- // quick-to-interpreter transition function).
- // TODO We probably don't need this at all once we have a way to ensure that the
- // current_art_method is never stashed in a (physical) register by the JIT and lost to the
- // stack-walker.
- EnsureObsoleteMethodsAreDeoptimized();
// TODO Verify the new Class.
// TODO Shrink the obsolete method maps if possible?
// TODO find appropriate class loader.
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 85df6e1024..5bcaef8971 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -127,6 +127,10 @@ class Redefiner {
art::mirror::Class* GetMirrorClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
art::mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ const art::DexFile& GetDexFile() {
+ return *dex_file_;
+ }
+
art::mirror::DexCache* CreateNewDexCache(art::Handle<art::mirror::ClassLoader> loader)
REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -239,14 +243,6 @@ class Redefiner {
REQUIRES_SHARED(art::Locks::mutator_lock_);
void ReleaseAllDexFiles() REQUIRES_SHARED(art::Locks::mutator_lock_);
- // Ensure that obsolete methods are deoptimized. This is needed since optimized methods may have
- // pointers to their ArtMethods stashed in registers that they then use to attempt to hit the
- // DexCache.
- void EnsureObsoleteMethodsAreDeoptimized()
- REQUIRES(art::Locks::mutator_lock_)
- REQUIRES(!art::Locks::thread_list_lock_,
- !art::Locks::classlinker_classes_lock_);
-
void RecordFailure(jvmtiError result, const std::string& class_sig, const std::string& error_msg);
void RecordFailure(jvmtiError result, const std::string& error_msg) {
RecordFailure(result, "NO CLASS", error_msg);
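
FinishRemainingAllocations and Run coordinate through the holder: a boot-classpath redefinition leaves its source class loader slot null, and Run later treats that null as the cue to append the dex file to the boot class path rather than patch a java.lang.DexFile cookie. A compact sketch of that sentinel convention (Slot and both printed actions are illustrative stand-ins):

#include <cstdio>
#include <vector>

// Stand-in for one class's entry in RedefinitionDataHolder.
struct Slot { const void* source_class_loader; };

int main() {
  int marker = 0;
  // First slot models a boot-classpath class, second an app class.
  std::vector<Slot> holder = {{nullptr}, {&marker}};
  for (size_t i = 0; i < holder.size(); ++i) {
    if (holder[i].source_class_loader == nullptr) {
      std::printf("slot %zu: append dex file to boot class path\n", i);
    } else {
      std::printf("slot %zu: update java.lang.DexFile cookie\n", i);
    }
  }
}
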
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index ccc5f7a8ee..9113f83cd4 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -300,8 +300,6 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.Define("-Xplugin:_")
.WithType<std::vector<Plugin>>().AppendValues()
.IntoKey(M::Plugins)
- .Define("-Xfully-deoptable")
- .IntoKey(M::FullyDeoptable)
.Define("-XX:ThreadSuspendTimeout=_") // in ms
.WithType<MillisecondsToNanoseconds>() // store as ns
.IntoKey(M::ThreadSuspendTimeout)
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 8d758a4a4b..4e76951189 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -347,9 +347,11 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
callee_method_ = method;
return true;
} else if (!single_frame_deopt_ &&
- !Runtime::Current()->IsDeoptimizeable(GetCurrentQuickFramePc())) {
+ !Runtime::Current()->IsAsyncDeoptimizeable(GetCurrentQuickFramePc())) {
// We hit some code that's not deoptimizeable. However, Single-frame deoptimization triggered
// from compiled code is always allowed since HDeoptimize always saves the full environment.
+ LOG(WARNING) << "Got request to deoptimize un-deoptimizable method "
+ << method->PrettyMethod();
FinishStackWalk();
return false; // End stack walk.
} else {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index b30e5109b6..693b8f4e2f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -245,7 +245,7 @@ Runtime::Runtime()
force_native_bridge_(false),
is_native_bridge_loaded_(false),
is_native_debuggable_(false),
- is_fully_deoptable_(false),
+ is_java_debuggable_(false),
zygote_max_failed_boots_(0),
experimental_flags_(ExperimentalFlags::kNone),
oat_file_manager_(nullptr),
@@ -826,14 +826,6 @@ bool Runtime::IsShuttingDown(Thread* self) {
return IsShuttingDownLocked();
}
-bool Runtime::IsDebuggable() const {
- if (IsFullyDeoptable()) {
- return true;
- }
- const OatFile* oat_file = GetOatFileManager().GetPrimaryOatFile();
- return oat_file != nullptr && oat_file->IsDebuggable();
-}
-
void Runtime::StartDaemonThreads() {
ScopedTrace trace(__FUNCTION__);
VLOG(startup) << "Runtime::StartDaemonThreads entering";
@@ -1039,6 +1031,12 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
compiler_executable_ = runtime_options.ReleaseOrDefault(Opt::Compiler);
compiler_options_ = runtime_options.ReleaseOrDefault(Opt::CompilerOptions);
+ for (StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+ if (option.starts_with("--debuggable")) {
+ SetJavaDebuggable(true);
+ break;
+ }
+ }
image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
image_location_ = runtime_options.GetOrDefault(Opt::Image);
@@ -1053,8 +1051,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
verify_ = runtime_options.GetOrDefault(Opt::Verify);
allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
- is_fully_deoptable_ = runtime_options.Exists(Opt::FullyDeoptable);
-
no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
@@ -1259,6 +1255,11 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
ScopedTrace trace2("AddImageStringsToTable");
GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
}
+ if (IsJavaDebuggable()) {
+ // Now that we have loaded the boot image, deoptimize its methods if we are running
+ // debuggable, as the code may have been compiled non-debuggable.
+ DeoptimizeBootImage();
+ }
} else {
std::vector<std::string> dex_filenames;
Split(boot_class_path_string_, ':', &dex_filenames);
@@ -1405,7 +1406,7 @@ static bool EnsureJvmtiPlugin(Runtime* runtime,
}
// Is the process debuggable? Otherwise, do not attempt to load the plugin.
- if (!runtime->IsDebuggable()) {
+ if (!runtime->IsJavaDebuggable()) {
*error_msg = "Process is not debuggable.";
return false;
}
@@ -2206,9 +2207,15 @@ bool Runtime::IsVerificationSoftFail() const {
return verify_ == verifier::VerifyMode::kSoftFail;
}
-bool Runtime::IsDeoptimizeable(uintptr_t code) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return !heap_->IsInBootImageOatFile(reinterpret_cast<void *>(code));
+bool Runtime::IsAsyncDeoptimizeable(uintptr_t code) const {
+ // We only support async deopt (i.e. the compiled code is not explicitly asking
+ // for deopt, but something else, such as the debugger, is) in debuggable JIT
+ // code. We could look at the oat file where `code` is defined and check whether
+ // it was compiled debuggable, but we decided to rely only on the JIT for
+ // debuggable apps.
+ return IsJavaDebuggable() &&
+ GetJit() != nullptr &&
+ GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(code));
}
LinearAlloc* Runtime::CreateLinearAlloc() {
@@ -2292,4 +2299,43 @@ RuntimeCallbacks* Runtime::GetRuntimeCallbacks() {
return callbacks_.get();
}
+// Used to patch boot image method entry points to the interpreter bridge.
+class UpdateEntryPointsClassVisitor : public ClassVisitor {
+ public:
+ explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
+ : instrumentation_(instrumentation) {}
+
+ bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
+ auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+ for (auto& m : klass->GetMethods(pointer_size)) {
+ const void* code = m.GetEntryPointFromQuickCompiledCode();
+ if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
+ !m.IsNative() &&
+ !m.IsProxyMethod()) {
+ instrumentation_->UpdateMethodsCodeForJavaDebuggable(&m, GetQuickToInterpreterBridge());
+ }
+ }
+ return true;
+ }
+
+ private:
+ instrumentation::Instrumentation* const instrumentation_;
+};
+
+void Runtime::SetJavaDebuggable(bool value) {
+ is_java_debuggable_ = value;
+ // Do not call DeoptimizeBootImage just yet; the runtime may still be starting up.
+}
+
+void Runtime::DeoptimizeBootImage() {
+ // If we've already started and we are setting this runtime to debuggable,
+ // we patch entry points of methods in boot image to interpreter bridge, as
+ // boot image code may be AOT compiled as not debuggable.
+ if (!GetInstrumentation()->IsForcedInterpretOnly()) {
+ ScopedObjectAccess soa(Thread::Current());
+ UpdateEntryPointsClassVisitor visitor(GetInstrumentation());
+ GetClassLinker()->VisitClasses(&visitor);
+ }
+}
+
} // namespace art
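
The net effect of the runtime.cc changes is that asynchronous deoptimization (requested by the debugger rather than by the compiled code itself) is honored only for JIT-compiled code in a Java-debuggable runtime. A hedged sketch of that predicate with stand-in types (FakeJitCache and FakeRuntime are not ART types):

#include <cstdint>
#include <cstdio>
#include <set>

// Stand-in for the JIT code cache's ContainsPc lookup.
struct FakeJitCache {
  std::set<uintptr_t> pcs;
  bool ContainsPc(uintptr_t pc) const { return pcs.count(pc) != 0; }
};

struct FakeRuntime {
  bool java_debuggable;
  const FakeJitCache* jit_cache;  // null when the JIT is off

  // Same shape as Runtime::IsAsyncDeoptimizeable above: only debuggable
  // JIT code may be deoptimized asynchronously.
  bool IsAsyncDeoptimizeable(uintptr_t code) const {
    return java_debuggable && jit_cache != nullptr && jit_cache->ContainsPc(code);
  }
};

int main() {
  FakeJitCache cache{{0x1000}};
  FakeRuntime runtime{/*java_debuggable=*/true, &cache};
  std::printf("%d %d\n",
              runtime.IsAsyncDeoptimizeable(0x1000),   // 1: debuggable JIT code
              runtime.IsAsyncDeoptimizeable(0x2000));  // 0: unknown PC
}
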
diff --git a/runtime/runtime.h b/runtime/runtime.h
index f7d6810ff5..30b1756d5d 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -434,7 +434,7 @@ class Runtime {
kInitialize
};
- jit::Jit* GetJit() {
+ jit::Jit* GetJit() const {
return jit_.get();
}
@@ -569,15 +569,14 @@ class Runtime {
return jit_options_.get();
}
- bool IsDebuggable() const;
-
- bool IsFullyDeoptable() const {
- return is_fully_deoptable_;
+ bool IsJavaDebuggable() const {
+ return is_java_debuggable_;
}
- void SetFullyDeoptable(bool value) {
- is_fully_deoptable_ = value;
- }
+ void SetJavaDebuggable(bool value);
+
+ // Deoptimize the boot image, called for Java debuggable apps.
+ void DeoptimizeBootImage();
bool IsNativeDebuggable() const {
return is_native_debuggable_;
@@ -639,9 +638,9 @@ class Runtime {
return zygote_no_threads_;
}
- // Returns if the code can be deoptimized. Code may be compiled with some
+ // Returns if the code can be deoptimized asynchronously. Code may be compiled with some
// optimization that makes it impossible to deoptimize.
- bool IsDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a saved copy of the environment (getenv/setenv values).
// Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
@@ -863,8 +862,8 @@ class Runtime {
// Whether we are running under native debugger.
bool is_native_debuggable_;
- // Whether we are expected to be deoptable at all points.
- bool is_fully_deoptable_;
+ // Whether Java code needs to be debuggable.
+ bool is_java_debuggable_;
// The maximum number of failed boots we allow before pruning the dalvik cache
// and trying again. This option is only inspected when we're running as a
diff --git a/runtime/runtime_android.cc b/runtime/runtime_android.cc
index 0a996a9e55..495296cf7d 100644
--- a/runtime/runtime_android.cc
+++ b/runtime/runtime_android.cc
@@ -14,56 +14,33 @@
* limitations under the License.
*/
+#include "runtime.h"
+
#include <signal.h>
-#include <string.h>
-#include <sys/utsname.h>
-#include <inttypes.h>
-#include "base/logging.h"
-#include "base/mutex.h"
-#include "thread-inl.h"
-#include "utils.h"
+#include <cstring>
-namespace art {
+#include "runtime_common.h"
-static constexpr bool kUseSignalHandler = false;
+namespace art {
struct sigaction old_action;
-void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
- static bool handling_unexpected_signal = false;
- if (handling_unexpected_signal) {
- LogHelper::LogLineLowStack(__FILE__,
- __LINE__,
- ::android::base::FATAL_WITHOUT_ABORT,
- "HandleUnexpectedSignal reentered\n");
- _exit(1);
- }
- handling_unexpected_signal = true;
- gAborting++; // set before taking any locks
- MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
- Runtime* runtime = Runtime::Current();
- if (runtime != nullptr) {
- // Print this out first in case DumpObject faults.
- LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
- }
+void HandleUnexpectedSignalAndroid(int signal_number, siginfo_t* info, void* raw_context) {
+ HandleUnexpectedSignalCommon(signal_number, info, raw_context, /* running_on_linux */ false);
+
// Run the old signal handler.
old_action.sa_sigaction(signal_number, info, raw_context);
}
void Runtime::InitPlatformSignalHandlers() {
- if (kUseSignalHandler) {
- struct sigaction action;
- memset(&action, 0, sizeof(action));
- sigemptyset(&action.sa_mask);
- action.sa_sigaction = HandleUnexpectedSignal;
- // Use the three-argument sa_sigaction handler.
- action.sa_flags |= SA_SIGINFO;
- // Use the alternate signal stack so we can catch stack overflows.
- action.sa_flags |= SA_ONSTACK;
- int rc = 0;
- rc += sigaction(SIGSEGV, &action, &old_action);
- CHECK_EQ(rc, 0);
+ // Enable the signal handler, which dumps crash information to logcat,
+ // when the Android root is not "/system".
+ const char* android_root = getenv("ANDROID_ROOT");
+ if (android_root != nullptr && strcmp(android_root, "/system") != 0) {
+ InitPlatformSignalHandlersCommon(HandleUnexpectedSignalAndroid,
+ &old_action,
+ /* handle_timeout_signal */ false);
}
}
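
The rewritten Android path installs the handler only when ANDROID_ROOT points somewhere other than /system, i.e. outside a stock device environment where debuggerd already owns crash reporting. The gating check in isolation, runnable on any POSIX system:

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Same check as Runtime::InitPlatformSignalHandlers above.
static bool ShouldInstallHandler() {
  const char* android_root = getenv("ANDROID_ROOT");
  return android_root != nullptr && strcmp(android_root, "/system") != 0;
}

int main() {
  std::printf("install handler: %s\n", ShouldInstallHandler() ? "yes" : "no");
}
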
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
new file mode 100644
index 0000000000..70aff37961
--- /dev/null
+++ b/runtime/runtime_common.cc
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime_common.h"
+
+#include <signal.h>
+
+#include <cinttypes>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#include "android-base/stringprintf.h"
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "native_stack_dump.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace art {
+
+using android::base::StringPrintf;
+
+static constexpr bool kUseSigRTTimeout = true;
+static constexpr bool kDumpNativeStackOnTimeout = true;
+
+const char* GetSignalName(int signal_number) {
+ switch (signal_number) {
+ case SIGABRT: return "SIGABRT";
+ case SIGBUS: return "SIGBUS";
+ case SIGFPE: return "SIGFPE";
+ case SIGILL: return "SIGILL";
+ case SIGPIPE: return "SIGPIPE";
+ case SIGSEGV: return "SIGSEGV";
+#if defined(SIGSTKFLT)
+ case SIGSTKFLT: return "SIGSTKFLT";
+#endif
+ case SIGTRAP: return "SIGTRAP";
+ }
+ return "??";
+}
+
+const char* GetSignalCodeName(int signal_number, int signal_code) {
+ // Try the signal-specific codes...
+ switch (signal_number) {
+ case SIGILL:
+ switch (signal_code) {
+ case ILL_ILLOPC: return "ILL_ILLOPC";
+ case ILL_ILLOPN: return "ILL_ILLOPN";
+ case ILL_ILLADR: return "ILL_ILLADR";
+ case ILL_ILLTRP: return "ILL_ILLTRP";
+ case ILL_PRVOPC: return "ILL_PRVOPC";
+ case ILL_PRVREG: return "ILL_PRVREG";
+ case ILL_COPROC: return "ILL_COPROC";
+ case ILL_BADSTK: return "ILL_BADSTK";
+ }
+ break;
+ case SIGBUS:
+ switch (signal_code) {
+ case BUS_ADRALN: return "BUS_ADRALN";
+ case BUS_ADRERR: return "BUS_ADRERR";
+ case BUS_OBJERR: return "BUS_OBJERR";
+ }
+ break;
+ case SIGFPE:
+ switch (signal_code) {
+ case FPE_INTDIV: return "FPE_INTDIV";
+ case FPE_INTOVF: return "FPE_INTOVF";
+ case FPE_FLTDIV: return "FPE_FLTDIV";
+ case FPE_FLTOVF: return "FPE_FLTOVF";
+ case FPE_FLTUND: return "FPE_FLTUND";
+ case FPE_FLTRES: return "FPE_FLTRES";
+ case FPE_FLTINV: return "FPE_FLTINV";
+ case FPE_FLTSUB: return "FPE_FLTSUB";
+ }
+ break;
+ case SIGSEGV:
+ switch (signal_code) {
+ case SEGV_MAPERR: return "SEGV_MAPERR";
+ case SEGV_ACCERR: return "SEGV_ACCERR";
+#if defined(SEGV_BNDERR)
+ case SEGV_BNDERR: return "SEGV_BNDERR";
+#endif
+ }
+ break;
+ case SIGTRAP:
+ switch (signal_code) {
+ case TRAP_BRKPT: return "TRAP_BRKPT";
+ case TRAP_TRACE: return "TRAP_TRACE";
+ }
+ break;
+ }
+ // Then the other codes...
+ switch (signal_code) {
+ case SI_USER: return "SI_USER";
+#if defined(SI_KERNEL)
+ case SI_KERNEL: return "SI_KERNEL";
+#endif
+ case SI_QUEUE: return "SI_QUEUE";
+ case SI_TIMER: return "SI_TIMER";
+ case SI_MESGQ: return "SI_MESGQ";
+ case SI_ASYNCIO: return "SI_ASYNCIO";
+#if defined(SI_SIGIO)
+ case SI_SIGIO: return "SI_SIGIO";
+#endif
+#if defined(SI_TKILL)
+ case SI_TKILL: return "SI_TKILL";
+#endif
+ }
+ // Then give up...
+ return "?";
+}
+
+struct UContext {
+ explicit UContext(void* raw_context)
+ : context(reinterpret_cast<ucontext_t*>(raw_context)->uc_mcontext) {}
+
+ void Dump(std::ostream& os) const;
+
+ void DumpRegister32(std::ostream& os, const char* name, uint32_t value) const;
+ void DumpRegister64(std::ostream& os, const char* name, uint64_t value) const;
+
+ void DumpX86Flags(std::ostream& os, uint32_t flags) const;
+
+ mcontext_t& context;
+};
+
+void UContext::Dump(std::ostream& os) const {
+ // TODO: support non-x86 hosts.
+#if defined(__APPLE__) && defined(__i386__)
+ DumpRegister32(os, "eax", context->__ss.__eax);
+ DumpRegister32(os, "ebx", context->__ss.__ebx);
+ DumpRegister32(os, "ecx", context->__ss.__ecx);
+ DumpRegister32(os, "edx", context->__ss.__edx);
+ os << '\n';
+
+ DumpRegister32(os, "edi", context->__ss.__edi);
+ DumpRegister32(os, "esi", context->__ss.__esi);
+ DumpRegister32(os, "ebp", context->__ss.__ebp);
+ DumpRegister32(os, "esp", context->__ss.__esp);
+ os << '\n';
+
+ DumpRegister32(os, "eip", context->__ss.__eip);
+ os << " ";
+ DumpRegister32(os, "eflags", context->__ss.__eflags);
+ DumpX86Flags(os, context->__ss.__eflags);
+ os << '\n';
+
+ DumpRegister32(os, "cs", context->__ss.__cs);
+ DumpRegister32(os, "ds", context->__ss.__ds);
+ DumpRegister32(os, "es", context->__ss.__es);
+ DumpRegister32(os, "fs", context->__ss.__fs);
+ os << '\n';
+ DumpRegister32(os, "gs", context->__ss.__gs);
+ DumpRegister32(os, "ss", context->__ss.__ss);
+#elif defined(__linux__) && defined(__i386__)
+ DumpRegister32(os, "eax", context.gregs[REG_EAX]);
+ DumpRegister32(os, "ebx", context.gregs[REG_EBX]);
+ DumpRegister32(os, "ecx", context.gregs[REG_ECX]);
+ DumpRegister32(os, "edx", context.gregs[REG_EDX]);
+ os << '\n';
+
+ DumpRegister32(os, "edi", context.gregs[REG_EDI]);
+ DumpRegister32(os, "esi", context.gregs[REG_ESI]);
+ DumpRegister32(os, "ebp", context.gregs[REG_EBP]);
+ DumpRegister32(os, "esp", context.gregs[REG_ESP]);
+ os << '\n';
+
+ DumpRegister32(os, "eip", context.gregs[REG_EIP]);
+ os << " ";
+ DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
+ DumpX86Flags(os, context.gregs[REG_EFL]);
+ os << '\n';
+
+ DumpRegister32(os, "cs", context.gregs[REG_CS]);
+ DumpRegister32(os, "ds", context.gregs[REG_DS]);
+ DumpRegister32(os, "es", context.gregs[REG_ES]);
+ DumpRegister32(os, "fs", context.gregs[REG_FS]);
+ os << '\n';
+ DumpRegister32(os, "gs", context.gregs[REG_GS]);
+ DumpRegister32(os, "ss", context.gregs[REG_SS]);
+#elif defined(__linux__) && defined(__x86_64__)
+ DumpRegister64(os, "rax", context.gregs[REG_RAX]);
+ DumpRegister64(os, "rbx", context.gregs[REG_RBX]);
+ DumpRegister64(os, "rcx", context.gregs[REG_RCX]);
+ DumpRegister64(os, "rdx", context.gregs[REG_RDX]);
+ os << '\n';
+
+ DumpRegister64(os, "rdi", context.gregs[REG_RDI]);
+ DumpRegister64(os, "rsi", context.gregs[REG_RSI]);
+ DumpRegister64(os, "rbp", context.gregs[REG_RBP]);
+ DumpRegister64(os, "rsp", context.gregs[REG_RSP]);
+ os << '\n';
+
+ DumpRegister64(os, "r8 ", context.gregs[REG_R8]);
+ DumpRegister64(os, "r9 ", context.gregs[REG_R9]);
+ DumpRegister64(os, "r10", context.gregs[REG_R10]);
+ DumpRegister64(os, "r11", context.gregs[REG_R11]);
+ os << '\n';
+
+ DumpRegister64(os, "r12", context.gregs[REG_R12]);
+ DumpRegister64(os, "r13", context.gregs[REG_R13]);
+ DumpRegister64(os, "r14", context.gregs[REG_R14]);
+ DumpRegister64(os, "r15", context.gregs[REG_R15]);
+ os << '\n';
+
+ DumpRegister64(os, "rip", context.gregs[REG_RIP]);
+ os << " ";
+ DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
+ DumpX86Flags(os, context.gregs[REG_EFL]);
+ os << '\n';
+
+ DumpRegister32(os, "cs", (context.gregs[REG_CSGSFS]) & 0x0FFFF);
+ DumpRegister32(os, "gs", (context.gregs[REG_CSGSFS] >> 16) & 0x0FFFF);
+ DumpRegister32(os, "fs", (context.gregs[REG_CSGSFS] >> 32) & 0x0FFFF);
+ os << '\n';
+#else
+ os << "Unknown architecture/word size/OS in ucontext dump";
+#endif
+}
+
+void UContext::DumpRegister32(std::ostream& os, const char* name, uint32_t value) const {
+ os << StringPrintf(" %6s: 0x%08x", name, value);
+}
+
+void UContext::DumpRegister64(std::ostream& os, const char* name, uint64_t value) const {
+ os << StringPrintf(" %6s: 0x%016" PRIx64, name, value);
+}
+
+void UContext::DumpX86Flags(std::ostream& os, uint32_t flags) const {
+ os << " [";
+ if ((flags & (1 << 0)) != 0) {
+ os << " CF";
+ }
+ if ((flags & (1 << 2)) != 0) {
+ os << " PF";
+ }
+ if ((flags & (1 << 4)) != 0) {
+ os << " AF";
+ }
+ if ((flags & (1 << 6)) != 0) {
+ os << " ZF";
+ }
+ if ((flags & (1 << 7)) != 0) {
+ os << " SF";
+ }
+ if ((flags & (1 << 8)) != 0) {
+ os << " TF";
+ }
+ if ((flags & (1 << 9)) != 0) {
+ os << " IF";
+ }
+ if ((flags & (1 << 10)) != 0) {
+ os << " DF";
+ }
+ if ((flags & (1 << 11)) != 0) {
+ os << " OF";
+ }
+ os << " ]";
+}
+
+int GetTimeoutSignal() {
+#if defined(__APPLE__)
+ // Mac does not support realtime signals.
+ UNUSED(kUseSigRTTimeout);
+ return -1;
+#else
+ return kUseSigRTTimeout ? (SIGRTMIN + 2) : -1;
+#endif
+}
+
+static bool IsTimeoutSignal(int signal_number) {
+ return signal_number == GetTimeoutSignal();
+}
+
+#if defined(__APPLE__)
+// On macOS, clang complains about art::HandleUnexpectedSignalCommon's
+// stack frame size being too large; disable that warning locally.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wframe-larger-than="
+#endif
+
+void HandleUnexpectedSignalCommon(int signal_number,
+ siginfo_t* info,
+ void* raw_context,
+ bool running_on_linux) {
+ bool handle_timeout_signal = running_on_linux;
+ bool dump_on_stderr = running_on_linux;
+
+ static bool handling_unexpected_signal = false;
+ if (handling_unexpected_signal) {
+ LogHelper::LogLineLowStack(__FILE__,
+ __LINE__,
+ ::android::base::FATAL_WITHOUT_ABORT,
+ "HandleUnexpectedSignal reentered\n");
+ if (handle_timeout_signal) {
+ if (IsTimeoutSignal(signal_number)) {
+ // Ignore a recursive timeout.
+ return;
+ }
+ }
+ _exit(1);
+ }
+ handling_unexpected_signal = true;
+
+ gAborting++; // set before taking any locks
+ MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
+
+ bool has_address = (signal_number == SIGILL || signal_number == SIGBUS ||
+ signal_number == SIGFPE || signal_number == SIGSEGV);
+
+ OsInfo os_info;
+ const char* cmd_line = GetCmdLine();
+ if (cmd_line == nullptr) {
+ cmd_line = "<unset>"; // Because no one called InitLogging.
+ }
+ pid_t tid = GetTid();
+ std::string thread_name(GetThreadName(tid));
+ UContext thread_context(raw_context);
+ Backtrace thread_backtrace(raw_context);
+
+ std::ostringstream stream;
+ stream << "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"
+ << StringPrintf("Fatal signal %d (%s), code %d (%s)",
+ signal_number,
+ GetSignalName(signal_number),
+ info->si_code,
+ GetSignalCodeName(signal_number, info->si_code))
+ << (has_address ? StringPrintf(" fault addr %p", info->si_addr) : "") << '\n'
+ << "OS: " << Dumpable<OsInfo>(os_info) << '\n'
+ << "Cmdline: " << cmd_line << '\n'
+ << "Thread: " << tid << " \"" << thread_name << "\"" << '\n'
+ << "Registers:\n" << Dumpable<UContext>(thread_context) << '\n'
+ << "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace) << '\n';
+ if (dump_on_stderr) {
+ // Note: We are using cerr directly instead of LOG macros to ensure that even
+ // partial output makes it out. That means we lose the "dalvikvm..." prefix, but
+ // that is acceptable considering this is an abort situation.
+ std::cerr << stream.str() << std::flush;
+ } else {
+ LOG(FATAL_WITHOUT_ABORT) << stream.str() << std::flush;
+ }
+ if (kIsDebugBuild && signal_number == SIGSEGV) {
+ PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
+ }
+
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ if (handle_timeout_signal && IsTimeoutSignal(signal_number)) {
+ // Special timeout signal. Try to dump all threads.
+ // Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts
+ // are of value here.
+ runtime->GetThreadList()->Dump(std::cerr, kDumpNativeStackOnTimeout);
+ std::cerr << std::endl;
+ }
+
+ if (dump_on_stderr) {
+ std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
+ } else {
+ LOG(FATAL_WITHOUT_ABORT) << "Fault message: " << runtime->GetFaultMessage();
+ }
+ }
+}
+
+#if defined(__APPLE__)
+#pragma GCC diagnostic pop
+#endif
+
+void InitPlatformSignalHandlersCommon(void (*newact)(int, siginfo_t*, void*),
+ struct sigaction* oldact,
+ bool handle_timeout_signal) {
+ struct sigaction action;
+ memset(&action, 0, sizeof(action));
+ sigemptyset(&action.sa_mask);
+ action.sa_sigaction = newact;
+ // Use the three-argument sa_sigaction handler.
+ action.sa_flags |= SA_SIGINFO;
+ // Use the alternate signal stack so we can catch stack overflows.
+ action.sa_flags |= SA_ONSTACK;
+
+ int rc = 0;
+ rc += sigaction(SIGABRT, &action, oldact);
+ rc += sigaction(SIGBUS, &action, oldact);
+ rc += sigaction(SIGFPE, &action, oldact);
+ rc += sigaction(SIGILL, &action, oldact);
+ rc += sigaction(SIGPIPE, &action, oldact);
+ rc += sigaction(SIGSEGV, &action, oldact);
+#if defined(SIGSTKFLT)
+ rc += sigaction(SIGSTKFLT, &action, oldact);
+#endif
+ rc += sigaction(SIGTRAP, &action, oldact);
+ // Special dump-all timeout.
+ if (handle_timeout_signal && GetTimeoutSignal() != -1) {
+ rc += sigaction(GetTimeoutSignal(), &action, oldact);
+ }
+ CHECK_EQ(rc, 0);
+}
+
+} // namespace art
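
For reference, the sigaction pattern that InitPlatformSignalHandlersCommon centralizes can be exercised standalone. This hedged sketch installs one three-argument handler with the same SA_SIGINFO | SA_ONSTACK flags; it is POSIX-only and keeps the handler's work minimal:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

// Three-argument handler; does minimal work, then exits.
static void Handler(int sig, siginfo_t* info, void* /*ucontext*/) {
  char buf[64];
  int n = snprintf(buf, sizeof(buf), "caught signal %d, code %d\n",
                   sig, info->si_code);
  if (n > 0) {
    write(STDERR_FILENO, buf, static_cast<size_t>(n));
  }
  _exit(1);
}

int main() {
  struct sigaction action;
  memset(&action, 0, sizeof(action));
  sigemptyset(&action.sa_mask);
  action.sa_sigaction = Handler;
  action.sa_flags |= SA_SIGINFO;  // use the three-argument handler form
  action.sa_flags |= SA_ONSTACK;  // run on the sigaltstack(2) stack if set up
  if (sigaction(SIGSEGV, &action, /*oldact=*/nullptr) != 0) {
    return 1;
  }
  raise(SIGSEGV);  // the handler prints and _exits
}
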
diff --git a/runtime/runtime_common.h b/runtime/runtime_common.h
new file mode 100644
index 0000000000..832b6bbf3e
--- /dev/null
+++ b/runtime/runtime_common.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_RUNTIME_COMMON_H_
+#define ART_RUNTIME_RUNTIME_COMMON_H_
+
+// Code shared by runtime/runtime_android.cc and runtime/runtime_linux.cc.
+
+#if defined(__APPLE__)
+// On macOS, _XOPEN_SOURCE must be defined to access ucontext
+// routines, as they are considered deprecated on that platform.
+#define _XOPEN_SOURCE
+#endif
+
+#include <sys/utsname.h>
+#include <ucontext.h>
+
+#include <iomanip>
+
+#include "base/dumpable.h"
+#include "native_stack_dump.h"
+#include "utils.h"
+
+namespace art {
+
+struct Backtrace {
+ public:
+ explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
+ void Dump(std::ostream& os) const {
+ DumpNativeStack(os, GetTid(), nullptr, "\t", nullptr, raw_context_);
+ }
+ private:
+ // Stores the context of the signal that was unexpected and will terminate the runtime. The
+ // DumpNativeStack code will take care of casting it to the expected type. This is required
+ // as our signal handler runs on an alternate stack.
+ void* raw_context_;
+};
+
+struct OsInfo {
+ void Dump(std::ostream& os) const {
+ utsname info;
+ uname(&info);
+ // Linux 2.6.38.8-gg784 (x86_64)
+ // Darwin 11.4.0 (x86_64)
+ os << info.sysname << " " << info.release << " (" << info.machine << ")";
+ }
+};
+
+const char* GetSignalName(int signal_number);
+const char* GetSignalCodeName(int signal_number, int signal_code);
+
+// Return the signal number we recognize as timeout. -1 means not active/supported.
+int GetTimeoutSignal();
+
+void HandleUnexpectedSignalCommon(int signal_number,
+ siginfo_t* info,
+ void* raw_context,
+ bool running_on_linux);
+
+void InitPlatformSignalHandlersCommon(void (*newact)(int, siginfo_t*, void*),
+ struct sigaction* oldact,
+ bool handle_timeout_signal);
+
+} // namespace art
+
+#endif // ART_RUNTIME_RUNTIME_COMMON_H_
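
The OsInfo dumper declared above is a thin wrapper over uname(2); its output format can be checked with a few lines (POSIX-only, matching the example comments in the struct):

#include <sys/utsname.h>

#include <iostream>

int main() {
  utsname info;
  uname(&info);
  // e.g. "Linux 5.15.0 (x86_64)" or "Darwin 11.4.0 (x86_64)"
  std::cout << info.sysname << " " << info.release
            << " (" << info.machine << ")\n";
}
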
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index b8894d2569..ad61cf373b 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -17,359 +17,19 @@
#include "runtime.h"
#include <signal.h>
-#include <string.h>
-#include <sys/utsname.h>
-#include <inttypes.h>
#include <iostream>
-#include <sstream>
-#include "android-base/stringprintf.h"
-
-#include "base/dumpable.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "native_stack_dump.h"
-#include "thread-inl.h"
-#include "thread_list.h"
-#include "utils.h"
+#include "runtime_common.h"
namespace art {
-using android::base::StringPrintf;
-
-static constexpr bool kUseSigRTTimeout = true;
-static constexpr bool kDumpNativeStackOnTimeout = true;
-
-struct Backtrace {
- public:
- explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
- void Dump(std::ostream& os) const {
- DumpNativeStack(os, GetTid(), nullptr, "\t", nullptr, raw_context_);
- }
- private:
- // Stores the context of the signal that was unexpected and will terminate the runtime. The
- // DumpNativeStack code will take care of casting it to the expected type. This is required
- // as our signal handler runs on an alternate stack.
- void* raw_context_;
-};
-
-struct OsInfo {
- void Dump(std::ostream& os) const {
- utsname info;
- uname(&info);
- // Linux 2.6.38.8-gg784 (x86_64)
- // Darwin 11.4.0 (x86_64)
- os << info.sysname << " " << info.release << " (" << info.machine << ")";
- }
-};
-
-static const char* GetSignalName(int signal_number) {
- switch (signal_number) {
- case SIGABRT: return "SIGABRT";
- case SIGBUS: return "SIGBUS";
- case SIGFPE: return "SIGFPE";
- case SIGILL: return "SIGILL";
- case SIGPIPE: return "SIGPIPE";
- case SIGSEGV: return "SIGSEGV";
-#if defined(SIGSTKFLT)
- case SIGSTKFLT: return "SIGSTKFLT";
-#endif
- case SIGTRAP: return "SIGTRAP";
- }
- return "??";
-}
-
-static const char* GetSignalCodeName(int signal_number, int signal_code) {
- // Try the signal-specific codes...
- switch (signal_number) {
- case SIGILL:
- switch (signal_code) {
- case ILL_ILLOPC: return "ILL_ILLOPC";
- case ILL_ILLOPN: return "ILL_ILLOPN";
- case ILL_ILLADR: return "ILL_ILLADR";
- case ILL_ILLTRP: return "ILL_ILLTRP";
- case ILL_PRVOPC: return "ILL_PRVOPC";
- case ILL_PRVREG: return "ILL_PRVREG";
- case ILL_COPROC: return "ILL_COPROC";
- case ILL_BADSTK: return "ILL_BADSTK";
- }
- break;
- case SIGBUS:
- switch (signal_code) {
- case BUS_ADRALN: return "BUS_ADRALN";
- case BUS_ADRERR: return "BUS_ADRERR";
- case BUS_OBJERR: return "BUS_OBJERR";
- }
- break;
- case SIGFPE:
- switch (signal_code) {
- case FPE_INTDIV: return "FPE_INTDIV";
- case FPE_INTOVF: return "FPE_INTOVF";
- case FPE_FLTDIV: return "FPE_FLTDIV";
- case FPE_FLTOVF: return "FPE_FLTOVF";
- case FPE_FLTUND: return "FPE_FLTUND";
- case FPE_FLTRES: return "FPE_FLTRES";
- case FPE_FLTINV: return "FPE_FLTINV";
- case FPE_FLTSUB: return "FPE_FLTSUB";
- }
- break;
- case SIGSEGV:
- switch (signal_code) {
- case SEGV_MAPERR: return "SEGV_MAPERR";
- case SEGV_ACCERR: return "SEGV_ACCERR";
-#if defined(SEGV_BNDERR)
- case SEGV_BNDERR: return "SEGV_BNDERR";
-#endif
- }
- break;
- case SIGTRAP:
- switch (signal_code) {
- case TRAP_BRKPT: return "TRAP_BRKPT";
- case TRAP_TRACE: return "TRAP_TRACE";
- }
- break;
- }
- // Then the other codes...
- switch (signal_code) {
- case SI_USER: return "SI_USER";
-#if defined(SI_KERNEL)
- case SI_KERNEL: return "SI_KERNEL";
-#endif
- case SI_QUEUE: return "SI_QUEUE";
- case SI_TIMER: return "SI_TIMER";
- case SI_MESGQ: return "SI_MESGQ";
- case SI_ASYNCIO: return "SI_ASYNCIO";
-#if defined(SI_SIGIO)
- case SI_SIGIO: return "SI_SIGIO";
-#endif
-#if defined(SI_TKILL)
- case SI_TKILL: return "SI_TKILL";
-#endif
- }
- // Then give up...
- return "?";
-}
-
-struct UContext {
- explicit UContext(void* raw_context) :
- context(reinterpret_cast<ucontext_t*>(raw_context)->uc_mcontext) {
- }
-
- void Dump(std::ostream& os) const {
- // TODO: support non-x86 hosts (not urgent because this code doesn't run on targets).
-#if defined(__APPLE__) && defined(__i386__)
- DumpRegister32(os, "eax", context->__ss.__eax);
- DumpRegister32(os, "ebx", context->__ss.__ebx);
- DumpRegister32(os, "ecx", context->__ss.__ecx);
- DumpRegister32(os, "edx", context->__ss.__edx);
- os << '\n';
-
- DumpRegister32(os, "edi", context->__ss.__edi);
- DumpRegister32(os, "esi", context->__ss.__esi);
- DumpRegister32(os, "ebp", context->__ss.__ebp);
- DumpRegister32(os, "esp", context->__ss.__esp);
- os << '\n';
-
- DumpRegister32(os, "eip", context->__ss.__eip);
- os << " ";
- DumpRegister32(os, "eflags", context->__ss.__eflags);
- DumpX86Flags(os, context->__ss.__eflags);
- os << '\n';
-
- DumpRegister32(os, "cs", context->__ss.__cs);
- DumpRegister32(os, "ds", context->__ss.__ds);
- DumpRegister32(os, "es", context->__ss.__es);
- DumpRegister32(os, "fs", context->__ss.__fs);
- os << '\n';
- DumpRegister32(os, "gs", context->__ss.__gs);
- DumpRegister32(os, "ss", context->__ss.__ss);
-#elif defined(__linux__) && defined(__i386__)
- DumpRegister32(os, "eax", context.gregs[REG_EAX]);
- DumpRegister32(os, "ebx", context.gregs[REG_EBX]);
- DumpRegister32(os, "ecx", context.gregs[REG_ECX]);
- DumpRegister32(os, "edx", context.gregs[REG_EDX]);
- os << '\n';
-
- DumpRegister32(os, "edi", context.gregs[REG_EDI]);
- DumpRegister32(os, "esi", context.gregs[REG_ESI]);
- DumpRegister32(os, "ebp", context.gregs[REG_EBP]);
- DumpRegister32(os, "esp", context.gregs[REG_ESP]);
- os << '\n';
-
- DumpRegister32(os, "eip", context.gregs[REG_EIP]);
- os << " ";
- DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
- DumpX86Flags(os, context.gregs[REG_EFL]);
- os << '\n';
-
- DumpRegister32(os, "cs", context.gregs[REG_CS]);
- DumpRegister32(os, "ds", context.gregs[REG_DS]);
- DumpRegister32(os, "es", context.gregs[REG_ES]);
- DumpRegister32(os, "fs", context.gregs[REG_FS]);
- os << '\n';
- DumpRegister32(os, "gs", context.gregs[REG_GS]);
- DumpRegister32(os, "ss", context.gregs[REG_SS]);
-#elif defined(__linux__) && defined(__x86_64__)
- DumpRegister64(os, "rax", context.gregs[REG_RAX]);
- DumpRegister64(os, "rbx", context.gregs[REG_RBX]);
- DumpRegister64(os, "rcx", context.gregs[REG_RCX]);
- DumpRegister64(os, "rdx", context.gregs[REG_RDX]);
- os << '\n';
-
- DumpRegister64(os, "rdi", context.gregs[REG_RDI]);
- DumpRegister64(os, "rsi", context.gregs[REG_RSI]);
- DumpRegister64(os, "rbp", context.gregs[REG_RBP]);
- DumpRegister64(os, "rsp", context.gregs[REG_RSP]);
- os << '\n';
-
- DumpRegister64(os, "r8 ", context.gregs[REG_R8]);
- DumpRegister64(os, "r9 ", context.gregs[REG_R9]);
- DumpRegister64(os, "r10", context.gregs[REG_R10]);
- DumpRegister64(os, "r11", context.gregs[REG_R11]);
- os << '\n';
-
- DumpRegister64(os, "r12", context.gregs[REG_R12]);
- DumpRegister64(os, "r13", context.gregs[REG_R13]);
- DumpRegister64(os, "r14", context.gregs[REG_R14]);
- DumpRegister64(os, "r15", context.gregs[REG_R15]);
- os << '\n';
-
- DumpRegister64(os, "rip", context.gregs[REG_RIP]);
- os << " ";
- DumpRegister32(os, "eflags", context.gregs[REG_EFL]);
- DumpX86Flags(os, context.gregs[REG_EFL]);
- os << '\n';
-
- DumpRegister32(os, "cs", (context.gregs[REG_CSGSFS]) & 0x0FFFF);
- DumpRegister32(os, "gs", (context.gregs[REG_CSGSFS] >> 16) & 0x0FFFF);
- DumpRegister32(os, "fs", (context.gregs[REG_CSGSFS] >> 32) & 0x0FFFF);
- os << '\n';
-#else
- os << "Unknown architecture/word size/OS in ucontext dump";
-#endif
- }
-
- void DumpRegister32(std::ostream& os, const char* name, uint32_t value) const {
- os << StringPrintf(" %6s: 0x%08x", name, value);
- }
-
- void DumpRegister64(std::ostream& os, const char* name, uint64_t value) const {
- os << StringPrintf(" %6s: 0x%016" PRIx64, name, value);
- }
+void HandleUnexpectedSignalLinux(int signal_number, siginfo_t* info, void* raw_context) {
+ HandleUnexpectedSignalCommon(signal_number, info, raw_context, /* running_on_linux */ true);
- void DumpX86Flags(std::ostream& os, uint32_t flags) const {
- os << " [";
- if ((flags & (1 << 0)) != 0) {
- os << " CF";
- }
- if ((flags & (1 << 2)) != 0) {
- os << " PF";
- }
- if ((flags & (1 << 4)) != 0) {
- os << " AF";
- }
- if ((flags & (1 << 6)) != 0) {
- os << " ZF";
- }
- if ((flags & (1 << 7)) != 0) {
- os << " SF";
- }
- if ((flags & (1 << 8)) != 0) {
- os << " TF";
- }
- if ((flags & (1 << 9)) != 0) {
- os << " IF";
- }
- if ((flags & (1 << 10)) != 0) {
- os << " DF";
- }
- if ((flags & (1 << 11)) != 0) {
- os << " OF";
- }
- os << " ]";
- }
-
- mcontext_t& context;
-};
-
-// Return the signal number we recognize as timeout. -1 means not active/supported.
-static int GetTimeoutSignal() {
-#if defined(__APPLE__)
- // Mac does not support realtime signals.
- UNUSED(kUseSigRTTimeout);
- return -1;
-#else
- return kUseSigRTTimeout ? (SIGRTMIN + 2) : -1;
-#endif
-}
-
-static bool IsTimeoutSignal(int signal_number) {
- return signal_number == GetTimeoutSignal();
-}
-
-void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context) {
- static bool handlingUnexpectedSignal = false;
- if (handlingUnexpectedSignal) {
- LogHelper::LogLineLowStack(__FILE__,
- __LINE__,
- ::android::base::FATAL_WITHOUT_ABORT,
- "HandleUnexpectedSignal reentered\n");
- if (IsTimeoutSignal(signal_number)) {
- // Ignore a recursive timeout.
- return;
- }
- _exit(1);
- }
- handlingUnexpectedSignal = true;
-
- gAborting++; // set before taking any locks
- MutexLock mu(Thread::Current(), *Locks::unexpected_signal_lock_);
-
- bool has_address = (signal_number == SIGILL || signal_number == SIGBUS ||
- signal_number == SIGFPE || signal_number == SIGSEGV);
-
- OsInfo os_info;
- const char* cmd_line = GetCmdLine();
- if (cmd_line == nullptr) {
- cmd_line = "<unset>"; // Because no-one called InitLogging.
- }
- pid_t tid = GetTid();
- std::string thread_name(GetThreadName(tid));
- UContext thread_context(raw_context);
- Backtrace thread_backtrace(raw_context);
-
- // Note: We are using cerr directly instead of LOG macros to ensure even just partial output
- // makes it out. That means we lose the "dalvikvm..." prefix, but that is acceptable
- // considering this is an abort situation.
-
- std::cerr << "*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***\n"
- << StringPrintf("Fatal signal %d (%s), code %d (%s)",
- signal_number, GetSignalName(signal_number),
- info->si_code,
- GetSignalCodeName(signal_number, info->si_code))
- << (has_address ? StringPrintf(" fault addr %p", info->si_addr) : "") << std::endl
- << "OS: " << Dumpable<OsInfo>(os_info) << std::endl
- << "Cmdline: " << cmd_line << std::endl
- << "Thread: " << tid << " \"" << thread_name << "\"" << std::endl
- << "Registers:\n" << Dumpable<UContext>(thread_context) << std::endl
- << "Backtrace:\n" << Dumpable<Backtrace>(thread_backtrace) << std::endl;
- if (kIsDebugBuild && signal_number == SIGSEGV) {
- PrintFileToLog("/proc/self/maps", LogSeverity::FATAL_WITHOUT_ABORT);
- }
- Runtime* runtime = Runtime::Current();
- if (runtime != nullptr) {
- if (IsTimeoutSignal(signal_number)) {
- // Special timeout signal. Try to dump all threads.
- // Note: Do not use DumpForSigQuit, as that might disable native unwind, but the native parts
- // are of value here.
- runtime->GetThreadList()->Dump(std::cerr, kDumpNativeStackOnTimeout);
- std::cerr << std::endl;
- }
- std::cerr << "Fault message: " << runtime->GetFaultMessage() << std::endl;
- }
if (getenv("debug_db_uid") != nullptr || getenv("art_wait_for_gdb_on_crash") != nullptr) {
+ pid_t tid = GetTid();
+ std::string thread_name(GetThreadName(tid));
std::cerr << "********************************************************\n"
<< "* Process " << getpid() << " thread " << tid << " \"" << thread_name
<< "\""
@@ -398,31 +58,9 @@ void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_contex
void Runtime::InitPlatformSignalHandlers() {
// On the host, we don't have debuggerd to dump a stack for us when something unexpected happens.
- struct sigaction action;
- memset(&action, 0, sizeof(action));
- sigemptyset(&action.sa_mask);
- action.sa_sigaction = HandleUnexpectedSignal;
- // Use the three-argument sa_sigaction handler.
- action.sa_flags |= SA_SIGINFO;
- // Use the alternate signal stack so we can catch stack overflows.
- action.sa_flags |= SA_ONSTACK;
-
- int rc = 0;
- rc += sigaction(SIGABRT, &action, nullptr);
- rc += sigaction(SIGBUS, &action, nullptr);
- rc += sigaction(SIGFPE, &action, nullptr);
- rc += sigaction(SIGILL, &action, nullptr);
- rc += sigaction(SIGPIPE, &action, nullptr);
- rc += sigaction(SIGSEGV, &action, nullptr);
-#if defined(SIGSTKFLT)
- rc += sigaction(SIGSTKFLT, &action, nullptr);
-#endif
- rc += sigaction(SIGTRAP, &action, nullptr);
- // Special dump-all timeout.
- if (GetTimeoutSignal() != -1) {
- rc += sigaction(GetTimeoutSignal(), &action, nullptr);
- }
- CHECK_EQ(rc, 0);
+ InitPlatformSignalHandlersCommon(HandleUnexpectedSignalLinux,
+ nullptr,
+ /* handle_timeout_signal */ true);
}
} // namespace art
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index ad748b04d3..e68a1b2681 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -123,7 +123,6 @@ RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::k
RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>
RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>
RUNTIME_OPTIONS_KEY (std::vector<Plugin>, Plugins) // -Xplugin:<library>
-RUNTIME_OPTIONS_KEY (Unit, FullyDeoptable) // -Xfully-deoptable
// Not parse-able from command line, but can be provided explicitly.
// (Do not add anything here that is defined in ParsedOptions::MakeParser)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 52d488fbae..eea68aa74e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2854,13 +2854,16 @@ void Thread::QuickDeliverException() {
if (Dbg::IsForcedInterpreterNeededForException(this)) {
NthCallerVisitor visitor(this, 0, false);
visitor.WalkStack();
- if (Runtime::Current()->IsDeoptimizeable(visitor.caller_pc)) {
+ if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
// Save the exception into the deoptimization context so it can be restored
// before entering the interpreter.
PushDeoptimizationContext(
JValue(), /*is_reference */ false, /* from_code */ false, exception);
artDeoptimize(this);
UNREACHABLE();
+ } else {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << visitor.caller->PrettyMethod();
}
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 01c940e9df..df8acc37a2 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -455,7 +455,6 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
Closure* flip_callback,
gc::collector::GarbageCollector* collector) {
TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
- const uint64_t start_time = NanoTime();
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertNotHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
@@ -464,13 +463,17 @@ size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
collector->GetHeap()->ThreadFlipBegin(self); // Sync with JNI critical calls.
+ // ThreadFlipBegin happens before we suspend all the threads, so it does not count towards the
+ // pause.
+ const uint64_t suspend_start_time = NanoTime();
SuspendAllInternal(self, self, nullptr);
// Run the flip callback for the collector.
Locks::mutator_lock_->ExclusiveLock(self);
+ suspend_all_historam_.AdjustAndAddValue(NanoTime() - suspend_start_time);
flip_callback->Run(self);
Locks::mutator_lock_->ExclusiveUnlock(self);
- collector->RegisterPause(NanoTime() - start_time);
+ collector->RegisterPause(NanoTime() - suspend_start_time);
// Resume runnable threads.
size_t runnable_thread_count = 0;
@@ -629,8 +632,9 @@ void ThreadList::SuspendAllInternal(Thread* self,
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
// Update global suspend all state for attaching threads.
++suspend_all_count_;
- if (debug_suspend)
+ if (debug_suspend) {
++debug_suspend_all_count_;
+ }
pending_threads.StoreRelaxed(list_.size() - num_ignored);
// Increment everybody's suspend count (except those that should be ignored).
for (const auto& thread : list_) {
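
The FlipThreadRoots change above moves the pause clock's start from before ThreadFlipBegin to the suspend request, so time spent synchronizing with JNI critical sections no longer counts toward the reported pause. A schematic of the new accounting (the commented-out calls stand in for the runtime's):

#include <chrono>
#include <cstdio>

using Clock = std::chrono::steady_clock;

int main() {
  // collector->GetHeap()->ThreadFlipBegin(self);  // no longer timed
  const auto suspend_start_time = Clock::now();    // clock starts here now
  // SuspendAllInternal(self, self, nullptr);
  // flip_callback->Run(self);
  const auto pause = Clock::now() - suspend_start_time;
  std::printf("registered pause: %lld ns\n",
              static_cast<long long>(
                  std::chrono::duration_cast<std::chrono::nanoseconds>(pause)
                      .count()));
}
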
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index d9179c3892..d24a5e5c4a 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -88,7 +88,10 @@ void ThreadPoolWorker::Run() {
void* ThreadPoolWorker::Callback(void* arg) {
ThreadPoolWorker* worker = reinterpret_cast<ThreadPoolWorker*>(arg);
Runtime* runtime = Runtime::Current();
- CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, nullptr, false));
+ CHECK(runtime->AttachCurrentThread(worker->name_.c_str(),
+ true,
+ nullptr,
+ worker->thread_pool_->create_peers_));
worker->thread_ = Thread::Current();
// Thread pool workers cannot call into java.
worker->thread_->SetCanCallIntoJava(false);
@@ -112,7 +115,7 @@ void ThreadPool::RemoveAllTasks(Thread* self) {
tasks_.clear();
}
-ThreadPool::ThreadPool(const char* name, size_t num_threads)
+ThreadPool::ThreadPool(const char* name, size_t num_threads, bool create_peers)
: name_(name),
task_queue_lock_("task queue lock"),
task_queue_condition_("task queue condition", task_queue_lock_),
@@ -124,7 +127,8 @@ ThreadPool::ThreadPool(const char* name, size_t num_threads)
total_wait_time_(0),
// Add one since the caller of constructor waits on the barrier too.
creation_barier_(num_threads + 1),
- max_active_workers_(num_threads) {
+ max_active_workers_(num_threads),
+ create_peers_(create_peers) {
Thread* self = Thread::Current();
while (GetThreadCount() < num_threads) {
const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
@@ -217,6 +221,7 @@ Task* ThreadPool::TryGetTaskLocked() {
void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) {
if (do_work) {
+ CHECK(!create_peers_);
Task* task = nullptr;
while ((task = TryGetTask(self)) != nullptr) {
task->Run(self);
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index 7ecfcd1289..a465e11055 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -105,11 +105,17 @@ class ThreadPool {
// Remove all tasks in the queue.
void RemoveAllTasks(Thread* self) REQUIRES(!task_queue_lock_);
- ThreadPool(const char* name, size_t num_threads);
+ // Create a named thread pool with the given number of threads.
+ //
+ // If create_peers is true, all worker threads will have a Java peer object. Note that if the
+ // pool is asked to do work on the current thread (see Wait), a peer may not be available. Wait
+ // will conservatively abort if create_peers and do_work are true.
+ ThreadPool(const char* name, size_t num_threads, bool create_peers = false);
virtual ~ThreadPool();
// Wait for all tasks currently on queue to get completed. If the pool has been stopped, only
// wait till all already running tasks are done.
+ // When the pool was created with peers for workers, do_work must not be true (see ThreadPool()).
void Wait(Thread* self, bool do_work, bool may_hold_locks) REQUIRES(!task_queue_lock_);
size_t GetTaskCount(Thread* self) REQUIRES(!task_queue_lock_);
@@ -159,6 +165,7 @@ class ThreadPool {
uint64_t total_wait_time_;
Barrier creation_barier_;
size_t max_active_workers_ GUARDED_BY(task_queue_lock_);
+ const bool create_peers_;
private:
friend class ThreadPoolWorker;
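
The create_peers comment above encodes a contract: pools whose workers carry Java peers refuse to run tasks inline on the calling thread, because the caller may have no peer. A generic illustration of that guard (Pool here is not ART's ThreadPool):

#include <cassert>

class Pool {
 public:
  explicit Pool(bool create_peers) : create_peers_(create_peers) {}

  // Mirrors ThreadPool::Wait's new CHECK: inline work is rejected for
  // pools created with peers.
  void Wait(bool do_work) {
    if (do_work) {
      assert(!create_peers_ && "inline work is unsupported for peer pools");
    }
    // ... otherwise drain the queue / block until tasks complete ...
  }

 private:
  const bool create_peers_;
};

int main() {
  Pool plain(/*create_peers=*/false);
  plain.Wait(/*do_work=*/true);   // fine
  Pool peers(/*create_peers=*/true);
  peers.Wait(/*do_work=*/false);  // fine; do_work=true would assert
}
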
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 14c2c3bac8..28aa21f7a2 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -20,6 +20,7 @@
#include "atomic.h"
#include "common_runtime_test.h"
+#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
namespace art {
@@ -159,4 +160,55 @@ TEST_F(ThreadPoolTest, RecursiveTest) {
EXPECT_EQ((1 << depth) - 1, count.LoadSequentiallyConsistent());
}
+class PeerTask : public Task {
+ public:
+ PeerTask() {}
+
+ void Run(Thread* self) {
+ ScopedObjectAccess soa(self);
+ CHECK(self->GetPeer() != nullptr);
+ }
+
+ void Finalize() {
+ delete this;
+ }
+};
+
+class NoPeerTask : public Task {
+ public:
+ NoPeerTask() {}
+
+ void Run(Thread* self) {
+ ScopedObjectAccess soa(self);
+ CHECK(self->GetPeer() == nullptr);
+ }
+
+ void Finalize() {
+ delete this;
+ }
+};
+
+// Tests for create_peers functionality.
+TEST_F(ThreadPoolTest, PeerTest) {
+ Thread* self = Thread::Current();
+ {
+ ThreadPool thread_pool("Thread pool test thread pool", 1);
+ thread_pool.AddTask(self, new NoPeerTask());
+ thread_pool.StartWorkers(self);
+ thread_pool.Wait(self, false, false);
+ }
+
+ {
+ // To create peers, the runtime needs to be started.
+ self->TransitionFromSuspendedToRunnable();
+ bool started = runtime_->Start();
+ ASSERT_TRUE(started);
+
+ ThreadPool thread_pool("Thread pool test thread pool", 1, true);
+ thread_pool.AddTask(self, new PeerTask());
+ thread_pool.StartWorkers(self);
+ thread_pool.Wait(self, false, false);
+ }
+}
+
} // namespace art
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 2add955f8e..3a9975a4e2 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -905,6 +905,9 @@ void Trace::FlushBuf() {
void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
uint32_t thread_clock_diff, uint32_t wall_clock_diff) {
+ // Ensure we always use the non-obsolete version of the method so that entry/exit events have the
+ // same pointer value.
+ method = method->GetNonObsoleteMethod();
// Advance cur_offset_ atomically.
int32_t new_offset;
int32_t old_offset = 0;
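
The trace.cc change canonicalizes methods before logging so that a method redefined between entry and exit still produces a matched pair of events. A tiny model of that canonicalization (Method is a stand-in):

#include <cassert>

struct Method {
  Method* non_obsolete;  // points to self while the method is current
  Method* GetNonObsoleteMethod() { return non_obsolete; }
};

int main() {
  Method current{nullptr};
  current.non_obsolete = &current;
  Method obsolete{&current};  // obsolete copy left behind by redefinition
  // Entry logged against `current`, exit against `obsolete`: both map to
  // the same pointer once canonicalized.
  assert(obsolete.GetNonObsoleteMethod() == current.GetNonObsoleteMethod());
}
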
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index bd1b044dae..2812c21004 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -48,9 +48,11 @@ inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size, cons
: DexCacheArraysLayout(pointer_size, dex_file->GetHeader()) {
}
-inline constexpr size_t DexCacheArraysLayout::Alignment() {
- // GcRoot<> alignment is 4, i.e. lower than or equal to the pointer alignment.
- static_assert(alignof(GcRoot<mirror::Class>) == 4, "Expecting alignof(GcRoot<>) == 4");
+constexpr size_t DexCacheArraysLayout::Alignment() {
+ // mirror::Type/String/MethodTypeDexCacheType alignment is 8,
+ // i.e. higher than or equal to the pointer alignment.
+ static_assert(alignof(mirror::TypeDexCacheType) == 8,
+ "Expecting alignof(TypeDexCacheType) == 8");
static_assert(alignof(mirror::StringDexCacheType) == 8,
"Expecting alignof(StringDexCacheType) == 8");
static_assert(alignof(mirror::MethodTypeDexCacheType) == 8,
@@ -60,17 +62,22 @@ inline constexpr size_t DexCacheArraysLayout::Alignment() {
}
template <typename T>
-static constexpr PointerSize GcRootAsPointerSize() {
+constexpr PointerSize GcRootAsPointerSize() {
static_assert(sizeof(GcRoot<T>) == 4U, "Unexpected GcRoot size");
return PointerSize::k32;
}
inline size_t DexCacheArraysLayout::TypeOffset(dex::TypeIndex type_idx) const {
- return types_offset_ + ElementOffset(GcRootAsPointerSize<mirror::Class>(), type_idx.index_);
+ return types_offset_ + ElementOffset(PointerSize::k64,
+ type_idx.index_ % mirror::DexCache::kDexCacheTypeCacheSize);
}
inline size_t DexCacheArraysLayout::TypesSize(size_t num_elements) const {
- return ArraySize(GcRootAsPointerSize<mirror::Class>(), num_elements);
+ size_t cache_size = mirror::DexCache::kDexCacheTypeCacheSize;
+ if (num_elements < cache_size) {
+ cache_size = num_elements;
+ }
+ return ArraySize(PointerSize::k64, cache_size);
}
inline size_t DexCacheArraysLayout::TypesAlignment() const {
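
The layout change above turns the per-dex-file type array into a fixed-size, direct-mapped cache of 64-bit entries: a type's slot is its index modulo the cache size, so distinct indices can collide. A sketch of the offset arithmetic (kCacheSize and kSlotSize are assumed illustrative values, standing in for mirror::DexCache::kDexCacheTypeCacheSize and PointerSize::k64):

#include <cstddef>
#include <cstdio>

constexpr size_t kCacheSize = 1024;  // assumed stand-in value
constexpr size_t kSlotSize = 8;      // 64-bit entries, hence PointerSize::k64

constexpr size_t TypeOffset(size_t types_offset, size_t type_idx) {
  return types_offset + kSlotSize * (type_idx % kCacheSize);
}

int main() {
  // Indices 5 and 5 + kCacheSize collide in the direct-mapped cache:
  std::printf("%zu %zu\n", TypeOffset(0, 5), TypeOffset(0, 5 + kCacheSize));
  // prints "40 40"
}
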
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index b915457557..ba429d8c3e 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2399,7 +2399,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) {
// If this is a primitive type, fail HARD.
- mirror::Class* klass = dex_cache_->GetResolvedType(type_idx);
+ ObjPtr<mirror::Class> klass =
+ ClassLinker::LookupResolvedType(type_idx, dex_cache_.Get(), class_loader_.Get());
if (klass != nullptr && klass->IsPrimitive()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "using primitive type "
<< dex_file_->StringByTypeIdx(type_idx) << " in instanceof in "
@@ -3684,9 +3685,10 @@ inline bool MethodVerifier::IsInstantiableOrPrimitive(mirror::Class* klass) {
}
const RegType& MethodVerifier::ResolveClassAndCheckAccess(dex::TypeIndex class_idx) {
- mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
+ mirror::Class* klass =
+ ClassLinker::LookupResolvedType(class_idx, dex_cache_.Get(), class_loader_.Get()).Ptr();
const RegType* result = nullptr;
- if (klass != nullptr) {
+ if (klass != nullptr && !klass->IsErroneous()) {
bool precise = klass->CannotBeAssignedFromOtherTypes();
if (precise && !IsInstantiableOrPrimitive(klass)) {
const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
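
Both verifier hunks swap a raw dex-cache read for ClassLinker::LookupResolvedType and then refuse to trust classes recorded as erroneous. The guard in isolation (Class and LookupResolvedType are stand-ins):

#include <cstdio>

struct Class {
  bool erroneous;
  bool IsErroneous() const { return erroneous; }
};

// Stand-in: returns whatever an earlier resolution attempt left behind,
// which may be a class marked erroneous.
const Class* LookupResolvedType(const Class* cached) { return cached; }

int main() {
  Class bad{/*erroneous=*/true};
  const Class* klass = LookupResolvedType(&bad);
  // Mirrors the verifier's new check: erroneous classes are treated as
  // unresolved rather than used for precise typing.
  bool usable = (klass != nullptr && !klass->IsErroneous());
  std::printf("usable: %d\n", usable);  // prints "usable: 0"
}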