Diffstat (limited to 'runtime')
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S                  49
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S              80
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S                82
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S            81
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S                  54
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S            45
-rw-r--r--  runtime/entrypoints/quick/quick_dexcache_entrypoints.cc   17
-rw-r--r--  runtime/gc/heap.cc                                        16
-rw-r--r--  runtime/hprof/hprof.cc                                    27
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc                 104
-rw-r--r--  runtime/interpreter/unstarted_runtime_test.cc             95
-rw-r--r--  runtime/jit/profile_compilation_info.cc                   18
-rw-r--r--  runtime/jit/profile_compilation_info.h                     3
-rw-r--r--  runtime/mirror/string.h                                    5
-rw-r--r--  runtime/openjdkjvmti/ti_thread.cc                          4
15 files changed, 494 insertions(+), 186 deletions(-)
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index a443a4060d..cfe8406fbf 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -965,9 +965,27 @@ ENTRY \name
END \name
.endm
-ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+// Macro for string and type resolution and initialization.
+.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_EVERYTHING_FRAME r1 @ save everything in case of GC
+ mov r1, r9 @ pass Thread::Current
+ bl \entrypoint @ (uint32_t index, Thread*)
+ cbz r0, 1f @ If result is null, deliver the OOME.
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
+ bx lr
+ .cfi_restore_state
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+END \name
+.endm
+
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
/*
* Called by managed code to resolve a static field and load a non-wide value.
@@ -1066,27 +1084,6 @@ ENTRY art_quick_set64_static
DELIVER_PENDING_EXCEPTION
END art_quick_set64_static
- /*
- * Entry from managed code to resolve a string, this stub will
- * check the dex cache for a matching string (the fast path), and if not found,
- * it will allocate a String and deliver an exception on error.
- * On success the String is returned. R0 holds the string index.
- */
-
-ENTRY art_quick_resolve_string
- SETUP_SAVE_EVERYTHING_FRAME r1 @ save everything in case of GC
- mov r1, r9 @ pass Thread::Current
- bl artResolveStringFromCode @ (uint32_t type_idx, Thread*)
- cbz r0, 1f @ If result is null, deliver the OOME.
- .cfi_remember_state
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
- bx lr
- .cfi_restore_state
-1:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END art_quick_resolve_string
-
-
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
// Comment out allocators that have arm specific asm.
@@ -2057,7 +2054,9 @@ ENTRY \name
beq .Lret_forwarding_address\name
.Lslow_rb_\name:
- // Save IP: the kSaveEverything entrypoint art_quick_resolve_string makes a tail call here.
+ // Save IP: The kSaveEverything entrypoint art_quick_resolve_string used to
+ // make a tail call here. Currently, it serves only for stack alignment but
+ // we may reintroduce kSaveEverything calls here in the future.
push {r0-r4, r9, ip, lr} @ save return address, core caller-save registers and ip
.cfi_adjust_cfa_offset 32
.cfi_rel_offset r0, 0
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 219d8b447a..bfbe4816ba 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1553,6 +1553,24 @@ ENTRY \name
END \name
.endm
+// Macro for string and type resolution and initialization.
+.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_EVERYTHING_FRAME // save everything for stack crawl
+ mov x1, xSELF // pass Thread::Current
+ bl \entrypoint // (int32_t index, Thread* self)
+ cbz w0, 1f // If result is null, deliver the OOME.
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
+ ret // return
+ .cfi_restore_state
+ .cfi_def_cfa_offset FRAME_SIZE_SAVE_EVERYTHING // workaround for clang bug: 31975598
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+END \name
+.endm
+
.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
cbz w0, 1f // result zero branch over
ret // return
@@ -1571,10 +1589,11 @@ TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode,
* initializer and deliver the exception on error. On success the static storage base is
* returned.
*/
-ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
@@ -1604,27 +1623,6 @@ THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCod
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
- /*
- * Entry from managed code to resolve a string, this stub will
- * check the dex cache for a matching string (the fast path), and if not found,
- * it will allocate a String and deliver an exception on error.
- * On success the String is returned. R0 holds the string index.
- */
-
-ENTRY art_quick_resolve_string
- SETUP_SAVE_EVERYTHING_FRAME // save everything for stack crawl
- mov x1, xSELF // pass Thread::Current
- bl artResolveStringFromCode // (int32_t string_idx, Thread* self)
- cbz w0, 1f // If result is null, deliver the OOME.
- .cfi_remember_state
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
- ret // return
- .cfi_restore_state
- .cfi_def_cfa_offset FRAME_SIZE_SAVE_EVERYTHING // workaround for clang bug: 31975598
-1:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END art_quick_resolve_string
-
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
// Comment out allocators that have arm64 specific asm.
@@ -2380,13 +2378,6 @@ END art_quick_indexof
ENTRY \name
// Reference is null, no work to do at all.
cbz \wreg, .Lret_rb_\name
- /*
- * Allocate 46 stack slots * 8 = 368 bytes:
- * - 20 slots for core registers X0-X19
- * - 24 slots for floating-point registers D0-D7 and D16-D31
- * - 1 slot for return address register XLR
- * - 1 padding slot for 16-byte stack alignment
- */
// Use wIP0 as temp and check the mark bit of the reference. wIP0 is not used by the compiler.
ldr wIP0, [\xreg, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
tbz wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lnot_marked_rb_\name
@@ -2398,10 +2389,15 @@ ENTRY \name
cmp wzr, wIP0, lsr #30
beq .Lret_forwarding_address\name
.Lslow_rb_\name:
- // We must not clobber IP0 since art_quick_resolve_string makes a tail call here and relies on
- // IP0 being restored.
+ /*
+ * Allocate 44 stack slots * 8 = 352 bytes:
+ * - 20 slots for core registers X0-X15, X17-X19, LR
+ * - 24 slots for floating-point registers D0-D7 and D16-D31
+ */
+ // We must not clobber IP1 since code emitted for HLoadClass and HLoadString
+ // relies on IP1 being preserved.
// Save all potentially live caller-save core registers.
- SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 368
+ SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 352
SAVE_TWO_REGS x2, x3, 16
SAVE_TWO_REGS x4, x5, 32
SAVE_TWO_REGS x6, x7, 48
@@ -2409,8 +2405,8 @@ ENTRY \name
SAVE_TWO_REGS x10, x11, 80
SAVE_TWO_REGS x12, x13, 96
SAVE_TWO_REGS x14, x15, 112
- SAVE_TWO_REGS x16, x17, 128
- SAVE_TWO_REGS x18, x19, 144
+ SAVE_TWO_REGS x17, x18, 128 // Skip x16, i.e. IP0.
+ SAVE_TWO_REGS x19, xLR, 144 // Save also return address.
// Save all potentially live caller-save floating-point registers.
stp d0, d1, [sp, #160]
stp d2, d3, [sp, #176]
@@ -2424,9 +2420,6 @@ ENTRY \name
stp d26, d27, [sp, #304]
stp d28, d29, [sp, #320]
stp d30, d31, [sp, #336]
- // Save return address.
- // (sp + #352 is a padding slot)
- SAVE_REG xLR, 360
.ifnc \wreg, w0
mov w0, \wreg // Pass arg1 - obj from `wreg`
@@ -2446,8 +2439,8 @@ ENTRY \name
POP_REGS_NE x10, x11, 80, \xreg
POP_REGS_NE x12, x13, 96, \xreg
POP_REGS_NE x14, x15, 112, \xreg
- POP_REGS_NE x16, x17, 128, \xreg
- POP_REGS_NE x18, x19, 144, \xreg
+ POP_REGS_NE x17, x18, 128, \xreg
+ POP_REGS_NE x19, xLR, 144, \xreg // Restore also return address.
// Restore floating-point registers.
ldp d0, d1, [sp, #160]
ldp d2, d3, [sp, #176]
@@ -2461,9 +2454,8 @@ ENTRY \name
ldp d26, d27, [sp, #304]
ldp d28, d29, [sp, #320]
ldp d30, d31, [sp, #336]
- // Restore return address and remove padding.
- RESTORE_REG xLR, 360
- DECREASE_FRAME 368
+ // Remove frame and return.
+ DECREASE_FRAME 352
ret
.Lret_forwarding_address\name:
mvn wIP0, wIP0
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 663cb6c62f..10b690ac4a 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1576,9 +1576,87 @@ END \name
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+// A hand-written override for:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+ENTRY \c_name
+ # Fast path rosalloc allocation
+ # a0: type
+ # s1: Thread::Current
+ # -----------------------------
+ # t1: object size
+ # t2: rosalloc run
+ # t3: thread stack top offset
+ # t4: thread stack bottom offset
+ # v0: free list head
+ #
+ # t5, t6 : temps
+ lw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation
+ lw $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # stack has any room left.
+ bgeu $t3, $t4, .Lslow_path_\c_name
+
+ lw $t1, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0) # Load object size (t1).
+ li $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
+ # allocation. Also does the
+ # initialized and finalizable checks.
+ bgtu $t1, $t5, .Lslow_path_\c_name
+
+ # Compute the rosalloc bracket index from the size. Since the size is already aligned we can
+ # combine the two shifts together.
+ srl $t1, $t1, (ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+
+ addu $t2, $t1, $s1
+ lw $t2, (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)($t2) # Load rosalloc run (t2).
+
+ # Load the free list head (v0).
+ # NOTE: this will be the return val.
+ lw $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+ beqz $v0, .Lslow_path_\c_name
+ nop
+
+ # Load the next pointer of the head and update the list head with the next pointer.
+ lw $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
+ sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+
+ # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
+ # asserted to match.
+
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+
+ POISON_HEAP_REF $a0
+ sw $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)
+
+ # Push the new object onto the thread local allocation stack and increment the thread local
+ # allocation stack top.
+ sw $v0, 0($t3)
+ addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
+ sw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
+
+ # Decrement the size of the free list.
+ lw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+ addiu $t5, $t5, -1
+ sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+
+ sync # Fence.
+
+ jalr $zero, $ra
+ nop
+
+ .Lslow_path_\c_name:
+ SETUP_SAVE_REFS_ONLY_FRAME
+ la $t9, \cxx_name
+ jalr $t9
+ move $a1, $s1 # Pass self as argument.
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END \c_name
+.endm
+
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
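The hand-written MIPS fast path above is dense, so the following stand-alone C++ model sketches the same sequence of steps (room on the thread-local allocation stack, bracket-size check, free-list pop, class-pointer store, allocation-stack push, free-list size decrement, fence). All type and function names here are illustrative stand-ins rather than ART classes, and the bracket-index lookup that selects the run is assumed to have happened in the caller.

    #include <atomic>
    #include <cstdint>

    // Illustrative model of the RosAlloc fast path; not ART code.
    struct Slot { Slot* next; };
    struct Run { Slot* free_list_head; int32_t free_list_size; };
    struct ThreadLocalState {
      void** alloc_stack_top;   // THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET
      void** alloc_stack_end;   // THREAD_LOCAL_ALLOC_STACK_END_OFFSET
    };

    // Returns the new object (its first word now holds the class pointer), or nullptr when the
    // caller must take the slow path: no room on the allocation stack, a size too large for a
    // thread-local bracket, or an exhausted free list.
    inline void* RosAllocFastPath(ThreadLocalState* self, Run* run, void* klass,
                                  uint32_t object_size, uint32_t max_thread_local_bracket_size) {
      if (self->alloc_stack_top >= self->alloc_stack_end) return nullptr;
      if (object_size > max_thread_local_bracket_size) return nullptr;
      Slot* slot = run->free_list_head;
      if (slot == nullptr) return nullptr;
      run->free_list_head = slot->next;              // Pop the free-list head.
      *reinterpret_cast<void**>(slot) = klass;       // Class pointer overwrites the next pointer.
      *self->alloc_stack_top++ = slot;               // Push onto the thread-local allocation stack.
      run->free_list_size--;                         // Account for the removed slot.
      std::atomic_thread_fence(std::memory_order_release);  // Corresponds to the "sync" fence.
      return slot;
    }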
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 5fee575331..fff6d256b1 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1533,8 +1533,85 @@ END \name
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+// A hand-written override for:
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
+.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
+ENTRY \c_name
+ # Fast path rosalloc allocation
+ # a0: type
+ # s1: Thread::Current
+ # -----------------------------
+ # t1: object size
+ # t2: rosalloc run
+ # t3: thread stack top offset
+ # a4: thread stack bottom offset
+ # v0: free list head
+ #
+ # a5, a6 : temps
+ ld $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1) # Check if thread local allocation stack
+ ld $a4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1) # has any room left.
+ bgeuc $t3, $a4, .Lslow_path_\c_name
+
+ lwu $t1, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0) # Load object size (t1).
+ li $a5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE # Check if size is for a thread local
+ # allocation. Also does the initialized
+ # and finalizable checks.
+ bltuc $a5, $t1, .Lslow_path_\c_name
+
+ # Compute the rosalloc bracket index from the size. Since the size is already aligned we can
+ # combine the two shifts together.
+ dsrl $t1, $t1, (ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+
+ daddu $t2, $t1, $s1
+ ld $t2, (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)($t2) # Load rosalloc run (t2).
+
+ # Load the free list head (v0).
+ # NOTE: this will be the return val.
+ ld $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+ beqzc $v0, .Lslow_path_\c_name
+
+ # Load the next pointer of the head and update the list head with the next pointer.
+ ld $a5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
+ sd $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
+
+ # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
+ # asserted to match.
+
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+
+ POISON_HEAP_REF $a0
+ sw $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)
+
+ # Push the new object onto the thread local allocation stack and increment the thread local
+ # allocation stack top.
+ sd $v0, 0($t3)
+ daddiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
+ sd $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
+
+ # Decrement the size of the free list.
+ lw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+ addiu $a5, $a5, -1
+ sw $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
+
+ sync # Fence.
+
+ jalr $zero, $ra
+ .cpreturn # Restore gp from t8 in branch delay slot.
+
+.Lslow_path_\c_name:
+ SETUP_SAVE_REFS_ONLY_FRAME
+ jal \cxx_name
+ move $a1, $s1 # Pass self as argument.
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END \c_name
+.endm
+
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
+ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc
+
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 76615e843b..8c907e0790 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -922,6 +922,31 @@ MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
END_FUNCTION VAR(c_name)
END_MACRO
+// Macro for string and type resolution and initialization.
+MACRO2(ONE_ARG_SAVE_EVERYTHING_DOWNCALL, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_SAVE_EVERYTHING_FRAME ebx, ebx // save ref containing registers for GC
+ // Outgoing argument set up
+ subl MACRO_LITERAL(8), %esp // push padding
+ CFI_ADJUST_CFA_OFFSET(8)
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH eax // pass arg1
+ call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
+ CFI_ADJUST_CFA_OFFSET(-16)
+ testl %eax, %eax // If result is null, deliver the OOME.
+ jz 1f
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX // restore frame up to return address
+ ret // return
+ CFI_RESTORE_STATE
+ CFI_DEF_CFA(esp, FRAME_SIZE_SAVE_EVERYTHING) // workaround for clang bug: 31975598
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+ END_FUNCTION VAR(c_name)
+END_MACRO
+
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
testl %eax, %eax // eax == 0 ?
jz 1f // if eax == 0 goto 1
@@ -1245,31 +1270,10 @@ GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFr
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
-DEFINE_FUNCTION art_quick_resolve_string
- SETUP_SAVE_EVERYTHING_FRAME ebx, ebx
- // Outgoing argument set up
- subl LITERAL(8), %esp // push padding
- CFI_ADJUST_CFA_OFFSET(8)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH eax // pass arg1
- call SYMBOL(artResolveStringFromCode)
- addl LITERAL(16), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-16)
- testl %eax, %eax // If result is null, deliver the OOME.
- jz 1f
- CFI_REMEMBER_STATE
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX
- ret
- CFI_RESTORE_STATE
- CFI_DEF_CFA(esp, FRAME_SIZE_SAVE_EVERYTHING) // workaround for clang bug: 31975598
-1:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END_FUNCTION art_quick_resolve_string
-
-ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index a1ae858735..f1be52eeb6 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -950,6 +950,26 @@ MACRO3(THREE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
END_FUNCTION VAR(c_name)
END_MACRO
+// Macro for string and type resolution and initialization.
+MACRO2(ONE_ARG_SAVE_EVERYTHING_DOWNCALL, c_name, cxx_name)
+ DEFINE_FUNCTION VAR(c_name)
+ SETUP_SAVE_EVERYTHING_FRAME // save everything for GC
+ // Outgoing argument set up
+ movl %eax, %edi // pass string index
+ movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
+ call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
+ testl %eax, %eax // If result is null, deliver the OOME.
+ jz 1f
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX // restore frame up to return address
+ ret
+ CFI_RESTORE_STATE
+ CFI_DEF_CFA(rsp, FRAME_SIZE_SAVE_EVERYTHING) // workaround for clang bug: 31975598
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+ END_FUNCTION VAR(c_name)
+END_MACRO
+
MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
testq %rax, %rax // rax == 0 ?
jz 1f // if rax == 0 goto 1
@@ -1270,27 +1290,10 @@ DEFINE_FUNCTION art_quick_alloc_object_initialized_region_tlab
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedRegionTLAB
END_FUNCTION art_quick_alloc_object_initialized_region_tlab
-DEFINE_FUNCTION art_quick_resolve_string
- SETUP_SAVE_EVERYTHING_FRAME
- // Outgoing argument set up
- movl %eax, %edi // pass string index
- movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call SYMBOL(artResolveStringFromCode) // artResolveStringFromCode(arg0, Thread*)
-
- testl %eax, %eax // If result is null, deliver the OOME.
- jz 1f
- CFI_REMEMBER_STATE
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX // restore frame up to return address
- ret
- CFI_RESTORE_STATE
- CFI_DEF_CFA(rsp, FRAME_SIZE_SAVE_EVERYTHING) // workaround for clang bug: 31975598
-1:
- DELIVER_PENDING_EXCEPTION_FRAME_READY
-END_FUNCTION art_quick_resolve_string
-
-ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 5b1b2871c2..699cf91c70 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -53,13 +53,18 @@ static inline void BssWriteBarrier(ArtMethod* outer_method) REQUIRES_SHARED(Lock
}
}
+constexpr Runtime::CalleeSaveType kInitEntrypointSaveType =
+ // TODO: Change allocation entrypoints on MIPS and MIPS64 to kSaveEverything.
+ (kRuntimeISA == kMips || kRuntimeISA == kMips64) ? Runtime::kSaveRefsOnly
+ : Runtime::kSaveEverything;
+
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, kInitEntrypointSaveType);
ArtMethod* caller = caller_and_outer.caller;
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, true, false);
@@ -73,7 +78,7 @@ extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* s
REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, kInitEntrypointSaveType);
ArtMethod* caller = caller_and_outer.caller;
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, false);
@@ -88,7 +93,7 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, Runtime::kSaveRefsOnly);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, kInitEntrypointSaveType);
ArtMethod* caller = caller_and_outer.caller;
mirror::Class* result =
ResolveVerifyAndClinit(dex::TypeIndex(type_idx), caller, self, false, true);
@@ -101,11 +106,7 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(
- self,
- // TODO: Change art_quick_resolve_string on MIPS and MIPS64 to kSaveEverything.
- (kRuntimeISA == kMips || kRuntimeISA == kMips64) ? Runtime::kSaveRefsOnly
- : Runtime::kSaveEverything);
+ auto caller_and_outer = GetCalleeSaveMethodCallerAndOuterMethod(self, kInitEntrypointSaveType);
ArtMethod* caller = caller_and_outer.caller;
mirror::String* result = ResolveStringFromCode(caller, dex::StringIndex(string_idx));
if (LIKELY(result != nullptr)) {
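As a reading aid, the compile-time selection performed by kInitEntrypointSaveType can be reproduced in isolation; the enum names and the sample kRuntimeISA below are assumptions chosen for the example, not the ART definitions.

    // Stand-alone illustration: the callee-save type used by the resolution/initialization
    // entrypoints is chosen once from the target ISA, so all four entrypoints stay in sync.
    enum class Isa { kArm, kArm64, kMips, kMips64, kX86, kX86_64 };
    enum class CalleeSaveType { kSaveRefsOnly, kSaveEverything };

    constexpr Isa kRuntimeISA = Isa::kArm64;  // Example target.
    constexpr CalleeSaveType kInitEntrypointSaveType =
        (kRuntimeISA == Isa::kMips || kRuntimeISA == Isa::kMips64) ? CalleeSaveType::kSaveRefsOnly
                                                                   : CalleeSaveType::kSaveEverything;
    static_assert(kInitEntrypointSaveType == CalleeSaveType::kSaveEverything,
                  "Non-MIPS targets use the save-everything frame for these entrypoints.");

    int main() {}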
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 051f3f7b00..0a45fcedae 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3887,13 +3887,15 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
// blocking watermark. Ensure that only one of those threads runs the
// blocking GC. The rest of the threads should instead wait for the
// blocking GC to complete.
- if (native_blocking_gc_in_progress_) {
- do {
- native_blocking_gc_cond_->Wait(self);
- } while (native_blocking_gcs_finished_ == initial_gcs_finished);
- } else {
- native_blocking_gc_in_progress_ = true;
- run_gc = true;
+ if (native_blocking_gcs_finished_ == initial_gcs_finished) {
+ if (native_blocking_gc_in_progress_) {
+ do {
+ native_blocking_gc_cond_->Wait(self);
+ } while (native_blocking_gcs_finished_ == initial_gcs_finished);
+ } else {
+ native_blocking_gc_in_progress_ = true;
+ run_gc = true;
+ }
}
}
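The restructured check above can be read as a general "one runner, many waiters" pattern keyed on a completion counter. The sketch below is a generic, self-contained rendering of that pattern (names are stand-ins, not the Heap members): a thread samples the counter when it crosses the watermark, and by the time it reaches this code the counter may already have advanced, in which case neither running nor waiting is needed.

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    class BlockingWorkCoordinator {
     public:
      // 'initial_finished' is the counter value sampled by the caller when it crossed the
      // watermark; the lock may have been dropped since, so the counter can have moved on.
      void WaitForOrRunBlockingWork(uint32_t initial_finished) {
        std::unique_lock<std::mutex> lock(mutex_);
        bool run_work = false;
        if (finished_ == initial_finished) {   // No blocking run has completed since the sample.
          if (in_progress_) {
            do {
              cond_.wait(lock);                // Wait for the in-flight run to complete.
            } while (finished_ == initial_finished);
          } else {
            in_progress_ = true;               // This thread is the single runner.
            run_work = true;
          }
        }
        if (run_work) {
          lock.unlock();
          DoBlockingWork();                    // Placeholder for the actual blocking GC.
          lock.lock();
          in_progress_ = false;
          ++finished_;
          cond_.notify_all();
        }
      }

     private:
      void DoBlockingWork() {}
      std::mutex mutex_;
      std::condition_variable cond_;
      bool in_progress_ = false;
      uint32_t finished_ = 0;
    };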
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 3d3ad593b3..133502e6a3 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -224,12 +224,6 @@ class EndianOutput {
HandleU1List(values, count);
length_ += count;
}
- void AddU1AsU2List(const uint8_t* values, size_t count) {
- HandleU1AsU2List(values, count);
- // Array of char from compressed String (8-bit) is added as 16-bit blocks
- int ceil_count_to_even = count + ((count & 1) ? 1 : 0);
- length_ += ceil_count_to_even * sizeof(uint8_t);
- }
void AddU2List(const uint16_t* values, size_t count) {
HandleU2List(values, count);
length_ += count * sizeof(uint16_t);
@@ -1277,7 +1271,7 @@ void Hprof::DumpHeapClass(mirror::Class* klass) {
HprofBasicType t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), nullptr);
__ AddU1(t);
}
- // Add native value character array for strings.
+ // Add native value character array for strings / byte array for compressed strings.
if (klass->IsStringClass()) {
__ AddStringId(LookupStringId("value"));
__ AddU1(hprof_basic_object);
@@ -1359,8 +1353,16 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
case hprof_basic_short:
__ AddU2(f->GetShort(obj));
break;
- case hprof_basic_float:
case hprof_basic_int:
+ if (mirror::kUseStringCompression &&
+ klass->IsStringClass() &&
+ f->GetOffset().SizeValue() == mirror::String::CountOffset().SizeValue()) {
+ // Store the string length instead of the raw count field with compression flag.
+ __ AddU4(obj->AsString()->GetLength());
+ break;
+ }
+ FALLTHROUGH_INTENDED;
+ case hprof_basic_float:
case hprof_basic_object:
__ AddU4(f->Get32(obj));
break;
@@ -1397,16 +1399,15 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass) {
CHECK_EQ(obj->IsString(), string_value != nullptr);
if (string_value != nullptr) {
mirror::String* s = obj->AsString();
- // Compressed string's (8-bit) length is ceil(length/2) in 16-bit blocks
- int length_in_16_bit = (s->IsCompressed()) ? ((s->GetLength() + 1) / 2) : s->GetLength();
__ AddU1(HPROF_PRIMITIVE_ARRAY_DUMP);
__ AddObjectId(string_value);
__ AddStackTraceSerialNumber(LookupStackTraceSerialNumber(obj));
- __ AddU4(length_in_16_bit);
- __ AddU1(hprof_basic_char);
+ __ AddU4(s->GetLength());
if (s->IsCompressed()) {
- __ AddU1AsU2List(s->GetValueCompressed(), s->GetLength());
+ __ AddU1(hprof_basic_byte);
+ __ AddU1List(s->GetValueCompressed(), s->GetLength());
} else {
+ __ AddU1(hprof_basic_char);
__ AddU2List(s->GetValue(), s->GetLength());
}
}
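With this change, String.value records in the heap dump carry their natural element type, so a hypothetical hprof consumer would handle the two shapes as sketched below: one byte per character for compressed strings (all values fit in 8 bits, so zero-extension restores the original char) and one 16-bit unit per character otherwise; in both cases the element count equals the string length. The function and container types are assumptions for the example, not an ART or hprof API.

    #include <cstdint>
    #include <string>
    #include <vector>

    // Rebuild the character data of a dumped String.value primitive array.
    std::u16string DecodeStringValue(bool element_type_is_byte,
                                     const std::vector<uint8_t>& byte_elements,
                                     const std::vector<uint16_t>& char_elements) {
      std::u16string result;
      if (element_type_is_byte) {
        for (uint8_t b : byte_elements) {
          result.push_back(static_cast<char16_t>(b));  // Zero-extend each compressed character.
        }
      } else {
        result.assign(char_elements.begin(), char_elements.end());
      }
      return result;
    }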
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 371e2f1e65..545cc1ad42 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -21,6 +21,7 @@
#include <stdlib.h>
#include <cmath>
+#include <initializer_list>
#include <limits>
#include <locale>
#include <unordered_map>
@@ -883,43 +884,74 @@ void UnstartedRuntime::UnstartedSystemGetPropertyWithDefault(
GetSystemProperty(self, shadow_frame, result, arg_offset, true);
}
-void UnstartedRuntime::UnstartedThreadLocalGet(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
- std::string caller(ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod()));
- bool ok = false;
- if (caller == "void java.lang.FloatingDecimal.developLongDigits(int, long, long)" ||
- caller == "java.lang.String java.lang.FloatingDecimal.toJavaFormatString()") {
- // Allocate non-threadlocal buffer.
- result->SetL(mirror::CharArray::Alloc(self, 26));
- ok = true;
- } else if (caller ==
- "java.lang.FloatingDecimal java.lang.FloatingDecimal.getThreadLocalInstance()") {
- // Allocate new object.
- StackHandleScope<2> hs(self);
- Handle<mirror::Class> h_real_to_string_class(hs.NewHandle(
- shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
- Handle<mirror::Object> h_real_to_string_obj(hs.NewHandle(
- h_real_to_string_class->AllocObject(self)));
- if (h_real_to_string_obj.Get() != nullptr) {
- auto* cl = Runtime::Current()->GetClassLinker();
- ArtMethod* init_method = h_real_to_string_class->FindDirectMethod(
- "<init>", "()V", cl->GetImagePointerSize());
- if (init_method == nullptr) {
- h_real_to_string_class->DumpClass(LOG_STREAM(FATAL), mirror::Class::kDumpClassFullDetail);
- } else {
- JValue invoke_result;
- EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
- nullptr);
- if (!self->IsExceptionPending()) {
- result->SetL(h_real_to_string_obj.Get());
- ok = true;
- }
+static std::string GetImmediateCaller(ShadowFrame* shadow_frame)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (shadow_frame->GetLink() == nullptr) {
+ return "<no caller>";
+ }
+ return ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod());
+}
+
+static bool CheckCallers(ShadowFrame* shadow_frame,
+ std::initializer_list<std::string> allowed_call_stack)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (const std::string& allowed_caller : allowed_call_stack) {
+ if (shadow_frame->GetLink() == nullptr) {
+ return false;
+ }
+
+ std::string found_caller = ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod());
+ if (allowed_caller != found_caller) {
+ return false;
+ }
+
+ shadow_frame = shadow_frame->GetLink();
+ }
+ return true;
+}
+
+static ObjPtr<mirror::Object> CreateInstanceOf(Thread* self, const char* class_descriptor)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Find the requested class.
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ObjPtr<mirror::Class> klass =
+ class_linker->FindClass(self, class_descriptor, ScopedNullHandle<mirror::ClassLoader>());
+ if (klass == nullptr) {
+ AbortTransactionOrFail(self, "Could not load class %s", class_descriptor);
+ return nullptr;
+ }
+
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(klass));
+ Handle<mirror::Object> h_obj(hs.NewHandle(h_class->AllocObject(self)));
+ if (h_obj.Get() != nullptr) {
+ ArtMethod* init_method = h_class->FindDirectMethod(
+ "<init>", "()V", class_linker->GetImagePointerSize());
+ if (init_method == nullptr) {
+ AbortTransactionOrFail(self, "Could not find <init> for %s", class_descriptor);
+ return nullptr;
+ } else {
+ JValue invoke_result;
+ EnterInterpreterFromInvoke(self, init_method, h_obj.Get(), nullptr, nullptr);
+ if (!self->IsExceptionPending()) {
+ return h_obj.Get();
}
+ AbortTransactionOrFail(self, "Could not run <init> for %s", class_descriptor);
}
}
+ AbortTransactionOrFail(self, "Could not allocate instance of %s", class_descriptor);
+ return nullptr;
+}
- if (!ok) {
- AbortTransactionOrFail(self, "Could not create RealToString object");
+void UnstartedRuntime::UnstartedThreadLocalGet(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+ if (CheckCallers(shadow_frame, { "sun.misc.FloatingDecimal$BinaryToASCIIBuffer "
+ "sun.misc.FloatingDecimal.getBinaryToASCIIBuffer()" })) {
+ result->SetL(CreateInstanceOf(self, "Lsun/misc/FloatingDecimal$BinaryToASCIIBuffer;"));
+ } else {
+ AbortTransactionOrFail(self,
+ "ThreadLocal.get() does not support %s",
+ GetImmediateCaller(shadow_frame).c_str());
}
}
@@ -1252,12 +1284,12 @@ void UnstartedRuntime::UnstartedReferenceGetReferent(
// initialization of other classes, so will *use* the value.
void UnstartedRuntime::UnstartedRuntimeAvailableProcessors(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
- std::string caller(ArtMethod::PrettyMethod(shadow_frame->GetLink()->GetMethod()));
- if (caller == "void java.util.concurrent.SynchronousQueue.<clinit>()") {
+ if (CheckCallers(shadow_frame, { "void java.util.concurrent.SynchronousQueue.<clinit>()" })) {
// SynchronousQueue really only separates between single- and multiprocessor case. Return
// 8 as a conservative upper approximation.
result->SetI(8);
- } else if (caller == "void java.util.concurrent.ConcurrentHashMap.<clinit>()") {
+ } else if (CheckCallers(shadow_frame,
+ { "void java.util.concurrent.ConcurrentHashMap.<clinit>()" })) {
// ConcurrentHashMap uses it for striding. 8 still seems an OK general value, as it's likely
// a good upper bound.
// TODO: Consider resetting in the zygote?
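The matching order of CheckCallers() is easy to misread, so here is a stand-alone model of the walk it performs: entries in the allowed call stack are compared against the caller chain starting at the immediate caller (GetLink()) and moving outward, and a missing frame or any mismatch fails the whole check. Frame and method_name are stand-ins for ShadowFrame and PrettyMethod(), and the method strings in main() are placeholders.

    #include <initializer_list>
    #include <string>

    struct Frame {
      std::string method_name;
      Frame* link;
      Frame* GetLink() const { return link; }
    };

    static bool CheckCallers(const Frame* frame,
                             std::initializer_list<std::string> allowed_call_stack) {
      for (const std::string& allowed_caller : allowed_call_stack) {
        if (frame->GetLink() == nullptr || frame->GetLink()->method_name != allowed_caller) {
          return false;  // Ran out of frames or the caller does not match.
        }
        frame = frame->GetLink();  // Continue the comparison one frame further out.
      }
      return true;
    }

    int main() {
      Frame outer_caller{"B.outerCaller()", nullptr};
      Frame current{"A.currentMethod()", &outer_caller};
      // Succeeds: the immediate caller of 'current' is B.outerCaller().
      return CheckCallers(&current, {"B.outerCaller()"}) ? 0 : 1;
    }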
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index ae55f4c2ef..31be587e9c 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -944,5 +944,100 @@ TEST_F(UnstartedRuntimeTest, GetDeclaringClass) {
ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
+TEST_F(UnstartedRuntimeTest, ThreadLocalGet) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ JValue result;
+ ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ StackHandleScope<1> hs(self);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+ // Positive test. See that we get something for float conversion.
+ {
+ Handle<mirror::Class> floating_decimal = hs.NewHandle(
+ class_linker->FindClass(self,
+ "Lsun/misc/FloatingDecimal;",
+ ScopedNullHandle<mirror::ClassLoader>()));
+ ASSERT_TRUE(floating_decimal.Get() != nullptr);
+ ASSERT_TRUE(class_linker->EnsureInitialized(self, floating_decimal, true, true));
+
+ ArtMethod* caller_method = floating_decimal->FindDeclaredDirectMethod(
+ "getBinaryToASCIIBuffer",
+ "()Lsun/misc/FloatingDecimal$BinaryToASCIIBuffer;",
+ class_linker->GetImagePointerSize());
+ // floating_decimal->DumpClass(LOG_STREAM(ERROR), mirror::Class::kDumpClassFullDetail);
+ ASSERT_TRUE(caller_method != nullptr);
+ ShadowFrame* caller_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, caller_method, 0);
+ shadow_frame->SetLink(caller_frame);
+
+ UnstartedThreadLocalGet(self, shadow_frame, &result, 0);
+ EXPECT_TRUE(result.GetL() != nullptr);
+ EXPECT_FALSE(self->IsExceptionPending());
+
+ ShadowFrame::DeleteDeoptimizedFrame(caller_frame);
+ }
+
+ // Negative test.
+ PrepareForAborts();
+
+ {
+ // Just use a method in Class.
+ ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
+ ArtMethod* caller_method =
+ &*class_class->GetDeclaredMethods(class_linker->GetImagePointerSize()).begin();
+ ShadowFrame* caller_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, caller_method, 0);
+ shadow_frame->SetLink(caller_frame);
+
+ Transaction transaction;
+ Runtime::Current()->EnterTransactionMode(&transaction);
+ UnstartedThreadLocalGet(self, shadow_frame, &result, 0);
+ Runtime::Current()->ExitTransactionMode();
+ ASSERT_TRUE(self->IsExceptionPending());
+ ASSERT_TRUE(transaction.IsAborted());
+ self->ClearException();
+
+ ShadowFrame::DeleteDeoptimizedFrame(caller_frame);
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
+}
+
+TEST_F(UnstartedRuntimeTest, FloatConversion) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+
+ StackHandleScope<1> hs(self);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::Class> double_class = hs.NewHandle(
+ class_linker->FindClass(self,
+ "Ljava/lang/Double;",
+ ScopedNullHandle<mirror::ClassLoader>()));
+ ASSERT_TRUE(double_class.Get() != nullptr);
+ ASSERT_TRUE(class_linker->EnsureInitialized(self, double_class, true, true));
+
+ ArtMethod* method = double_class->FindDeclaredDirectMethod("toString",
+ "(D)Ljava/lang/String;",
+ class_linker->GetImagePointerSize());
+ ASSERT_TRUE(method != nullptr);
+
+ // Create instruction data for invoke-direct {v0, v1} of the method with a fake index.
+ uint16_t inst_data[3] = { 0x2070, 0x0000, 0x0010 };
+ const Instruction* inst = Instruction::At(inst_data);
+
+ JValue result;
+ ShadowFrame* shadow_frame = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, method, 0);
+ shadow_frame->SetVRegDouble(0, 1.23);
+ interpreter::DoCall<false, false>(method, self, *shadow_frame, inst, inst_data[0], &result);
+ ObjPtr<mirror::String> string_result = reinterpret_cast<mirror::String*>(result.GetL());
+ ASSERT_TRUE(string_result != nullptr);
+
+ std::string mod_utf = string_result->ToModifiedUtf8();
+ EXPECT_EQ("1.23", mod_utf);
+
+ ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
+}
+
} // namespace interpreter
} // namespace art
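The three 16-bit units hand-assembled in the FloatConversion test follow the published Dalvik instruction format 35c, so the constants decode as shown below; the test passes the ArtMethod directly to DoCall, which is why a fake method index of 0 suffices. The snippet is a reading aid only, not part of the test.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint16_t units[3] = {0x2070, 0x0000, 0x0010};
      const uint8_t opcode = units[0] & 0xff;       // 0x70 = invoke-direct.
      const uint8_t arg_count = units[0] >> 12;     // A = 2 argument registers.
      const uint16_t method_idx = units[1];         // BBBB = 0, the fake method index.
      const uint8_t reg_c = units[2] & 0xf;         // First argument register: v0.
      const uint8_t reg_d = (units[2] >> 4) & 0xf;  // Second argument register: v1.
      assert(opcode == 0x70 && arg_count == 2 && method_idx == 0 && reg_c == 0 && reg_d == 1);
      return 0;
    }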
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 1405c40096..9ba2d1a355 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -597,6 +597,24 @@ uint32_t ProfileCompilationInfo::GetNumberOfResolvedClasses() const {
return total;
}
+// Produce a non-owning vector from a vector.
+template<typename T>
+const std::vector<T*>* MakeNonOwningVector(const std::vector<std::unique_ptr<T>>* owning_vector) {
+ auto non_owning_vector = new std::vector<T*>();
+ for (auto& element : *owning_vector) {
+ non_owning_vector->push_back(element.get());
+ }
+ return non_owning_vector;
+}
+
+std::string ProfileCompilationInfo::DumpInfo(
+ const std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ bool print_full_dex_location) const {
+ std::unique_ptr<const std::vector<const DexFile*>> non_owning_dex_files(
+ MakeNonOwningVector(dex_files));
+ return DumpInfo(non_owning_dex_files.get(), print_full_dex_location);
+}
+
std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>* dex_files,
bool print_full_dex_location) const {
std::ostringstream os;
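The MakeNonOwningVector() helper and the new DumpInfo() overload encode a simple ownership convention: the returned std::vector<T*> is heap-allocated and owned by the caller (DumpInfo wraps it in a std::unique_ptr immediately), while the pointed-to elements remain owned by the original vector of std::unique_ptr. A minimal stand-alone usage sketch, with the helper reproduced from the change above:

    #include <cassert>
    #include <memory>
    #include <vector>

    template <typename T>
    const std::vector<T*>* MakeNonOwningVector(const std::vector<std::unique_ptr<T>>* owning_vector) {
      auto non_owning_vector = new std::vector<T*>();
      for (auto& element : *owning_vector) {
        non_owning_vector->push_back(element.get());
      }
      return non_owning_vector;
    }

    int main() {
      std::vector<std::unique_ptr<int>> owning;
      owning.push_back(std::make_unique<int>(7));
      // Wrap the raw result immediately, exactly as the DumpInfo() overload does.
      std::unique_ptr<const std::vector<int*>> non_owning(MakeNonOwningVector(&owning));
      assert(*non_owning->front() == 7);
      return 0;
    }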
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index f8061bcfd8..b1587c0070 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_H_
#define ART_RUNTIME_JIT_PROFILE_COMPILATION_INFO_H_
+#include <memory>
#include <set>
#include <vector>
@@ -72,6 +73,8 @@ class ProfileCompilationInfo {
// If dex_files is not null then the method indices will be resolved to their
// names.
// This is intended for testing and debugging.
+ std::string DumpInfo(const std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ bool print_full_dex_location = true) const;
std::string DumpInfo(const std::vector<const DexFile*>* dex_files,
bool print_full_dex_location = true) const;
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 95b6c3e76b..409c6c2896 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -241,8 +241,9 @@ class MANAGED String FINAL : public Object {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
- // First bit (uppermost/leftmost) is taken out for Compressed/Uncompressed flag
- // [0] Uncompressed: string uses 16-bit memory | [1] Compressed: 8-bit memory
+
+ // If string compression is enabled, count_ holds the StringCompressionFlag in the
+ // least significant bit and the length in the remaining bits, length = count_ >> 1.
int32_t count_;
uint32_t hash_code_;
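The updated comment fully determines how the length and the compression flag are packed, which the following self-contained sketch spells out: the flag occupies bit 0 and the length the remaining bits, so length = count_ >> 1. Which flag value denotes a compressed string is an implementation detail of mirror::String and is deliberately not asserted here.

    #include <cstdint>

    constexpr int32_t PackCount(int32_t length, bool flag) { return (length << 1) | (flag ? 1 : 0); }
    constexpr int32_t LengthFromCount(int32_t count) { return count >> 1; }   // length = count_ >> 1
    constexpr bool FlagFromCount(int32_t count) { return (count & 1) != 0; }  // flag in bit 0

    int main() {
      constexpr int32_t count = PackCount(42, true);
      static_assert(LengthFromCount(count) == 42, "length is stored in the upper bits");
      static_assert(FlagFromCount(count), "the flag is stored in the least significant bit");
      return 0;
    }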
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
index 00d4144415..f8f8fa6b25 100644
--- a/runtime/openjdkjvmti/ti_thread.cc
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -78,7 +78,9 @@ struct ThreadCallback : public art::ThreadLifecycleCallback, public art::Runtime
if (art::kIsDebugBuild) {
std::string name;
self->GetThreadName(name);
- if (name != "Signal Catcher" && !android::base::StartsWith(name, "Jit thread pool")) {
+ if (name != "JDWP" &&
+ name != "Signal Catcher" &&
+ !android::base::StartsWith(name, "Jit thread pool")) {
LOG(FATAL) << "Unexpected thread before start: " << name;
}
}