Diffstat (limited to 'runtime')
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 187
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 92
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 130
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.S | 4
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 125
-rw-r--r--  runtime/asm_support.h | 2
-rw-r--r--  runtime/dex_file_verifier.cc | 84
-rw-r--r--  runtime/entrypoints/quick/quick_default_init_entrypoints.h | 1
-rw-r--r--  runtime/entrypoints/quick/quick_dexcache_entrypoints.cc | 6
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints.h | 5
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h | 1
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc | 52
-rw-r--r--  runtime/entrypoints_order_test.cc | 6
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc | 42
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 9
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc | 44
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 11
-rw-r--r--  runtime/gc/heap.cc | 2
-rw-r--r--  runtime/gc/space/region_space-inl.h | 25
-rw-r--r--  runtime/gc/space/region_space.cc | 23
-rw-r--r--  runtime/gc/space/region_space.h | 9
-rw-r--r--  runtime/indirect_reference_table.cc | 25
-rw-r--r--  runtime/indirect_reference_table.h | 18
-rw-r--r--  runtime/indirect_reference_table_test.cc | 4
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 38
-rw-r--r--  runtime/java_vm_ext.cc | 21
-rw-r--r--  runtime/java_vm_ext.h | 13
-rw-r--r--  runtime/jni_env_ext.cc | 8
-rw-r--r--  runtime/jni_env_ext.h | 8
-rw-r--r--  runtime/jni_internal_test.cc | 4
-rw-r--r--  runtime/leb128.h | 94
-rw-r--r--  runtime/mirror/dex_cache-inl.h | 45
-rw-r--r--  runtime/oat.h | 2
-rw-r--r--  runtime/runtime.cc | 11
-rw-r--r--  runtime/runtime.h | 4
-rw-r--r--  runtime/thread.cc | 9
-rw-r--r--  runtime/thread.h | 7
37 files changed, 881 insertions, 290 deletions
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index cdb4c251a8..bf70c554b1 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -239,6 +239,30 @@
.cfi_adjust_cfa_offset -56
.endm
+.macro RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
+ add sp, #8 @ rewind sp
+ .cfi_adjust_cfa_offset -8
+ vpop {d0-d15}
+ .cfi_adjust_cfa_offset -128
+ add sp, #4 @ skip r0
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore r0 @ debugger can no longer restore caller's r0
+ pop {r1-r12, lr} @ 13 words of callee saves
+ .cfi_restore r1
+ .cfi_restore r2
+ .cfi_restore r3
+ .cfi_restore r4
+ .cfi_restore r5
+ .cfi_restore r6
+ .cfi_restore r7
+ .cfi_restore r8
+ .cfi_restore r9
+ .cfi_restore r10
+ .cfi_restore r11
+ .cfi_restore r12
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -52
+.endm
+
.macro RETURN_IF_RESULT_IS_ZERO
cbnz r0, 1f @ result non-zero branch over
bx lr @ return
@@ -252,17 +276,23 @@
.endm
/*
- * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
- * exception is Thread::Current()->exception_
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_ when the runtime method frame is ready.
*/
-.macro DELIVER_PENDING_EXCEPTION
- .fnend
- .fnstart
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0 @ save callee saves for throw
+.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
mov r0, r9 @ pass Thread::Current
bl artDeliverPendingExceptionFromCode @ artDeliverPendingExceptionFromCode(Thread*)
.endm
+ /*
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_.
+ */
+.macro DELIVER_PENDING_EXCEPTION
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0 @ save callee saves for throw
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+.endm
+
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
.extern \cxx_name
ENTRY \c_name
@@ -1078,41 +1108,71 @@ END art_quick_set64_instance
*/
ENTRY art_quick_resolve_string
- ldr r1, [sp] @ load referrer
- ldr r1, [r1, #ART_METHOD_DECLARING_CLASS_OFFSET] @ load declaring class
- ldr r1, [r1, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET] @ load string dex cache
- ubfx r2, r0, #0, #STRING_DEX_CACHE_HASH_BITS
- add r1, r1, r2, LSL #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT
- ldrd r2, r3, [r1] @ load index into r3 and pointer into r2
- cmp r0, r3
+ push {r10-r12, lr}
+ .cfi_adjust_cfa_offset 16
+ .cfi_rel_offset r10, 0
+ .cfi_rel_offset r11, 4
+ .cfi_rel_offset ip, 8
+ .cfi_rel_offset lr, 12
+ ldr r10, [sp, #16] @ load referrer
+ ldr r10, [r10, #ART_METHOD_DECLARING_CLASS_OFFSET] @ load declaring class
+ ldr r10, [r10, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET] @ load string dex cache
+ ubfx r11, r0, #0, #STRING_DEX_CACHE_HASH_BITS
+ add r10, r10, r11, LSL #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT
+ ldrd r10, r11, [r10] @ load index into r11 and pointer into r10
+ cmp r0, r11
bne .Lart_quick_resolve_string_slow_path
#ifdef USE_READ_BARRIER
- ldr r3, [rSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz r3, .Lart_quick_resolve_string_marking
+ ldr r0, [rSELF, #THREAD_IS_GC_MARKING_OFFSET]
+ cbnz r0, .Lart_quick_resolve_string_marking
+.Lart_quick_resolve_string_no_rb:
#endif
- mov r0, r2
- bx lr
-// Slow path case, the index did not match
-.Lart_quick_resolve_string_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME r2 @ save callee saves in case of GC
- mov r1, r9 @ pass Thread::Current
- mov r3, sp
- bl artResolveStringFromCode @ (uint32_t type_idx, Method* method, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ mov r0, r10
+ pop {r10-r12, pc}
+
+#ifdef USE_READ_BARRIER
// GC is marking case, need to check the mark bit.
.Lart_quick_resolve_string_marking:
- ldr r3, [r2, MIRROR_OBJECT_LOCK_WORD_OFFSET]
- tst r3, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
- mov r0, r2
- bne .Lart_quick_resolve_string_no_rb
- push {r1, r2, r3, lr} @ Save x1, LR
- .cfi_adjust_cfa_offset 16
- bl artReadBarrierMark @ Get the marked string back.
- pop {r1, r2, r3, lr} @ Restore registers.
+ ldr r0, [r10, MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ lsrs r0, #(LOCK_WORD_MARK_BIT_SHIFT + 1)
+ bcs .Lart_quick_resolve_string_no_rb
+ mov r0, r10
+ .cfi_remember_state
+ pop {r10-r12, lr}
.cfi_adjust_cfa_offset -16
-.Lart_quick_resolve_string_no_rb:
+ .cfi_restore r10
+ .cfi_restore r11
+ .cfi_restore r12
+ .cfi_restore lr
+ // Note: art_quick_read_barrier_mark_reg00 clobbers IP but the .Lslow_rb_* does not.
+ b .Lslow_rb_art_quick_read_barrier_mark_reg00 @ Get the marked string back.
+ .cfi_restore_state
+#endif
+
+// Slow path case, the index did not match
+.Lart_quick_resolve_string_slow_path:
+ push {r0-r9} @ 10 words of callee saves and args; {r10-r12, lr} already saved.
+ .cfi_adjust_cfa_offset 40
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset r3, 12
+ .cfi_rel_offset r4, 16
+ .cfi_rel_offset r5, 20
+ .cfi_rel_offset r6, 24
+ .cfi_rel_offset r7, 28
+ .cfi_rel_offset r8, 32
+ .cfi_rel_offset r9, 36
+ SETUP_SAVE_EVERYTHING_FRAME_CORE_REGS_SAVED r1 @ save callee saves in case of GC
+ mov r1, r9 @ pass Thread::Current
+ bl artResolveStringFromCode @ (uint32_t type_idx, Thread*)
+ cbz r0, 1f @ If result is null, deliver the OOME.
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
bx lr
+ .cfi_restore_state
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
END art_quick_resolve_string
// Generate the allocation entrypoints for each allocator.
@@ -1920,6 +1980,8 @@ END art_quick_l2f
* getting its argument and returning its result through register
* `reg`, saving and restoring all caller-save registers.
*
+ * IP is clobbered; `reg` must not be IP.
+ *
* If `reg` is different from `r0`, the generated function follows a
* non-standard runtime calling convention:
* - register `reg` is used to pass the (sole) argument of this
@@ -1936,36 +1998,71 @@ ENTRY \name
SMART_CBZ \reg, .Lret_rb_\name
// Check lock word for mark bit, if marked return. Use IP for scratch since it is blocked.
ldr ip, [\reg, MIRROR_OBJECT_LOCK_WORD_OFFSET]
- ands ip, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
+ tst ip, #LOCK_WORD_MARK_BIT_MASK_SHIFTED
beq .Lslow_rb_\name
// Already marked, return right away.
+.Lret_rb_\name:
bx lr
.Lslow_rb_\name:
- push {r0-r5, r9, lr} @ save return address and core caller-save registers
- @ also save callee save r5 for 16 byte alignment
+ // Save IP: the kSaveEverything entrypoint art_quick_resolve_string makes a tail call here.
+ push {r0-r4, r9, ip, lr} @ save return address, core caller-save registers and ip
.cfi_adjust_cfa_offset 32
.cfi_rel_offset r0, 0
.cfi_rel_offset r1, 4
.cfi_rel_offset r2, 8
.cfi_rel_offset r3, 12
.cfi_rel_offset r4, 16
- .cfi_rel_offset r5, 20
- .cfi_rel_offset r9, 24
+ .cfi_rel_offset r9, 20
+ .cfi_rel_offset ip, 24
.cfi_rel_offset lr, 28
- vpush {s0-s15} @ save floating-point caller-save registers
- .cfi_adjust_cfa_offset 64
.ifnc \reg, r0
mov r0, \reg @ pass arg1 - obj from `reg`
.endif
+
+ vpush {s0-s15} @ save floating-point caller-save registers
+ .cfi_adjust_cfa_offset 64
bl artReadBarrierMark @ r0 <- artReadBarrierMark(obj)
- mov ip, r0 @ Save result in IP
vpop {s0-s15} @ restore floating-point registers
.cfi_adjust_cfa_offset -64
- pop {r0-r5, r9, lr} @ restore caller-save registers
- mov \reg, ip @ copy result to reg
-.Lret_rb_\name:
+
+ .ifc \reg, r0 @ Save result to the stack slot or destination register.
+ str r0, [sp, #0]
+ .else
+ .ifc \reg, r1
+ str r0, [sp, #4]
+ .else
+ .ifc \reg, r2
+ str r0, [sp, #8]
+ .else
+ .ifc \reg, r3
+ str r0, [sp, #12]
+ .else
+ .ifc \reg, r4
+ str r0, [sp, #16]
+ .else
+ .ifc \reg, r9
+ str r0, [sp, #20]
+ .else
+ mov \reg, r0
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+ .endif
+
+ pop {r0-r4, r9, ip, lr} @ restore caller-save registers
+ .cfi_adjust_cfa_offset -32
+ .cfi_restore r0
+ .cfi_restore r1
+ .cfi_restore r2
+ .cfi_restore r3
+ .cfi_restore r4
+ .cfi_restore r9
+ .cfi_restore ip
+ .cfi_restore lr
bx lr
END \name
.endm
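
For readers following the new fast path in art_quick_resolve_string above: each architecture implements roughly the lookup sketched below in C++. The struct and function names here are illustrative placeholders (the real types live in mirror::DexCache and the quick entrypoints), and the read-barrier handling on a cache hit is omitted.

#include <cstdint>

// Hypothetical stand-in for the packed <string pointer, string index> pair that
// the dex cache stores and the assembly loads as one 64-bit unit (ldrd / ldr / movlps).
struct StringDexCacheEntrySketch {
  uint32_t string_ref;  // compressed pointer to the resolved mirror::String, or 0
  uint32_t string_idx;  // dex file string index this slot currently caches
};

// Fast path: mask the string index into the fixed-size cache, load the pair, and
// use the cached pointer only on an exact index match. On a miss the assembly
// falls through to the slow path, which calls artResolveStringFromCode under a
// kSaveEverything frame.
inline uint32_t ResolveStringFastPathSketch(const StringDexCacheEntrySketch* cache,
                                            uint32_t cache_size,  // power of two
                                            uint32_t string_idx) {
  const StringDexCacheEntrySketch& entry = cache[string_idx & (cache_size - 1u)];
  return (entry.string_idx == string_idx) ? entry.string_ref : 0u;  // 0 == miss
}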
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 04a3cc6cae..483cee3100 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -337,7 +337,7 @@
SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR
.endm
-.macro RESTORE_SAVE_EVERYTHING_FRAME
+.macro RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
// Restore FP registers.
// For better performance, load d0 and d31 separately, so that all LDPs are 16-byte aligned.
ldr d0, [sp, #8]
@@ -359,7 +359,6 @@
ldr d31, [sp, #256]
// Restore core registers.
- RESTORE_REG x0, 264
RESTORE_TWO_REGS x1, x2, 272
RESTORE_TWO_REGS x3, x4, 288
RESTORE_TWO_REGS x5, x6, 304
@@ -379,6 +378,11 @@
DECREASE_FRAME 512
.endm
+.macro RESTORE_SAVE_EVERYTHING_FRAME
+ RESTORE_REG x0, 264
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
+.endm
+
.macro RETURN_IF_RESULT_IS_ZERO
cbnz x0, 1f // result non-zero branch over
ret // return
@@ -392,11 +396,10 @@
.endm
/*
- * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
- * exception is Thread::Current()->exception_
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_ when the runtime method frame is ready.
*/
-.macro DELIVER_PENDING_EXCEPTION
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
mov x0, xSELF
// Point of no return.
@@ -404,6 +407,15 @@
brk 0 // Unreached
.endm
+ /*
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_.
+ */
+.macro DELIVER_PENDING_EXCEPTION
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+.endm
+
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
ldr \reg, [xSELF, # THREAD_EXCEPTION_OFFSET] // Get exception field.
cbnz \reg, 1f
@@ -1638,40 +1650,54 @@ END art_quick_set64_static
*/
ENTRY art_quick_resolve_string
- ldr x1, [sp] // load referrer
- ldr w2, [x1, #ART_METHOD_DECLARING_CLASS_OFFSET] // load declaring class
- ldr x1, [x2, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET] // load string dex cache
- ubfx x2, x0, #0, #STRING_DEX_CACHE_HASH_BITS // get masked string index into x2
- ldr x2, [x1, x2, lsl #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT] // load dex cache pair into x2
- cmp x0, x2, lsr #32 // compare against upper 32 bits
+ SAVE_TWO_REGS_INCREASE_FRAME x29, xLR, 2 * __SIZEOF_POINTER__
+ ldr x29, [sp, #(2 * __SIZEOF_POINTER__)] // load referrer
+ ldr w29, [x29, #ART_METHOD_DECLARING_CLASS_OFFSET] // load declaring class
+ ldr x29, [x29, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET] // load string dex cache
+ ubfx lr, x0, #0, #STRING_DEX_CACHE_HASH_BITS // get masked string index into LR
+ ldr x29, [x29, lr, lsl #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT] // load dex cache pair into x29
+ cmp x0, x29, lsr #32 // compare against upper 32 bits
bne .Lart_quick_resolve_string_slow_path
- ubfx x0, x2, #0, #32 // extract lower 32 bits into x0
+ ubfx x0, x29, #0, #32 // extract lower 32 bits into x0
#ifdef USE_READ_BARRIER
// Most common case: GC is not marking.
- ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
- cbnz x3, .Lart_quick_resolve_string_marking
+ ldr w29, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
+ cbnz x29, .Lart_quick_resolve_string_marking
+.Lart_quick_resolve_string_no_rb:
#endif
+ .cfi_remember_state
+ RESTORE_TWO_REGS_DECREASE_FRAME x29, xLR, 2 * __SIZEOF_POINTER__
ret
+ .cfi_restore_state
+ .cfi_def_cfa_offset 16 // workaround for clang bug: 31975598
+
+#ifdef USE_READ_BARRIER
+// GC is marking case, need to check the mark bit.
+.Lart_quick_resolve_string_marking:
+ ldr x29, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ tbnz x29, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_resolve_string_no_rb
+ .cfi_remember_state
+ RESTORE_TWO_REGS_DECREASE_FRAME x29, xLR, 2 * __SIZEOF_POINTER__
+ // Note: art_quick_read_barrier_mark_reg00 clobbers IP0 but the .Lslow_rb_* does not.
+ b .Lslow_rb_art_quick_read_barrier_mark_reg00 // Get the marked string back.
+ .cfi_restore_state
+ .cfi_def_cfa_offset 16 // workaround for clang bug: 31975598
+#endif
// Slow path case, the index did not match.
.Lart_quick_resolve_string_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME // save callee saves in case of GC
+ INCREASE_FRAME (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)
+ SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR // save callee saves in case of GC
mov x1, xSELF // pass Thread::Current
bl artResolveStringFromCode // (int32_t string_idx, Thread* self)
- RESTORE_SAVE_REFS_ONLY_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-
-// GC is marking case, need to check the mark bit.
-.Lart_quick_resolve_string_marking:
- ldr x3, [x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- tbnz x3, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_resolve_string_no_rb
- // Save LR so that we can return, also x1 for alignment purposes.
- SAVE_TWO_REGS_INCREASE_FRAME x1, xLR, 16 // Save x1, LR.
- bl artReadBarrierMark // Get the marked string back.
- RESTORE_TWO_REGS_DECREASE_FRAME x1, xLR, 16 // Restore registers.
-.Lart_quick_resolve_string_no_rb:
- ret
-
+ cbz w0, 1f // If result is null, deliver the OOME.
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
+ ret // return
+ .cfi_restore_state
+ .cfi_def_cfa_offset FRAME_SIZE_SAVE_EVERYTHING // workaround for clang bug: 31975598
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
END art_quick_resolve_string
// Generate the allocation entrypoints for each allocator.
@@ -2513,9 +2539,10 @@ ENTRY \name
*/
// Use wIP0 as temp and check the mark bit of the reference. wIP0 is not used by the compiler.
ldr wIP0, [\xreg, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- tbz wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lslow_path_rb_\name
+ tbz wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lslow_rb_\name
+.Lret_rb_\name:
ret
-.Lslow_path_rb_\name:
+.Lslow_rb_\name:
// Save all potentially live caller-save core registers.
SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 368
SAVE_TWO_REGS x2, x3, 16
@@ -2580,7 +2607,6 @@ ENTRY \name
// Restore return address and remove padding.
RESTORE_REG xLR, 360
DECREASE_FRAME 368
-.Lret_rb_\name:
ret
END \name
.endm
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 7bb59efdbf..f4f9a68e30 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -224,12 +224,11 @@ END_MACRO
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveEverything)
- * when EDI is already saved.
+ * when EDI and ESI are already saved.
*/
-MACRO2(SETUP_SAVE_EVERYTHING_FRAME_EDI_SAVED, got_reg, temp_reg)
+MACRO2(SETUP_SAVE_EVERYTHING_FRAME_EDI_ESI_SAVED, got_reg, temp_reg)
// Save core registers from highest to lowest to agree with core spills bitmap.
- // EDI, or at least a placeholder for it, is already on the stack.
- PUSH esi
+ // EDI and ESI, or at least placeholders for them, are already on the stack.
PUSH ebp
PUSH ebx
PUSH edx
@@ -268,13 +267,25 @@ END_MACRO
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveEverything)
+ * when EDI is already saved.
+ */
+MACRO2(SETUP_SAVE_EVERYTHING_FRAME_EDI_SAVED, got_reg, temp_reg)
+ // Save core registers from highest to lowest to agree with core spills bitmap.
+ // EDI, or at least a placeholder for it, is already on the stack.
+ PUSH esi
+ SETUP_SAVE_EVERYTHING_FRAME_EDI_ESI_SAVED RAW_VAR(got_reg), RAW_VAR(temp_reg)
+END_MACRO
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything)
*/
MACRO2(SETUP_SAVE_EVERYTHING_FRAME, got_reg, temp_reg)
PUSH edi
SETUP_SAVE_EVERYTHING_FRAME_EDI_SAVED RAW_VAR(got_reg), RAW_VAR(temp_reg)
END_MACRO
-MACRO0(RESTORE_SAVE_EVERYTHING_FRAME)
+MACRO0(RESTORE_SAVE_EVERYTHING_FRAME_FRPS)
// Restore FPRs. Method and padding is still on the stack.
movsd 16(%esp), %xmm0
movsd 24(%esp), %xmm1
@@ -284,13 +295,10 @@ MACRO0(RESTORE_SAVE_EVERYTHING_FRAME)
movsd 56(%esp), %xmm5
movsd 64(%esp), %xmm6
movsd 72(%esp), %xmm7
+END_MACRO
- // Remove save everything callee save method, stack alignment padding and FPRs.
- addl MACRO_LITERAL(16 + 8 * 8), %esp
- CFI_ADJUST_CFA_OFFSET(-(16 + 8 * 8))
-
- // Restore core registers.
- POP eax
+MACRO0(RESTORE_SAVE_EVERYTHING_FRAME_GPRS_EXCEPT_EAX)
+ // Restore core registers (except eax).
POP ecx
POP edx
POP ebx
@@ -299,12 +307,32 @@ MACRO0(RESTORE_SAVE_EVERYTHING_FRAME)
POP edi
END_MACRO
+MACRO0(RESTORE_SAVE_EVERYTHING_FRAME)
+ RESTORE_SAVE_EVERYTHING_FRAME_FRPS
+
+ // Remove save everything callee save method, stack alignment padding and FPRs.
+ addl MACRO_LITERAL(16 + 8 * 8), %esp
+ CFI_ADJUST_CFA_OFFSET(-(16 + 8 * 8))
+
+ POP eax
+ RESTORE_SAVE_EVERYTHING_FRAME_GPRS_EXCEPT_EAX
+END_MACRO
+
+MACRO0(RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX)
+ RESTORE_SAVE_EVERYTHING_FRAME_FRPS
+
+ // Remove save everything callee save method, stack alignment padding and FPRs, skip EAX.
+ addl MACRO_LITERAL(16 + 8 * 8 + 4), %esp
+ CFI_ADJUST_CFA_OFFSET(-(16 + 8 * 8 + 4))
+
+ RESTORE_SAVE_EVERYTHING_FRAME_GPRS_EXCEPT_EAX
+END_MACRO
+
/*
- * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
- * exception is Thread::Current()->exception_.
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_ when the runtime method frame is ready.
*/
-MACRO0(DELIVER_PENDING_EXCEPTION)
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save callee saves for throw
+MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
// Outgoing argument set up
subl MACRO_LITERAL(12), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(12)
@@ -314,6 +342,15 @@ MACRO0(DELIVER_PENDING_EXCEPTION)
UNREACHABLE
END_MACRO
+ /*
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_.
+ */
+MACRO0(DELIVER_PENDING_EXCEPTION)
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save callee saves for throw
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+END_MACRO
+
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context
@@ -1114,26 +1151,42 @@ DEFINE_FUNCTION art_quick_alloc_object_region_tlab
END_FUNCTION art_quick_alloc_object_region_tlab
DEFINE_FUNCTION art_quick_resolve_string
- movl 4(%esp), %ecx // get referrer
- movl ART_METHOD_DECLARING_CLASS_OFFSET(%ecx), %ecx // get declaring class
- movl DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET(%ecx), %ecx // get string dex cache
- movl LITERAL(STRING_DEX_CACHE_SIZE_MINUS_ONE), %edx
- andl %eax, %edx
- movlps (%ecx, %edx, STRING_DEX_CACHE_ELEMENT_SIZE), %xmm0 // load string idx and ptr to xmm0
- movd %xmm0, %ecx // extract pointer
+ PUSH edi
+ PUSH esi
+ // Save xmm0 at an aligned address on the stack.
+ subl MACRO_LITERAL(12), %esp
+ CFI_ADJUST_CFA_OFFSET(12)
+ movsd %xmm0, 0(%esp)
+ movl 24(%esp), %edi // get referrer
+ movl ART_METHOD_DECLARING_CLASS_OFFSET(%edi), %edi // get declaring class
+ movl DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET(%edi), %edi // get string dex cache
+ movl LITERAL(STRING_DEX_CACHE_SIZE_MINUS_ONE), %esi
+ andl %eax, %esi
+ movlps (%edi, %esi, STRING_DEX_CACHE_ELEMENT_SIZE), %xmm0 // load string idx and ptr to xmm0
+ movd %xmm0, %edi // extract pointer
pshufd LITERAL(0x55), %xmm0, %xmm0 // shuffle index into lowest bits
- movd %xmm0, %edx // extract index
- cmp %edx, %eax
+ movd %xmm0, %esi // extract index
+ // Restore xmm0 and remove it together with padding from the stack.
+ movsd 0(%esp), %xmm0
+ addl MACRO_LITERAL(12), %esp
+ CFI_ADJUST_CFA_OFFSET(-12)
+ cmp %esi, %eax
jne .Lart_quick_resolve_string_slow_path
- movl %ecx, %eax
+ movl %edi, %eax
+ CFI_REMEMBER_STATE
+ POP esi
+ POP edi
#ifdef USE_READ_BARRIER
cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
- jne .Lart_quick_resolve_string_marking
+ jne .Lnot_null_art_quick_read_barrier_mark_reg00
#endif
ret
+ CFI_RESTORE_STATE
+ CFI_DEF_CFA(esp, 24) // workaround for clang bug: 31975598
+
.Lart_quick_resolve_string_slow_path:
// Outgoing argument set up
- SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
+ SETUP_SAVE_EVERYTHING_FRAME_EDI_ESI_SAVED ebx, ebx
subl LITERAL(8), %esp // push padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -1142,21 +1195,15 @@ DEFINE_FUNCTION art_quick_resolve_string
call SYMBOL(artResolveStringFromCode)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_SAVE_REFS_ONLY_FRAME
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-.Lart_quick_resolve_string_marking:
- SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax)
- jnz .Lart_quick_resolve_string_no_rb
- subl LITERAL(12), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(12)
- PUSH eax // Pass the string as the first param.
- call SYMBOL(artReadBarrierMark)
- addl LITERAL(16), %esp
- CFI_ADJUST_CFA_OFFSET(-16)
-.Lart_quick_resolve_string_no_rb:
- RESTORE_SAVE_REFS_ONLY_FRAME
+ testl %eax, %eax // If result is null, deliver the OOME.
+ jz 1f
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX
ret
+ CFI_RESTORE_STATE
+ CFI_DEF_CFA(esp, FRAME_SIZE_SAVE_EVERYTHING) // workaround for clang bug: 31975598
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
END_FUNCTION art_quick_resolve_string
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
@@ -2102,6 +2149,7 @@ MACRO2(READ_BARRIER_MARK_REG, name, reg)
// Null check so that we can load the lock word.
test REG_VAR(reg), REG_VAR(reg)
jz .Lret_rb_\name
+.Lnot_null_\name:
// Check the mark bit, if it is 1 return.
testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(REG_VAR(reg))
jz .Lslow_rb_\name
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index af4a6c4f99..28018c5f24 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -76,6 +76,8 @@
#define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
#define CFI_RESTORE(reg) .cfi_restore reg
#define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
+ #define CFI_RESTORE_STATE .cfi_restore_state
+ #define CFI_REMEMBER_STATE .cfi_remember_state
#else
// Mac OS' doesn't like cfi_* directives.
#define CFI_STARTPROC
@@ -85,6 +87,8 @@
#define CFI_DEF_CFA_REGISTER(reg)
#define CFI_RESTORE(reg)
#define CFI_REL_OFFSET(reg,size)
+ #define CFI_RESTORE_STATE
+ #define CFI_REMEMBER_STATE
#endif
// Symbols.
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index c3321e17b9..afa1c0ff03 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -263,16 +263,15 @@ END_MACRO
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveEverything)
- * when R15 is already saved.
+ * when R14 and R15 are already saved.
*/
-MACRO0(SETUP_SAVE_EVERYTHING_FRAME_R15_SAVED)
+MACRO0(SETUP_SAVE_EVERYTHING_FRAME_R14_R15_SAVED)
#if defined(__APPLE__)
int3
int3
#else
// Save core registers from highest to lowest to agree with core spills bitmap.
- // R15, or at least a placeholder for it, is already on the stack.
- PUSH r14
+ // R14 and R15, or at least placeholders for them, are already on the stack.
PUSH r13
PUSH r12
PUSH r11
@@ -326,13 +325,23 @@ END_MACRO
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveEverything)
+ * when R15 is already saved.
+ */
+MACRO0(SETUP_SAVE_EVERYTHING_FRAME_R15_SAVED)
+ PUSH r14
+ SETUP_SAVE_EVERYTHING_FRAME_R14_R15_SAVED
+END_MACRO
+
+ /*
+ * Macro that sets up the callee save frame to conform with
+ * Runtime::CreateCalleeSaveMethod(kSaveEverything)
*/
MACRO0(SETUP_SAVE_EVERYTHING_FRAME)
PUSH r15
SETUP_SAVE_EVERYTHING_FRAME_R15_SAVED
END_MACRO
-MACRO0(RESTORE_SAVE_EVERYTHING_FRAME)
+MACRO0(RESTORE_SAVE_EVERYTHING_FRAME_FRPS)
// Restore FPRs. Method and padding is still on the stack.
movq 16(%rsp), %xmm0
movq 24(%rsp), %xmm1
@@ -350,12 +359,10 @@ MACRO0(RESTORE_SAVE_EVERYTHING_FRAME)
movq 120(%rsp), %xmm13
movq 128(%rsp), %xmm14
movq 136(%rsp), %xmm15
+END_MACRO
- // Remove save everything callee save method, stack alignment padding and FPRs.
- addq MACRO_LITERAL(16 + 16 * 8), %rsp
- CFI_ADJUST_CFA_OFFSET(-(16 + 16 * 8))
- // Restore callee and GPR args, mixed together to agree with core spills bitmap.
- POP rax
+MACRO0(RESTORE_SAVE_EVERYTHING_FRAME_GPRS_EXCEPT_RAX)
+ // Restore callee and GPR args (except RAX), mixed together to agree with core spills bitmap.
POP rcx
POP rdx
POP rbx
@@ -372,19 +379,47 @@ MACRO0(RESTORE_SAVE_EVERYTHING_FRAME)
POP r15
END_MACRO
+MACRO0(RESTORE_SAVE_EVERYTHING_FRAME)
+ RESTORE_SAVE_EVERYTHING_FRAME_FRPS
+
+ // Remove save everything callee save method, stack alignment padding and FPRs.
+ addq MACRO_LITERAL(16 + 16 * 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-(16 + 16 * 8))
+
+ POP rax
+ RESTORE_SAVE_EVERYTHING_FRAME_GPRS_EXCEPT_RAX
+END_MACRO
+
+MACRO0(RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX)
+ RESTORE_SAVE_EVERYTHING_FRAME_FRPS
+
+ // Remove save everything callee save method, stack alignment padding and FPRs, skip RAX.
+ addq MACRO_LITERAL(16 + 16 * 8 + 8), %rsp
+ CFI_ADJUST_CFA_OFFSET(-(16 + 16 * 8 + 8))
+
+ RESTORE_SAVE_EVERYTHING_FRAME_GPRS_EXCEPT_RAX
+END_MACRO
/*
- * Macro that set calls through to artDeliverPendingExceptionFromCode, where the pending
- * exception is Thread::Current()->exception_.
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_ when the runtime method frame is ready.
*/
-MACRO0(DELIVER_PENDING_EXCEPTION)
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save callee saves for throw
+MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
// (Thread*) setup
movq %gs:THREAD_SELF_OFFSET, %rdi
call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*)
UNREACHABLE
END_MACRO
+ /*
+ * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+ * exception is Thread::Current()->exception_.
+ */
+MACRO0(DELIVER_PENDING_EXCEPTION)
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save callee saves for throw
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+END_MACRO
+
MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
DEFINE_FUNCTION VAR(c_name)
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
@@ -1295,45 +1330,48 @@ DEFINE_FUNCTION art_quick_alloc_object_initialized_region_tlab
END_FUNCTION art_quick_alloc_object_initialized_region_tlab
DEFINE_FUNCTION art_quick_resolve_string
- movq 8(%rsp), %rcx // get referrer
- movl ART_METHOD_DECLARING_CLASS_OFFSET(%rcx), %ecx // get declaring class
- movq DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET(%ecx), %rcx // get string dex cache
- movq LITERAL(STRING_DEX_CACHE_SIZE_MINUS_ONE), %rdx
- andq %rdi, %rdx
- movq (%rcx, %rdx, STRING_DEX_CACHE_ELEMENT_SIZE), %rdx
- movl %edx, %eax
- shrq LITERAL(32), %rdx
- cmp %rdx, %rdi
+ // Custom calling convention: RAX serves as both input and output.
+ PUSH r15
+ PUSH r14
+ movq 24(%rsp), %r15 // get referrer
+ movl ART_METHOD_DECLARING_CLASS_OFFSET(%r15), %r15d // get declaring class
+ movq DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET(%r15d), %r15 // get string dex cache
+ movl LITERAL(STRING_DEX_CACHE_SIZE_MINUS_ONE), %r14d
+ andl %eax, %r14d
+ movq (%r15, %r14, STRING_DEX_CACHE_ELEMENT_SIZE), %r14
+ movl %r14d, %r15d
+ shrq LITERAL(32), %r14
+ cmpl %r14d, %eax
jne .Lart_quick_resolve_string_slow_path
+ movl %r15d, %eax
+ CFI_REMEMBER_STATE
+ POP r14
+ POP r15
#ifdef USE_READ_BARRIER
cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
- jne .Lart_quick_resolve_string_marking
+ jne .Lnot_null_art_quick_read_barrier_mark_reg00
#endif
ret
-// Slow path, the index did not match
+ CFI_RESTORE_STATE
+ CFI_DEF_CFA(rsp, 24) // workaround for clang bug: 31975598
+
+// Slow path, the index did not match.
.Lart_quick_resolve_string_slow_path:
- SETUP_SAVE_REFS_ONLY_FRAME
- movq %rcx, %rax
+ SETUP_SAVE_EVERYTHING_FRAME_R14_R15_SAVED
// Outgoing argument set up
+ movl %eax, %edi // pass string index
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call SYMBOL(artResolveStringFromCode) // artResolveStringFromCode(arg0, referrer, Thread*)
- RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-// GC is marking case, need to check the mark bit.
-.Lart_quick_resolve_string_marking:
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%rax)
- jnz .Lart_quick_resolve_string_no_rb
- // Save LR so that we can return, also x1 for alignment purposes
- PUSH rdi
- PUSH rsi
- subq LITERAL(8), %rsp // 16 byte alignment
- movq %rax, %rdi
- call SYMBOL(artReadBarrierMark)
- addq LITERAL(8), %rsp
- POP rsi
- POP rdi
-.Lart_quick_resolve_string_no_rb:
+
+ testl %eax, %eax // If result is null, deliver the OOME.
+ jz 1f
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX // restore frame up to return address
ret
+ CFI_RESTORE_STATE
+ CFI_DEF_CFA(rsp, FRAME_SIZE_SAVE_EVERYTHING) // workaround for clang bug: 31975598
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
END_FUNCTION art_quick_resolve_string
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
@@ -2230,6 +2268,7 @@ MACRO2(READ_BARRIER_MARK_REG, name, reg)
// Null check so that we can load the lock word.
testq REG_VAR(reg), REG_VAR(reg)
jz .Lret_rb_\name
+.Lnot_null_\name:
// Check the mark bit, if it is 1 return.
testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(REG_VAR(reg))
jz .Lslow_rb_\name
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 567791e291..cd8815b25a 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -98,7 +98,7 @@ ADD_TEST_EQ(THREAD_LOCAL_POS_OFFSET,
ADD_TEST_EQ(THREAD_LOCAL_END_OFFSET,
art::Thread::ThreadLocalEndOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.thread_local_objects.
-#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + __SIZEOF_POINTER__)
+#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_END_OFFSET + 2 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
art::Thread::ThreadLocalObjectsOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 5132efc03c..5d70076d1e 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -454,6 +454,27 @@ uint32_t DexFileVerifier::ReadUnsignedLittleEndian(uint32_t size) {
return result;
}
+
+#define DECODE_UNSIGNED_CHECKED_FROM_WITH_ERROR_VALUE(ptr, var, error_value) \
+ uint32_t var; \
+ if (!DecodeUnsignedLeb128Checked(&ptr, begin_ + size_, &var)) { \
+ return error_value; \
+ }
+
+#define DECODE_UNSIGNED_CHECKED_FROM(ptr, var) \
+ uint32_t var; \
+ if (!DecodeUnsignedLeb128Checked(&ptr, begin_ + size_, &var)) { \
+ ErrorStringPrintf("Read out of bounds"); \
+ return false; \
+ }
+
+#define DECODE_SIGNED_CHECKED_FROM(ptr, var) \
+ int32_t var; \
+ if (!DecodeSignedLeb128Checked(&ptr, begin_ + size_, &var)) { \
+ ErrorStringPrintf("Read out of bounds"); \
+ return false; \
+ }
+
bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
uint32_t* handler_offsets, uint32_t handlers_size) {
const uint8_t* handlers_base = DexFile::GetCatchHandlerData(*code_item, 0);
@@ -461,7 +482,7 @@ bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_it
for (uint32_t i = 0; i < handlers_size; i++) {
bool catch_all;
size_t offset = ptr_ - handlers_base;
- int32_t size = DecodeSignedLeb128(&ptr_);
+ DECODE_SIGNED_CHECKED_FROM(ptr_, size);
if (UNLIKELY((size < -65536) || (size > 65536))) {
ErrorStringPrintf("Invalid exception handler size: %d", size);
@@ -478,12 +499,12 @@ bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_it
handler_offsets[i] = static_cast<uint32_t>(offset);
while (size-- > 0) {
- uint32_t type_idx = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, type_idx);
if (!CheckIndex(type_idx, header_->type_ids_size_, "handler type_idx")) {
return false;
}
- uint32_t addr = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, addr);
if (UNLIKELY(addr >= code_item->insns_size_in_code_units_)) {
ErrorStringPrintf("Invalid handler addr: %x", addr);
return false;
@@ -491,7 +512,7 @@ bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_it
}
if (catch_all) {
- uint32_t addr = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, addr);
if (UNLIKELY(addr >= code_item->insns_size_in_code_units_)) {
ErrorStringPrintf("Invalid handler catch_all_addr: %x", addr);
return false;
@@ -726,7 +747,7 @@ bool DexFileVerifier::CheckEncodedValue() {
}
bool DexFileVerifier::CheckEncodedArray() {
- uint32_t size = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, size);
while (size--) {
if (!CheckEncodedValue()) {
@@ -738,16 +759,16 @@ bool DexFileVerifier::CheckEncodedArray() {
}
bool DexFileVerifier::CheckEncodedAnnotation() {
- uint32_t idx = DecodeUnsignedLeb128(&ptr_);
- if (!CheckIndex(idx, header_->type_ids_size_, "encoded_annotation type_idx")) {
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, anno_idx);
+ if (!CheckIndex(anno_idx, header_->type_ids_size_, "encoded_annotation type_idx")) {
return false;
}
- uint32_t size = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, size);
uint32_t last_idx = 0;
for (uint32_t i = 0; i < size; i++) {
- idx = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, idx);
if (!CheckIndex(idx, header_->string_ids_size_, "annotation_element name_idx")) {
return false;
}
@@ -1002,7 +1023,7 @@ bool DexFileVerifier::CheckIntraCodeItem() {
}
ptr_ = DexFile::GetCatchHandlerData(*code_item, 0);
- uint32_t handlers_size = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, handlers_size);
if (UNLIKELY((handlers_size == 0) || (handlers_size >= 65536))) {
ErrorStringPrintf("Invalid handlers_size: %ud", handlers_size);
@@ -1051,7 +1072,7 @@ bool DexFileVerifier::CheckIntraCodeItem() {
}
bool DexFileVerifier::CheckIntraStringDataItem() {
- uint32_t size = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, size);
const uint8_t* file_end = begin_ + size_;
for (uint32_t i = 0; i < size; i++) {
@@ -1137,15 +1158,15 @@ bool DexFileVerifier::CheckIntraStringDataItem() {
}
bool DexFileVerifier::CheckIntraDebugInfoItem() {
- DecodeUnsignedLeb128(&ptr_);
- uint32_t parameters_size = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, dummy);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, parameters_size);
if (UNLIKELY(parameters_size > 65536)) {
ErrorStringPrintf("Invalid parameters_size: %x", parameters_size);
return false;
}
for (uint32_t j = 0; j < parameters_size; j++) {
- uint32_t parameter_name = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, parameter_name);
if (parameter_name != 0) {
parameter_name--;
if (!CheckIndex(parameter_name, header_->string_ids_size_, "debug_info_item parameter_name")) {
@@ -1161,27 +1182,27 @@ bool DexFileVerifier::CheckIntraDebugInfoItem() {
return true;
}
case DexFile::DBG_ADVANCE_PC: {
- DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, advance_pc_dummy);
break;
}
case DexFile::DBG_ADVANCE_LINE: {
- DecodeSignedLeb128(&ptr_);
+ DECODE_SIGNED_CHECKED_FROM(ptr_, advance_line_dummy);
break;
}
case DexFile::DBG_START_LOCAL: {
- uint32_t reg_num = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, reg_num);
if (UNLIKELY(reg_num >= 65536)) {
ErrorStringPrintf("Bad reg_num for opcode %x", opcode);
return false;
}
- uint32_t name_idx = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, name_idx);
if (name_idx != 0) {
name_idx--;
if (!CheckIndex(name_idx, header_->string_ids_size_, "DBG_START_LOCAL name_idx")) {
return false;
}
}
- uint32_t type_idx = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, type_idx);
if (type_idx != 0) {
type_idx--;
if (!CheckIndex(type_idx, header_->type_ids_size_, "DBG_START_LOCAL type_idx")) {
@@ -1192,7 +1213,7 @@ bool DexFileVerifier::CheckIntraDebugInfoItem() {
}
case DexFile::DBG_END_LOCAL:
case DexFile::DBG_RESTART_LOCAL: {
- uint32_t reg_num = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, reg_num);
if (UNLIKELY(reg_num >= 65536)) {
ErrorStringPrintf("Bad reg_num for opcode %x", opcode);
return false;
@@ -1200,26 +1221,26 @@ bool DexFileVerifier::CheckIntraDebugInfoItem() {
break;
}
case DexFile::DBG_START_LOCAL_EXTENDED: {
- uint32_t reg_num = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, reg_num);
if (UNLIKELY(reg_num >= 65536)) {
ErrorStringPrintf("Bad reg_num for opcode %x", opcode);
return false;
}
- uint32_t name_idx = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, name_idx);
if (name_idx != 0) {
name_idx--;
if (!CheckIndex(name_idx, header_->string_ids_size_, "DBG_START_LOCAL_EXTENDED name_idx")) {
return false;
}
}
- uint32_t type_idx = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, type_idx);
if (type_idx != 0) {
type_idx--;
if (!CheckIndex(type_idx, header_->type_ids_size_, "DBG_START_LOCAL_EXTENDED type_idx")) {
return false;
}
}
- uint32_t sig_idx = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, sig_idx);
if (sig_idx != 0) {
sig_idx--;
if (!CheckIndex(sig_idx, header_->string_ids_size_, "DBG_START_LOCAL_EXTENDED sig_idx")) {
@@ -1229,7 +1250,7 @@ bool DexFileVerifier::CheckIntraDebugInfoItem() {
break;
}
case DexFile::DBG_SET_FILE: {
- uint32_t name_idx = DecodeUnsignedLeb128(&ptr_);
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, name_idx);
if (name_idx != 0) {
name_idx--;
if (!CheckIndex(name_idx, header_->string_ids_size_, "DBG_SET_FILE name_idx")) {
@@ -2127,7 +2148,7 @@ bool DexFileVerifier::CheckInterAnnotationSetItem() {
const DexFile::AnnotationItem* annotation =
reinterpret_cast<const DexFile::AnnotationItem*>(begin_ + *offsets);
const uint8_t* data = annotation->annotation_;
- uint32_t idx = DecodeUnsignedLeb128(&data);
+ DECODE_UNSIGNED_CHECKED_FROM(data, idx);
if (UNLIKELY(last_idx >= idx && i != 0)) {
ErrorStringPrintf("Out-of-order entry types: %x then %x", last_idx, idx);
@@ -2442,7 +2463,10 @@ static std::string GetStringOrError(const uint8_t* const begin,
// Assume that the data is OK at this point. String data has been checked at this point.
const uint8_t* ptr = begin + string_id->string_data_off_;
- DecodeUnsignedLeb128(&ptr);
+ uint32_t dummy;
+ if (!DecodeUnsignedLeb128Checked(&ptr, begin + header->file_size_, &dummy)) {
+ return "(error)";
+ }
return reinterpret_cast<const char*>(ptr);
}
@@ -2604,7 +2628,11 @@ static bool FindMethodName(uint32_t method_index,
return false;
}
const uint8_t* str_data_ptr = begin + string_off;
- DecodeUnsignedLeb128(&str_data_ptr);
+ uint32_t dummy;
+ if (!DecodeUnsignedLeb128Checked(&str_data_ptr, begin + header->file_size_, &dummy)) {
+ *error_msg = "String size out of bounds for method flags verification";
+ return false;
+ }
*str = reinterpret_cast<const char*>(str_data_ptr);
return true;
}
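
The DECODE_*_CHECKED_FROM macros above depend on the bounds-checked LEB128 readers added in runtime/leb128.h (see the diffstat). A minimal sketch of the unsigned variant, assuming the (cursor, end, out) shape visible at the call sites; this is illustrative only, not the exact ART implementation:

#include <cstdint>

// Decode an unsigned LEB128 value without reading past `end`. Returns false if
// the encoding runs off the end of the buffer or needs more than the five bytes
// a uint32_t can occupy; on success advances *data past the encoded value.
static bool DecodeUnsignedLeb128CheckedSketch(const uint8_t** data,
                                              const uint8_t* end,
                                              uint32_t* out) {
  const uint8_t* ptr = *data;
  uint32_t result = 0;
  for (uint32_t shift = 0; shift < 32; shift += 7) {
    if (ptr >= end) {
      return false;                       // would read out of bounds
    }
    const uint8_t byte = *ptr++;
    result |= static_cast<uint32_t>(byte & 0x7fu) << shift;
    if ((byte & 0x80u) == 0) {            // continuation bit clear: done
      *data = ptr;
      *out = result;
      return true;
    }
  }
  return false;                           // more than 5 bytes: malformed 32-bit value
}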
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 1ee1f818b6..df23f94a31 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -78,6 +78,7 @@ void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints)
qpoints->pJniMethodEnd = JniMethodEnd;
qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ qpoints->pJniMethodFastEndWithReference = JniMethodFastEndWithReference;
qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
qpoints->pJniMethodFastEnd = JniMethodFastEnd;
qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 2a3a6bfa06..4d47b83185 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -60,7 +60,11 @@ extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
+ auto* caller = GetCalleeSaveMethodCaller(
+ self,
+ // TODO: Change art_quick_resolve_string on MIPS and MIPS64 to kSaveEverything.
+ (kRuntimeISA == kMips || kRuntimeISA == kMips64) ? Runtime::kSaveRefsOnly
+ : Runtime::kSaveEverything);
mirror::String* result = ResolveStringFromCode(caller, string_idx);
if (LIKELY(result != nullptr)) {
// For AOT code, we need a write barrier for the class loader that holds
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 89712a3cc7..915f18ed71 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -65,6 +65,11 @@ extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie, jobject lo
extern mirror::Object* JniMethodEndWithReference(jobject result, uint32_t saved_local_ref_cookie,
Thread* self)
NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
+extern mirror::Object* JniMethodFastEndWithReference(jobject result,
+ uint32_t saved_local_ref_cookie,
+ Thread* self)
+ NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
+
extern mirror::Object* JniMethodEndWithReferenceSynchronized(jobject result,
uint32_t saved_local_ref_cookie,
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index e4029191d6..3cfee45462 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -78,6 +78,7 @@
V(JniMethodFastEnd, void, uint32_t, Thread*) \
V(JniMethodEndSynchronized, void, uint32_t, jobject, Thread*) \
V(JniMethodEndWithReference, mirror::Object*, jobject, uint32_t, Thread*) \
+ V(JniMethodFastEndWithReference, mirror::Object*, jobject, uint32_t, Thread*) \
V(JniMethodEndWithReferenceSynchronized, mirror::Object*, jobject, uint32_t, jobject, Thread*) \
V(QuickGenericJniTrampoline, void, ArtMethod*) \
\
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index b25f447e4b..330c742354 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -22,6 +22,9 @@
namespace art {
+template <bool kDynamicFast>
+static inline void GoToRunnableFast(Thread* self) NO_THREAD_SAFETY_ANALYSIS;
+
extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
Thread* self ATTRIBUTE_UNUSED) {
DCHECK(kUseReadBarrier);
@@ -78,7 +81,28 @@ static void GoToRunnable(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
bool is_fast = native_method->IsFastNative();
if (!is_fast) {
self->TransitionFromSuspendedToRunnable();
- } else if (UNLIKELY(self->TestAllFlags())) {
+ } else {
+ GoToRunnableFast</*kDynamicFast*/true>(self);
+ }
+}
+
+// TODO: NO_THREAD_SAFETY_ANALYSIS due to different control paths depending on fast JNI.
+template <bool kDynamicFast>
+ALWAYS_INLINE static inline void GoToRunnableFast(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+ if (kIsDebugBuild) {
+ // Should only enter here if the method is Fast JNI or @FastNative.
+ ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
+
+ if (kDynamicFast) {
+ CHECK(native_method->IsFastNative()) << native_method->PrettyMethod();
+ } else {
+ CHECK(native_method->IsAnnotatedWithFastNative()) << native_method->PrettyMethod();
+ }
+ }
+
+ // When we are in "fast" JNI or @FastNative, we are already Runnable.
+ // Only do a suspend check on the way out of JNI.
+ if (UNLIKELY(self->TestAllFlags())) {
// In fast JNI mode we never transitioned out of runnable. Perform a suspend check if there
// is a flag raised.
DCHECK(Locks::mutator_lock_->IsSharedHeld(self));
@@ -106,20 +130,7 @@ extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self) {
}
extern void JniMethodFastEnd(uint32_t saved_local_ref_cookie, Thread* self) {
- // inlined fast version of GoToRunnable(self);
-
- if (kIsDebugBuild) {
- ArtMethod* native_method = *self->GetManagedStack()->GetTopQuickFrame();
- CHECK(native_method->IsAnnotatedWithFastNative()) << native_method->PrettyMethod();
- }
-
- if (UNLIKELY(self->TestAllFlags())) {
- // In fast JNI mode we never transitioned out of runnable. Perform a suspend check if there
- // is a flag raised.
- DCHECK(Locks::mutator_lock_->IsSharedHeld(self));
- self->CheckSuspend();
- }
-
+ GoToRunnableFast</*kDynamicFast*/false>(self);
PopLocalReferences(saved_local_ref_cookie, self);
}
@@ -131,10 +142,6 @@ extern void JniMethodEndSynchronized(uint32_t saved_local_ref_cookie,
PopLocalReferences(saved_local_ref_cookie, self);
}
-// TODO: JniMethodFastEndWithReference
-// (Probably don't need to have a synchronized variant since
-// it already has to do atomic operations)
-
// Common result handling for EndWithReference.
static mirror::Object* JniMethodEndWithReferenceHandleResult(jobject result,
uint32_t saved_local_ref_cookie,
@@ -157,6 +164,13 @@ static mirror::Object* JniMethodEndWithReferenceHandleResult(jobject result,
return o.Ptr();
}
+extern mirror::Object* JniMethodFastEndWithReference(jobject result,
+ uint32_t saved_local_ref_cookie,
+ Thread* self) {
+ GoToRunnableFast</*kDynamicFast*/false>(self);
+ return JniMethodEndWithReferenceHandleResult(result, saved_local_ref_cookie, self);
+}
+
extern mirror::Object* JniMethodEndWithReference(jobject result,
uint32_t saved_local_ref_cookie,
Thread* self) {
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 03254ab8d4..cdb1051e08 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -122,9 +122,9 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
// Skip across the entrypoints structures.
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_pos, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_pos, thread_local_end, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_objects, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_end, thread_local_start, sizeof(void*));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_start, thread_local_objects, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_objects, mterp_current_ibase, sizeof(size_t));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_current_ibase, mterp_default_ibase, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, mterp_default_ibase, mterp_alt_ibase, sizeof(void*));
@@ -223,6 +223,8 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEndSynchronized, pJniMethodEndWithReference,
sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEndWithReference,
+ pJniMethodFastEndWithReference, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodFastEndWithReference,
pJniMethodEndWithReferenceSynchronized, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEndWithReferenceSynchronized,
pQuickGenericJniTrampoline, sizeof(void*));
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index a968343318..e2f5a1d7fc 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -104,6 +104,48 @@ void SpaceBitmap<kAlignment>::Clear() {
}
template<size_t kAlignment>
+void SpaceBitmap<kAlignment>::ClearRange(const mirror::Object* begin, const mirror::Object* end) {
+ uintptr_t begin_offset = reinterpret_cast<uintptr_t>(begin) - heap_begin_;
+ uintptr_t end_offset = reinterpret_cast<uintptr_t>(end) - heap_begin_;
+ // Align begin and end to word boundaries.
+ while (begin_offset < end_offset && OffsetBitIndex(begin_offset) != 0) {
+ Clear(reinterpret_cast<mirror::Object*>(heap_begin_ + begin_offset));
+ begin_offset += kAlignment;
+ }
+ while (begin_offset < end_offset && OffsetBitIndex(end_offset) != 0) {
+ end_offset -= kAlignment;
+ Clear(reinterpret_cast<mirror::Object*>(heap_begin_ + end_offset));
+ }
+ const uintptr_t start_index = OffsetToIndex(begin_offset);
+ const uintptr_t end_index = OffsetToIndex(end_offset);
+ Atomic<uintptr_t>* const mem_begin = &bitmap_begin_[start_index];
+ Atomic<uintptr_t>* const mem_end = &bitmap_begin_[end_index];
+ Atomic<uintptr_t>* const page_begin = AlignUp(mem_begin, kPageSize);
+ Atomic<uintptr_t>* const page_end = AlignDown(mem_end, kPageSize);
+ if (!kMadviseZeroes || page_begin >= page_end) {
+ // No possible area to madvise.
+ std::fill(reinterpret_cast<uint8_t*>(mem_begin),
+ reinterpret_cast<uint8_t*>(mem_end),
+ 0);
+ } else {
+ // Spans one or more pages.
+ DCHECK_LE(mem_begin, page_begin);
+ DCHECK_LE(page_begin, page_end);
+ DCHECK_LE(page_end, mem_end);
+ std::fill(reinterpret_cast<uint8_t*>(mem_begin),
+ reinterpret_cast<uint8_t*>(page_begin),
+ 0);
+ CHECK_NE(madvise(page_begin,
+ reinterpret_cast<uint8_t*>(page_end) - reinterpret_cast<uint8_t*>(page_begin),
+ MADV_DONTNEED),
+ -1) << "madvise failed";
+ std::fill(reinterpret_cast<uint8_t*>(page_end),
+ reinterpret_cast<uint8_t*>(mem_end),
+ 0);
+ }
+}
+
+template<size_t kAlignment>
void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
DCHECK_EQ(Size(), source_bitmap->Size());
const size_t count = source_bitmap->Size() / sizeof(intptr_t);
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 296663a8be..b13648894d 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -68,9 +68,13 @@ class SpaceBitmap {
return static_cast<T>(index * kAlignment * kBitsPerIntPtrT);
}
+ ALWAYS_INLINE static constexpr uintptr_t OffsetBitIndex(uintptr_t offset) {
+ return (offset / kAlignment) % kBitsPerIntPtrT;
+ }
+
// Bits are packed in the obvious way.
static constexpr uintptr_t OffsetToMask(uintptr_t offset) {
- return (static_cast<size_t>(1)) << ((offset / kAlignment) % kBitsPerIntPtrT);
+ return static_cast<size_t>(1) << OffsetBitIndex(offset);
}
bool Set(const mirror::Object* obj) ALWAYS_INLINE {
@@ -87,6 +91,9 @@ class SpaceBitmap {
// Fill the bitmap with zeroes. Returns the bitmap's memory to the system as a side-effect.
void Clear();
+ // Clear a range covered by the bitmap using madvise if possible.
+ void ClearRange(const mirror::Object* begin, const mirror::Object* end);
+
bool Test(const mirror::Object* obj) const;
// Return true iff <obj> is within the range of pointers that this bitmap could potentially cover,
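
The new OffsetBitIndex helper factors out the bit position that OffsetToMask already computed, and it is what ClearRange uses to walk up to a word boundary before switching to whole-word stores and madvise. The index math, restated as a small self-contained sketch (kAlignment = 8 is an assumption matching object alignment; the real constants live in SpaceBitmap):

#include <cstddef>
#include <cstdint>

constexpr size_t kAlignmentSketch = 8;                          // heap bytes covered per bitmap bit
constexpr size_t kBitsPerWordSketch = sizeof(uintptr_t) * 8;    // bits per bitmap word

constexpr size_t OffsetToIndexSketch(uintptr_t offset) {        // which bitmap word
  return offset / (kAlignmentSketch * kBitsPerWordSketch);
}
constexpr uintptr_t OffsetBitIndexSketch(uintptr_t offset) {    // which bit within that word
  return (offset / kAlignmentSketch) % kBitsPerWordSketch;
}
constexpr uintptr_t OffsetToMaskSketch(uintptr_t offset) {
  return static_cast<uintptr_t>(1) << OffsetBitIndexSketch(offset);
}

// With these numbers one bitmap word covers 8 * 64 = 512 heap bytes and one 4 KiB
// bitmap page covers 256 KiB of heap, so ClearRange only reaches the madvise path
// for cleared ranges spanning several hundred KiB or more.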
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index edb08ef3d9..8c06cfd640 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -62,7 +62,7 @@ TEST_F(SpaceBitmapTest, ScanRange) {
std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
- EXPECT_TRUE(space_bitmap.get() != nullptr);
+ EXPECT_TRUE(space_bitmap != nullptr);
// Set all the odd bits in the first BitsPerIntPtrT * 3 to one.
for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
@@ -87,6 +87,48 @@ TEST_F(SpaceBitmapTest, ScanRange) {
}
}
+TEST_F(SpaceBitmapTest, ClearRange) {
+ uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
+ size_t heap_capacity = 16 * MB;
+
+ std::unique_ptr<ContinuousSpaceBitmap> bitmap(
+ ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
+ EXPECT_TRUE(bitmap != nullptr);
+
+ // Set all of the bits in the bitmap.
+ for (size_t j = 0; j < heap_capacity; j += kObjectAlignment) {
+ const mirror::Object* obj = reinterpret_cast<mirror::Object*>(heap_begin + j);
+ bitmap->Set(obj);
+ }
+
+ std::vector<std::pair<uintptr_t, uintptr_t>> ranges = {
+ {0, 10 * KB + kObjectAlignment},
+ {kObjectAlignment, kObjectAlignment},
+ {kObjectAlignment, 2 * kObjectAlignment},
+ {kObjectAlignment, 5 * kObjectAlignment},
+ {1 * KB + kObjectAlignment, 2 * KB + 5 * kObjectAlignment},
+ };
+ // Try clearing a few ranges.
+ for (const std::pair<uintptr_t, uintptr_t>& range : ranges) {
+ const mirror::Object* obj_begin = reinterpret_cast<mirror::Object*>(heap_begin + range.first);
+ const mirror::Object* obj_end = reinterpret_cast<mirror::Object*>(heap_begin + range.second);
+ bitmap->ClearRange(obj_begin, obj_end);
+ // Boundaries should still be marked.
+ for (uintptr_t i = 0; i < range.first; i += kObjectAlignment) {
+ EXPECT_TRUE(bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
+ }
+ for (uintptr_t i = range.second; i < range.second + kPageSize; i += kObjectAlignment) {
+ EXPECT_TRUE(bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
+ }
+ // Everything inside should be cleared.
+ for (uintptr_t i = range.first; i < range.second; i += kObjectAlignment) {
+ EXPECT_FALSE(bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
+ bitmap->Set(reinterpret_cast<mirror::Object*>(heap_begin + i));
+ }
+ }
+}
+
+
class SimpleCounter {
public:
explicit SimpleCounter(size_t* counter) : count_(counter) {}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 1931caf65e..13af67eb3e 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -170,10 +170,10 @@ void ConcurrentCopying::BindBitmaps() {
CHECK(space->IsZygoteSpace() || space->IsImageSpace());
immune_spaces_.AddSpace(space);
} else if (space == region_space_) {
- accounting::ContinuousSpaceBitmap* bitmap =
- accounting::ContinuousSpaceBitmap::Create("cc region space bitmap",
- space->Begin(), space->Capacity());
- region_space_bitmap_ = bitmap;
+ // It is OK to clear the bitmap with mutators running since the only place it is read is
+ // VisitObjects which has exclusion with CC.
+ region_space_bitmap_ = region_space_->GetMarkBitmap();
+ region_space_bitmap_->Clear();
}
}
}
@@ -1601,9 +1601,8 @@ void ConcurrentCopying::ReclaimPhase() {
SwapBitmaps();
heap_->UnBindBitmaps();
- // Delete the region bitmap.
+ // The bitmap was cleared at the start of the GC, there is nothing we need to do here.
DCHECK(region_space_bitmap_ != nullptr);
- delete region_space_bitmap_;
region_space_bitmap_ = nullptr;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ba18699168..918b8db301 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -2490,6 +2490,8 @@ void Heap::PreZygoteFork() {
} else {
if (collector_type_ == kCollectorTypeCC) {
region_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+      // We have evacuated everything out of the region space, so clear the mark bitmap.
+ region_space_->GetMarkBitmap()->Clear();
} else {
bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
}
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 66fd62cee1..bbc634dbf3 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -241,15 +241,28 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
} else if (r->IsLargeTail()) {
// Do nothing.
} else {
+ // For newly allocated and evacuated regions, live bytes will be -1.
uint8_t* pos = r->Begin();
uint8_t* top = r->Top();
- while (pos < top) {
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
- if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+ const bool need_bitmap =
+ r->LiveBytes() != static_cast<size_t>(-1) &&
+ r->LiveBytes() != static_cast<size_t>(top - pos);
+ if (need_bitmap) {
+ GetLiveBitmap()->VisitMarkedRange(
+ reinterpret_cast<uintptr_t>(pos),
+ reinterpret_cast<uintptr_t>(top),
+ [callback, arg](mirror::Object* obj) {
callback(obj, arg);
- pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
- } else {
- break;
+ });
+ } else {
+ while (pos < top) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+ if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+ callback(obj, arg);
+ pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
+ } else {
+ break;
+ }
}
}
}
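
A small restatement of the need_bitmap decision in WalkInternal above, written as a hypothetical
standalone helper for clarity (only the two conditions exist in the source; the helper itself is
illustrative):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical helper mirroring the condition computed in WalkInternal.
    inline bool NeedBitmapWalk(size_t live_bytes, const uint8_t* begin, const uint8_t* top) {
      // live_bytes == -1: the region is newly allocated or an evacuation destination, so its
      // objects are packed and a linear GetNextObject() scan is safe.
      // live_bytes == top - begin: the region is completely live, also safe to scan linearly.
      // Anything else means dead-object holes, and only the live bitmap records where the
      // surviving objects are, so VisitMarkedRange must be used.
      return live_bytes != static_cast<size_t>(-1) &&
             live_bytes != static_cast<size_t>(top - begin);
    }
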
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 23cae7c821..35bc36988a 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -59,6 +59,8 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
regions_[i] = Region(i, region_addr, region_addr + kRegionSize);
}
+ mark_bitmap_.reset(
+ accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity()));
if (kIsDebugBuild) {
CHECK_EQ(regions_[0].Begin(), Begin());
for (size_t i = 0; i < num_regions_; ++i) {
@@ -215,7 +217,28 @@ void RegionSpace::ClearFromSpace() {
r->Clear();
--num_non_free_regions_;
} else if (r->IsInUnevacFromSpace()) {
+ size_t full_count = 0;
+ while (r->IsInUnevacFromSpace()) {
+ Region* const cur = &regions_[i + full_count];
+ if (i + full_count >= num_regions_ ||
+ cur->LiveBytes() != static_cast<size_t>(cur->Top() - cur->Begin())) {
+ break;
+ }
+ if (full_count != 0) {
+ cur->SetUnevacFromSpaceAsToSpace();
+ }
+ ++full_count;
+ }
+      // Note: r is the full_count == 0 region; the loop above skips it, so handle it here.
r->SetUnevacFromSpaceAsToSpace();
+ if (full_count >= 1) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + full_count * kRegionSize));
+        // Skip over the extra regions whose bitmap we just cleared.
+        // Subtract one because the enclosing for loop will increment i.
+ i += full_count - 1;
+ }
}
}
evac_region_ = nullptr;
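
The coalescing added to ClearFromSpace() above is easiest to see on a concrete run of regions.
The following is a minimal standalone sketch with hypothetical FakeRegion data standing in for
the real Region objects; it only illustrates how full_count grows over consecutive, fully live
unevacuated regions and how i is advanced so each region is visited exactly once:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct FakeRegion {
      bool unevac_from_space;
      bool fully_live;  // Stands in for LiveBytes() == Top() - Begin().
    };

    int main() {
      // Regions 1-3 are fully live unevac regions; region 4 is unevac but only partially live.
      std::vector<FakeRegion> regions = {
          {false, false}, {true, true}, {true, true}, {true, true}, {true, false}};
      for (size_t i = 0; i < regions.size(); ++i) {
        if (!regions[i].unevac_from_space) {
          continue;
        }
        size_t full_count = 0;
        while (i + full_count < regions.size() &&
               regions[i + full_count].unevac_from_space &&
               regions[i + full_count].fully_live) {
          ++full_count;
        }
        if (full_count >= 1) {
          // The real code issues a single GetLiveBitmap()->ClearRange() over this whole span.
          std::printf("ClearRange over regions [%zu, %zu)\n", i, i + full_count);
          i += full_count - 1;  // Skip the coalesced regions; the for loop adds the last one.
        }
      }
      return 0;  // Prints: ClearRange over regions [1, 4)
    }
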
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 4e57a856c5..381ccfac5d 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -77,12 +77,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
return 0;
}
accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
- // No live bitmap.
- return nullptr;
+ return mark_bitmap_.get();
}
accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
- // No mark bitmap.
- return nullptr;
+ return mark_bitmap_.get();
}
void Clear() OVERRIDE REQUIRES(!region_lock_);
@@ -516,6 +514,9 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
Region* evac_region_; // The region that's being evacuated to currently.
Region full_region_; // The dummy/sentinel region that looks full.
+ // Mark bitmap used by the GC.
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
+
DISALLOW_COPY_AND_ASSIGN(RegionSpace);
};
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 130c10d322..7389c73096 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -60,27 +60,24 @@ void IndirectReferenceTable::AbortIfNoCheckJNI(const std::string& msg) {
IndirectReferenceTable::IndirectReferenceTable(size_t max_count,
IndirectRefKind desired_kind,
- bool abort_on_error)
+ std::string* error_msg)
: kind_(desired_kind),
max_entries_(max_count) {
+ CHECK(error_msg != nullptr);
CHECK_NE(desired_kind, kHandleScopeOrInvalid);
- std::string error_str;
const size_t table_bytes = max_count * sizeof(IrtEntry);
table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
- PROT_READ | PROT_WRITE, false, false, &error_str));
- if (abort_on_error) {
- CHECK(table_mem_map_.get() != nullptr) << error_str;
- CHECK_EQ(table_mem_map_->Size(), table_bytes);
- CHECK(table_mem_map_->Begin() != nullptr);
- } else if (table_mem_map_.get() == nullptr ||
- table_mem_map_->Size() != table_bytes ||
- table_mem_map_->Begin() == nullptr) {
- table_mem_map_.reset();
- LOG(ERROR) << error_str;
- return;
+ PROT_READ | PROT_WRITE, false, false, error_msg));
+ if (table_mem_map_.get() == nullptr && error_msg->empty()) {
+ *error_msg = "Unable to map memory for indirect ref table";
+ }
+
+ if (table_mem_map_.get() != nullptr) {
+ table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
+ } else {
+ table_ = nullptr;
}
- table_ = reinterpret_cast<IrtEntry*>(table_mem_map_->Begin());
segment_state_.all = IRT_FIRST_SEGMENT;
}
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 1762b10350..363280a87c 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -257,12 +257,24 @@ bool inline operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
class IndirectReferenceTable {
public:
- // WARNING: When using with abort_on_error = false, the object may be in a partially
- // initialized state. Use IsValid() to check.
- IndirectReferenceTable(size_t max_count, IndirectRefKind kind, bool abort_on_error = true);
+ /*
+ * WARNING: Construction of the IndirectReferenceTable may fail.
+ * error_msg must not be null. If error_msg is set by the constructor, then
+ * construction has failed and the IndirectReferenceTable will be in an
+   * invalid state. Use IsValid to check whether the object was constructed
+   * successfully.
+ */
+ IndirectReferenceTable(size_t max_count, IndirectRefKind kind, std::string* error_msg);
~IndirectReferenceTable();
+ /*
+ * Checks whether construction of the IndirectReferenceTable succeeded.
+ *
+ * This object must only be used if IsValid() returns true. It is safe to
+ * call IsValid from multiple threads without locking or other explicit
+ * synchronization.
+ */
bool IsValid() const;
/*
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index 7b28f0bda8..d7026de559 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -49,7 +49,9 @@ TEST_F(IndirectReferenceTableTest, BasicTest) {
ScopedObjectAccess soa(Thread::Current());
static const size_t kTableMax = 20;
- IndirectReferenceTable irt(kTableMax, kGlobal);
+ std::string error_msg;
+ IndirectReferenceTable irt(kTableMax, kGlobal, &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
mirror::Class* c = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
StackHandleScope<4> hs(soa.Self());
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 09d11678f2..b71236b511 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -667,11 +667,39 @@ inline bool DoInvokePolymorphic(Thread* self, ShadowFrame& shadow_frame,
return false;
}
} else if (handle_kind == kInvokeDirect) {
- // TODO(narayan) : We need to handle the case where the target method is a
- // constructor here. Also the case where we don't want to dynamically
- // dispatch based on the type of the receiver.
- UNIMPLEMENTED(FATAL) << "Direct invokes are not implemented yet.";
- return false;
+ if (called_method->IsConstructor()) {
+ // TODO(narayan) : We need to handle the case where the target method is a
+ // constructor here.
+ UNIMPLEMENTED(FATAL) << "Direct invokes for constructors are not implemented yet.";
+ return false;
+ }
+
+ // Nothing special to do in the case where we're not dealing with a
+    // constructor. It's a private method, and the access check was already
+    // performed at the point of creating the handle.
+ } else if (handle_kind == kInvokeSuper) {
+ mirror::Class* declaring_class = called_method->GetDeclaringClass();
+
+ // Note that we're not dynamically dispatching on the type of the receiver
+ // here. We use the static type of the "receiver" object that we've
+ // recorded in the method handle's type, which will be the same as the
+ // special caller that was specified at the point of lookup.
+ mirror::Class* referrer_class = handle_type->GetPTypes()->Get(0);
+ if (!declaring_class->IsInterface()) {
+ mirror::Class* super_class = referrer_class->GetSuperClass();
+ uint16_t vtable_index = called_method->GetMethodIndex();
+ DCHECK(super_class != nullptr);
+ DCHECK(super_class->HasVTable());
+ // Note that super_class is a super of referrer_class and called_method
+ // will always be declared by super_class (or one of its super classes).
+ DCHECK_LT(vtable_index, super_class->GetVTableLength());
+ called_method = super_class->GetVTableEntry(vtable_index, kRuntimePointerSize);
+ } else {
+ called_method = referrer_class->FindVirtualMethodForInterfaceSuper(
+ called_method, kRuntimePointerSize);
+ }
+
+ CHECK(called_method != nullptr);
}
// NOTE: handle_kind == kInvokeStatic needs no special treatment here. We
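
The kInvokeSuper branch above relies on the fact that a method declared in a superclass keeps
the same vtable index in every subclass, which is why called_method->GetMethodIndex() can be
used to index directly into the superclass's vtable. A minimal standalone sketch with
hypothetical types (MethodSketch/ClassSketch, not ART's real ArtMethod/Class API):

    #include <cstddef>
    #include <vector>

    struct MethodSketch { const char* name; };

    struct ClassSketch {
      ClassSketch* super;
      // Slots inherited from the superclass come first and keep their indices.
      std::vector<MethodSketch*> vtable;
    };

    // Mirrors the non-interface kInvokeSuper path: dispatch through the superclass of the
    // static "receiver" type recorded in the handle, not the dynamic type of the receiver.
    MethodSketch* InvokeSuperTarget(ClassSketch* referrer_class, size_t vtable_index) {
      ClassSketch* super_class = referrer_class->super;  // DCHECKed non-null in the real code.
      return super_class->vtable[vtable_index];
    }

    int main() {
      MethodSketch base_m{"Base.m"};
      MethodSketch derived_m{"Derived.m"};        // Overrides slot 0.
      ClassSketch base{nullptr, {&base_m}};
      ClassSketch derived{&base, {&derived_m}};   // Same slot 0, overriding entry.
      // invoke-super from Derived resolves through Base's vtable: Base.m, not Derived.m.
      return InvokeSuperTarget(&derived, 0) == &base_m ? 0 : 1;
    }
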
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 7285b9a965..9b4327f137 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -411,7 +411,9 @@ const JNIInvokeInterface gJniInvokeInterface = {
JII::AttachCurrentThreadAsDaemon
};
-JavaVMExt::JavaVMExt(Runtime* runtime, const RuntimeArgumentMap& runtime_options)
+JavaVMExt::JavaVMExt(Runtime* runtime,
+ const RuntimeArgumentMap& runtime_options,
+ std::string* error_msg)
: runtime_(runtime),
check_jni_abort_hook_(nullptr),
check_jni_abort_hook_data_(nullptr),
@@ -420,10 +422,10 @@ JavaVMExt::JavaVMExt(Runtime* runtime, const RuntimeArgumentMap& runtime_options
tracing_enabled_(runtime_options.Exists(RuntimeArgumentMap::JniTrace)
|| VLOG_IS_ON(third_party_jni)),
trace_(runtime_options.GetOrDefault(RuntimeArgumentMap::JniTrace)),
- globals_(kGlobalsMax, kGlobal),
+ globals_(kGlobalsMax, kGlobal, error_msg),
libraries_(new Libraries),
unchecked_functions_(&gJniInvokeInterface),
- weak_globals_(kWeakGlobalsMax, kWeakGlobal),
+ weak_globals_(kWeakGlobalsMax, kWeakGlobal, error_msg),
allow_accessing_weak_globals_(true),
weak_globals_add_condition_("weak globals add condition",
(CHECK(Locks::jni_weak_globals_lock_ != nullptr),
@@ -436,6 +438,19 @@ JavaVMExt::JavaVMExt(Runtime* runtime, const RuntimeArgumentMap& runtime_options
JavaVMExt::~JavaVMExt() {
}
+// Checking "globals" and "weak_globals" usually requires locks, but we
+// don't need the locks to check for validity when constructing the
+// object. Use NO_THREAD_SAFETY_ANALYSIS for this.
+std::unique_ptr<JavaVMExt> JavaVMExt::Create(Runtime* runtime,
+ const RuntimeArgumentMap& runtime_options,
+ std::string* error_msg) NO_THREAD_SAFETY_ANALYSIS {
+ std::unique_ptr<JavaVMExt> java_vm(new JavaVMExt(runtime, runtime_options, error_msg));
+ if (java_vm && java_vm->globals_.IsValid() && java_vm->weak_globals_.IsValid()) {
+ return java_vm;
+ }
+ return nullptr;
+}
+
jint JavaVMExt::HandleGetEnv(/*out*/void** env, jint version) {
for (GetEnvHook hook : env_hooks_) {
jint res = hook(this, env, version);
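
Together with the runtime.cc change further down, JavaVMExt now uses two-phase construction: a
private constructor that only records any failure in *error_msg, and a static Create() that
returns null when the result would be unusable. A minimal standalone sketch of that pattern
with a hypothetical Widget class (none of these names exist in the source):

    #include <memory>
    #include <string>

    class Widget {
     public:
      // Returns null on failure, in which case *error_msg describes the problem.
      static std::unique_ptr<Widget> Create(std::string* error_msg) {
        std::unique_ptr<Widget> w(new Widget(error_msg));
        if (!w->valid_) {
          return nullptr;
        }
        return w;
      }

     private:
      // The constructor never aborts; it only records whether setup worked.
      explicit Widget(std::string* error_msg) : valid_(AcquireResource(error_msg)) {}

      static bool AcquireResource(std::string* error_msg) {
        // Stand-in for, e.g., mapping memory for an IndirectReferenceTable.
        *error_msg = "pretend the mapping failed";
        return false;
      }

      bool valid_;
    };

    int main() {
      std::string error_msg;
      std::unique_ptr<Widget> w = Widget::Create(&error_msg);
      return w == nullptr ? 0 : 1;  // Construction intentionally fails in this sketch.
    }
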
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index 05717f41e7..9e37f1178c 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -43,7 +43,14 @@ using GetEnvHook = jint (*)(JavaVMExt* vm, /*out*/void** new_env, jint version);
class JavaVMExt : public JavaVM {
public:
- JavaVMExt(Runtime* runtime, const RuntimeArgumentMap& runtime_options);
+ // Creates a new JavaVMExt object.
+ // Returns nullptr on error, in which case error_msg is set to a message
+ // describing the error.
+ static std::unique_ptr<JavaVMExt> Create(Runtime* runtime,
+ const RuntimeArgumentMap& runtime_options,
+ std::string* error_msg);
+
+
~JavaVMExt();
bool ForceCopy() const {
@@ -192,6 +199,10 @@ class JavaVMExt : public JavaVM {
static bool IsBadJniVersion(int version);
private:
+ // The constructor should not be called directly. It may leave the object in
+ // an erroneous state, and the result needs to be checked.
+ JavaVMExt(Runtime* runtime, const RuntimeArgumentMap& runtime_options, std::string* error_msg);
+
// Return true if self can currently access weak globals.
bool MayAccessWeakGlobalsUnlocked(Thread* self) const REQUIRES_SHARED(Locks::mutator_lock_);
bool MayAccessWeakGlobals(Thread* self) const
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 1ca2cb4146..8eca8fcba9 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -57,19 +57,19 @@ jint JNIEnvExt::GetEnvHandler(JavaVMExt* vm, /*out*/void** env, jint version) {
return JNI_OK;
}
-JNIEnvExt* JNIEnvExt::Create(Thread* self_in, JavaVMExt* vm_in) {
- std::unique_ptr<JNIEnvExt> ret(new JNIEnvExt(self_in, vm_in));
+JNIEnvExt* JNIEnvExt::Create(Thread* self_in, JavaVMExt* vm_in, std::string* error_msg) {
+ std::unique_ptr<JNIEnvExt> ret(new JNIEnvExt(self_in, vm_in, error_msg));
if (CheckLocalsValid(ret.get())) {
return ret.release();
}
return nullptr;
}
-JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in)
+JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in, std::string* error_msg)
: self(self_in),
vm(vm_in),
local_ref_cookie(IRT_FIRST_SEGMENT),
- locals(kLocalsInitial, kLocal, false),
+ locals(kLocalsInitial, kLocal, error_msg),
check_jni(false),
runtime_deleted(false),
critical(0),
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index 549f8c56a0..e89debbf90 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -34,7 +34,9 @@ class JavaVMExt;
static constexpr size_t kLocalsInitial = 512;
struct JNIEnvExt : public JNIEnv {
- static JNIEnvExt* Create(Thread* self, JavaVMExt* vm);
+ // Creates a new JNIEnvExt. Returns null on error, in which case error_msg
+ // will contain a description of the error.
+ static JNIEnvExt* Create(Thread* self, JavaVMExt* vm, std::string* error_msg);
~JNIEnvExt();
@@ -103,9 +105,9 @@ struct JNIEnvExt : public JNIEnv {
void SetFunctionsToRuntimeShutdownFunctions();
private:
- // The constructor should not be called directly. It may leave the object in an erronuous state,
+ // The constructor should not be called directly. It may leave the object in an erroneous state,
// and the result needs to be checked.
- JNIEnvExt(Thread* self, JavaVMExt* vm);
+ JNIEnvExt(Thread* self, JavaVMExt* vm, std::string* error_msg);
// All locked objects, with the (Java caller) stack frame that locked them. Used in CheckJNI
// to ensure that only monitors locked in this native frame are being unlocked, and that at
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index c6d5c9ea61..9479a181c6 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -2307,7 +2307,9 @@ TEST_F(JniInternalTest, IndirectReferenceTableOffsets) {
// The segment_state_ field is private, and we want to avoid friend declaration. So we'll check
// by modifying memory.
// The parameters don't really matter here.
- IndirectReferenceTable irt(5, IndirectRefKind::kGlobal, true);
+ std::string error_msg;
+ IndirectReferenceTable irt(5, IndirectRefKind::kGlobal, &error_msg);
+ ASSERT_TRUE(irt.IsValid()) << error_msg;
uint32_t old_state = irt.GetSegmentState();
// Write some new state directly. We invert parts of old_state to ensure a new value.
diff --git a/runtime/leb128.h b/runtime/leb128.h
index 74934aebf0..31459af3a0 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -53,6 +53,49 @@ static inline uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
return static_cast<uint32_t>(result);
}
+static inline bool DecodeUnsignedLeb128Checked(const uint8_t** data,
+ const void* end,
+ uint32_t* out) {
+ const uint8_t* ptr = *data;
+ if (ptr >= end) {
+ return false;
+ }
+ int result = *(ptr++);
+ if (UNLIKELY(result > 0x7f)) {
+ if (ptr >= end) {
+ return false;
+ }
+ int cur = *(ptr++);
+ result = (result & 0x7f) | ((cur & 0x7f) << 7);
+ if (cur > 0x7f) {
+ if (ptr >= end) {
+ return false;
+ }
+ cur = *(ptr++);
+ result |= (cur & 0x7f) << 14;
+ if (cur > 0x7f) {
+ if (ptr >= end) {
+ return false;
+ }
+ cur = *(ptr++);
+ result |= (cur & 0x7f) << 21;
+ if (cur > 0x7f) {
+ if (ptr >= end) {
+ return false;
+ }
+ // Note: We don't check to see if cur is out of range here,
+ // meaning we tolerate garbage in the four high-order bits.
+ cur = *(ptr++);
+ result |= cur << 28;
+ }
+ }
+ }
+ }
+ *data = ptr;
+ *out = static_cast<uint32_t>(result);
+ return true;
+}
+
 // Reads an unsigned LEB128 + 1 value, updating the given pointer to point
// just past the end of the read value. This function tolerates
// non-zero high-order bits in the fifth encoded byte.
@@ -97,6 +140,57 @@ static inline int32_t DecodeSignedLeb128(const uint8_t** data) {
return result;
}
+static inline bool DecodeSignedLeb128Checked(const uint8_t** data,
+ const void* end,
+ int32_t* out) {
+ const uint8_t* ptr = *data;
+ if (ptr >= end) {
+ return false;
+ }
+ int32_t result = *(ptr++);
+ if (result <= 0x7f) {
+ result = (result << 25) >> 25;
+ } else {
+ if (ptr >= end) {
+ return false;
+ }
+ int cur = *(ptr++);
+ result = (result & 0x7f) | ((cur & 0x7f) << 7);
+ if (cur <= 0x7f) {
+ result = (result << 18) >> 18;
+ } else {
+ if (ptr >= end) {
+ return false;
+ }
+ cur = *(ptr++);
+ result |= (cur & 0x7f) << 14;
+ if (cur <= 0x7f) {
+ result = (result << 11) >> 11;
+ } else {
+ if (ptr >= end) {
+ return false;
+ }
+ cur = *(ptr++);
+ result |= (cur & 0x7f) << 21;
+ if (cur <= 0x7f) {
+ result = (result << 4) >> 4;
+ } else {
+ if (ptr >= end) {
+ return false;
+ }
+ // Note: We don't check to see if cur is out of range here,
+ // meaning we tolerate garbage in the four high-order bits.
+ cur = *(ptr++);
+ result |= cur << 28;
+ }
+ }
+ }
+ }
+ *data = ptr;
+  *out = result;
+ return true;
+}
+
// Returns the number of bytes needed to encode the value in unsigned LEB128.
static inline uint32_t UnsignedLeb128Size(uint32_t data) {
// bits_to_encode = (data != 0) ? 32 - CLZ(x) : 1 // 32 - CLZ(data | 1)
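
A minimal usage sketch of the checked decoder added above, assuming leb128.h is included; the
DecodeExample() wrapper is hypothetical. The byte sequence is the standard LEB128 worked
example: 0xE5 0x8E 0x26 encodes the unsigned value 0x65 | (0x0E << 7) | (0x26 << 14) = 624485.

    #include <cstdint>

    bool DecodeExample() {
      static const uint8_t buffer[] = { 0xE5, 0x8E, 0x26 };
      const uint8_t* ptr = buffer;
      const uint8_t* const end = buffer + sizeof(buffer);
      uint32_t value = 0;
      if (!DecodeUnsignedLeb128Checked(&ptr, end, &value)) {
        // Unlike DecodeUnsignedLeb128, the checked variant fails instead of reading past
        // 'end' when the input is truncated mid-value.
        return false;
      }
      return value == 624485u && ptr == end;
    }
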
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index be849a330c..df3865b1fe 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -24,6 +24,7 @@
#include "base/casts.h"
#include "base/enums.h"
#include "base/logging.h"
+#include "gc_root.h"
#include "mirror/class.h"
#include "mirror/method_type.h"
#include "runtime.h"
@@ -159,6 +160,33 @@ inline void DexCache::SetElementPtrSize(PtrType* ptr_array,
}
}
+template <typename T,
+ ReadBarrierOption kReadBarrierOption,
+ typename Visitor>
+inline void VisitDexCachePairs(std::atomic<DexCachePair<T>>* pairs,
+ size_t num_pairs,
+ const Visitor& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ for (size_t i = 0; i < num_pairs; ++i) {
+ DexCachePair<T> source = pairs[i].load(std::memory_order_relaxed);
+    // NOTE: We need the "template" keyword here to avoid a compilation
+    // failure. source.object has a type that depends on the template parameter
+    // T, so we must tell the compiler that "Read" names a member template.
+    // Otherwise, on encountering the "<" token, the compiler would parse "Read"
+    // as a non-template member and "<" as the less-than operator.
+ T* before = source.object.template Read<kReadBarrierOption>();
+ // TODO(narayan): This additional GC root construction and assignment
+ // is unnecessary. We're already operating on a copy of the DexCachePair
+ // that's in the cache.
+ GcRoot<T> root(before);
+ visitor.VisitRootIfNonNull(root.AddressWithoutBarrier());
+ if (root.Read() != before) {
+ source.object = GcRoot<T>(root.Read());
+ pairs[i].store(source, std::memory_order_relaxed);
+ }
+ }
+}
+
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
@@ -168,21 +196,16 @@ inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visito
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
// Visit arrays after.
if (kVisitNativeRoots) {
- mirror::StringDexCacheType* strings = GetStrings();
- for (size_t i = 0, num_strings = NumStrings(); i != num_strings; ++i) {
- StringDexCachePair source = strings[i].load(std::memory_order_relaxed);
- mirror::String* before = source.object.Read<kReadBarrierOption>();
- GcRoot<mirror::String> root(before);
- visitor.VisitRootIfNonNull(root.AddressWithoutBarrier());
- if (root.Read() != before) {
- source.object = GcRoot<String>(root.Read());
- strings[i].store(source, std::memory_order_relaxed);
- }
- }
+ VisitDexCachePairs<mirror::String, kReadBarrierOption, Visitor>(
+ GetStrings(), NumStrings(), visitor);
+
GcRoot<mirror::Class>* resolved_types = GetResolvedTypes();
for (size_t i = 0, num_types = NumResolvedTypes(); i != num_types; ++i) {
visitor.VisitRootIfNonNull(resolved_types[i].AddressWithoutBarrier());
}
+
+ VisitDexCachePairs<mirror::MethodType, kReadBarrierOption, Visitor>(
+ GetResolvedMethodTypes(), NumResolvedMethodTypes(), visitor);
}
}
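
The NOTE in VisitDexCachePairs above is about C++'s dependent-name rules rather than anything
ART-specific. A minimal standalone sketch (hypothetical Holder/ReadAsInt, not ART code) showing
why the "template" keyword is needed when calling a member template through an object whose
type depends on a template parameter:

    template <typename T>
    struct Holder {
      T value;
      template <typename U>
      U Read() const { return static_cast<U>(value); }
    };

    template <typename T>
    int ReadAsInt(const Holder<T>& h) {
      // Without "template", the compiler must assume Read is a plain member, so
      // "h.Read < int" would be parsed as a comparison and fail to compile.
      return h.template Read<int>();
    }

    int main() {
      Holder<double> h{3.5};
      return ReadAsInt(h);  // Returns 3.
    }
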
diff --git a/runtime/oat.h b/runtime/oat.h
index 4d8687cebf..814a4934e7 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '8', '9', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '9', '0', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6e15c38a53..bde41858cf 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -347,13 +347,13 @@ Runtime::~Runtime() {
delete class_linker_;
delete heap_;
delete intern_table_;
- delete java_vm_;
delete oat_file_manager_;
Thread::Shutdown();
QuasiAtomic::Shutdown();
verifier::MethodVerifier::Shutdown();
// Destroy allocators before shutting down the MemMap because they may use it.
+ java_vm_.reset();
linear_alloc_.reset();
low_4gb_arena_pool_.reset();
arena_pool_.reset();
@@ -1120,7 +1120,12 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
}
}
- java_vm_ = new JavaVMExt(this, runtime_options);
+ std::string error_msg;
+ java_vm_ = JavaVMExt::Create(this, runtime_options, &error_msg);
+ if (java_vm_.get() == nullptr) {
+ LOG(ERROR) << "Could not initialize JavaVMExt: " << error_msg;
+ return false;
+ }
// Add the JniEnv handler.
// TODO Refactor this stuff.
@@ -1144,7 +1149,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
class_linker_ = new ClassLinker(intern_table_);
if (GetHeap()->HasBootImageSpace()) {
- std::string error_msg;
bool result = class_linker_->InitFromBootImage(&error_msg);
if (!result) {
LOG(ERROR) << "Could not initialize from image: " << error_msg;
@@ -1191,7 +1195,6 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
&boot_class_path);
}
instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
- std::string error_msg;
if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
LOG(ERROR) << "Could not initialize without image: " << error_msg;
return false;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index e2ba2626e9..7cb87abe30 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -277,7 +277,7 @@ class Runtime {
}
JavaVMExt* GetJavaVM() const {
- return java_vm_;
+ return java_vm_.get();
}
size_t GetMaxSpinsBeforeThinkLockInflation() const {
@@ -757,7 +757,7 @@ class Runtime {
SignalCatcher* signal_catcher_;
std::string stack_trace_file_;
- JavaVMExt* java_vm_;
+ std::unique_ptr<JavaVMExt> java_vm_;
std::unique_ptr<jit::Jit> jit_;
std::unique_ptr<jit::JitOptions> jit_options_;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 39fe8d09c1..e47ccc062b 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -606,8 +606,9 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
// Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
// do not have a good way to report this on the child's side.
+ std::string error_msg;
std::unique_ptr<JNIEnvExt> child_jni_env_ext(
- JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM()));
+ JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg));
int pthread_create_result = 0;
if (child_jni_env_ext.get() != nullptr) {
@@ -648,7 +649,7 @@ void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_siz
env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
{
std::string msg(child_jni_env_ext.get() == nullptr ?
- "Could not allocate JNI Env" :
+ StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) :
StringPrintf("pthread_create (%s stack) failed: %s",
PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
ScopedObjectAccess soa(env);
@@ -693,8 +694,10 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en
DCHECK_EQ(jni_env_ext->self, this);
tlsPtr_.jni_env = jni_env_ext;
} else {
- tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm);
+ std::string error_msg;
+ tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm, &error_msg);
if (tlsPtr_.jni_env == nullptr) {
+ LOG(ERROR) << "Failed to create JNIEnvExt: " << error_msg;
return false;
}
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 6f5913e6b3..24038f5475 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1392,7 +1392,7 @@ class Thread {
stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
- thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
+ thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_start(nullptr),
thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
thread_local_alloc_stack_end(nullptr), nested_signal_state(nullptr),
@@ -1506,12 +1506,13 @@ class Thread {
JniEntryPoints jni_entrypoints;
QuickEntryPoints quick_entrypoints;
- // Thread-local allocation pointer.
- uint8_t* thread_local_start;
  // thread_local_pos and thread_local_end must be consecutive for ldrd and are 8-byte aligned for
// potentially better performance.
uint8_t* thread_local_pos;
uint8_t* thread_local_end;
+ // Thread-local allocation pointer.
+ uint8_t* thread_local_start;
+
size_t thread_local_objects;
// Mterp jump table bases.