Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.mk | 16
-rw-r--r--  runtime/arch/arm/fault_handler_arm.cc | 15
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 3
-rw-r--r--  runtime/arch/arm/quick_method_frame_info_arm.h | 16
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.S | 7
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.h | 22
-rw-r--r--  runtime/arch/arm64/fault_handler_arm64.cc | 158
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 482
-rw-r--r--  runtime/arch/arm64/quick_method_frame_info_arm64.h | 60
-rw-r--r--  runtime/arch/arm64/registers_arm64.h | 2
-rw-r--r--  runtime/arch/memcmp16.h | 2
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc | 13
-rw-r--r--  runtime/arch/mips/fault_handler_mips.cc | 3
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 7
-rw-r--r--  runtime/arch/x86/asm_support_x86.S | 4
-rw-r--r--  runtime/arch/x86/fault_handler_x86.cc | 351
-rw-r--r--  runtime/arch/x86/memcmp16_x86.S | 1038
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 30
-rw-r--r--  runtime/arch/x86_64/fault_handler_x86_64.cc | 47
-rwxr-xr-x  runtime/arch/x86_64/memcmp16_x86_64.S | 1210
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 16
-rw-r--r--  runtime/base/macros.h | 1
-rw-r--r--  runtime/base/mutex.cc | 38
-rw-r--r--  runtime/base/mutex.h | 22
-rw-r--r--  runtime/base/scoped_flock.cc | 16
-rw-r--r--  runtime/base/scoped_flock.h | 5
-rw-r--r--  runtime/base/unix_file/fd_file_test.cc | 3
-rw-r--r--  runtime/base/unix_file/mapped_file_test.cc | 8
-rw-r--r--  runtime/base/unix_file/random_access_file_test.h | 6
-rw-r--r--  runtime/check_jni.cc | 3805
-rw-r--r--  runtime/check_jni.h | 29
-rw-r--r--  runtime/class_linker-inl.h | 39
-rw-r--r--  runtime/class_linker.cc | 683
-rw-r--r--  runtime/class_linker.h | 82
-rw-r--r--  runtime/class_linker_test.cc | 26
-rw-r--r--  runtime/common_runtime_test.cc | 104
-rw-r--r--  runtime/common_runtime_test.h | 20
-rw-r--r--  runtime/compiler_callbacks.h | 4
-rw-r--r--  runtime/debugger.cc | 113
-rw-r--r--  runtime/dex_file.cc | 33
-rw-r--r--  runtime/dex_file.h | 31
-rw-r--r--  runtime/dex_file_test.cc | 30
-rw-r--r--  runtime/dex_file_verifier.cc | 74
-rw-r--r--  runtime/dex_file_verifier.h | 8
-rw-r--r--  runtime/elf_file.cc | 52
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 146
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 121
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 29
-rw-r--r--  runtime/entrypoints/portable/portable_throw_entrypoints.cc | 2
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc | 61
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints.h | 106
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_enum.h | 56
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h | 117
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 33
-rw-r--r--  runtime/entrypoints_order_test.cc | 4
-rw-r--r--  runtime/fault_handler.cc | 36
-rw-r--r--  runtime/fault_handler.h | 13
-rw-r--r--  runtime/gc/accounting/card_table-inl.h | 9
-rw-r--r--  runtime/gc/accounting/card_table.cc | 19
-rw-r--r--  runtime/gc/accounting/card_table.h | 8
-rw-r--r--  runtime/gc/accounting/card_table_test.cc | 150
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 2
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 44
-rw-r--r--  runtime/gc/collector/garbage_collector.h | 19
-rw-r--r--  runtime/gc/collector/mark_sweep-inl.h | 5
-rw-r--r--  runtime/gc/collector/mark_sweep.cc | 15
-rw-r--r--  runtime/gc/collector/semi_space-inl.h | 47
-rw-r--r--  runtime/gc/collector/semi_space.cc | 193
-rw-r--r--  runtime/gc/collector/semi_space.h | 11
-rw-r--r--  runtime/gc/heap.cc | 413
-rw-r--r--  runtime/gc/heap.h | 20
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 8
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 3
-rw-r--r--  runtime/gc/space/image_space.cc | 334
-rw-r--r--  runtime/gc/space/image_space.h | 25
-rw-r--r--  runtime/gc/space/large_object_space.cc | 5
-rw-r--r--  runtime/gc/space/large_object_space.h | 3
-rw-r--r--  runtime/gc/space/malloc_space.cc | 4
-rw-r--r--  runtime/gc/space/malloc_space.h | 2
-rw-r--r--  runtime/gc/space/rosalloc_space.cc | 6
-rw-r--r--  runtime/gc/space/rosalloc_space.h | 1
-rw-r--r--  runtime/gc/space/space.h | 6
-rw-r--r--  runtime/gc/space/zygote_space.cc | 13
-rw-r--r--  runtime/gc/space/zygote_space.h | 3
-rw-r--r--  runtime/gc_root-inl.h | 33
-rw-r--r--  runtime/gc_root.h | 58
-rw-r--r--  runtime/globals.h | 2
-rw-r--r--  runtime/hprof/hprof.cc | 34
-rw-r--r--  runtime/implicit_check_options.h | 172
-rw-r--r--  runtime/indirect_reference_table-inl.h | 8
-rw-r--r--  runtime/indirect_reference_table.cc | 38
-rw-r--r--  runtime/indirect_reference_table.h | 27
-rw-r--r--  runtime/instruction_set.cc | 38
-rw-r--r--  runtime/instruction_set.h | 28
-rw-r--r--  runtime/instrumentation.cc | 99
-rw-r--r--  runtime/instrumentation.h | 29
-rw-r--r--  runtime/intern_table.cc | 52
-rw-r--r--  runtime/intern_table.h | 11
-rw-r--r--  runtime/interpreter/interpreter.cc | 9
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 4
-rw-r--r--  runtime/interpreter/interpreter_common.h | 3
-rw-r--r--  runtime/java_vm_ext.cc | 829
-rw-r--r--  runtime/java_vm_ext.h | 185
-rw-r--r--  runtime/jdwp/jdwp.h | 2
-rw-r--r--  runtime/jdwp/jdwp_handler.cc | 4
-rw-r--r--  runtime/jdwp/jdwp_main.cc | 2
-rw-r--r--  runtime/jni_env_ext-inl.h (renamed from runtime/jni_internal-inl.h) | 8
-rw-r--r--  runtime/jni_env_ext.cc | 89
-rw-r--r--  runtime/jni_env_ext.h | 115
-rw-r--r--  runtime/jni_internal.cc | 866
-rw-r--r--  runtime/jni_internal.h | 189
-rw-r--r--  runtime/jni_internal_test.cc | 999
-rw-r--r--  runtime/lock_word.h | 2
-rw-r--r--  runtime/mem_map.cc | 160
-rw-r--r--  runtime/mem_map.h | 13
-rw-r--r--  runtime/memory_region.h | 29
-rw-r--r--  runtime/method_helper-inl.h | 14
-rw-r--r--  runtime/method_helper.cc | 28
-rw-r--r--  runtime/method_helper.h | 35
-rw-r--r--  runtime/mirror/array-inl.h | 4
-rw-r--r--  runtime/mirror/array.cc | 2
-rw-r--r--  runtime/mirror/array.h | 17
-rw-r--r--  runtime/mirror/art_field.cc | 15
-rw-r--r--  runtime/mirror/art_field.h | 10
-rw-r--r--  runtime/mirror/art_method-inl.h | 58
-rw-r--r--  runtime/mirror/art_method.cc | 32
-rw-r--r--  runtime/mirror/art_method.h | 23
-rw-r--r--  runtime/mirror/class-inl.h | 50
-rw-r--r--  runtime/mirror/class.cc | 74
-rw-r--r--  runtime/mirror/class.h | 36
-rw-r--r--  runtime/mirror/dex_cache-inl.h | 8
-rw-r--r--  runtime/mirror/dex_cache.h | 19
-rw-r--r--  runtime/mirror/object.cc | 6
-rw-r--r--  runtime/mirror/object.h | 9
-rw-r--r--  runtime/mirror/object_test.cc | 7
-rw-r--r--  runtime/mirror/reference.cc | 15
-rw-r--r--  runtime/mirror/reference.h | 10
-rw-r--r--  runtime/mirror/stack_trace_element.cc | 15
-rw-r--r--  runtime/mirror/stack_trace_element.h | 9
-rw-r--r--  runtime/mirror/string.cc | 14
-rw-r--r--  runtime/mirror/string.h | 9
-rw-r--r--  runtime/mirror/throwable.cc | 14
-rw-r--r--  runtime/mirror/throwable.h | 9
-rw-r--r--  runtime/monitor.cc | 18
-rw-r--r--  runtime/monitor.h | 7
-rw-r--r--  runtime/monitor_pool.cc | 6
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 279
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 2
-rw-r--r--  runtime/native/dalvik_system_VMStack.cc | 7
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 5
-rw-r--r--  runtime/native/java_lang_Class.cc | 5
-rw-r--r--  runtime/native/java_lang_DexCache.cc | 1
-rw-r--r--  runtime/native/java_lang_Runtime.cc | 16
-rw-r--r--  runtime/native/java_lang_Thread.cc | 9
-rw-r--r--  runtime/native/java_lang_VMClassLoader.cc | 17
-rw-r--r--  runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc | 1
-rw-r--r--  runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc | 7
-rw-r--r--  runtime/native_bridge.cc | 267
-rw-r--r--  runtime/native_bridge.h | 39
-rw-r--r--  runtime/noop_compiler_callbacks.h | 5
-rw-r--r--  runtime/oat.cc | 31
-rw-r--r--  runtime/oat.h | 7
-rw-r--r--  runtime/oat_file.cc | 104
-rw-r--r--  runtime/oat_file.h | 41
-rw-r--r--  runtime/object_callbacks.h | 10
-rw-r--r--  runtime/parsed_options.cc | 101
-rw-r--r--  runtime/parsed_options.h | 8
-rw-r--r--  runtime/primitive.cc | 1
-rw-r--r--  runtime/primitive.h | 8
-rw-r--r--  runtime/proxy_test.cc | 10
-rw-r--r--  runtime/quick/inline_method_analyser.h | 7
-rw-r--r--  runtime/quick_exception_handler.cc | 38
-rw-r--r--  runtime/reference_table.cc | 35
-rw-r--r--  runtime/reference_table.h | 3
-rw-r--r--  runtime/reference_table_test.cc | 2
-rw-r--r--  runtime/reflection.cc | 48
-rw-r--r--  runtime/reflection.h | 1
-rw-r--r--  runtime/runtime-inl.h | 13
-rw-r--r--  runtime/runtime.cc | 185
-rw-r--r--  runtime/runtime.h | 50
-rw-r--r--  runtime/safe_map.h | 1
-rw-r--r--  runtime/scoped_thread_state_change.h | 8
-rw-r--r--  runtime/stack.cc | 153
-rw-r--r--  runtime/stack.h | 17
-rw-r--r--  runtime/stack_map.h | 307
-rw-r--r--  runtime/thread-inl.h | 32
-rw-r--r--  runtime/thread.cc | 173
-rw-r--r--  runtime/thread.h | 26
-rw-r--r--  runtime/thread_linux.cc | 13
-rw-r--r--  runtime/thread_list.cc | 76
-rw-r--r--  runtime/thread_list.h | 2
-rw-r--r--  runtime/trace.cc | 30
-rw-r--r--  runtime/trace.h | 5
-rw-r--r--  runtime/utils.cc | 91
-rw-r--r--  runtime/utils.h | 36
-rw-r--r--  runtime/utils_test.cc | 2
-rw-r--r--  runtime/verifier/method_verifier-inl.h | 4
-rw-r--r--  runtime/verifier/method_verifier.cc | 251
-rw-r--r--  runtime/verifier/method_verifier.h | 32
-rw-r--r--  runtime/verifier/reg_type.cc | 113
-rw-r--r--  runtime/verifier/reg_type.h | 99
-rw-r--r--  runtime/verifier/reg_type_cache-inl.h | 4
-rw-r--r--  runtime/verifier/reg_type_cache.cc | 113
-rw-r--r--  runtime/verifier/reg_type_cache.h | 80
-rw-r--r--  runtime/verifier/reg_type_test.cc | 179
-rw-r--r--  runtime/verifier/register_line-inl.h | 2
-rw-r--r--  runtime/verifier/register_line.cc | 109
-rw-r--r--  runtime/verifier/register_line.h | 66
-rw-r--r--  runtime/well_known_classes.cc | 4
-rw-r--r--  runtime/well_known_classes.h | 2
210 files changed, 13361 insertions(+), 5568 deletions(-)
diff --git a/runtime/Android.mk b/runtime/Android.mk
index f2d3c8e8c0..1e037f5b06 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -81,6 +81,7 @@ LIBART_COMMON_SRC_FILES := \
interpreter/interpreter.cc \
interpreter/interpreter_common.cc \
interpreter/interpreter_switch_impl.cc \
+ java_vm_ext.cc \
jdwp/jdwp_event.cc \
jdwp/jdwp_expand_buf.cc \
jdwp/jdwp_handler.cc \
@@ -88,6 +89,7 @@ LIBART_COMMON_SRC_FILES := \
jdwp/jdwp_request.cc \
jdwp/jdwp_socket.cc \
jdwp/object_registry.cc \
+ jni_env_ext.cc \
jni_internal.cc \
jobject_comparator.cc \
mem_map.cc \
@@ -104,6 +106,7 @@ LIBART_COMMON_SRC_FILES := \
mirror/string.cc \
mirror/throwable.cc \
monitor.cc \
+ native_bridge.cc \
native/dalvik_system_DexFile.cc \
native/dalvik_system_VMDebug.cc \
native/dalvik_system_VMRuntime.cc \
@@ -238,6 +241,7 @@ LIBART_SRC_FILES_x86 := \
arch/x86/context_x86.cc \
arch/x86/entrypoints_init_x86.cc \
arch/x86/jni_entrypoints_x86.S \
+ arch/x86/memcmp16_x86.S \
arch/x86/portable_entrypoints_x86.S \
arch/x86/quick_entrypoints_x86.S \
arch/x86/thread_x86.cc \
@@ -246,15 +250,18 @@ LIBART_SRC_FILES_x86 := \
LIBART_TARGET_SRC_FILES_x86 := \
$(LIBART_SRC_FILES_x86)
+# Note that the fault_handler_x86.cc is not a mistake. This file is
+# shared between the x86 and x86_64 architectures.
LIBART_SRC_FILES_x86_64 := \
arch/x86_64/context_x86_64.cc \
arch/x86_64/entrypoints_init_x86_64.cc \
arch/x86_64/jni_entrypoints_x86_64.S \
+ arch/x86_64/memcmp16_x86_64.S \
arch/x86_64/portable_entrypoints_x86_64.S \
arch/x86_64/quick_entrypoints_x86_64.S \
arch/x86_64/thread_x86_64.cc \
monitor_pool.cc \
- arch/x86_64/fault_handler_x86_64.cc
+ arch/x86/fault_handler_x86.cc
LIBART_TARGET_SRC_FILES_x86_64 := \
$(LIBART_SRC_FILES_x86_64) \
@@ -292,6 +299,7 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
dex_file.h \
dex_instruction.h \
gc/collector/gc_type.h \
+ gc/collector_type.h \
gc/space/space.h \
gc/heap.h \
indirect_reference_table.h \
@@ -340,6 +348,7 @@ define build-libart
LOCAL_CPP_EXTENSION := $$(ART_CPP_EXTENSION)
ifeq ($$(art_ndebug_or_debug),ndebug)
LOCAL_MODULE := libart
+ LOCAL_FDO_SUPPORT := true
else # debug
LOCAL_MODULE := libartd
endif
@@ -370,7 +379,9 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_GENERATED_SOURCES += $$(ENUM_OPERATOR_OUT_GEN)
LOCAL_CFLAGS := $$(LIBART_CFLAGS)
- LOCAL_LDFLAGS := $$(LIBART_LDFLAGS)
+ # TODO(danalbert): Work around the test failures caused by removing -Bsymbolic
+ # by turning it back on for libart until I get a chance to look at them.
+ LOCAL_LDFLAGS := $$(LIBART_LDFLAGS) -Wl,-Bsymbolic
ifeq ($$(art_target_or_host),target)
LOCAL_LDFLAGS += $$(LIBART_TARGET_LDFLAGS)
else
@@ -416,6 +427,7 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
LOCAL_STATIC_LIBRARIES := libziparchive libz
else # host
LOCAL_STATIC_LIBRARIES += libcutils libziparchive-host libz libutils
+ LOCAL_SHARED_LIBRARIES += libsigchain
LOCAL_LDLIBS += -ldl -lpthread
ifeq ($$(HOST_OS),linux)
LOCAL_LDLIBS += -lrt
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index 2a82129511..be28544af0 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -46,9 +46,10 @@ static uint32_t GetInstructionSize(uint8_t* pc) {
return instr_size;
}
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
*out_sp = static_cast<uintptr_t>(sc->arm_sp);
VLOG(signals) << "sp: " << *out_sp;
@@ -60,7 +61,7 @@ void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod**
// get the method from the top of the stack. However it's in r0.
uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(sc->fault_address);
uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
- reinterpret_cast<uint8_t*>(*out_sp) - kArmStackOverflowReservedBytes);
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm));
if (overflow_addr == fault_addr) {
*out_method = reinterpret_cast<mirror::ArtMethod*>(sc->arm_r0);
} else {
@@ -114,7 +115,7 @@ bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
uint32_t checkinst1 = 0xf8d90000 + Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
uint16_t checkinst2 = 0x6800;
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
uint8_t* ptr2 = reinterpret_cast<uint8_t*>(sc->arm_pc);
uint8_t* ptr1 = ptr2 - 4;
@@ -178,7 +179,7 @@ bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
// to the overflow region below the protected region.
bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- struct ucontext *uc = (struct ucontext *)context;
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
VLOG(signals) << "sigcontext: " << std::hex << sc;
@@ -191,7 +192,7 @@ bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
", fault_addr: " << fault_addr;
- uintptr_t overflow_addr = sp - kArmStackOverflowReservedBytes;
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kArm);
Thread* self = reinterpret_cast<Thread*>(sc->arm_r9);
CHECK_EQ(self, Thread::Current());
@@ -205,7 +206,7 @@ bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
}
// We know this is a stack overflow. We need to move the sp to the overflow region
- // the exists below the protected region. Determine the address of the next
+ // that exists below the protected region. Determine the address of the next
// available valid address below the protected region.
uintptr_t prevsp = sp;
sp = pregion;
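
The handlers above recognize an implicit stack-overflow check with pure pointer arithmetic: the compiler-emitted probe touches sp minus the per-ISA reserved region, so the change from kArmStackOverflowReservedBytes to GetStackOverflowReservedBytes(kArm) only changes where that constant comes from. A minimal sketch of the recognition predicate, with the reserved size as a made-up placeholder rather than ART's real value:

    // Illustration only: the recognition test used by the SIGSEGV handlers.
    // kReservedBytes stands in for art::GetStackOverflowReservedBytes(kArm).
    #include <cstdint>

    constexpr uintptr_t kReservedBytes = 8 * 1024;  // hypothetical value

    inline bool IsImplicitStackOverflow(uintptr_t fault_addr, uintptr_t sp) {
      // The implicit check probes exactly one address at sp - reserved bytes,
      // so any other faulting address is treated as an unrelated fault.
      return fault_addr == sp - kReservedBytes;
    }
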
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 4939610e60..86cb16aab5 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -365,8 +365,9 @@ END art_quick_invoke_stub
ARM_ENTRY art_quick_do_long_jump
vldm r1, {s0-s31} @ load all fprs from argument fprs_
ldr r2, [r0, #60] @ r2 = r15 (PC from gprs_ 60=4*15)
+ ldr r14, [r0, #56] @ (LR from gprs_ 56=4*14)
add r0, r0, #12 @ increment r0 to skip gprs_[0..2] 12=4*3
- ldm r0, {r3-r14} @ load remaining gprs from argument gprs_
+ ldm r0, {r3-r13} @ load remaining gprs from argument gprs_
mov r0, #0 @ clear result registers r0 and r1
mov r1, #0
bx r2 @ do long jump
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
index 83cacac5be..7595e94e26 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -63,6 +63,22 @@ constexpr QuickMethodFrameInfo ArmCalleeSaveMethodFrameInfo(Runtime::CalleeSaveT
ArmCalleeSaveFpSpills(type));
}
+constexpr size_t ArmCalleeSaveFpr1Offset(Runtime::CalleeSaveType type) {
+ return ArmCalleeSaveFrameSize(type) -
+ (POPCOUNT(ArmCalleeSaveCoreSpills(type)) +
+ POPCOUNT(ArmCalleeSaveFpSpills(type))) * kArmPointerSize;
+}
+
+constexpr size_t ArmCalleeSaveGpr1Offset(Runtime::CalleeSaveType type) {
+ return ArmCalleeSaveFrameSize(type) -
+ POPCOUNT(ArmCalleeSaveCoreSpills(type)) * kArmPointerSize;
+}
+
+constexpr size_t ArmCalleeSaveLrOffset(Runtime::CalleeSaveType type) {
+ return ArmCalleeSaveFrameSize(type) -
+ POPCOUNT(ArmCalleeSaveCoreSpills(type) & (-(1 << LR))) * kArmPointerSize;
+}
+
} // namespace arm
} // namespace art
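
The three offset helpers added here share one pattern: start at the frame size and step down one pointer-size slot for each spilled register that sits at or above the register of interest (spills are stored in ascending register order, so LR always ends up in the topmost slot). A standalone sketch of that arithmetic using a hypothetical spill mask, not ART's real ArmCalleeSaveCoreSpills() values:

    // Illustration only: mirrors the POPCOUNT-based offset arithmetic with a
    // made-up spill mask; real masks and frame sizes come from the header above.
    #include <cstddef>
    #include <cstdint>

    constexpr int kLr = 14;              // link register bit in the core spill mask
    constexpr size_t kPointerSize = 4;   // kArmPointerSize

    constexpr size_t Gpr1Offset(size_t frame_size, uint32_t core_spills) {
      // The first saved GPR sits one slot per spilled core register below the top.
      return frame_size - __builtin_popcount(core_spills) * kPointerSize;
    }

    constexpr size_t LrOffset(size_t frame_size, uint32_t core_spills) {
      // Only registers at or above LR lie between LR and the top of the frame.
      return frame_size - __builtin_popcount(core_spills & -(1u << kLr)) * kPointerSize;
    }

    // Hypothetical mask (r5-r8, r10, r11, lr spilled) in a 32-byte frame: 7 slots.
    constexpr uint32_t kMask = (1u << 5) | (1u << 6) | (1u << 7) | (1u << 8) |
                               (1u << 10) | (1u << 11) | (1u << kLr);
    static_assert(Gpr1Offset(32, kMask) == 4, "7 spills below the top of the frame");
    static_assert(LrOffset(32, kMask) == 28, "LR is always the topmost slot");
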
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index 55de1ecfa0..be167faae6 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -24,15 +24,22 @@
// Register holding suspend check count down.
// 32-bit is enough for the suspend register.
#define wSUSPEND w19
+// xSUSPEND is 64-bit view of wSUSPEND.
+// Used to save/restore the register scratched by managed code.
+#define xSUSPEND x19
// Register holding Thread::Current().
#define xSELF x18
+// x18 is not preserved by aapcs64, so save it in xETR (External Thread reg) for restore and later use.
+#define xETR x21
// Frame Pointer
#define xFP x29
// Link Register
#define xLR x30
// Define the intraprocedural linkage temporary registers.
#define xIP0 x16
+#define wIP0 w16
#define xIP1 x17
+#define wIP1 w17
.macro ENTRY name
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index f353408baa..7f0f56f274 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -19,28 +19,26 @@
#include "asm_support.h"
-// TODO Thread offsets need to be checked when on Aarch64.
-
// Note: these callee save methods loads require read barriers.
-// Offset of field Runtime::callee_save_methods_[kSaveAll]
+// Offset of field Runtime::callee_save_methods_[kSaveAll] verified in InitCpu
#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
-// Offset of field Runtime::callee_save_methods_[kRefsOnly]
+// Offset of field Runtime::callee_save_methods_[kRefsOnly] verified in InitCpu
#define RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET 8
-// Offset of field Runtime::callee_save_methods_[kRefsAndArgs]
+// Offset of field Runtime::callee_save_methods_[kRefsAndArgs] verified in InitCpu
#define RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET 16
-// Offset of field Thread::suspend_count_ verified in InitCpu
+// Offset of field Thread::suspend_count_
#define THREAD_FLAGS_OFFSET 0
-// Offset of field Thread::card_table_ verified in InitCpu
+// Offset of field Thread::card_table_
#define THREAD_CARD_TABLE_OFFSET 112
-// Offset of field Thread::exception_ verified in InitCpu
+// Offset of field Thread::exception_
#define THREAD_EXCEPTION_OFFSET 120
-// Offset of field Thread::thin_lock_thread_id_ verified in InitCpu
+// Offset of field Thread::thin_lock_thread_id_
#define THREAD_ID_OFFSET 12
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 368
-#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 176
-#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 304
+#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 176
+#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 96
+#define FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE 224
// Expected size of a heap reference
#define HEAP_REFERENCE_SIZE 4
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 74c3023aff..3a7e6896a1 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -21,7 +21,15 @@
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
+#include "registers_arm64.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "thread.h"
+#include "thread-inl.h"
+extern "C" void art_quick_throw_stack_overflow_from_signal();
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_implicit_suspend();
//
// ARM64 specific fault handler functions.
@@ -29,19 +37,163 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ *out_sp = static_cast<uintptr_t>(sc->sp);
+ VLOG(signals) << "sp: " << *out_sp;
+ if (*out_sp == 0) {
+ return;
+ }
+
+ // In the case of a stack overflow, the stack is not valid and we can't
+ // get the method from the top of the stack. However it's in x0.
+ uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(sc->fault_address);
+ uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kArm64));
+ if (overflow_addr == fault_addr) {
+ *out_method = reinterpret_cast<mirror::ArtMethod*>(sc->regs[0]);
+ } else {
+ // The method is at the top of the stack.
+ *out_method = (reinterpret_cast<StackReference<mirror::ArtMethod>* >(*out_sp)[0]).AsMirrorPtr();
+ }
+
+ // Work out the return PC. This will be the address of the instruction
+ // following the faulting ldr/str instruction.
+ VLOG(signals) << "pc: " << std::hex
+ << static_cast<void*>(reinterpret_cast<uint8_t*>(sc->pc));
+
+ *out_return_pc = sc->pc + 4;
}
bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
+ // The code that looks for the catch location needs to know the value of the
+ // PC at the point of call. For Null checks we insert a GC map that is immediately after
+ // the load/store instruction that might cause the fault.
+
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+
+ sc->regs[30] = sc->pc + 4; // LR needs to point to gc map location
+
+ sc->pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ VLOG(signals) << "Generating null pointer exception";
+ return true;
}
+// A suspend check is done using the following instruction sequence:
+// 0xf7223228: f9405640 ldr x0, [x18, #168]
+// .. some intervening instructions
+// 0xf7223230: f9400000 ldr x0, [x0]
+
+// The offset from x18 is Thread::ThreadSuspendTriggerOffset().
+// To check for a suspend check, we examine the instructions that caused
+// the fault (at PC-4 and PC).
bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ // These are the instructions to check for. The first one is the ldr x0,[x18,#xxx]
+ // where xxx is the offset of the suspend trigger.
+ uint32_t checkinst1 = 0xf9400240 | (Thread::ThreadSuspendTriggerOffset<8>().Int32Value() << 7);
+ uint32_t checkinst2 = 0xf9400000;
+
+ struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ uint8_t* ptr2 = reinterpret_cast<uint8_t*>(sc->pc);
+ uint8_t* ptr1 = ptr2 - 4;
+ VLOG(signals) << "checking suspend";
+
+ uint32_t inst2 = *reinterpret_cast<uint32_t*>(ptr2);
+ VLOG(signals) << "inst2: " << std::hex << inst2 << " checkinst2: " << checkinst2;
+ if (inst2 != checkinst2) {
+ // Second instruction is not good, not ours.
+ return false;
+ }
+
+ // The first instruction can be a little further up the stream due to load hoisting
+ // in the compiler.
+ uint8_t* limit = ptr1 - 80; // Compiler will hoist to a max of 20 instructions.
+ bool found = false;
+ while (ptr1 > limit) {
+ uint32_t inst1 = *reinterpret_cast<uint32_t*>(ptr1);
+ VLOG(signals) << "inst1: " << std::hex << inst1 << " checkinst1: " << checkinst1;
+ if (inst1 == checkinst1) {
+ found = true;
+ break;
+ }
+ ptr1 -= 4;
+ }
+ if (found) {
+ VLOG(signals) << "suspend check match";
+ // This is a suspend check. Arrange for the signal handler to return to
+ // art_quick_implicit_suspend. Also set LR so that after the suspend check it
+ // will resume the instruction (current PC + 4). PC points to the
+ // ldr x0,[x0,#0] instruction (x0 will be 0, set by the trigger).
+
+ sc->regs[30] = sc->pc + 4;
+ sc->pc = reinterpret_cast<uintptr_t>(art_quick_implicit_suspend);
+
+ // Now remove the suspend trigger that caused this fault.
+ Thread::Current()->RemoveSuspendTrigger();
+ VLOG(signals) << "removed suspend trigger invoking test suspend";
+ return true;
+ }
return false;
}
bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
+ struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
+ struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
+ VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
+ VLOG(signals) << "sigcontext: " << std::hex << sc;
+
+ uintptr_t sp = sc->sp;
+ VLOG(signals) << "sp: " << std::hex << sp;
+
+ uintptr_t fault_addr = sc->fault_address;
+ VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
+ VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
+ ", fault_addr: " << fault_addr;
+
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kArm64);
+
+ Thread* self = reinterpret_cast<Thread*>(sc->regs[art::arm64::TR]);
+ CHECK_EQ(self, Thread::Current());
+ uintptr_t pregion = reinterpret_cast<uintptr_t>(self->GetStackEnd()) -
+ Thread::kStackOverflowProtectedSize;
+
+ // Check that the fault address is the value expected for a stack overflow.
+ if (fault_addr != overflow_addr) {
+ VLOG(signals) << "Not a stack overflow";
+ return false;
+ }
+
+ // We know this is a stack overflow. We need to move the sp to the overflow region
+ // that exists below the protected region. Determine the address of the next
+ // available valid address below the protected region.
+ uintptr_t prevsp = sp;
+ sp = pregion;
+ VLOG(signals) << "setting sp to overflow region at " << std::hex << sp;
+
+ // Since the compiler puts the implicit overflow
+ // check before the callee save instructions, the SP is already pointing to
+ // the previous frame.
+ VLOG(signals) << "previous frame: " << std::hex << prevsp;
+
+ // Now establish the stack pointer for the signal return.
+ sc->sp = prevsp;
+
+ // Tell the stack overflow code where the new stack pointer should be.
+ sc->regs[art::arm64::IP0] = sp; // aka x16
+
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from_signal.
+ // The value of LR must be the same as it was when we entered the code that
+ // caused this fault. This will be inserted into a callee save frame by
+ // the function to which this handler returns (art_quick_throw_stack_overflow_from_signal).
+ sc->pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow_from_signal);
+
+ // The kernel will now return to the address in sc->pc.
+ return true;
}
} // namespace art
+
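
The magic numbers in the new SuspensionHandler::Action come straight from the A64 encoding of ldr x0, [x18, #imm]: the scaled 12-bit immediate occupies bits 10-21, so a byte offset that is a multiple of 8 lands in the right field as (offset / 8) << 10, i.e. offset << 7, on top of the 0xf9400240 base pattern (which already encodes Rn = x18 and Rt = x0). A small self-checking sketch of that encoding, not ART code:

    // Sketch: why `0xf9400240 | (offset << 7)` matches "ldr x0, [x18, #offset]".
    #include <cstdint>

    // LDR (immediate, unsigned offset, 64-bit): base 0xf9400000, imm12 at bits
    // 10-21 (scaled by 8), Rn at bits 5-9, Rt at bits 0-4.
    constexpr uint32_t EncodeLdr64(uint32_t rt, uint32_t rn, uint32_t byte_offset) {
      return 0xf9400000u | ((byte_offset / 8) << 10) | (rn << 5) | rt;
    }

    // The example in the comment above: ldr x0, [x18, #168] == 0xf9405640.
    static_assert(EncodeLdr64(0, 18, 168) == 0xf9405640u, "matches the disassembly");
    // And the handler's shortcut, valid because the offset is 8-byte aligned.
    static_assert(EncodeLdr64(0, 18, 168) == (0xf9400240u | (168u << 7)),
                  "offset << 7 == (offset / 8) << 10 for 8-byte-aligned offsets");
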
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 2201b55849..04be4a2999 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -24,183 +24,161 @@
* Runtime::CreateCalleeSaveMethod(kSaveAll)
*/
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
- adrp x9, :got:_ZN3art7Runtime9instance_E
- ldr x9, [x9, #:got_lo12:_ZN3art7Runtime9instance_E]
+ adrp xIP0, :got:_ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
- ldr x9,[x9] // x9 = & (art::Runtime * art::Runtime.instance_) .
+ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
- // x9 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
+ // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr x9, [x9, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
+ ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET ]
- sub sp, sp, #368
- .cfi_adjust_cfa_offset 368
+ sub sp, sp, #176
+ .cfi_adjust_cfa_offset 176
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 368)
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 176)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
- // FP args
- stp d0, d1, [sp, #8]
- stp d2, d3, [sp, #24]
- stp d4, d5, [sp, #40]
- stp d6, d7, [sp, #56]
-
// FP callee-saves
- stp d8, d9, [sp, #72]
- stp d10, d11, [sp, #88]
- stp d12, d13, [sp, #104]
- stp d14, d15, [sp, #120]
-
- stp d16, d17, [sp, #136]
- stp d18, d19, [sp, #152]
- stp d20, d21, [sp, #168]
- stp d22, d23, [sp, #184]
- stp d24, d25, [sp, #200]
- stp d26, d27, [sp, #216]
- stp d28, d29, [sp, #232]
- stp d30, d31, [sp, #248]
-
+ stp d8, d9, [sp, #8]
+ stp d10, d11, [sp, #24]
+ stp d12, d13, [sp, #40]
+ stp d14, d15, [sp, #56]
- // Callee saved.
- stp xSELF, x19, [sp, #264]
- .cfi_rel_offset x18, 264
- .cfi_rel_offset x19, 272
+ // Reserved registers
+ stp xSELF, xSUSPEND, [sp, #72]
+ .cfi_rel_offset x18, 72
+ .cfi_rel_offset x19, 80
- stp x20, x21, [sp, #280]
- .cfi_rel_offset x20, 280
- .cfi_rel_offset x21, 288
+ // callee-saves
+ stp x20, x21, [sp, #88]
+ .cfi_rel_offset x20, 88
+ .cfi_rel_offset x21, 96
- stp x22, x23, [sp, #296]
- .cfi_rel_offset x22, 296
- .cfi_rel_offset x23, 304
+ stp x22, x23, [sp, #104]
+ .cfi_rel_offset x22, 104
+ .cfi_rel_offset x23, 112
- stp x24, x25, [sp, #312]
- .cfi_rel_offset x24, 312
- .cfi_rel_offset x25, 320
+ stp x24, x25, [sp, #120]
+ .cfi_rel_offset x24, 120
+ .cfi_rel_offset x25, 128
- stp x26, x27, [sp, #328]
- .cfi_rel_offset x26, 328
- .cfi_rel_offset x27, 336
+ stp x26, x27, [sp, #136]
+ .cfi_rel_offset x26, 136
+ .cfi_rel_offset x27, 144
- stp x28, xFP, [sp, #344] // Save FP.
- .cfi_rel_offset x28, 344
- .cfi_rel_offset x29, 352
+ stp x28, x29, [sp, #152]
+ .cfi_rel_offset x28, 152
+ .cfi_rel_offset x29, 160
- str xLR, [sp, #360]
- .cfi_rel_offset x30, 360
+ str xLR, [sp, #168]
+ .cfi_rel_offset x30, 168
// Loads appropriate callee-save-method
- str x9, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
-
+ str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
.endm
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kRefsOnly).
*/
-// WIP.
.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
- adrp x9, :got:_ZN3art7Runtime9instance_E
- ldr x9, [x9, #:got_lo12:_ZN3art7Runtime9instance_E]
+ adrp xIP0, :got:_ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
- ldr x9,[x9] // x9 = & (art::Runtime * art::Runtime.instance_) .
+ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
- // x9 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
+ // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr x9, [x9, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
+ ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET ]
- sub sp, sp, #176
- .cfi_adjust_cfa_offset 176
+ sub sp, sp, #96
+ .cfi_adjust_cfa_offset 96
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 176)
+#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 96)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
- // FP callee-saves
- stp d8, d9, [sp, #8]
- stp d10, d11, [sp, #24]
- stp d12, d13, [sp, #40]
- stp d14, d15, [sp, #56]
+ // Callee-saves
+ stp x20, x21, [sp, #8]
+ .cfi_rel_offset x20, 8
+ .cfi_rel_offset x21, 16
- // Callee saved.
- stp xSELF, x19, [sp, #72]
- .cfi_rel_offset x18, 72
- .cfi_rel_offset x19, 80
+ stp x22, x23, [sp, #24]
+ .cfi_rel_offset x22, 24
+ .cfi_rel_offset x23, 32
- stp x20, x21, [sp, #88]
- .cfi_rel_offset x20, 88
- .cfi_rel_offset x21, 96
+ stp x24, x25, [sp, #40]
+ .cfi_rel_offset x24, 40
+ .cfi_rel_offset x25, 48
- stp x22, x23, [sp, #104]
- .cfi_rel_offset x22, 104
- .cfi_rel_offset x23, 112
+ stp x26, x27, [sp, #56]
+ .cfi_rel_offset x26, 56
+ .cfi_rel_offset x27, 64
- stp x24, x25, [sp, #120]
- .cfi_rel_offset x24, 120
- .cfi_rel_offset x25, 128
+ stp x28, x29, [sp, #72]
+ .cfi_rel_offset x28, 72
+ .cfi_rel_offset x29, 80
- stp x26, x27, [sp, #136]
- .cfi_rel_offset x26, 136
- .cfi_rel_offset x27, 144
+ // LR
+ str xLR, [sp, #88]
+ .cfi_rel_offset x30, 88
- stp x28, xFP, [sp, #152] // Save FP.
- .cfi_rel_offset x28, 152
- .cfi_rel_offset x29, 160
-
- str xLR, [sp, #168]
- .cfi_rel_offset x30, 168
+ // Save xSELF to xETR.
+ mov xETR, xSELF
// Loads appropriate callee-save-method
- str x9, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
+ str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
.endm
+// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
- // FP callee saves
- ldp d8, d9, [sp, #8]
- ldp d10, d11, [sp, #24]
- ldp d12, d13, [sp, #40]
- ldp d14, d15, [sp, #56]
-
- // Callee saved.
- ldp xSELF, x19, [sp, #72]
- .cfi_restore x18
- .cfi_restore x19
+ // Restore xSELF.
+ mov xSELF, xETR
- ldp x20, x21, [sp, #88]
+ // Callee-saves
+ ldp x20, x21, [sp, #8]
.cfi_restore x20
.cfi_restore x21
- ldp x22, x23, [sp, #104]
+ ldp x22, x23, [sp, #24]
.cfi_restore x22
.cfi_restore x23
- ldp x24, x25, [sp, #120]
+ ldp x24, x25, [sp, #40]
.cfi_restore x24
.cfi_restore x25
- ldp x26, x27, [sp, #136]
+ ldp x26, x27, [sp, #56]
.cfi_restore x26
.cfi_restore x27
- ldp x28, xFP, [sp, #152] // Save FP.
+ ldp x28, x29, [sp, #72]
.cfi_restore x28
.cfi_restore x29
- ldr xLR, [sp, #168]
+ // LR
+ ldr xLR, [sp, #88]
.cfi_restore x30
- add sp, sp, #176
- .cfi_adjust_cfa_offset -176
+ add sp, sp, #96
+ .cfi_adjust_cfa_offset -96
.endm
.macro POP_REF_ONLY_CALLEE_SAVE_FRAME
- add sp, sp, #176
- .cfi_adjust_cfa_offset -176
+ // Restore xSELF as it might be scratched.
+ mov xSELF, xETR
+ // ETR
+ ldr xETR, [sp, #16]
+ .cfi_restore x21
+
+ add sp, sp, #96
+ .cfi_adjust_cfa_offset -96
.endm
.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
@@ -210,62 +188,61 @@
.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
- sub sp, sp, #304
- .cfi_adjust_cfa_offset 304
+ sub sp, sp, #224
+ .cfi_adjust_cfa_offset 224
// Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 304)
+#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 224)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif
- stp d0, d1, [sp, #16]
- stp d2, d3, [sp, #32]
- stp d4, d5, [sp, #48]
- stp d6, d7, [sp, #64]
- stp d8, d9, [sp, #80]
- stp d10, d11, [sp, #96]
- stp d12, d13, [sp, #112]
- stp d14, d15, [sp, #128]
-
- stp x1, x2, [sp, #144]
- .cfi_rel_offset x1, 144
- .cfi_rel_offset x2, 152
-
- stp x3, x4, [sp, #160]
- .cfi_rel_offset x3, 160
- .cfi_rel_offset x4, 168
-
- stp x5, x6, [sp, #176]
- .cfi_rel_offset x5, 176
- .cfi_rel_offset x6, 184
-
- stp x7, xSELF, [sp, #192]
- .cfi_rel_offset x7, 192
- .cfi_rel_offset x18, 200
-
- stp x19, x20, [sp, #208]
- .cfi_rel_offset x19, 208
- .cfi_rel_offset x20, 216
-
- stp x21, x22, [sp, #224]
- .cfi_rel_offset x21, 224
- .cfi_rel_offset x22, 232
-
- stp x23, x24, [sp, #240]
- .cfi_rel_offset x23, 240
- .cfi_rel_offset x24, 248
-
- stp x25, x26, [sp, #256]
- .cfi_rel_offset x25, 256
- .cfi_rel_offset x26, 264
-
- stp x27, x28, [sp, #272]
- .cfi_rel_offset x27, 272
- .cfi_rel_offset x28, 280
-
- stp xFP, xLR, [sp, #288]
- .cfi_rel_offset x29, 288
- .cfi_rel_offset x30, 296
+ // FP args
+ stp d0, d1, [sp, #16]
+ stp d2, d3, [sp, #32]
+ stp d4, d5, [sp, #48]
+ stp d6, d7, [sp, #64]
+
+ // args and x20(callee-save)
+ stp x1, x2, [sp, #80]
+ .cfi_rel_offset x1, 80
+ .cfi_rel_offset x2, 88
+
+ stp x3, x4, [sp, #96]
+ .cfi_rel_offset x3, 96
+ .cfi_rel_offset x4, 104
+
+ stp x5, x6, [sp, #112]
+ .cfi_rel_offset x5, 112
+ .cfi_rel_offset x6, 120
+
+ stp x7, x20, [sp, #128]
+ .cfi_rel_offset x7, 128
+ .cfi_rel_offset x20, 136
+
+ // Callee-saves.
+ stp x21, x22, [sp, #144]
+ .cfi_rel_offset x21, 144
+ .cfi_rel_offset x22, 152
+
+ stp x23, x24, [sp, #160]
+ .cfi_rel_offset x23, 160
+ .cfi_rel_offset x24, 168
+
+ stp x25, x26, [sp, #176]
+ .cfi_rel_offset x25, 176
+ .cfi_rel_offset x26, 184
+
+ stp x27, x28, [sp, #192]
+ .cfi_rel_offset x27, 192
+ .cfi_rel_offset x28, 200
+
+ // x29(callee-save) and LR
+ stp x29, xLR, [sp, #208]
+ .cfi_rel_offset x29, 208
+ .cfi_rel_offset x30, 216
+
+ // Save xSELF to xETR.
+ mov xETR, xSELF
.endm
/*
@@ -275,75 +252,73 @@
* TODO This is probably too conservative - saving FP & LR.
*/
.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- adrp x9, :got:_ZN3art7Runtime9instance_E
- ldr x9, [x9, #:got_lo12:_ZN3art7Runtime9instance_E]
+ adrp xIP0, :got:_ZN3art7Runtime9instance_E
+ ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]
// Our registers aren't intermixed - just spill in order.
- ldr x9,[x9] // x9 = & (art::Runtime * art::Runtime.instance_) .
+ ldr xIP0, [xIP0] // xIP0 = & (art::Runtime * art::Runtime.instance_) .
- // x9 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
+ // xIP0 = (ArtMethod*) Runtime.instance_.callee_save_methods[kRefAndArgs] .
THIS_LOAD_REQUIRES_READ_BARRIER
- ldr x9, [x9, RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
+ ldr xIP0, [xIP0, RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET ]
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
- str x9, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
+ str xIP0, [sp] // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs]
.endm
+// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ // Restore xSELF.
+ mov xSELF, xETR
- ldp d0, d1, [sp, #16]
- ldp d2, d3, [sp, #32]
- ldp d4, d5, [sp, #48]
- ldp d6, d7, [sp, #64]
- ldp d8, d9, [sp, #80]
- ldp d10, d11, [sp, #96]
- ldp d12, d13, [sp, #112]
- ldp d14, d15, [sp, #128]
-
- // args.
- ldp x1, x2, [sp, #144]
+ // FP args
+ ldp d0, d1, [sp, #16]
+ ldp d2, d3, [sp, #32]
+ ldp d4, d5, [sp, #48]
+ ldp d6, d7, [sp, #64]
+
+ // args and x20(callee-save)
+ ldp x1, x2, [sp, #80]
.cfi_restore x1
.cfi_restore x2
- ldp x3, x4, [sp, #160]
+ ldp x3, x4, [sp, #96]
.cfi_restore x3
.cfi_restore x4
- ldp x5, x6, [sp, #176]
+ ldp x5, x6, [sp, #112]
.cfi_restore x5
.cfi_restore x6
- ldp x7, xSELF, [sp, #192]
+ ldp x7, x20, [sp, #128]
.cfi_restore x7
- .cfi_restore x18
-
- ldp x19, x20, [sp, #208]
- .cfi_restore x19
.cfi_restore x20
- ldp x21, x22, [sp, #224]
+ // Callee-saves.
+ ldp x21, x22, [sp, #144]
.cfi_restore x21
.cfi_restore x22
- ldp x23, x24, [sp, #240]
+ ldp x23, x24, [sp, #160]
.cfi_restore x23
.cfi_restore x24
- ldp x25, x26, [sp, #256]
+ ldp x25, x26, [sp, #176]
.cfi_restore x25
.cfi_restore x26
- ldp x27, x28, [sp, #272]
+ ldp x27, x28, [sp, #192]
.cfi_restore x27
.cfi_restore x28
- ldp xFP, xLR, [sp, #288]
+ // x29(callee-save) and LR
+ ldp x29, xLR, [sp, #208]
.cfi_restore x29
.cfi_restore x30
- add sp, sp, #304
- .cfi_adjust_cfa_offset -304
+ add sp, sp, #224
+ .cfi_adjust_cfa_offset -224
.endm
.macro RETURN_IF_RESULT_IS_ZERO
@@ -381,7 +356,7 @@
.endm
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
- RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x9
+ RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
.endm
// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
@@ -400,7 +375,7 @@
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov x0, xSELF // pass Thread::Current
+ mov x0, xSELF // pass Thread::Current
mov x1, sp // pass SP
b \cxx_name // \cxx_name(Thread*, SP)
END \c_name
@@ -410,7 +385,7 @@ END \c_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context.
- mov x1, xSELF // pass Thread::Current.
+ mov x1, xSELF // pass Thread::Current.
mov x2, sp // pass SP.
b \cxx_name // \cxx_name(arg, Thread*, SP).
brk 0
@@ -421,7 +396,7 @@ END \c_name
.extern \cxx_name
ENTRY \c_name
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
- mov x2, xSELF // pass Thread::Current
+ mov x2, xSELF // pass Thread::Current
mov x3, sp // pass SP
b \cxx_name // \cxx_name(arg1, arg2, Thread*, SP)
brk 0
@@ -460,6 +435,31 @@ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFr
*/
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
+ /*
+ * Invoke stack overflow exception from signal handler.
+ * On entry:
+ * xSELF: thread
+ * SP: address of last known frame
+ * IP0: address of next valid SP below protected region in stack
+ *
+ * This is deceptively simple but hides some complexity. It is called in the case of
+ * a stack overflow condition during implicit checks. The signal handler has been
+ * called by the kernel due to a load from the protected stack region. The handler
+ * works out the address of the previous frame and passes this in SP. However there
+ * is a piece of memory somewhere below the current SP that is not accessible (the
+ * memory that caused the signal). The signal handler works out the next
+ * accessible value of SP and passes this in x16/IP0. This code then sets up the SP
+ * to be this new value and calls the code to create and throw the stack overflow
+ * exception.
+ */
+ENTRY art_quick_throw_stack_overflow_from_signal
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov x0, xSELF // pass Thread::Current
+ mov x1, sp // pass SP
+ mov sp, xIP0 // move SP down to below protected region.
+ b artThrowStackOverflowFromCode // artThrowStackOverflowFromCode(Thread*, SP)
+END art_quick_throw_stack_overflow_from_signal
+
/*
* All generated callsites for interface invokes and invocation slow paths will load arguments
* as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
@@ -478,7 +478,7 @@ ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFr
*
* Adapted from ARM32 code.
*
- * Clobbers x12.
+ * Clobbers xIP0.
*/
.macro INVOKE_TRAMPOLINE c_name, cxx_name
.extern \cxx_name
@@ -491,10 +491,10 @@ ENTRY \c_name
mov x3, xSELF // pass Thread::Current
mov x4, sp
bl \cxx_name // (method_idx, this, caller, Thread*, SP)
- mov x12, x1 // save Method*->code_
+ mov xIP0, x1 // save Method*->code_
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
cbz x0, 1f // did we find the target? if not go to exception delivery
- br x12 // tail call to target
+ br xIP0 // tail call to target
1:
DELIVER_PENDING_EXCEPTION
END \c_name
@@ -511,7 +511,7 @@ INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvo
.macro INVOKE_STUB_CREATE_FRAME
-SAVE_SIZE=6*8 // x4, x5, x19(wSUSPEND), SP, LR & FP saved.
+SAVE_SIZE=6*8 // x4, x5, xSUSPEND, SP, LR & FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
@@ -527,7 +527,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
.cfi_def_cfa_register x10 // before this.
.cfi_adjust_cfa_offset SAVE_SIZE
- stp x9, x19, [x10, #32] // Save old stack pointer and x19(wSUSPEND)
+ stp x9, xSUSPEND, [x10, #32] // Save old stack pointer and xSUSPEND
.cfi_rel_offset sp, 32
.cfi_rel_offset x19, 40
@@ -608,7 +608,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
str x0, [x4]
.Lexit_art_quick_invoke_stub\@:
- ldp x2, x19, [xFP, #32] // Restore stack pointer and x19.
+ ldp x2, xSUSPEND, [xFP, #32] // Restore stack pointer and xSUSPEND.
.cfi_restore x19
mov sp, x2
.cfi_restore sp
@@ -636,6 +636,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE
* | FP'' | <- SP'
* +----------------------+
* +----------------------+
+ * | x19 | <- Used as wSUSPEND, won't be restored by managed code.
* | SP' |
* | X5 |
* | X4 | Saved registers
@@ -1241,8 +1242,6 @@ END \name
.endm
// Macros taking opportunity of code similarities for downcalls with referrer.
-
-// TODO: xSELF -> x19. Temporarily rely on xSELF being saved in REF_ONLY
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
@@ -1256,7 +1255,6 @@ ENTRY \name
END \name
.endm
-// TODO: xSELF -> x19. Temporarily rely on xSELF being saved in REF_ONLY
.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
@@ -1270,7 +1268,6 @@ ENTRY \name
END \name
.endm
-// TODO: xSELF -> x19. Temporarily rely on xSELF being saved in REF_ONLY
.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
.extern \entrypoint
ENTRY \name
@@ -1351,6 +1348,14 @@ ENTRY art_quick_test_suspend
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
+ENTRY art_quick_implicit_suspend
+ mov x0, xSELF
+ SETUP_REF_ONLY_CALLEE_SAVE_FRAME // save callee saves for stack crawl
+ mov x1, sp
+ bl artTestSuspendFromCode // (Thread*, SP)
+ RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_implicit_suspend
+
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
* x0 holds the proxy method and x1 holds the receiver; The frame size of the invoked proxy
@@ -1363,8 +1368,8 @@ ENTRY art_quick_proxy_invoke_handler
mov x2, xSELF // pass Thread::Current
mov x3, sp // pass SP
bl artQuickProxyInvokeHandler // (Method* proxy method, receiver, Thread*, SP)
- ldr xSELF, [sp, #200] // Restore self pointer.
- ldr x2, [xSELF, THREAD_EXCEPTION_OFFSET]
+ // Use xETR as xSELF might be scratched by native function above.
+ ldr x2, [xETR, THREAD_EXCEPTION_OFFSET]
cbnz x2, .Lexception_in_proxy // success if no exception is pending
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame
fmov d0, x0 // Store result in d0 in case it was float or double
@@ -1375,14 +1380,14 @@ ENTRY art_quick_proxy_invoke_handler
END art_quick_proxy_invoke_handler
/*
- * Called to resolve an imt conflict. x12 is a hidden argument that holds the target method's
+ * Called to resolve an imt conflict. xIP1 is a hidden argument that holds the target method's
* dex method index.
*/
ENTRY art_quick_imt_conflict_trampoline
ldr w0, [sp, #0] // load caller Method*
ldr w0, [x0, #METHOD_DEX_CACHE_METHODS_OFFSET] // load dex_cache_resolved_methods
add x0, x0, #OBJECT_ARRAY_DATA_OFFSET // get starting address of data
- ldr w0, [x0, x12, lsl 2] // load the target method
+ ldr w0, [x0, xIP1, lsl 2] // load the target method
b art_quick_invoke_interface_trampoline
END art_quick_imt_conflict_trampoline
@@ -1392,10 +1397,10 @@ ENTRY art_quick_resolution_trampoline
mov x3, sp
bl artQuickResolutionTrampoline // (called, receiver, Thread*, SP)
cbz x0, 1f
- mov x9, x0 // Remember returned code pointer in x9.
+ mov xIP0, x0 // Remember returned code pointer in xIP0.
ldr w0, [sp, #0] // artQuickResolutionTrampoline puts called method in *SP.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
- br x9
+ br xIP0
1:
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
@@ -1419,7 +1424,6 @@ END art_quick_resolution_trampoline
* | X22 | callee save
* | X21 | callee save
* | X20 | callee save
- * | X19 | callee save
* | X7 | arg7
* | X6 | arg6
* | X5 | arg5
@@ -1427,14 +1431,6 @@ END art_quick_resolution_trampoline
* | X3 | arg3
* | X2 | arg2
* | X1 | arg1
- * | D15 | float arg 8
- * | D14 | float arg 8
- * | D13 | float arg 8
- * | D12 | callee save
- * | D11 | callee save
- * | D10 | callee save
- * | D9 | callee save
- * | D8 | callee save
* | D7 | float arg 8
* | D6 | float arg 7
* | D5 | float arg 6
@@ -1476,8 +1472,8 @@ ENTRY art_quick_generic_jni_trampoline
// of the frame when the handle scope is inserted.
mov xFP, sp
- mov x8, #5120
- sub sp, sp, x8
+ mov xIP0, #5120
+ sub sp, sp, xIP0
// prepare for artQuickGenericJniTrampoline call
// (Thread*, SP)
@@ -1517,17 +1513,14 @@ ENTRY art_quick_generic_jni_trampoline
add sp, sp, #128
- blr xIP0 // native call.
-
- // Restore self pointer.
- ldr xSELF, [x28, #200]
+ blr xIP0 // native call.
// result sign extension is handled in C code
// prepare for artQuickGenericJniEndTrampoline call
// (Thread*, result, result_f)
// x0 x1 x2 <= C calling convention
mov x1, x0 // Result (from saved)
- mov x0, xSELF // Thread register
+ mov x0, xETR // Thread register, original xSELF might be scratched by native code.
fmov x2, d0 // d0 will contain floating point result, but needs to go into x2
bl artQuickGenericJniEndTrampoline
@@ -1536,11 +1529,9 @@ ENTRY art_quick_generic_jni_trampoline
mov sp, x28
.cfi_def_cfa_register sp
- // Restore self pointer.
- ldr xSELF, [x28, #200]
-
// Pending exceptions possible.
- ldr x1, [xSELF, THREAD_EXCEPTION_OFFSET]
+ // Use xETR as xSELF might be scratched by native code
+ ldr x1, [xETR, THREAD_EXCEPTION_OFFSET]
cbnz x1, .Lexception_in_native
// Tear down the callee-save frame.
@@ -1553,7 +1544,6 @@ ENTRY art_quick_generic_jni_trampoline
.Lentry_error:
mov sp, x28
.cfi_def_cfa_register sp
- ldr xSELF, [x28, #200]
.Lexception_in_native:
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
DELIVER_PENDING_EXCEPTION
@@ -1592,19 +1582,19 @@ END art_quick_to_interpreter_bridge
ENTRY art_quick_instrumentation_entry
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- mov x19, x0 // Preserve method reference in a callee-save.
+ mov x20, x0 // Preserve method reference in a callee-save.
mov x2, xSELF
mov x3, sp
mov x4, xLR
bl artInstrumentationMethodEntryFromCode // (Method*, Object*, Thread*, SP, LR)
- mov x9, x0 // x0 = result of call.
- mov x0, x19 // Reload method reference.
+ mov xIP0, x0 // x0 = result of call.
+ mov x0, x20 // Reload method reference.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME // Note: will restore xSELF
adr xLR, art_quick_instrumentation_exit
- br x9 // Tail-call method with lr set to art_quick_instrumentation_exit.
+ br xIP0 // Tail-call method with lr set to art_quick_instrumentation_exit.
END art_quick_instrumentation_entry
.extern artInstrumentationMethodExitFromCode
@@ -1627,18 +1617,16 @@ ENTRY art_quick_instrumentation_exit
mov x0, xSELF // Pass Thread.
bl artInstrumentationMethodExitFromCode // (Thread*, SP, gpr_res, fpr_res)
- mov x9, x0 // Return address from instrumentation call.
+ mov xIP0, x0 // Return address from instrumentation call.
mov xLR, x1 // r1 is holding link register if we're to bounce to deoptimize
ldr d0, [sp, #8] // Restore floating-point result.
ldr x0, [sp], 16 // Restore integer result, and drop stack area.
.cfi_adjust_cfa_offset 16
- // Need to restore x18.
- ldr xSELF, [sp, #72]
POP_REF_ONLY_CALLEE_SAVE_FRAME
- br x9 // Tail-call out.
+ br xIP0 // Tail-call out.
END art_quick_instrumentation_exit
/*
@@ -1703,15 +1691,15 @@ ENTRY art_quick_indexof
.Lindexof_loop4:
ldrh w6, [x0, #2]!
ldrh w7, [x0, #2]!
- ldrh w8, [x0, #2]!
- ldrh w9, [x0, #2]!
+ ldrh wIP0, [x0, #2]!
+ ldrh wIP1, [x0, #2]!
cmp w6, w1
b.eq .Lmatch_0
cmp w7, w1
b.eq .Lmatch_1
- cmp w8, w1
+ cmp wIP0, w1
b.eq .Lmatch_2
- cmp w9, w1
+ cmp wIP1, w1
b.eq .Lmatch_3
subs w2, w2, #4
b.ge .Lindexof_loop4
@@ -1855,17 +1843,17 @@ ENTRY art_quick_string_compareto
ret
.Ldo_memcmp16:
- mov x14, x0 // Save x0 and LR. __memcmp16 does not use these temps.
- mov x15, xLR // TODO: Codify and check that?
+ mov xIP0, x0 // Save x0 and LR. __memcmp16 does not use these temps.
+ mov xIP1, xLR // TODO: Codify and check that?
mov x0, x2
uxtw x2, w3
bl __memcmp16
- mov xLR, x15 // Restore LR.
+ mov xLR, xIP1 // Restore LR.
cmp x0, #0 // Check the memcmp difference.
- csel x0, x0, x14, ne // x0 := x0 != 0 ? x14(prev x0=length diff) : x1.
+ csel x0, x0, xIP0, ne // x0 := x0 != 0 ? xIP0(prev x0=length diff) : x1.
ret
END art_quick_string_compareto
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index cb830acdea..15c6c07592 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -20,53 +20,53 @@
#include "quick/quick_method_frame_info.h"
#include "registers_arm64.h"
#include "runtime.h" // for Runtime::CalleeSaveType.
+#include "utils.h" // for POPCOUNT
namespace art {
namespace arm64 {
+// Registers that need to be restored but are not preserved by aapcs64.
+static constexpr uint32_t kArm64CalleeSaveAlwaysSpills =
+ // Note: ArtMethod::GetReturnPcOffsetInBytes() relies on the assumption that
+ // LR is always saved on the top of the frame for all targets.
+ // That is, lr = *(sp + framesize - pointsize).
+ (1 << art::arm64::LR);
// Callee saved registers
static constexpr uint32_t kArm64CalleeSaveRefSpills =
- (1 << art::arm64::X19) | (1 << art::arm64::X20) | (1 << art::arm64::X21) |
- (1 << art::arm64::X22) | (1 << art::arm64::X23) | (1 << art::arm64::X24) |
- (1 << art::arm64::X25) | (1 << art::arm64::X26) | (1 << art::arm64::X27) |
- (1 << art::arm64::X28);
+ (1 << art::arm64::X20) | (1 << art::arm64::X21) | (1 << art::arm64::X22) |
+ (1 << art::arm64::X23) | (1 << art::arm64::X24) | (1 << art::arm64::X25) |
+ (1 << art::arm64::X26) | (1 << art::arm64::X27) | (1 << art::arm64::X28) |
+ (1 << art::arm64::X29);
// X0 is the method pointer. Not saved.
static constexpr uint32_t kArm64CalleeSaveArgSpills =
(1 << art::arm64::X1) | (1 << art::arm64::X2) | (1 << art::arm64::X3) |
(1 << art::arm64::X4) | (1 << art::arm64::X5) | (1 << art::arm64::X6) |
(1 << art::arm64::X7);
-// TODO This is conservative. Only ALL should include the thread register.
-// The thread register is not preserved by the aapcs64.
-// LR is always saved.
-static constexpr uint32_t kArm64CalleeSaveAllSpills = 0; // (1 << art::arm64::LR);
+static constexpr uint32_t kArm64CalleeSaveAllSpills =
+ // Thread register.
+ (1 << art::arm64::X18) |
+ // Suspend register.
+ 1 << art::arm64::X19;
-// Save callee-saved floating point registers. Rest are scratch/parameters.
+static constexpr uint32_t kArm64CalleeSaveFpAlwaysSpills = 0;
+static constexpr uint32_t kArm64CalleeSaveFpRefSpills = 0;
static constexpr uint32_t kArm64CalleeSaveFpArgSpills =
(1 << art::arm64::D0) | (1 << art::arm64::D1) | (1 << art::arm64::D2) |
(1 << art::arm64::D3) | (1 << art::arm64::D4) | (1 << art::arm64::D5) |
(1 << art::arm64::D6) | (1 << art::arm64::D7);
-static constexpr uint32_t kArm64CalleeSaveFpRefSpills =
+static constexpr uint32_t kArm64FpAllSpills =
(1 << art::arm64::D8) | (1 << art::arm64::D9) | (1 << art::arm64::D10) |
(1 << art::arm64::D11) | (1 << art::arm64::D12) | (1 << art::arm64::D13) |
(1 << art::arm64::D14) | (1 << art::arm64::D15);
-static constexpr uint32_t kArm64FpAllSpills =
- kArm64CalleeSaveFpArgSpills |
- (1 << art::arm64::D16) | (1 << art::arm64::D17) | (1 << art::arm64::D18) |
- (1 << art::arm64::D19) | (1 << art::arm64::D20) | (1 << art::arm64::D21) |
- (1 << art::arm64::D22) | (1 << art::arm64::D23) | (1 << art::arm64::D24) |
- (1 << art::arm64::D25) | (1 << art::arm64::D26) | (1 << art::arm64::D27) |
- (1 << art::arm64::D28) | (1 << art::arm64::D29) | (1 << art::arm64::D30) |
- (1 << art::arm64::D31);
constexpr uint32_t Arm64CalleeSaveCoreSpills(Runtime::CalleeSaveType type) {
- return kArm64CalleeSaveRefSpills |
+ return kArm64CalleeSaveAlwaysSpills | kArm64CalleeSaveRefSpills |
(type == Runtime::kRefsAndArgs ? kArm64CalleeSaveArgSpills : 0) |
- (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0) | (1 << art::arm64::FP) |
- (1 << art::arm64::X18) | (1 << art::arm64::LR);
+ (type == Runtime::kSaveAll ? kArm64CalleeSaveAllSpills : 0);
}
constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
- return kArm64CalleeSaveFpRefSpills |
+ return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
(type == Runtime::kRefsAndArgs ? kArm64CalleeSaveFpArgSpills: 0) |
(type == Runtime::kSaveAll ? kArm64FpAllSpills : 0);
}
@@ -83,6 +83,22 @@ constexpr QuickMethodFrameInfo Arm64CalleeSaveMethodFrameInfo(Runtime::CalleeSav
Arm64CalleeSaveFpSpills(type));
}
+constexpr size_t Arm64CalleeSaveFpr1Offset(Runtime::CalleeSaveType type) {
+ return Arm64CalleeSaveFrameSize(type) -
+ (POPCOUNT(Arm64CalleeSaveCoreSpills(type)) +
+ POPCOUNT(Arm64CalleeSaveFpSpills(type))) * kArm64PointerSize;
+}
+
+constexpr size_t Arm64CalleeSaveGpr1Offset(Runtime::CalleeSaveType type) {
+ return Arm64CalleeSaveFrameSize(type) -
+ POPCOUNT(Arm64CalleeSaveCoreSpills(type)) * kArm64PointerSize;
+}
+
+constexpr size_t Arm64CalleeSaveLrOffset(Runtime::CalleeSaveType type) {
+ return Arm64CalleeSaveFrameSize(type) -
+ POPCOUNT(Arm64CalleeSaveCoreSpills(type) & (-(1 << LR))) * kArm64PointerSize;
+}
+
} // namespace arm64
} // namespace art
diff --git a/runtime/arch/arm64/registers_arm64.h b/runtime/arch/arm64/registers_arm64.h
index ea346e0ffa..9ccab70bb9 100644
--- a/runtime/arch/arm64/registers_arm64.h
+++ b/runtime/arch/arm64/registers_arm64.h
@@ -57,7 +57,7 @@ enum Register {
X30 = 30,
X31 = 31,
TR = 18, // ART Thread Register - Managed Runtime (Caller Saved Reg)
- ETR = 19, // ART Thread Register - External Calls (Callee Saved Reg)
+ ETR = 21, // ART Thread Register - External Calls (Callee Saved Reg)
IP0 = 16, // Used as scratch by VIXL.
IP1 = 17, // Used as scratch by ART JNI Assembler.
FP = 29,
diff --git a/runtime/arch/memcmp16.h b/runtime/arch/memcmp16.h
index 1144c8c89d..14dc1e3880 100644
--- a/runtime/arch/memcmp16.h
+++ b/runtime/arch/memcmp16.h
@@ -30,7 +30,7 @@
//
// In both cases, MemCmp16 is declared.
-#if defined(__aarch64__) || defined(__arm__) || defined(__mips)
+#if defined(__aarch64__) || defined(__arm__) || defined(__mips) || defined(__i386__) || defined(__x86_64__)
extern "C" uint32_t __memcmp16(const uint16_t* s0, const uint16_t* s1, size_t count);
#define MemCmp16 __memcmp16
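
The x86 and x86-64 assembly versions added by this change have to honour the same contract as the other MemCmp16 implementations: compare count 16-bit units and return the (unsigned-widened) difference of the first pair that differs, or zero when every pair matches. A plain C++ statement of that contract, intended as a mental model or test oracle rather than ART's actual generic fallback, looks like:

    // Sketch of the MemCmp16 contract (not ART's generic implementation).
    #include <cstddef>
    #include <cstdint>

    uint32_t MemCmp16Reference(const uint16_t* s0, const uint16_t* s1, size_t count) {
      for (size_t i = 0; i < count; ++i) {
        if (s0[i] != s1[i]) {
          // Difference of the first mismatching 16-bit values, widened to 32 bits.
          return static_cast<uint32_t>(s0[i]) - static_cast<uint32_t>(s1[i]);
        }
      }
      return 0;  // All count units compare equal.
    }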
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 7a2e961f81..d3e7d5e904 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -21,6 +21,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/math_entrypoints.h"
+#include "atomic.h"
namespace art {
@@ -196,11 +197,11 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pCmplDouble = CmplDouble;
qpoints->pCmplFloat = CmplFloat;
qpoints->pFmod = fmod;
- qpoints->pL2d = __floatdidf;
+ qpoints->pL2d = art_l2d;
qpoints->pFmodf = fmodf;
- qpoints->pL2f = __floatdisf;
- qpoints->pD2iz = __fixdfsi;
- qpoints->pF2iz = __fixsfsi;
+ qpoints->pL2f = art_l2f;
+ qpoints->pD2iz = art_d2i;
+ qpoints->pF2iz = art_f2i;
qpoints->pIdivmod = NULL;
qpoints->pD2l = art_d2l;
qpoints->pF2l = art_f2l;
@@ -236,6 +237,10 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
+
+ // Atomic 64-bit load/store
+ qpoints->pA64Load = QuasiAtomic::Read64;
+ qpoints->pA64Store = QuasiAtomic::Write64;
};
} // namespace art
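
Routing pA64Load and pA64Store through QuasiAtomic::Read64 and QuasiAtomic::Write64 gives managed code atomic 64-bit accesses even on 32-bit MIPS cores that cannot perform them in a single instruction. As a rough illustration of the idea only (ART's QuasiAtomic uses its own locking scheme, not the single global mutex assumed here):

    // Illustrative sketch of emulating atomic 64-bit loads/stores on a 32-bit CPU.
    // This is NOT ART's QuasiAtomic; the single global mutex is an assumption made
    // purely to keep the example short.
    #include <cstdint>
    #include <mutex>

    namespace sketch {

    std::mutex g_swap_mutex;

    int64_t Read64(volatile const int64_t* addr) {
      std::lock_guard<std::mutex> guard(g_swap_mutex);
      return *addr;  // Halves cannot be observed separately if all accessors take the lock.
    }

    void Write64(volatile int64_t* addr, int64_t value) {
      std::lock_guard<std::mutex> guard(g_swap_mutex);
      *addr = value;
    }

    }  // namespace sketch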
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 1ecd7d964b..0e76aabbc7 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -29,7 +29,8 @@
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
}
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index ada1523ba3..8786222250 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1076,14 +1076,15 @@ art_quick_instrumentation_exit:
.cfi_startproc
addiu $t9, $ra, 4 # put current address into $t9 to rebuild $gp
GENERATE_GLOBAL_POINTER
- move $t0, $sp # remember bottom of caller's frame
+ move $ra, $zero # link register points here, so clobber it with 0 for later checks
SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+ move $t0, $sp # remember bottom of caller's frame
addiu $sp, $sp, -48 # save return values and set up args
.cfi_adjust_cfa_offset 48
sw $v0, 32($sp)
- .cfi_rel_offset 2, 0
+ .cfi_rel_offset 2, 32
sw $v1, 36($sp)
- .cfi_rel_offset 3, 4
+ .cfi_rel_offset 3, 36
s.s $f0, 40($sp)
s.s $f1, 44($sp)
s.s $f0, 16($sp) # pass fpr result
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index ae39be13d8..e468c2a8d7 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -81,6 +81,8 @@
#define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
#define CFI_RESTORE(reg) .cfi_restore reg
#define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
+ #define CFI_RESTORE_STATE .cfi_restore_state
+ #define CFI_REMEMBER_STATE .cfi_remember_state
#else
// Mac OS' doesn't like cfi_* directives.
#define CFI_STARTPROC
@@ -90,6 +92,8 @@
#define CFI_DEF_CFA_REGISTER(reg)
#define CFI_RESTORE(reg)
#define CFI_REL_OFFSET(reg,size)
+ #define CFI_RESTORE_STATE
+ #define CFI_REMEMBER_STATE
#endif
// Symbols.
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 7c1980e57b..8b6c9b1ae3 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -21,27 +21,370 @@
#include "globals.h"
#include "base/logging.h"
#include "base/hex_dump.h"
+#include "mirror/art_method.h"
+#include "mirror/art_method-inl.h"
+#include "thread.h"
+#include "thread-inl.h"
+#if defined(__APPLE__)
+#define ucontext __darwin_ucontext
+#define CTX_ESP uc_mcontext->__ss.__esp
+#define CTX_EIP uc_mcontext->__ss.__eip
+#define CTX_EAX uc_mcontext->__ss.__eax
+#define CTX_METHOD uc_mcontext->__ss.__eax
+#elif defined(__x86_64__)
+#define CTX_ESP uc_mcontext.gregs[REG_RSP]
+#define CTX_EIP uc_mcontext.gregs[REG_RIP]
+#define CTX_EAX uc_mcontext.gregs[REG_RAX]
+#define CTX_METHOD uc_mcontext.gregs[REG_RDI]
+#else
+#define CTX_ESP uc_mcontext.gregs[REG_ESP]
+#define CTX_EIP uc_mcontext.gregs[REG_EIP]
+#define CTX_EAX uc_mcontext.gregs[REG_EAX]
+#define CTX_METHOD uc_mcontext.gregs[REG_EAX]
+#endif
//
-// X86 specific fault handler functions.
+// X86 (and X86_64) specific fault handler functions.
//
namespace art {
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow_from_signal();
+extern "C" void art_quick_test_suspend();
+
+// Get the size of an instruction in bytes.
+// Return 0 if the instruction is not handled.
+static uint32_t GetInstructionSize(const uint8_t* pc) {
+#if defined(__x86_64)
+ const bool x86_64 = true;
+#else
+ const bool x86_64 = false;
+#endif
+
+ const uint8_t* startpc = pc;
+
+ uint8_t opcode = *pc++;
+ uint8_t modrm;
+ bool has_modrm = false;
+ bool two_byte = false;
+ uint32_t displacement_size = 0;
+ uint32_t immediate_size = 0;
+
+ // Prefixes.
+ while (true) {
+ bool prefix_present = false;
+ switch (opcode) {
+ // Group 1
+ case 0xf0:
+ case 0xf2:
+ case 0xf3:
+
+ // Group 2
+ case 0x2e:
+ case 0x36:
+ case 0x3e:
+ case 0x26:
+ case 0x64:
+ case 0x65:
+
+ // Group 3
+ case 0x66:
+
+ // Group 4
+ case 0x67:
+ opcode = *pc++;
+ prefix_present = true;
+ break;
+ }
+ if (!prefix_present) {
+ break;
+ }
+ }
+
+ if (x86_64 && opcode >= 0x40 && opcode <= 0x4f) {
+ opcode = *pc++;
+ }
+
+ if (opcode == 0x0f) {
+ // Two byte opcode
+ two_byte = true;
+ opcode = *pc++;
+ }
+
+ bool unhandled_instruction = false;
+
+ if (two_byte) {
+ switch (opcode) {
+ case 0x10: // vmovsd/ss
+ case 0x11: // vmovsd/ss
+ case 0xb6: // movzx
+ case 0xb7:
+ case 0xbe: // movsx
+ case 0xbf:
+ modrm = *pc++;
+ has_modrm = true;
+ break;
+ default:
+ unhandled_instruction = true;
+ break;
+ }
+ } else {
+ switch (opcode) {
+ case 0x89: // mov
+ case 0x8b:
+ case 0x38: // cmp with memory.
+ case 0x39:
+ case 0x3a:
+ case 0x3b:
+ case 0x3c:
+ case 0x3d:
+ case 0x85: // test.
+ modrm = *pc++;
+ has_modrm = true;
+ break;
+
+ case 0x80: // group 1, byte immediate.
+ case 0x83:
+ modrm = *pc++;
+ has_modrm = true;
+ immediate_size = 1;
+ break;
+
+ case 0x81: // group 1, word immediate.
+ modrm = *pc++;
+ has_modrm = true;
+ immediate_size = 4;
+ break;
+
+ default:
+ unhandled_instruction = true;
+ break;
+ }
+ }
+
+ if (unhandled_instruction) {
+ VLOG(signals) << "Unhandled x86 instruction with opcode " << static_cast<int>(opcode);
+ return 0;
+ }
+
+ if (has_modrm) {
+ uint8_t mod = (modrm >> 6) & 0b11;
+
+ // Check for SIB.
+ if (mod != 0b11 && (modrm & 0b111) == 4) {
+ ++pc; // SIB
+ }
+
+ switch (mod) {
+ case 0b00: break;
+ case 0b01: displacement_size = 1; break;
+ case 0b10: displacement_size = 4; break;
+ case 0b11:
+ break;
+ }
+ }
+
+ // Skip displacement and immediate.
+ pc += displacement_size + immediate_size;
+
+ VLOG(signals) << "x86 instruction length calculated as " << (pc - startpc);
+ return pc - startpc;
+}
+
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
+ mirror::ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
+ struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
+ *out_sp = static_cast<uintptr_t>(uc->CTX_ESP);
+ VLOG(signals) << "sp: " << std::hex << *out_sp;
+ if (*out_sp == 0) {
+ return;
+ }
+
+ // In the case of a stack overflow, the stack is not valid and we can't
+ // get the method from the top of the stack. However it's in EAX(x86)/RDI(x86_64).
+ uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr);
+ uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
+#if defined(__x86_64__)
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86_64));
+#else
+ reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(kX86));
+#endif
+ if (overflow_addr == fault_addr) {
+ *out_method = reinterpret_cast<mirror::ArtMethod*>(uc->CTX_METHOD);
+ } else {
+ // The method is at the top of the stack.
+ *out_method = (reinterpret_cast<StackReference<mirror::ArtMethod>* >(*out_sp)[0]).AsMirrorPtr();
+ }
+
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
+ VLOG(signals) << HexDump(pc, 32, true, "PC ");
+
+ uint32_t instr_size = GetInstructionSize(pc);
+ if (instr_size == 0) {
+ // Unknown instruction, tell caller it's not ours.
+ *out_method = nullptr;
+ return;
+ }
+ *out_return_pc = reinterpret_cast<uintptr_t>(pc + instr_size);
}
bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
+ uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
+
+ uint32_t instr_size = GetInstructionSize(pc);
+ if (instr_size == 0) {
+ // Unknown instruction, can't really happen.
+ return false;
+ }
+
+ // We need to arrange for the signal handler to return to the null pointer
+ // exception generator. The return address must be the address of the
+ // next instruction (this instruction + instruction size). The return address
+ // is on the stack at the top address of the current frame.
+
+ // Push the return address onto the stack.
+ uintptr_t retaddr = reinterpret_cast<uintptr_t>(pc + instr_size);
+ uintptr_t* next_sp = reinterpret_cast<uintptr_t*>(sp - sizeof(uintptr_t));
+ *next_sp = retaddr;
+ uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
+
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception);
+ VLOG(signals) << "Generating null pointer exception";
+ return true;
}
+// A suspend check is done using the following instruction sequence:
+// (x86)
+// 0xf720f1df: 648B058C000000 mov eax, fs:[0x8c] ; suspend_trigger
+// .. some intervening instructions.
+// 0xf720f1e6: 8500 test eax, [eax]
+// (x86_64)
+// 0x7f579de45d9e: 65488B0425A8000000 movq rax, gs:[0xa8] ; suspend_trigger
+// .. some intervening instructions.
+// 0x7f579de45da7: 8500 test eax, [eax]
+
+// The offset from fs (gs on x86_64) is Thread::ThreadSuspendTriggerOffset().
+// To check for a suspend check, we examine the instructions that caused
+// the fault.
bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+ // These are the instructions to check for. The first one is the mov eax, fs:[xxx]
+ // where xxx is the offset of the suspend trigger.
+#if defined(__x86_64__)
+ uint32_t trigger = Thread::ThreadSuspendTriggerOffset<8>().Int32Value();
+#else
+ uint32_t trigger = Thread::ThreadSuspendTriggerOffset<4>().Int32Value();
+#endif
+
+ VLOG(signals) << "Checking for suspension point";
+#if defined(__x86_64__)
+ uint8_t checkinst1[] = {0x65, 0x48, 0x8b, 0x04, 0x25, static_cast<uint8_t>(trigger & 0xff),
+ static_cast<uint8_t>((trigger >> 8) & 0xff), 0, 0};
+#else
+ uint8_t checkinst1[] = {0x64, 0x8b, 0x05, static_cast<uint8_t>(trigger & 0xff),
+ static_cast<uint8_t>((trigger >> 8) & 0xff), 0, 0};
+#endif
+ uint8_t checkinst2[] = {0x85, 0x00};
+
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
+ uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
+
+ if (pc[0] != checkinst2[0] || pc[1] != checkinst2[1]) {
+ // Second instruction is not correct (test eax,[eax]).
+ VLOG(signals) << "Not a suspension point";
+ return false;
+ }
+
+ // The first instruction can be a little bit up the stream due to load hoisting
+ // in the compiler.
+ uint8_t* limit = pc - 100; // Compiler will hoist to a max of 20 instructions.
+ uint8_t* ptr = pc - sizeof(checkinst1);
+ bool found = false;
+ while (ptr > limit) {
+ if (memcmp(ptr, checkinst1, sizeof(checkinst1)) == 0) {
+ found = true;
+ break;
+ }
+ ptr -= 1;
+ }
+
+ if (found) {
+ VLOG(signals) << "suspend check match";
+
+ // We need to arrange for the signal handler to return to the test suspend
+ // entry point. The return address must be the address of the
+ // next instruction (this instruction + 2). The return address
+ // is on the stack at the top address of the current frame.
+
+ // Push the return address onto the stack.
+ uintptr_t retaddr = reinterpret_cast<uintptr_t>(pc + 2);
+ uintptr_t* next_sp = reinterpret_cast<uintptr_t*>(sp - sizeof(uintptr_t));
+ *next_sp = retaddr;
+ uc->CTX_ESP = reinterpret_cast<uintptr_t>(next_sp);
+
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_test_suspend);
+
+ // Now remove the suspend trigger that caused this fault.
+ Thread::Current()->RemoveSuspendTrigger();
+ VLOG(signals) << "removed suspend trigger invoking test suspend";
+ return true;
+ }
+ VLOG(signals) << "Not a suspend check match, first instruction mismatch";
return false;
}
+// The stack overflow check is done using the following instruction:
+// test eax, [esp+ -xxx]
+// where 'xxx' is the size of the overflow area.
+//
+// This is done before any frame is established in the method. The return
+// address for the previous method is on the stack at ESP.
+
bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
+ struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
+ uintptr_t sp = static_cast<uintptr_t>(uc->CTX_ESP);
+
+ uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr);
+ VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
+ VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
+ ", fault_addr: " << fault_addr;
+
+#if defined(__x86_64__)
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86_64);
+#else
+ uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(kX86);
+#endif
+
+ Thread* self = Thread::Current();
+ uintptr_t pregion = reinterpret_cast<uintptr_t>(self->GetStackEnd()) -
+ Thread::kStackOverflowProtectedSize;
+
+ // Check that the fault address is the value expected for a stack overflow.
+ if (fault_addr != overflow_addr) {
+ VLOG(signals) << "Not a stack overflow";
+ return false;
+ }
+
+ // We know this is a stack overflow. We need to move the sp to the overflow region
+ // that exists below the protected region. Determine the next available
+ // valid address below the protected region.
+ VLOG(signals) << "setting sp to overflow region at " << std::hex << pregion;
+
+ // Since the compiler puts the implicit overflow
+ // check before the callee save instructions, the SP is already pointing to
+ // the previous frame.
+
+ // Tell the stack overflow code where the new stack pointer should be.
+ uc->CTX_EAX = pregion;
+
+ // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from_signal.
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow_from_signal);
+
+ return true;
}
} // namespace art
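
As a concrete check of GetInstructionSize above: "test eax, [eax]" is opcode 0x85 plus a ModRM byte of 0x00 (mod=00, rm=000, so no SIB, displacement, or immediate), giving a length of 2; the 32-bit "mov eax, fs:[disp32]" form is a 0x64 segment prefix, opcode 0x8B, ModRM 0x05 (mod=00, rm=101, which selects a 4-byte displacement), giving 1 + 1 + 1 + 4 = 7. The table below only restates those expectations as an illustration; the byte values are copied from the checkinst arrays in the suspend handler and it is not part of the runtime:

    // Expected decoder results for the patterns the suspend check looks for
    // (illustration only; byte values taken from the handler above).
    #include <cstddef>
    #include <cstdint>

    struct LengthCase {
      const char* disasm;
      uint8_t bytes[9];
      size_t expected_length;
    };

    static const LengthCase kSuspendCheckCases[] = {
      // opcode 0x85 + ModRM 0x00: no SIB, displacement, or immediate.
      { "test eax, [eax]",    {0x85, 0x00},                               2 },
      // prefix 0x64 + opcode 0x8B + ModRM 0x05 + 4-byte displacement.
      { "mov eax, fs:[0x8c]", {0x64, 0x8B, 0x05, 0x8C, 0x00, 0x00, 0x00}, 7 },
    };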
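
All three handlers share one mechanism for leaving the signal handler: they push the address at which managed code should logically resume onto the managed stack as a fake return address, then point the saved EIP/RIP at a runtime entry point, so that returning from the signal behaves exactly as if the faulting code had CALLed that entry point. A compressed sketch of the pattern, assuming a Linux/bionic ucontext layout and not the literal ART code, is:

    // Sketch: redirect a fault to a runtime entry point as if it had been CALLed.
    // May require _GNU_SOURCE on glibc for the REG_* constants.
    #include <cstdint>
    #include <ucontext.h>

    static void RedirectToEntryPoint(ucontext_t* uc, uintptr_t resume_pc, void (*entry)()) {
    #if defined(__x86_64__)
      uintptr_t sp = uc->uc_mcontext.gregs[REG_RSP];
      sp -= sizeof(uintptr_t);
      *reinterpret_cast<uintptr_t*>(sp) = resume_pc;   // fake return address
      uc->uc_mcontext.gregs[REG_RSP] = sp;
      uc->uc_mcontext.gregs[REG_RIP] = reinterpret_cast<uintptr_t>(entry);
    #else
      uintptr_t sp = uc->uc_mcontext.gregs[REG_ESP];
      sp -= sizeof(uintptr_t);
      *reinterpret_cast<uintptr_t*>(sp) = resume_pc;   // fake return address
      uc->uc_mcontext.gregs[REG_ESP] = sp;
      uc->uc_mcontext.gregs[REG_EIP] = reinterpret_cast<uintptr_t>(entry);
    #endif
    }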
diff --git a/runtime/arch/x86/memcmp16_x86.S b/runtime/arch/x86/memcmp16_x86.S
new file mode 100644
index 0000000000..a315a378ea
--- /dev/null
+++ b/runtime/arch/x86/memcmp16_x86.S
@@ -0,0 +1,1038 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86.S"
+
+#define MEMCMP __memcmp16
+
+/* uint32_t __memcmp16(const uint16_t* s0, const uint16_t* s1, size_t count); */
+
+#ifndef L
+# define L(label) .L##label
+#endif
+
+#define CFI_PUSH(REG) \
+ CFI_ADJUST_CFA_OFFSET(4); \
+ CFI_REL_OFFSET(REG, 0)
+
+#define CFI_POP(REG) \
+ CFI_ADJUST_CFA_OFFSET(-4); \
+ CFI_RESTORE(REG)
+
+#define PUSH(REG) pushl REG; CFI_PUSH (REG)
+#define POP(REG) popl REG; CFI_POP (REG)
+
+#define PARMS 4
+#define BLK1 PARMS
+#define BLK2 BLK1+4
+#define LEN BLK2+4
+#define RETURN_END POP (%edi); POP (%esi); POP (%ebx); ret
+#define RETURN RETURN_END; CFI_RESTORE_STATE; CFI_REMEMBER_STATE
+
+DEFINE_FUNCTION MEMCMP
+ movl LEN(%esp), %ecx
+
+ shl $1, %ecx
+ jz L(zero)
+
+ movl BLK1(%esp), %eax
+ cmp $48, %ecx
+ movl BLK2(%esp), %edx
+ jae L(48bytesormore)
+
+ PUSH (%ebx)
+ add %ecx, %edx
+ add %ecx, %eax
+ jmp L(less48bytes)
+
+ CFI_POP (%ebx)
+
+ .p2align 4
+L(zero):
+ xor %eax, %eax
+ ret
+
+ .p2align 4
+L(48bytesormore):
+ PUSH (%ebx)
+ PUSH (%esi)
+ PUSH (%edi)
+ CFI_REMEMBER_STATE
+ movdqu (%eax), %xmm3
+ movdqu (%edx), %xmm0
+ movl %eax, %edi
+ movl %edx, %esi
+ pcmpeqb %xmm0, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 16(%edi), %edi
+
+ sub $0xffff, %edx
+ lea 16(%esi), %esi
+ jnz L(less16bytes)
+ mov %edi, %edx
+ and $0xf, %edx
+ xor %edx, %edi
+ sub %edx, %esi
+ add %edx, %ecx
+ mov %esi, %edx
+ and $0xf, %edx
+ jz L(shr_0)
+ xor %edx, %esi
+
+ cmp $0, %edx
+ je L(shr_0)
+ cmp $2, %edx
+ je L(shr_2)
+ cmp $4, %edx
+ je L(shr_4)
+ cmp $6, %edx
+ je L(shr_6)
+ cmp $8, %edx
+ je L(shr_8)
+ cmp $10, %edx
+ je L(shr_10)
+ cmp $12, %edx
+ je L(shr_12)
+ jmp L(shr_14)
+
+ .p2align 4
+L(shr_0):
+ cmp $80, %ecx
+ jae L(shr_0_gobble)
+ lea -48(%ecx), %ecx
+ xor %eax, %eax
+ movaps (%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+ movaps 16(%esi), %xmm2
+ pcmpeqb 16(%edi), %xmm2
+ pand %xmm1, %xmm2
+ pmovmskb %xmm2, %edx
+ add $32, %edi
+ add $32, %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea (%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_0_gobble):
+ lea -48(%ecx), %ecx
+ movdqa (%esi), %xmm0
+ xor %eax, %eax
+ pcmpeqb (%edi), %xmm0
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm2
+ pcmpeqb 16(%edi), %xmm2
+L(shr_0_gobble_loop):
+ pand %xmm0, %xmm2
+ sub $32, %ecx
+ pmovmskb %xmm2, %edx
+ movdqa %xmm0, %xmm1
+ movdqa 32(%esi), %xmm0
+ movdqa 48(%esi), %xmm2
+ sbb $0xffff, %edx
+ pcmpeqb 32(%edi), %xmm0
+ pcmpeqb 48(%edi), %xmm2
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ jz L(shr_0_gobble_loop)
+
+ pand %xmm0, %xmm2
+ cmp $0, %ecx
+ jge L(shr_0_gobble_loop_next)
+ inc %edx
+ add $32, %ecx
+L(shr_0_gobble_loop_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm2, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea (%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_2):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_2_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $2,(%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $2,%xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 2(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_2_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $2,(%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $2,16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_2_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $2,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $2,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_2_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_2_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_2_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 2(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_4):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_4_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $4,(%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $4,%xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 4(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_4_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $4,(%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $4,16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_4_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $4,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $4,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_4_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_4_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_4_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 4(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_6):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_6_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $6,(%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $6,%xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 6(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_6_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $6,(%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $6,16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_6_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $6,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $6,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_6_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_6_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_6_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 6(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_8):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_8_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $8,(%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $8,%xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 8(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_8_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $8,(%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $8,16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_8_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $8,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $8,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_8_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_8_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_8_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 8(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_10):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_10_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $10, (%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $10,%xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 10(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_10_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $10, (%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $10, 16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_10_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $10,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $10,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_10_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_10_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_10_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 10(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_12):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_12_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $12, (%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $12, %xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 12(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_12_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $12, (%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $12, 16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_12_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $12,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $12,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_12_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_12_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_12_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 12(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_14):
+ cmp $80, %ecx
+ lea -48(%ecx), %ecx
+ mov %edx, %eax
+ jae L(shr_14_gobble)
+
+ movdqa 16(%esi), %xmm1
+ movdqa %xmm1, %xmm2
+ palignr $14, (%esi), %xmm1
+ pcmpeqb (%edi), %xmm1
+
+ movdqa 32(%esi), %xmm3
+ palignr $14, %xmm2, %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+ pand %xmm1, %xmm3
+ pmovmskb %xmm3, %edx
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+ lea (%ecx, %edi,1), %eax
+ lea 14(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(shr_14_gobble):
+ sub $32, %ecx
+ movdqa 16(%esi), %xmm0
+ palignr $14, (%esi), %xmm0
+ pcmpeqb (%edi), %xmm0
+
+ movdqa 32(%esi), %xmm3
+ palignr $14, 16(%esi), %xmm3
+ pcmpeqb 16(%edi), %xmm3
+
+L(shr_14_gobble_loop):
+ pand %xmm0, %xmm3
+ sub $32, %ecx
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+
+ movdqa 64(%esi), %xmm3
+ palignr $14,48(%esi), %xmm3
+ sbb $0xffff, %edx
+ movdqa 48(%esi), %xmm0
+ palignr $14,32(%esi), %xmm0
+ pcmpeqb 32(%edi), %xmm0
+ lea 32(%esi), %esi
+ pcmpeqb 48(%edi), %xmm3
+
+ lea 32(%edi), %edi
+ jz L(shr_14_gobble_loop)
+ pand %xmm0, %xmm3
+
+ cmp $0, %ecx
+ jge L(shr_14_gobble_next)
+ inc %edx
+ add $32, %ecx
+L(shr_14_gobble_next):
+ test %edx, %edx
+ jnz L(exit)
+
+ pmovmskb %xmm3, %edx
+ movdqa %xmm0, %xmm1
+ lea 32(%edi), %edi
+ lea 32(%esi), %esi
+ sub $0xffff, %edx
+ jnz L(exit)
+
+ lea (%ecx, %edi,1), %eax
+ lea 14(%ecx, %esi,1), %edx
+ POP (%edi)
+ POP (%esi)
+ jmp L(less48bytes)
+
+ CFI_RESTORE_STATE
+ CFI_REMEMBER_STATE
+ .p2align 4
+L(exit):
+ pmovmskb %xmm1, %ebx
+ sub $0xffff, %ebx
+ jz L(first16bytes)
+ lea -16(%esi), %esi
+ lea -16(%edi), %edi
+ mov %ebx, %edx
+
+L(first16bytes):
+ add %eax, %esi
+L(less16bytes):
+ test %dl, %dl
+ jz L(next_four_words)
+ test $15, %dl
+ jz L(second_two_words)
+ test $3, %dl
+ jz L(second_word)
+ movzwl -16(%edi), %eax
+ movzwl -16(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(second_word):
+ movzwl -14(%edi), %eax
+ movzwl -14(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(second_two_words):
+ test $63, %dl
+ jz L(fourth_word)
+ movzwl -12(%edi), %eax
+ movzwl -12(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(fourth_word):
+ movzwl -10(%edi), %eax
+ movzwl -10(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(next_four_words):
+ test $15, %dh
+ jz L(fourth_two_words)
+ test $3, %dh
+ jz L(sixth_word)
+ movzwl -8(%edi), %eax
+ movzwl -8(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(sixth_word):
+ movzwl -6(%edi), %eax
+ movzwl -6(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(fourth_two_words):
+ test $63, %dh
+ jz L(eighth_word)
+ movzwl -4(%edi), %eax
+ movzwl -4(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+ .p2align 4
+L(eighth_word):
+ movzwl -2(%edi), %eax
+ movzwl -2(%esi), %ebx
+ subl %ebx, %eax
+ RETURN
+
+
+ CFI_PUSH (%ebx)
+
+ .p2align 4
+L(more8bytes):
+ cmp $16, %ecx
+ jae L(more16bytes)
+ cmp $8, %ecx
+ je L(8bytes)
+ cmp $10, %ecx
+ je L(10bytes)
+ cmp $12, %ecx
+ je L(12bytes)
+ jmp L(14bytes)
+
+ .p2align 4
+L(more16bytes):
+ cmp $24, %ecx
+ jae L(more24bytes)
+ cmp $16, %ecx
+ je L(16bytes)
+ cmp $18, %ecx
+ je L(18bytes)
+ cmp $20, %ecx
+ je L(20bytes)
+ jmp L(22bytes)
+
+ .p2align 4
+L(more24bytes):
+ cmp $32, %ecx
+ jae L(more32bytes)
+ cmp $24, %ecx
+ je L(24bytes)
+ cmp $26, %ecx
+ je L(26bytes)
+ cmp $28, %ecx
+ je L(28bytes)
+ jmp L(30bytes)
+
+ .p2align 4
+L(more32bytes):
+ cmp $40, %ecx
+ jae L(more40bytes)
+ cmp $32, %ecx
+ je L(32bytes)
+ cmp $34, %ecx
+ je L(34bytes)
+ cmp $36, %ecx
+ je L(36bytes)
+ jmp L(38bytes)
+
+ .p2align 4
+L(less48bytes):
+ cmp $8, %ecx
+ jae L(more8bytes)
+ cmp $2, %ecx
+ je L(2bytes)
+ cmp $4, %ecx
+ je L(4bytes)
+ jmp L(6bytes)
+
+ .p2align 4
+L(more40bytes):
+ cmp $40, %ecx
+ je L(40bytes)
+ cmp $42, %ecx
+ je L(42bytes)
+ cmp $44, %ecx
+ je L(44bytes)
+ jmp L(46bytes)
+
+ .p2align 4
+L(46bytes):
+ movzwl -46(%eax), %ecx
+ movzwl -46(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(44bytes):
+ movzwl -44(%eax), %ecx
+ movzwl -44(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(42bytes):
+ movzwl -42(%eax), %ecx
+ movzwl -42(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(40bytes):
+ movzwl -40(%eax), %ecx
+ movzwl -40(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(38bytes):
+ movzwl -38(%eax), %ecx
+ movzwl -38(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(36bytes):
+ movzwl -36(%eax), %ecx
+ movzwl -36(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(34bytes):
+ movzwl -34(%eax), %ecx
+ movzwl -34(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(32bytes):
+ movzwl -32(%eax), %ecx
+ movzwl -32(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(30bytes):
+ movzwl -30(%eax), %ecx
+ movzwl -30(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(28bytes):
+ movzwl -28(%eax), %ecx
+ movzwl -28(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(26bytes):
+ movzwl -26(%eax), %ecx
+ movzwl -26(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(24bytes):
+ movzwl -24(%eax), %ecx
+ movzwl -24(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(22bytes):
+ movzwl -22(%eax), %ecx
+ movzwl -22(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(20bytes):
+ movzwl -20(%eax), %ecx
+ movzwl -20(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(18bytes):
+ movzwl -18(%eax), %ecx
+ movzwl -18(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(16bytes):
+ movzwl -16(%eax), %ecx
+ movzwl -16(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(14bytes):
+ movzwl -14(%eax), %ecx
+ movzwl -14(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(12bytes):
+ movzwl -12(%eax), %ecx
+ movzwl -12(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(10bytes):
+ movzwl -10(%eax), %ecx
+ movzwl -10(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(8bytes):
+ movzwl -8(%eax), %ecx
+ movzwl -8(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(6bytes):
+ movzwl -6(%eax), %ecx
+ movzwl -6(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(4bytes):
+ movzwl -4(%eax), %ecx
+ movzwl -4(%edx), %ebx
+ subl %ebx, %ecx
+ jne L(memcmp16_exit)
+L(2bytes):
+ movzwl -2(%eax), %eax
+ movzwl -2(%edx), %ebx
+ subl %ebx, %eax
+ POP (%ebx)
+ ret
+ CFI_PUSH (%ebx)
+
+ .p2align 4
+L(memcmp16_exit):
+ POP (%ebx)
+ mov %ecx, %eax
+ ret
+END_FUNCTION MEMCMP
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 24b9e465e8..6d74b837d1 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -173,6 +173,21 @@ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
+// On entry to this function, EAX contains the ESP value for the overflow region.
+DEFINE_FUNCTION art_quick_throw_stack_overflow_from_signal
+ // Here, the ESP is above the protected region. We need to create a
+ // callee save frame and then move ESP down to the overflow region.
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov %esp, %ecx // get current stack pointer
+ mov %eax, %esp // move ESP to the overflow region.
+ PUSH ecx // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ CFI_ADJUST_CFA_OFFSET(4)
+ SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
+ call PLT_SYMBOL(artThrowStackOverflowFromCode) // artThrowStackOverflowFromCode(Thread*, SP)
+ int3 // unreached
+END_FUNCTION art_quick_throw_stack_overflow_from_signal
+
/*
* Called by managed code, saves callee saves and then calls artThrowException
* that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
@@ -1098,7 +1113,8 @@ END_FUNCTION art_quick_imt_conflict_trampoline
DEFINE_FUNCTION art_quick_resolution_trampoline
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- PUSH esp // pass SP
+ movl %esp, %edi
+ PUSH edi // pass SP. Do not just PUSH ESP; that messes up unwinding.
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass receiver
@@ -1262,14 +1278,12 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
mov %esp, %ecx // Remember SP
subl LITERAL(8), %esp // Save float return value.
CFI_ADJUST_CFA_OFFSET(8)
- movd %xmm0, (%esp)
+ movq %xmm0, (%esp)
PUSH edx // Save gpr return value.
PUSH eax
- subl LITERAL(8), %esp // Align stack
- movd %xmm0, (%esp)
- subl LITERAL(8), %esp // Pass float return value.
- CFI_ADJUST_CFA_OFFSET(8)
- movd %xmm0, (%esp)
+ subl LITERAL(16), %esp // Align stack
+ CFI_ADJUST_CFA_OFFSET(16)
+ movq %xmm0, (%esp) // Pass float return value.
PUSH edx // Pass gpr return value.
PUSH eax
PUSH ecx // Pass SP.
@@ -1284,7 +1298,7 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
// (ebx is pretending to be our LR).
POP eax // Restore gpr return value.
POP edx
- movd (%esp), %xmm0 // Restore fpr return value.
+ movq (%esp), %xmm0 // Restore fpr return value.
addl LITERAL(8), %esp
CFI_ADJUST_CFA_OFFSET(-8)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
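
The movd-to-movq change in this stub matters because art_quick_instrumentation_exit must hand the callee's floating-point return value back unchanged: movd transfers only the low 32 bits of %xmm0, which silently corrupts a returned double, whereas movq preserves all 64 bits. A small stand-alone illustration of that corruption, in plain C++ rather than runtime code:

    // Sketch: why the instrumentation-exit stub must spill all 64 bits of xmm0.
    // Keeping only the low 32 bits (what a 32-bit movd preserves) mangles a double.
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      double returned = 3.141592653589793;
      uint64_t bits;
      std::memcpy(&bits, &returned, sizeof(bits));

      uint64_t truncated = bits & 0xffffffffu;    // low half only, upper half lost
      double damaged;
      std::memcpy(&damaged, &truncated, sizeof(damaged));

      assert(damaged != returned);                // sign and exponent live in the upper half
      return 0;
    }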
diff --git a/runtime/arch/x86_64/fault_handler_x86_64.cc b/runtime/arch/x86_64/fault_handler_x86_64.cc
deleted file mode 100644
index 233d3c7d1a..0000000000
--- a/runtime/arch/x86_64/fault_handler_x86_64.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include "fault_handler.h"
-#include <sys/ucontext.h>
-#include "base/macros.h"
-#include "globals.h"
-#include "base/logging.h"
-#include "base/hex_dump.h"
-
-
-//
-// X86_64 specific fault handler functions.
-//
-
-namespace art {
-
-void FaultManager::GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp) {
-}
-
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
-}
-
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
-}
-
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
- return false;
-}
-} // namespace art
diff --git a/runtime/arch/x86_64/memcmp16_x86_64.S b/runtime/arch/x86_64/memcmp16_x86_64.S
new file mode 100755
index 0000000000..46e4ba36cf
--- /dev/null
+++ b/runtime/arch/x86_64/memcmp16_x86_64.S
@@ -0,0 +1,1210 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86_64.S"
+
+#define MEMCMP __memcmp16
+
+/*
+ * Half of Silvermont L1 Data Cache size
+ *(see original file cache.h in bionic/libc/arch-x86_64/).
+ * This value is used for specific optimization on big lengths.
+ */
+#define DATA_CACHE_SIZE_HALF (12*1024)
+
+#ifndef L
+# define L(label) .L##label
+#endif
+
+#ifndef ALIGN
+# define ALIGN(n) .p2align n
+#endif
+
+#define JMPTBL(I, B) (I - B)
+
+#define BRANCH_TO_JMPTBL_ENTRY(TABLE, INDEX, SCALE) \
+ lea TABLE(%rip), %r11; \
+ movslq (%r11, INDEX, SCALE), %rcx; \
+ add %r11, %rcx; \
+ jmp *%rcx; \
+ ud2
+
+DEFINE_FUNCTION MEMCMP
+ pxor %xmm0, %xmm0
+ shl $1, %rdx
+ cmp $79, %rdx
+ ja L(79bytesormore)
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+ ALIGN (4)
+L(79bytesormore):
+ movdqu (%rsi), %xmm1
+ movdqu (%rdi), %xmm2
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+ mov %rsi, %rcx
+ and $-16, %rsi
+ add $16, %rsi
+ sub %rsi, %rcx
+
+ sub %rcx, %rdi
+ add %rcx, %rdx
+ test $0xf, %rdi
+ jz L(2aligned)
+
+ cmp $128, %rdx
+ ja L(128bytesormore)
+L(less128bytes):
+ sub $64, %rdx
+
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqu 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqu 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+ cmp $32, %rdx
+ jb L(less32bytesin64)
+
+ movdqu 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqu 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin64):
+ add $64, %rdi
+ add $64, %rsi
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+L(128bytesormore):
+ cmp $512, %rdx
+ ja L(512bytesormore)
+ cmp $256, %rdx
+ ja L(less512bytes)
+L(less256bytes):
+ sub $128, %rdx
+
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqu 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqu 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+
+ movdqu 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqu 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+
+ movdqu 96(%rdi), %xmm2
+ pxor 96(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(112bytesin256)
+
+ movdqu 112(%rdi), %xmm2
+ pxor 112(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(128bytesin256)
+
+ add $128, %rsi
+ add $128, %rdi
+
+ cmp $64, %rdx
+ jae L(less128bytes)
+
+ cmp $32, %rdx
+ jb L(less32bytesin128)
+
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin128):
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+L(less512bytes):
+ sub $256, %rdx
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqu 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqu 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+
+ movdqu 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqu 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+
+ movdqu 96(%rdi), %xmm2
+ pxor 96(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(112bytesin256)
+
+ movdqu 112(%rdi), %xmm2
+ pxor 112(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(128bytesin256)
+
+ movdqu 128(%rdi), %xmm2
+ pxor 128(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(144bytesin256)
+
+ movdqu 144(%rdi), %xmm2
+ pxor 144(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(160bytesin256)
+
+ movdqu 160(%rdi), %xmm2
+ pxor 160(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(176bytesin256)
+
+ movdqu 176(%rdi), %xmm2
+ pxor 176(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(192bytesin256)
+
+ movdqu 192(%rdi), %xmm2
+ pxor 192(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(208bytesin256)
+
+ movdqu 208(%rdi), %xmm2
+ pxor 208(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(224bytesin256)
+
+ movdqu 224(%rdi), %xmm2
+ pxor 224(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(240bytesin256)
+
+ movdqu 240(%rdi), %xmm2
+ pxor 240(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(256bytesin256)
+
+ add $256, %rsi
+ add $256, %rdi
+
+ cmp $128, %rdx
+ jae L(less256bytes)
+
+ cmp $64, %rdx
+ jae L(less128bytes)
+
+ cmp $32, %rdx
+ jb L(less32bytesin256)
+
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin256):
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+ ALIGN (4)
+L(512bytesormore):
+#ifdef DATA_CACHE_SIZE_HALF
+ mov $DATA_CACHE_SIZE_HALF, %r8
+#else
+ mov __x86_64_data_cache_size_half(%rip), %r8
+#endif
+ mov %r8, %r9
+ shr $1, %r8
+ add %r9, %r8
+ cmp %r8, %rdx
+ ja L(L2_L3_cache_unaglined)
+ sub $64, %rdx
+ ALIGN (4)
+L(64bytesormore_loop):
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ movdqa %xmm2, %xmm1
+
+ movdqu 16(%rdi), %xmm3
+ pxor 16(%rsi), %xmm3
+ por %xmm3, %xmm1
+
+ movdqu 32(%rdi), %xmm4
+ pxor 32(%rsi), %xmm4
+ por %xmm4, %xmm1
+
+ movdqu 48(%rdi), %xmm5
+ pxor 48(%rsi), %xmm5
+ por %xmm5, %xmm1
+
+ ptest %xmm1, %xmm0
+ jnc L(64bytesormore_loop_end)
+ add $64, %rsi
+ add $64, %rdi
+ sub $64, %rdx
+ jae L(64bytesormore_loop)
+
+ add $64, %rdx
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+L(L2_L3_cache_unaglined):
+ sub $64, %rdx
+ ALIGN (4)
+L(L2_L3_unaligned_128bytes_loop):
+ prefetchnta 0x1c0(%rdi)
+ prefetchnta 0x1c0(%rsi)
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ movdqa %xmm2, %xmm1
+
+ movdqu 16(%rdi), %xmm3
+ pxor 16(%rsi), %xmm3
+ por %xmm3, %xmm1
+
+ movdqu 32(%rdi), %xmm4
+ pxor 32(%rsi), %xmm4
+ por %xmm4, %xmm1
+
+ movdqu 48(%rdi), %xmm5
+ pxor 48(%rsi), %xmm5
+ por %xmm5, %xmm1
+
+ ptest %xmm1, %xmm0
+ jnc L(64bytesormore_loop_end)
+ add $64, %rsi
+ add $64, %rdi
+ sub $64, %rdx
+ jae L(L2_L3_unaligned_128bytes_loop)
+
+ add $64, %rdx
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+/*
+ * This case is for machines which are sensitive for unaligned instructions.
+ */
+ ALIGN (4)
+L(2aligned):
+ cmp $128, %rdx
+ ja L(128bytesormorein2aligned)
+L(less128bytesin2aligned):
+ sub $64, %rdx
+
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqa 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqa 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqa 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+ cmp $32, %rdx
+ jb L(less32bytesin64in2alinged)
+
+ movdqa 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqa 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin64in2alinged):
+ add $64, %rdi
+ add $64, %rsi
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+ ALIGN (4)
+L(128bytesormorein2aligned):
+ cmp $512, %rdx
+ ja L(512bytesormorein2aligned)
+ cmp $256, %rdx
+ ja L(256bytesormorein2aligned)
+L(less256bytesin2alinged):
+ sub $128, %rdx
+
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqa 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqa 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqa 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+
+ movdqa 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqa 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+
+ movdqa 96(%rdi), %xmm2
+ pxor 96(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(112bytesin256)
+
+ movdqa 112(%rdi), %xmm2
+ pxor 112(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(128bytesin256)
+
+ add $128, %rsi
+ add $128, %rdi
+
+ cmp $64, %rdx
+ jae L(less128bytesin2aligned)
+
+ cmp $32, %rdx
+ jb L(less32bytesin128in2aligned)
+
+ movdqu (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqu 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin128in2aligned):
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+ ALIGN (4)
+L(256bytesormorein2aligned):
+
+ sub $256, %rdx
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqa 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+
+ movdqa 32(%rdi), %xmm2
+ pxor 32(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(48bytesin256)
+
+ movdqa 48(%rdi), %xmm2
+ pxor 48(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(64bytesin256)
+
+ movdqa 64(%rdi), %xmm2
+ pxor 64(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(80bytesin256)
+
+ movdqa 80(%rdi), %xmm2
+ pxor 80(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(96bytesin256)
+
+ movdqa 96(%rdi), %xmm2
+ pxor 96(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(112bytesin256)
+
+ movdqa 112(%rdi), %xmm2
+ pxor 112(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(128bytesin256)
+
+ movdqa 128(%rdi), %xmm2
+ pxor 128(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(144bytesin256)
+
+ movdqa 144(%rdi), %xmm2
+ pxor 144(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(160bytesin256)
+
+ movdqa 160(%rdi), %xmm2
+ pxor 160(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(176bytesin256)
+
+ movdqa 176(%rdi), %xmm2
+ pxor 176(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(192bytesin256)
+
+ movdqa 192(%rdi), %xmm2
+ pxor 192(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(208bytesin256)
+
+ movdqa 208(%rdi), %xmm2
+ pxor 208(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(224bytesin256)
+
+ movdqa 224(%rdi), %xmm2
+ pxor 224(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(240bytesin256)
+
+ movdqa 240(%rdi), %xmm2
+ pxor 240(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(256bytesin256)
+
+ add $256, %rsi
+ add $256, %rdi
+
+ cmp $128, %rdx
+ jae L(less256bytesin2alinged)
+
+ cmp $64, %rdx
+ jae L(less128bytesin2aligned)
+
+ cmp $32, %rdx
+ jb L(less32bytesin256in2alinged)
+
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(16bytesin256)
+
+ movdqa 16(%rdi), %xmm2
+ pxor 16(%rsi), %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(32bytesin256)
+ sub $32, %rdx
+ add $32, %rdi
+ add $32, %rsi
+L(less32bytesin256in2alinged):
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+ ALIGN (4)
+L(512bytesormorein2aligned):
+#ifdef DATA_CACHE_SIZE_HALF
+ mov $DATA_CACHE_SIZE_HALF, %r8
+#else
+ mov __x86_64_data_cache_size_half(%rip), %r8
+#endif
+ mov %r8, %r9
+ shr $1, %r8
+ add %r9, %r8
+ cmp %r8, %rdx
+ ja L(L2_L3_cache_aglined)
+
+ sub $64, %rdx
+ ALIGN (4)
+L(64bytesormore_loopin2aligned):
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ movdqa %xmm2, %xmm1
+
+ movdqa 16(%rdi), %xmm3
+ pxor 16(%rsi), %xmm3
+ por %xmm3, %xmm1
+
+ movdqa 32(%rdi), %xmm4
+ pxor 32(%rsi), %xmm4
+ por %xmm4, %xmm1
+
+ movdqa 48(%rdi), %xmm5
+ pxor 48(%rsi), %xmm5
+ por %xmm5, %xmm1
+
+ ptest %xmm1, %xmm0
+ jnc L(64bytesormore_loop_end)
+ add $64, %rsi
+ add $64, %rdi
+ sub $64, %rdx
+ jae L(64bytesormore_loopin2aligned)
+
+ add $64, %rdx
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+L(L2_L3_cache_aglined):
+ sub $64, %rdx
+ ALIGN (4)
+L(L2_L3_aligned_128bytes_loop):
+ prefetchnta 0x1c0(%rdi)
+ prefetchnta 0x1c0(%rsi)
+ movdqa (%rdi), %xmm2
+ pxor (%rsi), %xmm2
+ movdqa %xmm2, %xmm1
+
+ movdqa 16(%rdi), %xmm3
+ pxor 16(%rsi), %xmm3
+ por %xmm3, %xmm1
+
+ movdqa 32(%rdi), %xmm4
+ pxor 32(%rsi), %xmm4
+ por %xmm4, %xmm1
+
+ movdqa 48(%rdi), %xmm5
+ pxor 48(%rsi), %xmm5
+ por %xmm5, %xmm1
+
+ ptest %xmm1, %xmm0
+ jnc L(64bytesormore_loop_end)
+ add $64, %rsi
+ add $64, %rdi
+ sub $64, %rdx
+ jae L(L2_L3_aligned_128bytes_loop)
+
+ add $64, %rdx
+ add %rdx, %rsi
+ add %rdx, %rdi
+ BRANCH_TO_JMPTBL_ENTRY(L(table_64bytes), %rdx, 2)
+
+
+ ALIGN (4)
+L(64bytesormore_loop_end):
+ add $16, %rdi
+ add $16, %rsi
+ ptest %xmm2, %xmm0
+ jnc L(16bytes)
+
+ add $16, %rdi
+ add $16, %rsi
+ ptest %xmm3, %xmm0
+ jnc L(16bytes)
+
+ add $16, %rdi
+ add $16, %rsi
+ ptest %xmm4, %xmm0
+ jnc L(16bytes)
+
+ add $16, %rdi
+ add $16, %rsi
+ jmp L(16bytes)
+
+L(256bytesin256):
+ add $256, %rdi
+ add $256, %rsi
+ jmp L(16bytes)
+L(240bytesin256):
+ add $240, %rdi
+ add $240, %rsi
+ jmp L(16bytes)
+L(224bytesin256):
+ add $224, %rdi
+ add $224, %rsi
+ jmp L(16bytes)
+L(208bytesin256):
+ add $208, %rdi
+ add $208, %rsi
+ jmp L(16bytes)
+L(192bytesin256):
+ add $192, %rdi
+ add $192, %rsi
+ jmp L(16bytes)
+L(176bytesin256):
+ add $176, %rdi
+ add $176, %rsi
+ jmp L(16bytes)
+L(160bytesin256):
+ add $160, %rdi
+ add $160, %rsi
+ jmp L(16bytes)
+L(144bytesin256):
+ add $144, %rdi
+ add $144, %rsi
+ jmp L(16bytes)
+L(128bytesin256):
+ add $128, %rdi
+ add $128, %rsi
+ jmp L(16bytes)
+L(112bytesin256):
+ add $112, %rdi
+ add $112, %rsi
+ jmp L(16bytes)
+L(96bytesin256):
+ add $96, %rdi
+ add $96, %rsi
+ jmp L(16bytes)
+L(80bytesin256):
+ add $80, %rdi
+ add $80, %rsi
+ jmp L(16bytes)
+L(64bytesin256):
+ add $64, %rdi
+ add $64, %rsi
+ jmp L(16bytes)
+L(48bytesin256):
+ add $16, %rdi
+ add $16, %rsi
+L(32bytesin256):
+ add $16, %rdi
+ add $16, %rsi
+L(16bytesin256):
+ add $16, %rdi
+ add $16, %rsi
+L(16bytes):
+ mov -16(%rdi), %rax
+ mov -16(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+L(8bytes):
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(12bytes):
+ mov -12(%rdi), %rax
+ mov -12(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+L(4bytes):
+ mov -4(%rsi), %ecx
+ mov -4(%rdi), %eax
+ cmp %eax, %ecx
+ jne L(diffin4bytes)
+L(0bytes):
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(66bytes):
+ movdqu -66(%rdi), %xmm1
+ movdqu -66(%rsi), %xmm2
+ mov $-66, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(50bytes):
+ movdqu -50(%rdi), %xmm1
+ movdqu -50(%rsi), %xmm2
+ mov $-50, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(34bytes):
+ movdqu -34(%rdi), %xmm1
+ movdqu -34(%rsi), %xmm2
+ mov $-34, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(18bytes):
+ mov -18(%rdi), %rax
+ mov -18(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+L(10bytes):
+ mov -10(%rdi), %rax
+ mov -10(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ movzwl -2(%rdi), %eax
+ movzwl -2(%rsi), %ecx
+ cmp %cl, %al
+ jne L(end)
+ and $0xffff, %eax
+ and $0xffff, %ecx
+ sub %ecx, %eax
+ ret
+
+ ALIGN (4)
+L(14bytes):
+ mov -14(%rdi), %rax
+ mov -14(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(6bytes):
+ mov -6(%rdi), %eax
+ mov -6(%rsi), %ecx
+ cmp %eax, %ecx
+ jne L(diffin4bytes)
+L(2bytes):
+ movzwl -2(%rsi), %ecx
+ movzwl -2(%rdi), %eax
+ cmp %cl, %al
+ jne L(end)
+ and $0xffff, %eax
+ and $0xffff, %ecx
+ sub %ecx, %eax
+ ret
+
+ ALIGN (4)
+L(68bytes):
+ movdqu -68(%rdi), %xmm2
+ movdqu -68(%rsi), %xmm1
+ mov $-68, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(52bytes):
+ movdqu -52(%rdi), %xmm2
+ movdqu -52(%rsi), %xmm1
+ mov $-52, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(36bytes):
+ movdqu -36(%rdi), %xmm2
+ movdqu -36(%rsi), %xmm1
+ mov $-36, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(20bytes):
+ movdqu -20(%rdi), %xmm2
+ movdqu -20(%rsi), %xmm1
+ mov $-20, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -4(%rdi), %eax
+ mov -4(%rsi), %ecx
+ cmp %eax, %ecx
+ jne L(diffin4bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(70bytes):
+ movdqu -70(%rsi), %xmm1
+ movdqu -70(%rdi), %xmm2
+ mov $-70, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(54bytes):
+ movdqu -54(%rsi), %xmm1
+ movdqu -54(%rdi), %xmm2
+ mov $-54, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(38bytes):
+ movdqu -38(%rsi), %xmm1
+ movdqu -38(%rdi), %xmm2
+ mov $-38, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(22bytes):
+ movdqu -22(%rsi), %xmm1
+ movdqu -22(%rdi), %xmm2
+ mov $-22, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(72bytes):
+ movdqu -72(%rsi), %xmm1
+ movdqu -72(%rdi), %xmm2
+ mov $-72, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(56bytes):
+ movdqu -56(%rdi), %xmm2
+ movdqu -56(%rsi), %xmm1
+ mov $-56, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(40bytes):
+ movdqu -40(%rdi), %xmm2
+ movdqu -40(%rsi), %xmm1
+ mov $-40, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(24bytes):
+ movdqu -24(%rdi), %xmm2
+ movdqu -24(%rsi), %xmm1
+ mov $-24, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(74bytes):
+ movdqu -74(%rsi), %xmm1
+ movdqu -74(%rdi), %xmm2
+ mov $-74, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(58bytes):
+ movdqu -58(%rdi), %xmm2
+ movdqu -58(%rsi), %xmm1
+ mov $-58, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(42bytes):
+ movdqu -42(%rdi), %xmm2
+ movdqu -42(%rsi), %xmm1
+ mov $-42, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(26bytes):
+ movdqu -26(%rdi), %xmm2
+ movdqu -26(%rsi), %xmm1
+ mov $-26, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -10(%rdi), %rax
+ mov -10(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ movzwl -2(%rdi), %eax
+ movzwl -2(%rsi), %ecx
+ jmp L(end)
+
+ ALIGN (4)
+L(76bytes):
+ movdqu -76(%rsi), %xmm1
+ movdqu -76(%rdi), %xmm2
+ mov $-76, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(60bytes):
+ movdqu -60(%rdi), %xmm2
+ movdqu -60(%rsi), %xmm1
+ mov $-60, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(44bytes):
+ movdqu -44(%rdi), %xmm2
+ movdqu -44(%rsi), %xmm1
+ mov $-44, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(28bytes):
+ movdqu -28(%rdi), %xmm2
+ movdqu -28(%rsi), %xmm1
+ mov $-28, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -12(%rdi), %rax
+ mov -12(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ mov -4(%rdi), %eax
+ mov -4(%rsi), %ecx
+ cmp %eax, %ecx
+ jne L(diffin4bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(78bytes):
+ movdqu -78(%rsi), %xmm1
+ movdqu -78(%rdi), %xmm2
+ mov $-78, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(62bytes):
+ movdqu -62(%rdi), %xmm2
+ movdqu -62(%rsi), %xmm1
+ mov $-62, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(46bytes):
+ movdqu -46(%rdi), %xmm2
+ movdqu -46(%rsi), %xmm1
+ mov $-46, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(30bytes):
+ movdqu -30(%rdi), %xmm2
+ movdqu -30(%rsi), %xmm1
+ mov $-30, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+ mov -14(%rdi), %rax
+ mov -14(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+ ALIGN (4)
+L(64bytes):
+ movdqu -64(%rdi), %xmm2
+ movdqu -64(%rsi), %xmm1
+ mov $-64, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(48bytes):
+ movdqu -48(%rdi), %xmm2
+ movdqu -48(%rsi), %xmm1
+ mov $-48, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+L(32bytes):
+ movdqu -32(%rdi), %xmm2
+ movdqu -32(%rsi), %xmm1
+ mov $-32, %dl
+ pxor %xmm1, %xmm2
+ ptest %xmm2, %xmm0
+ jnc L(less16bytes)
+
+ mov -16(%rdi), %rax
+ mov -16(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+
+ mov -8(%rdi), %rax
+ mov -8(%rsi), %rcx
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ xor %eax, %eax
+ ret
+
+/*
+ * Align to 8 bytes to avoid two taken branches in one 16-byte aligned code block.
+ */
+ ALIGN (3)
+L(less16bytes):
+ movsbq %dl, %rdx
+ mov (%rsi, %rdx), %rcx
+ mov (%rdi, %rdx), %rax
+ cmp %rax, %rcx
+ jne L(diffin8bytes)
+ mov 8(%rsi, %rdx), %rcx
+ mov 8(%rdi, %rdx), %rax
+L(diffin8bytes):
+ cmp %eax, %ecx
+ jne L(diffin4bytes)
+ shr $32, %rcx
+ shr $32, %rax
+L(diffin4bytes):
+ cmp %cx, %ax
+ jne L(end)
+ shr $16, %ecx
+ shr $16, %eax
+ jmp L(end)
+
+ ALIGN (4)
+L(end):
+ and $0xffff, %eax
+ and $0xffff, %ecx
+ sub %ecx, %eax
+ ret
+
+END_FUNCTION MEMCMP
+
+ ALIGN (3)
+L(table_64bytes):
+ .int JMPTBL (L(0bytes), L(table_64bytes))
+ .int JMPTBL (L(2bytes), L(table_64bytes))
+ .int JMPTBL (L(4bytes), L(table_64bytes))
+ .int JMPTBL (L(6bytes), L(table_64bytes))
+ .int JMPTBL (L(8bytes), L(table_64bytes))
+ .int JMPTBL (L(10bytes), L(table_64bytes))
+ .int JMPTBL (L(12bytes), L(table_64bytes))
+ .int JMPTBL (L(14bytes), L(table_64bytes))
+ .int JMPTBL (L(16bytes), L(table_64bytes))
+ .int JMPTBL (L(18bytes), L(table_64bytes))
+ .int JMPTBL (L(20bytes), L(table_64bytes))
+ .int JMPTBL (L(22bytes), L(table_64bytes))
+ .int JMPTBL (L(24bytes), L(table_64bytes))
+ .int JMPTBL (L(26bytes), L(table_64bytes))
+ .int JMPTBL (L(28bytes), L(table_64bytes))
+ .int JMPTBL (L(30bytes), L(table_64bytes))
+ .int JMPTBL (L(32bytes), L(table_64bytes))
+ .int JMPTBL (L(34bytes), L(table_64bytes))
+ .int JMPTBL (L(36bytes), L(table_64bytes))
+ .int JMPTBL (L(38bytes), L(table_64bytes))
+ .int JMPTBL (L(40bytes), L(table_64bytes))
+ .int JMPTBL (L(42bytes), L(table_64bytes))
+ .int JMPTBL (L(44bytes), L(table_64bytes))
+ .int JMPTBL (L(46bytes), L(table_64bytes))
+ .int JMPTBL (L(48bytes), L(table_64bytes))
+ .int JMPTBL (L(50bytes), L(table_64bytes))
+ .int JMPTBL (L(52bytes), L(table_64bytes))
+ .int JMPTBL (L(54bytes), L(table_64bytes))
+ .int JMPTBL (L(56bytes), L(table_64bytes))
+ .int JMPTBL (L(58bytes), L(table_64bytes))
+ .int JMPTBL (L(60bytes), L(table_64bytes))
+ .int JMPTBL (L(62bytes), L(table_64bytes))
+ .int JMPTBL (L(64bytes), L(table_64bytes))
+ .int JMPTBL (L(66bytes), L(table_64bytes))
+ .int JMPTBL (L(68bytes), L(table_64bytes))
+ .int JMPTBL (L(70bytes), L(table_64bytes))
+ .int JMPTBL (L(72bytes), L(table_64bytes))
+ .int JMPTBL (L(74bytes), L(table_64bytes))
+ .int JMPTBL (L(76bytes), L(table_64bytes))
+ .int JMPTBL (L(78bytes), L(table_64bytes))
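The jump table above dispatches on the remaining length to one of the L(Nbytes) labels (even byte counts from 0 to 78); each label re-reads and compares the tail of both buffers relative to their ends, so every path funnels into L(diffin8bytes)/L(end) and returns the difference of the first mismatching 16-bit value. A minimal C++ reference sketch of those semantics, assuming the count argument is in 16-bit units as declared in runtime/arch/memcmp16.h, is:

#include <cstddef>
#include <cstdint>

// Reference semantics only; the vectorized assembly above is the optimized implementation.
static int32_t MemCmp16Reference(const uint16_t* s0, const uint16_t* s1, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    if (s0[i] != s1[i]) {
      return static_cast<int32_t>(s0[i]) - static_cast<int32_t>(s1[i]);
    }
  }
  return 0;
}
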
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 50b2de4a4f..f021ada6ba 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -284,6 +284,18 @@ NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
*/
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
+// On entry to this function, RAX contains the RSP value for the overflow region.
+DEFINE_FUNCTION art_quick_throw_stack_overflow_from_signal
+ // Here, the RSP is above the protected region. We need to create a
+ // callee save frame and then move RSP down to the overflow region.
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
+ mov %rsp, %rsi // get current stack pointer, pass SP as second arg
+ mov %rax, %rsp // move RSP to the overflow region.
+ mov %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current() as first arg
+ call PLT_SYMBOL(artThrowStackOverflowFromCode) // artThrowStackOverflowFromCode(Thread*, SP)
+ int3 // unreached
+END_FUNCTION art_quick_throw_stack_overflow_from_signal
+
/*
* Called by managed code, saves callee saves and then calls artThrowException
* that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
@@ -1495,7 +1507,7 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
PUSH rax // Save integer result.
subq LITERAL(8), %rsp // Save floating-point result.
CFI_ADJUST_CFA_OFFSET(8)
- movd %xmm0, (%rsp)
+ movq %xmm0, (%rsp)
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
movq %rax, %rdx // Pass integer result.
@@ -1506,7 +1518,7 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
movq %rax, %rdi // Store return PC
movq %rdx, %rsi // Store second return PC in hidden arg.
- movd (%rsp), %xmm0 // Restore floating-point result.
+ movq (%rsp), %xmm0 // Restore floating-point result.
addq LITERAL(8), %rsp
CFI_ADJUST_CFA_OFFSET(-8)
POP rax // Restore integer result.
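The movd to movq change above matters because movd with a memory operand transfers only the low 32 bits of %xmm0, so a 64-bit double return value would be truncated across the instrumentation-exit call; movq preserves all 64 bits. A small C++ illustration of the kind of corruption the old code could cause (illustrative only, not ART code):

#include <cstdint>
#include <cstring>

// Round-trips a double through a 32-bit slot, as the old movd effectively did.
double RoundTripThrough32Bits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t low = static_cast<uint32_t>(bits);  // only the low half survives
  uint64_t restored = low;                     // the high half is lost
  double out;
  std::memcpy(&out, &restored, sizeof(out));
  return out;                                  // generally != d
}
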
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index fe5a2ef4fd..fae9271d9e 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -176,6 +176,7 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
#endif
#define PURE __attribute__ ((__pure__))
+#define WARN_UNUSED __attribute__((warn_unused_result))
template<typename T> void UNUSED(const T&) {}
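The new WARN_UNUSED macro wraps the compiler's warn_unused_result attribute. A hypothetical usage sketch (ParseOptions is illustrative, not an ART API):

bool ParseOptions(const char* spec) WARN_UNUSED;

void Caller() {
  ParseOptions("-Xcheck:jni");             // warning: return value ignored
  bool ok = ParseOptions("-Xcheck:jni");   // OK: result is consumed
  (void)ok;
}
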
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 7779547725..abe0aa0249 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -35,12 +35,14 @@ Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::mem_maps_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
+Mutex* Locks::thread_list_suspend_thread_lock_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
@@ -149,7 +151,8 @@ void BaseMutex::CheckSafeToWait(Thread* self) {
for (int i = kLockLevelCount - 1; i >= 0; --i) {
if (i != level_) {
BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
+ // We expect waits to happen while holding the thread list suspend thread lock.
+ if (held_mutex != NULL && i != kThreadListSuspendThreadLock) {
LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
<< "(level " << LockLevel(i) << ") while performing wait on "
<< "\"" << name_ << "\" (level " << level_ << ")";
@@ -161,16 +164,10 @@ void BaseMutex::CheckSafeToWait(Thread* self) {
}
}
-inline void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
+void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
if (kLogLockContentions) {
// Atomically add value to wait_time.
- uint64_t new_val, old_val;
- volatile int64_t* addr = reinterpret_cast<volatile int64_t*>(&wait_time);
- volatile const int64_t* caddr = const_cast<volatile const int64_t*>(addr);
- do {
- old_val = static_cast<uint64_t>(QuasiAtomic::Read64(caddr));
- new_val = old_val + value;
- } while (!QuasiAtomic::Cas64(static_cast<int64_t>(old_val), static_cast<int64_t>(new_val), addr));
+ wait_time.FetchAndAddSequentiallyConsistent(value);
}
}
@@ -204,7 +201,7 @@ void BaseMutex::DumpContention(std::ostream& os) const {
if (kLogLockContentions) {
const ContentionLogData* data = contention_log_data_;
const ContentionLogEntry* log = data->contention_log;
- uint64_t wait_time = data->wait_time;
+ uint64_t wait_time = data->wait_time.LoadRelaxed();
uint32_t contention_count = data->contention_count.LoadRelaxed();
if (contention_count == 0) {
os << "never contended";
@@ -838,9 +835,11 @@ void Locks::Init() {
DCHECK(breakpoint_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
+ DCHECK(jni_libraries_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
DCHECK(mutator_lock_ != nullptr);
DCHECK(thread_list_lock_ != nullptr);
+ DCHECK(thread_list_suspend_thread_lock_ != nullptr);
DCHECK(thread_suspend_count_lock_ != nullptr);
DCHECK(trace_lock_ != nullptr);
DCHECK(profiler_lock_ != nullptr);
@@ -848,13 +847,18 @@ void Locks::Init() {
DCHECK(intern_table_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
- LockLevel current_lock_level = kMutatorLock;
- DCHECK(mutator_lock_ == nullptr);
- mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
+ LockLevel current_lock_level = kThreadListSuspendThreadLock;
+ DCHECK(thread_list_suspend_thread_lock_ == nullptr);
+ thread_list_suspend_thread_lock_ =
+ new Mutex("thread list suspend thread by .. lock", current_lock_level);
#define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
- DCHECK_LT(new_level, current_lock_level); \
- current_lock_level = new_level;
+ DCHECK_LT(new_level, current_lock_level); \
+ current_lock_level = new_level;
+
+ UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
+ DCHECK(mutator_lock_ == nullptr);
+ mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
DCHECK(heap_bitmap_lock_ == nullptr);
@@ -876,6 +880,10 @@ void Locks::Init() {
DCHECK(thread_list_lock_ == nullptr);
thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
+ DCHECK(jni_libraries_lock_ == nullptr);
+ jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
DCHECK(breakpoint_lock_ == nullptr);
breakpoint_lock_ = new Mutex("breakpoint lock", current_lock_level);
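The AddToWaitTime change above replaces a hand-rolled 64-bit compare-and-swap loop with a single atomic fetch-add. In std::atomic terms the two forms are roughly equivalent to the following sketch (not ART's Atomic<> API):

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> wait_time{0};

// Old style: explicit CAS loop.
void AddToWaitTimeCas(uint64_t value) {
  uint64_t old_val = wait_time.load(std::memory_order_relaxed);
  while (!wait_time.compare_exchange_weak(old_val, old_val + value,
                                          std::memory_order_seq_cst)) {
    // old_val was reloaded by compare_exchange_weak; retry with the fresh value.
  }
}

// New style: one sequentially consistent fetch-add.
void AddToWaitTimeFetchAdd(uint64_t value) {
  wait_time.fetch_add(value, std::memory_order_seq_cst);
}
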
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 8d2cd07aea..fd766295ac 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -70,10 +70,10 @@ enum LockLevel {
kMarkSweepMarkStackLock,
kTransactionLogLock,
kInternTableLock,
+ kOatFileSecondaryLookupLock,
kDefaultMutexLevel,
kMarkSweepLargeObjectLock,
kPinTableLock,
- kLoadLibraryLock,
kJdwpObjectRegistryLock,
kModifyLdtLock,
kAllocatedThreadIdsLock,
@@ -82,6 +82,7 @@ enum LockLevel {
kBreakpointLock,
kMonitorLock,
kMonitorListLock,
+ kJniLoadLibraryLock,
kThreadListLock,
kBreakpointInvokeLock,
kDeoptimizationLock,
@@ -93,6 +94,7 @@ enum LockLevel {
kRuntimeShutdownLock,
kHeapBitmapLock,
kMutatorLock,
+ kThreadListSuspendThreadLock,
kZygoteCreationLock,
kLockLevelCount // Must come last.
@@ -160,7 +162,7 @@ class BaseMutex {
// Number of times the Mutex has been contended.
AtomicInteger contention_count;
// Sum of time waited by all contenders in ns.
- volatile uint64_t wait_time;
+ Atomic<uint64_t> wait_time;
void AddToWaitTime(uint64_t value);
ContentionLogData() : wait_time(0) {}
};
@@ -474,6 +476,15 @@ class Locks {
public:
static void Init();
+  // There's a potential race in which two threads try to suspend each other, both succeed, and
+  // both then block when transitioning back to runnable. This lock ensures that only one thread
+  // is requesting suspension of another at any time. As the suspending thread transitions to
+  // runnable, if it is itself the target of a suspension request it blocks while holding this
+  // lock until it can safely request suspension of the other thread, i.e. until the other thread
+  // no longer has a suspension request pending against it. This avoids a potential deadlock
+  // cycle.
+ static Mutex* thread_list_suspend_thread_lock_;
+
// The mutator_lock_ is used to allow mutators to execute in a shared (reader) mode or to block
// mutators by having an exclusive (writer) owner. In normal execution each mutator thread holds
// a share on the mutator_lock_. The garbage collector may also execute with shared access but
@@ -532,7 +543,7 @@ class Locks {
// else | .. running ..
// Goto x | .. running ..
// .. running .. | .. running ..
- static ReaderWriterMutex* mutator_lock_;
+ static ReaderWriterMutex* mutator_lock_ ACQUIRED_AFTER(thread_list_suspend_thread_lock_);
// Allow reader-writer mutual exclusion on the mark and live bitmaps of the heap.
static ReaderWriterMutex* heap_bitmap_lock_ ACQUIRED_AFTER(mutator_lock_);
@@ -550,8 +561,11 @@ class Locks {
// attaching and detaching.
static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);
+ // Guards maintaining loading library data structures.
+ static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
+
// Guards breakpoints.
- static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
+ static Mutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
// Guards lists of classes within the class linker.
static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index 351de3d48f..bf091d00d2 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -58,6 +58,22 @@ bool ScopedFlock::Init(const char* filename, std::string* error_msg) {
}
}
+bool ScopedFlock::Init(File* file, std::string* error_msg) {
+ file_.reset(new File(dup(file->Fd())));
+ if (file_->Fd() == -1) {
+ file_.reset();
+ *error_msg = StringPrintf("Failed to duplicate open file '%s': %s",
+ file->GetPath().c_str(), strerror(errno));
+ return false;
+ }
+ if (0 != TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_EX))) {
+ file_.reset();
+ *error_msg = StringPrintf("Failed to lock file '%s': %s", file->GetPath().c_str(), strerror(errno));
+ return false;
+ }
+ return true;
+}
+
File* ScopedFlock::GetFile() {
CHECK(file_.get() != NULL);
return file_.get();
diff --git a/runtime/base/scoped_flock.h b/runtime/base/scoped_flock.h
index f8ed805bea..08612e3016 100644
--- a/runtime/base/scoped_flock.h
+++ b/runtime/base/scoped_flock.h
@@ -37,6 +37,10 @@ class ScopedFlock {
// changed (usually due to a new file being created at the same path)
// between attempts to lock it.
bool Init(const char* filename, std::string* error_msg);
+ // Attempt to acquire an exclusive file lock (see flock(2)) on 'file'.
+ // Returns true if the lock could be acquired or false if an error
+  // occurred.
+ bool Init(File* file, std::string* error_msg);
// Returns the (locked) file associated with this instance.
File* GetFile();
@@ -45,6 +49,7 @@ class ScopedFlock {
bool HasFile();
~ScopedFlock();
+
private:
std::unique_ptr<File> file_;
DISALLOW_COPY_AND_ASSIGN(ScopedFlock);
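A hypothetical use of the new Init(File*, ...) overload; the surrounding function and variable names are illustrative only:

#include <string>

bool LockForUpdate(File* oat_file) {
  ScopedFlock lock;
  std::string error_msg;
  if (!lock.Init(oat_file, &error_msg)) {
    LOG(WARNING) << "Could not lock " << oat_file->GetPath() << ": " << error_msg;
    return false;
  }
  // Exclusive access while 'lock' is in scope; the dup()ed descriptor is released by ~ScopedFlock().
  return true;
}
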
diff --git a/runtime/base/unix_file/fd_file_test.cc b/runtime/base/unix_file/fd_file_test.cc
index 33b3d3e186..3481f2ff9f 100644
--- a/runtime/base/unix_file/fd_file_test.cc
+++ b/runtime/base/unix_file/fd_file_test.cc
@@ -59,6 +59,9 @@ TEST_F(FdFileTest, OpenClose) {
EXPECT_TRUE(file.Open(good_path, O_RDONLY));
EXPECT_GE(file.Fd(), 0);
EXPECT_TRUE(file.IsOpened());
+
+ file.Close();
+ ASSERT_EQ(unlink(good_path.c_str()), 0);
}
TEST_F(FdFileTest, ReadFullyEmptyFile) {
diff --git a/runtime/base/unix_file/mapped_file_test.cc b/runtime/base/unix_file/mapped_file_test.cc
index 7e45321d48..59334d45ad 100644
--- a/runtime/base/unix_file/mapped_file_test.cc
+++ b/runtime/base/unix_file/mapped_file_test.cc
@@ -30,7 +30,7 @@ class MappedFileTest : public RandomAccessFileTest {
}
void SetUp() {
- art::CommonRuntimeTest::SetEnvironmentVariables(android_data_);
+ RandomAccessFileTest::SetUp();
good_path_ = GetTmpPath("some-file.txt");
int fd = TEMP_FAILURE_RETRY(open(good_path_.c_str(), O_CREAT|O_RDWR, 0666));
@@ -42,6 +42,12 @@ class MappedFileTest : public RandomAccessFileTest {
ASSERT_TRUE(CopyFile(src, &dst));
}
+ void TearDown() {
+ ASSERT_EQ(unlink(good_path_.c_str()), 0);
+
+ RandomAccessFileTest::TearDown();
+ }
+
virtual RandomAccessFile* MakeTestFile() {
TEMP_FAILURE_RETRY(truncate(good_path_.c_str(), 0));
MappedFile* f = new MappedFile;
diff --git a/runtime/base/unix_file/random_access_file_test.h b/runtime/base/unix_file/random_access_file_test.h
index 1d0b866960..0002433628 100644
--- a/runtime/base/unix_file/random_access_file_test.h
+++ b/runtime/base/unix_file/random_access_file_test.h
@@ -35,7 +35,11 @@ class RandomAccessFileTest : public testing::Test {
virtual RandomAccessFile* MakeTestFile() = 0;
virtual void SetUp() {
- art::CommonRuntimeTest::SetEnvironmentVariables(android_data_);
+ art::CommonRuntimeTest::SetUpAndroidData(android_data_);
+ }
+
+ virtual void TearDown() {
+ art::CommonRuntimeTest::TearDownAndroidData(android_data_, true);
}
std::string GetTmpPath(const std::string& name) {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 9ad8a07d3c..99277a0629 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -25,6 +25,7 @@
#include "dex_file-inl.h"
#include "field_helper.h"
#include "gc/space/space.h"
+#include "java_vm_ext.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -35,62 +36,16 @@
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
+#include "well_known_classes.h"
namespace art {
-static void JniAbort(const char* jni_function_name, const char* msg) {
- Thread* self = Thread::Current();
- ScopedObjectAccess soa(self);
- mirror::ArtMethod* current_method = self->GetCurrentMethod(nullptr);
-
- std::ostringstream os;
- os << "JNI DETECTED ERROR IN APPLICATION: " << msg;
-
- if (jni_function_name != nullptr) {
- os << "\n in call to " << jni_function_name;
- }
- // TODO: is this useful given that we're about to dump the calling thread's stack?
- if (current_method != nullptr) {
- os << "\n from " << PrettyMethod(current_method);
- }
- os << "\n";
- self->Dump(os);
-
- JavaVMExt* vm = Runtime::Current()->GetJavaVM();
- if (vm->check_jni_abort_hook != nullptr) {
- vm->check_jni_abort_hook(vm->check_jni_abort_hook_data, os.str());
- } else {
- // Ensure that we get a native stack trace for this thread.
- self->TransitionFromRunnableToSuspended(kNative);
- LOG(FATAL) << os.str();
- self->TransitionFromSuspendedToRunnable(); // Unreachable, keep annotalysis happy.
- }
-}
-
-static void JniAbortV(const char* jni_function_name, const char* fmt, va_list ap) {
- std::string msg;
- StringAppendV(&msg, fmt, ap);
- JniAbort(jni_function_name, msg.c_str());
-}
-
-void JniAbortF(const char* jni_function_name, const char* fmt, ...) {
- va_list args;
- va_start(args, fmt);
- JniAbortV(jni_function_name, fmt, args);
- va_end(args);
-}
-
/*
* ===========================================================================
* JNI function helpers
* ===========================================================================
*/
-static bool IsHandleScopeLocalRef(JNIEnv* env, jobject localRef) {
- return GetIndirectRefKind(localRef) == kHandleScopeOrInvalid &&
- reinterpret_cast<JNIEnvExt*>(env)->self->HandleScopeContains(localRef);
-}
-
// Flags passed into ScopedCheck.
#define kFlag_Default 0x0000
@@ -109,134 +64,88 @@ static bool IsHandleScopeLocalRef(JNIEnv* env, jobject localRef) {
#define kFlag_Invocation 0x8000 // Part of the invocation interface (JavaVM*).
#define kFlag_ForceTrace 0x80000000 // Add this to a JNI function's flags if you want to trace every call.
-
-static const char* gBuiltInPrefixes[] = {
- "Landroid/",
- "Lcom/android/",
- "Lcom/google/android/",
- "Ldalvik/",
- "Ljava/",
- "Ljavax/",
- "Llibcore/",
- "Lorg/apache/harmony/",
- nullptr
+/*
+ * Java primitive types:
+ * B - jbyte
+ * C - jchar
+ * D - jdouble
+ * F - jfloat
+ * I - jint
+ * J - jlong
+ * S - jshort
+ * Z - jboolean (shown as true and false)
+ * V - void
+ *
+ * Java reference types:
+ * L - jobject
+ * a - jarray
+ * c - jclass
+ * s - jstring
+ * t - jthrowable
+ *
+ * JNI types:
+ * b - jboolean (shown as JNI_TRUE and JNI_FALSE)
+ * f - jfieldID
+ * i - JNI error value (JNI_OK, JNI_ERR, JNI_EDETACHED, JNI_EVERSION)
+ * m - jmethodID
+ * p - void*
+ * r - jint (for release mode arguments)
+ * u - const char* (Modified UTF-8)
+ * z - jsize (for lengths; use i if negative values are okay)
+ * v - JavaVM*
+ * w - jobjectRefType
+ * E - JNIEnv*
+ * . - no argument; just print "..." (used for varargs JNI calls)
+ *
+ */
+union JniValueType {
+ jarray a;
+ jboolean b;
+ jclass c;
+ jfieldID f;
+ jint i;
+ jmethodID m;
+ const void* p; // Pointer.
+ jint r; // Release mode.
+ jstring s;
+ jthrowable t;
+ const char* u; // Modified UTF-8.
+ JavaVM* v;
+ jobjectRefType w;
+ jsize z;
+ jbyte B;
+ jchar C;
+ jdouble D;
+ JNIEnv* E;
+ jfloat F;
+ jint I;
+ jlong J;
+ jobject L;
+ jshort S;
+ const void* V; // void
+ jboolean Z;
};
-static bool ShouldTrace(JavaVMExt* vm, mirror::ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages
- // when a native method that matches the -Xjnitrace argument calls a JNI function
- // such as NewByteArray.
- // If -verbose:third-party-jni is on, we want to log any JNI function calls
- // made by a third-party native method.
- std::string class_name(method->GetDeclaringClassDescriptor());
- if (!vm->trace.empty() && class_name.find(vm->trace) != std::string::npos) {
- return true;
- }
- if (VLOG_IS_ON(third_party_jni)) {
- // Return true if we're trying to log all third-party JNI activity and 'method' doesn't look
- // like part of Android.
- for (size_t i = 0; gBuiltInPrefixes[i] != nullptr; ++i) {
- if (StartsWith(class_name, gBuiltInPrefixes[i])) {
- return false;
- }
- }
- return true;
- }
- return false;
-}
-
class ScopedCheck {
public:
- // For JNIEnv* functions.
- explicit ScopedCheck(JNIEnv* env, int flags, const char* functionName)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : soa_(env) {
- Init(flags, functionName, true);
- CheckThread(flags);
+ explicit ScopedCheck(int flags, const char* functionName, bool has_method = true)
+ : function_name_(functionName), flags_(flags), indent_(0), has_method_(has_method) {
}
- // For JavaVM* functions.
- // TODO: it's not correct that this is a lock function, but making it so aids annotalysis.
- explicit ScopedCheck(JavaVM* vm, bool has_method, const char* functionName)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
- : soa_(vm) {
- Init(kFlag_Invocation, functionName, has_method);
- }
-
- ~ScopedCheck() UNLOCK_FUNCTION(Locks::mutator_lock_) {}
-
- const ScopedObjectAccess& soa() {
- return soa_;
- }
-
- bool ForceCopy() {
- return Runtime::Current()->GetJavaVM()->force_copy;
- }
+ ~ScopedCheck() {}
// Checks that 'class_name' is a valid "fully-qualified" JNI class name, like "java/lang/Thread"
// or "[Ljava/lang/Object;". A ClassLoader can actually normalize class names a couple of
// times, so using "java.lang.Thread" instead of "java/lang/Thread" might work in some
// circumstances, but this is incorrect.
- void CheckClassName(const char* class_name) {
+ bool CheckClassName(const char* class_name) {
if ((class_name == nullptr) || !IsValidJniClassName(class_name)) {
- JniAbortF(function_name_,
- "illegal class name '%s'\n"
- " (should be of the form 'package/Class', [Lpackage/Class;' or '[[B')",
- class_name);
- }
- }
-
- /*
- * Verify that the field is of the appropriate type. If the field has an
- * object type, "java_object" is the object we're trying to assign into it.
- *
- * Works for both static and instance fields.
- */
- void CheckFieldType(jvalue value, jfieldID fid, char prim, bool isStatic)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::ArtField> f(hs.NewHandle(CheckFieldID(fid)));
- if (f.Get() == nullptr) {
- return;
- }
- mirror::Class* field_type = FieldHelper(f).GetType();
- if (!field_type->IsPrimitive()) {
- jobject java_object = value.l;
- if (java_object != nullptr) {
- mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
- // If java_object is a weak global ref whose referent has been cleared,
- // obj will be NULL. Otherwise, obj should always be non-NULL
- // and valid.
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "field operation on invalid %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
- return;
- } else {
- if (!obj->InstanceOf(field_type)) {
- JniAbortF(function_name_, "attempt to set field %s with value of wrong type: %s",
- PrettyField(f.Get()).c_str(), PrettyTypeOf(obj).c_str());
- return;
- }
- }
- }
- } else if (field_type != Runtime::Current()->GetClassLinker()->FindPrimitiveClass(prim)) {
- JniAbortF(function_name_, "attempt to set field %s with value of wrong type: %c",
- PrettyField(f.Get()).c_str(), prim);
- return;
- }
-
- if (isStatic != f.Get()->IsStatic()) {
- if (isStatic) {
- JniAbortF(function_name_, "accessing non-static field %s as static",
- PrettyField(f.Get()).c_str());
- } else {
- JniAbortF(function_name_, "accessing static field %s as non-static",
- PrettyField(f.Get()).c_str());
- }
- return;
+ AbortF("illegal class name '%s'\n"
+             "    (should be of the form 'package/Class', '[Lpackage/Class;' or '[[B')",
+ class_name);
+ return false;
}
+ return true;
}
/*
@@ -244,59 +153,87 @@ class ScopedCheck {
*
* Assumes "jobj" has already been validated.
*/
- void CheckInstanceFieldID(jobject java_object, jfieldID fid)
+ bool CheckInstanceFieldID(ScopedObjectAccess& soa, jobject java_object, jfieldID fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
- if (o == nullptr || !Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "field operation on invalid %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
- return;
+ mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
+ if (o == nullptr) {
+ AbortF("field operation on NULL object: %p", java_object);
+ return false;
+ }
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ AbortF("field operation on invalid %s: %p",
+ ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
+ java_object);
+ return false;
}
- mirror::ArtField* f = CheckFieldID(fid);
+ mirror::ArtField* f = CheckFieldID(soa, fid);
if (f == nullptr) {
- return;
+ return false;
}
mirror::Class* c = o->GetClass();
if (c->FindInstanceField(f->GetName(), f->GetTypeDescriptor()) == nullptr) {
- JniAbortF(function_name_, "jfieldID %s not valid for an object of class %s",
- PrettyField(f).c_str(), PrettyTypeOf(o).c_str());
+ AbortF("jfieldID %s not valid for an object of class %s",
+ PrettyField(f).c_str(), PrettyTypeOf(o).c_str());
+ return false;
}
+ return true;
}
/*
* Verify that the pointer value is non-NULL.
*/
- void CheckNonNull(const void* ptr) {
- if (ptr == nullptr) {
- JniAbortF(function_name_, "non-nullable argument was NULL");
+ bool CheckNonNull(const void* ptr) {
+ if (UNLIKELY(ptr == nullptr)) {
+ AbortF("non-nullable argument was NULL");
+ return false;
}
+ return true;
}
/*
* Verify that the method's return type matches the type of call.
* 'expectedType' will be "L" for all objects, including arrays.
*/
- void CheckSig(jmethodID mid, const char* expectedType, bool isStatic)
+ bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc,
+ jmethodID mid, Primitive::Type type, InvokeType invoke)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = CheckMethodID(mid);
+ mirror::ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
- return;
+ return false;
}
- if (*expectedType != m->GetShorty()[0]) {
- JniAbortF(function_name_, "the return type of %s does not match %s",
- function_name_, PrettyMethod(m).c_str());
+ if (type != Primitive::GetType(m->GetShorty()[0])) {
+ AbortF("the return type of %s does not match %s", function_name_, PrettyMethod(m).c_str());
+ return false;
}
- if (isStatic != m->IsStatic()) {
- if (isStatic) {
- JniAbortF(function_name_, "calling non-static method %s with %s",
- PrettyMethod(m).c_str(), function_name_);
+ bool is_static = (invoke == kStatic);
+ if (is_static != m->IsStatic()) {
+ if (is_static) {
+ AbortF("calling non-static method %s with %s",
+ PrettyMethod(m).c_str(), function_name_);
} else {
- JniAbortF(function_name_, "calling static method %s with %s",
- PrettyMethod(m).c_str(), function_name_);
+ AbortF("calling static method %s with %s",
+ PrettyMethod(m).c_str(), function_name_);
}
+ return false;
}
+ if (invoke != kVirtual) {
+ mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+ if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
+ AbortF("can't call %s %s with class %s", invoke == kStatic ? "static" : "nonvirtual",
+ PrettyMethod(m).c_str(), PrettyClass(c).c_str());
+ return false;
+ }
+ }
+ if (invoke != kStatic) {
+ mirror::Object* o = soa.Decode<mirror::Object*>(jobj);
+ if (!o->InstanceOf(m->GetDeclaringClass())) {
+ AbortF("can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
+ return false;
+ }
+ }
+ return true;
}
/*
@@ -304,17 +241,18 @@ class ScopedCheck {
*
* Assumes "java_class" has already been validated.
*/
- void CheckStaticFieldID(jclass java_class, jfieldID fid)
+ bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Class* c = soa_.Decode<mirror::Class*>(java_class);
- mirror::ArtField* f = CheckFieldID(fid);
+ mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
+ mirror::ArtField* f = CheckFieldID(soa, fid);
if (f == nullptr) {
- return;
+ return false;
}
if (f->GetDeclaringClass() != c) {
- JniAbortF(function_name_, "static jfieldID %p not valid for class %s",
- fid, PrettyClass(c).c_str());
+ AbortF("static jfieldID %p not valid for class %s", fid, PrettyClass(c).c_str());
+ return false;
}
+ return true;
}
/*
@@ -326,17 +264,18 @@ class ScopedCheck {
*
* Instances of "java_class" must be instances of the method's declaring class.
*/
- void CheckStaticMethod(jclass java_class, jmethodID mid)
+ bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = CheckMethodID(mid);
+ mirror::ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
- return;
+ return false;
}
- mirror::Class* c = soa_.Decode<mirror::Class*>(java_class);
+ mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
- JniAbortF(function_name_, "can't call static %s on class %s",
- PrettyMethod(m).c_str(), PrettyClass(c).c_str());
+ AbortF("can't call static %s on class %s", PrettyMethod(m).c_str(), PrettyClass(c).c_str());
+ return false;
}
+ return true;
}
/*
@@ -346,17 +285,18 @@ class ScopedCheck {
* (Note the mid might point to a declaration in an interface; this
* will be handled automatically by the instanceof check.)
*/
- void CheckVirtualMethod(jobject java_object, jmethodID mid)
+ bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = CheckMethodID(mid);
+ mirror::ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
- return;
+ return false;
}
- mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
+ mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
if (!o->InstanceOf(m->GetDeclaringClass())) {
- JniAbortF(function_name_, "can't call %s on instance of %s",
- PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
+ AbortF("can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
+ return false;
}
+ return true;
}
/**
@@ -395,11 +335,10 @@ class ScopedCheck {
*
* Use the kFlag_NullableUtf flag where 'u' field(s) are nullable.
*/
- void Check(bool entry, const char* fmt0, ...) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- va_list ap;
-
+ bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* traceMethod = nullptr;
- if (has_method_ && (!soa_.Vm()->trace.empty() || VLOG_IS_ON(third_party_jni))) {
+ if (has_method_ && soa.Vm()->IsTracingEnabled()) {
// We need to guard some of the invocation interface's calls: a bad caller might
// use DetachCurrentThread or GetEnv on a thread that's not yet attached.
Thread* self = Thread::Current();
@@ -409,129 +348,70 @@ class ScopedCheck {
}
if (((flags_ & kFlag_ForceTrace) != 0) ||
- (traceMethod != nullptr && ShouldTrace(soa_.Vm(), traceMethod))) {
- va_start(ap, fmt0);
+ (traceMethod != nullptr && soa.Vm()->ShouldTrace(traceMethod))) {
std::string msg;
- for (const char* fmt = fmt0; *fmt;) {
- char ch = *fmt++;
- if (ch == 'B') { // jbyte
- jbyte b = va_arg(ap, int);
- if (b >= 0 && b < 10) {
- StringAppendF(&msg, "%d", b);
- } else {
- StringAppendF(&msg, "%#x (%d)", b, b);
- }
- } else if (ch == 'C') { // jchar
- jchar c = va_arg(ap, int);
- if (c < 0x7f && c >= ' ') {
- StringAppendF(&msg, "U+%x ('%c')", c, c);
- } else {
- StringAppendF(&msg, "U+%x", c);
- }
- } else if (ch == 'F' || ch == 'D') { // jfloat, jdouble
- StringAppendF(&msg, "%g", va_arg(ap, double));
- } else if (ch == 'I' || ch == 'S') { // jint, jshort
- StringAppendF(&msg, "%d", va_arg(ap, int));
- } else if (ch == 'J') { // jlong
- StringAppendF(&msg, "%" PRId64, va_arg(ap, jlong));
- } else if (ch == 'Z') { // jboolean
- StringAppendF(&msg, "%s", va_arg(ap, int) ? "true" : "false");
- } else if (ch == 'V') { // void
- msg += "void";
- } else if (ch == 'v') { // JavaVM*
- JavaVM* vm = va_arg(ap, JavaVM*);
- StringAppendF(&msg, "(JavaVM*)%p", vm);
- } else if (ch == 'E') { // JNIEnv*
- JNIEnv* env = va_arg(ap, JNIEnv*);
- StringAppendF(&msg, "(JNIEnv*)%p", env);
- } else if (ch == 'L' || ch == 'a' || ch == 's') { // jobject, jarray, jstring
- // For logging purposes, these are identical.
- jobject o = va_arg(ap, jobject);
- if (o == nullptr) {
- msg += "NULL";
- } else {
- StringAppendF(&msg, "%p", o);
- }
- } else if (ch == 'b') { // jboolean (JNI-style)
- jboolean b = va_arg(ap, int);
- msg += (b ? "JNI_TRUE" : "JNI_FALSE");
- } else if (ch == 'c') { // jclass
- jclass jc = va_arg(ap, jclass);
- mirror::Class* c = reinterpret_cast<mirror::Class*>(Thread::Current()->DecodeJObject(jc));
- if (c == nullptr) {
- msg += "NULL";
- } else if (c == kInvalidIndirectRefObject ||
- !Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
- StringAppendF(&msg, "INVALID POINTER:%p", jc);
- } else if (!c->IsClass()) {
- msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
- } else {
- msg += PrettyClass(c);
- if (!entry) {
- StringAppendF(&msg, " (%p)", jc);
- }
- }
- } else if (ch == 'f') { // jfieldID
- jfieldID fid = va_arg(ap, jfieldID);
- mirror::ArtField* f = reinterpret_cast<mirror::ArtField*>(fid);
- msg += PrettyField(f);
- if (!entry) {
- StringAppendF(&msg, " (%p)", fid);
- }
- } else if (ch == 'z') { // non-negative jsize
- // You might expect jsize to be size_t, but it's not; it's the same as jint.
- // We only treat this specially so we can do the non-negative check.
- // TODO: maybe this wasn't worth it?
- jint i = va_arg(ap, jint);
- StringAppendF(&msg, "%d", i);
- } else if (ch == 'm') { // jmethodID
- jmethodID mid = va_arg(ap, jmethodID);
- mirror::ArtMethod* m = reinterpret_cast<mirror::ArtMethod*>(mid);
- msg += PrettyMethod(m);
- if (!entry) {
- StringAppendF(&msg, " (%p)", mid);
- }
- } else if (ch == 'p') { // void* ("pointer")
- void* p = va_arg(ap, void*);
- if (p == nullptr) {
- msg += "NULL";
- } else {
- StringAppendF(&msg, "(void*) %p", p);
- }
- } else if (ch == 'r') { // jint (release mode)
- jint releaseMode = va_arg(ap, jint);
- if (releaseMode == 0) {
- msg += "0";
- } else if (releaseMode == JNI_ABORT) {
- msg += "JNI_ABORT";
- } else if (releaseMode == JNI_COMMIT) {
- msg += "JNI_COMMIT";
- } else {
- StringAppendF(&msg, "invalid release mode %d", releaseMode);
- }
- } else if (ch == 'u') { // const char* (Modified UTF-8)
- const char* utf = va_arg(ap, const char*);
- if (utf == nullptr) {
- msg += "NULL";
- } else {
- StringAppendF(&msg, "\"%s\"", utf);
- }
- } else if (ch == '.') {
- msg += "...";
+ for (size_t i = 0; fmt[i] != '\0'; ++i) {
+ TracePossibleHeapValue(soa, entry, fmt[i], args[i], &msg);
+ if (fmt[i + 1] != '\0') {
+ StringAppendF(&msg, ", ");
+ }
+ }
+
+ if ((flags_ & kFlag_ForceTrace) != 0) {
+ LOG(INFO) << "JNI: call to " << function_name_ << "(" << msg << ")";
+ } else if (entry) {
+ if (has_method_) {
+ std::string methodName(PrettyMethod(traceMethod, false));
+ LOG(INFO) << "JNI: " << methodName << " -> " << function_name_ << "(" << msg << ")";
+ indent_ = methodName.size() + 1;
} else {
- JniAbortF(function_name_, "unknown trace format specifier: %c", ch);
- return;
+ LOG(INFO) << "JNI: -> " << function_name_ << "(" << msg << ")";
+ indent_ = 0;
+ }
+ } else {
+ LOG(INFO) << StringPrintf("JNI: %*s<- %s returned %s", indent_, "", function_name_, msg.c_str());
+ }
+ }
+
+ // We always do the thorough checks on entry, and never on exit...
+ if (entry) {
+ for (size_t i = 0; fmt[i] != '\0'; ++i) {
+ if (!CheckPossibleHeapValue(soa, fmt[i], args[i])) {
+ return false;
}
- if (*fmt) {
+ }
+ }
+ return true;
+ }
+
+ bool CheckNonHeap(JavaVMExt* vm, bool entry, const char* fmt, JniValueType* args) {
+ bool should_trace = (flags_ & kFlag_ForceTrace) != 0;
+ if (!should_trace && vm->IsTracingEnabled()) {
+ // We need to guard some of the invocation interface's calls: a bad caller might
+ // use DetachCurrentThread or GetEnv on a thread that's not yet attached.
+ Thread* self = Thread::Current();
+ if ((flags_ & kFlag_Invocation) == 0 || self != nullptr) {
+ ScopedObjectAccess soa(self);
+ mirror::ArtMethod* traceMethod = self->GetCurrentMethod(nullptr);
+ should_trace = (traceMethod != nullptr && vm->ShouldTrace(traceMethod));
+ }
+ }
+ if (should_trace) {
+ std::string msg;
+ for (size_t i = 0; fmt[i] != '\0'; ++i) {
+ TraceNonHeapValue(fmt[i], args[i], &msg);
+ if (fmt[i + 1] != '\0') {
StringAppendF(&msg, ", ");
}
}
- va_end(ap);
if ((flags_ & kFlag_ForceTrace) != 0) {
LOG(INFO) << "JNI: call to " << function_name_ << "(" << msg << ")";
} else if (entry) {
if (has_method_) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ mirror::ArtMethod* traceMethod = self->GetCurrentMethod(nullptr);
std::string methodName(PrettyMethod(traceMethod, false));
LOG(INFO) << "JNI: " << methodName << " -> " << function_name_ << "(" << msg << ")";
indent_ = methodName.size() + 1;
@@ -546,43 +426,176 @@ class ScopedCheck {
// We always do the thorough checks on entry, and never on exit...
if (entry) {
- va_start(ap, fmt0);
- for (const char* fmt = fmt0; *fmt; ++fmt) {
- char ch = *fmt;
- if (ch == 'a') {
- CheckArray(va_arg(ap, jarray));
- } else if (ch == 'c') {
- CheckInstance(kClass, va_arg(ap, jclass));
- } else if (ch == 'L') {
- CheckObject(va_arg(ap, jobject));
- } else if (ch == 'r') {
- CheckReleaseMode(va_arg(ap, jint));
- } else if (ch == 's') {
- CheckInstance(kString, va_arg(ap, jstring));
- } else if (ch == 'u') {
- if ((flags_ & kFlag_Release) != 0) {
- CheckNonNull(va_arg(ap, const char*));
- } else {
- bool nullable = ((flags_ & kFlag_NullableUtf) != 0);
- CheckUtfString(va_arg(ap, const char*), nullable);
- }
- } else if (ch == 'z') {
- CheckLengthPositive(va_arg(ap, jsize));
- } else if (strchr("BCISZbfmpEv", ch) != nullptr) {
- va_arg(ap, uint32_t); // Skip this argument.
- } else if (ch == 'D' || ch == 'F') {
- va_arg(ap, double); // Skip this argument.
- } else if (ch == 'J') {
- va_arg(ap, uint64_t); // Skip this argument.
- } else if (ch == '.') {
- } else {
- LOG(FATAL) << "Unknown check format specifier: " << ch;
+ for (size_t i = 0; fmt[i] != '\0'; ++i) {
+ if (!CheckNonHeapValue(fmt[i], args[i])) {
+ return false;
}
}
- va_end(ap);
}
+ return true;
}
+ bool CheckReflectedMethod(ScopedObjectAccess& soa, jobject jmethod)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* method = soa.Decode<mirror::Object*>(jmethod);
+ if (method == nullptr) {
+ AbortF("expected non-null method");
+ return false;
+ }
+ mirror::Class* c = method->GetClass();
+ if (soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Method) != c &&
+ soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Constructor) != c) {
+ AbortF("expected java.lang.reflect.Method or "
+ "java.lang.reflect.Constructor but got object of type %s: %p",
+ PrettyTypeOf(method).c_str(), jmethod);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = soa.DecodeMethod(mid);
+ if (method == nullptr) {
+ AbortF("expected non-null constructor");
+ return false;
+ }
+ if (!method->IsConstructor() || method->IsStatic()) {
+ AbortF("expected a constructor but %s: %p", PrettyTypeOf(method).c_str(), mid);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckReflectedField(ScopedObjectAccess& soa, jobject jfield)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* field = soa.Decode<mirror::Object*>(jfield);
+ if (field == nullptr) {
+ AbortF("expected non-null java.lang.reflect.Field");
+ return false;
+ }
+ mirror::Class* c = field->GetClass();
+ if (soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Field) != c) {
+ AbortF("expected java.lang.reflect.Field but got object of type %s: %p",
+ PrettyTypeOf(field).c_str(), jfield);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckThrowable(ScopedObjectAccess& soa, jthrowable jobj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* obj = soa.Decode<mirror::Object*>(jobj);
+ if (!obj->GetClass()->IsThrowableClass()) {
+ AbortF("expected java.lang.Throwable but got object of type "
+ "%s: %p", PrettyTypeOf(obj).c_str(), obj);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckThrowableClass(ScopedObjectAccess& soa, jclass jc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+ if (!c->IsThrowableClass()) {
+ AbortF("expected java.lang.Throwable class but got object of "
+ "type %s: %p", PrettyDescriptor(c).c_str(), c);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckReferenceKind(IndirectRefKind expected_kind, JavaVMExt* vm, Thread* self, jobject obj) {
+ IndirectRefKind found_kind;
+ if (expected_kind == kLocal) {
+ found_kind = GetIndirectRefKind(obj);
+ if (found_kind == kHandleScopeOrInvalid && self->HandleScopeContains(obj)) {
+ found_kind = kLocal;
+ }
+ } else {
+ found_kind = GetIndirectRefKind(obj);
+ }
+ if (obj != nullptr && found_kind != expected_kind) {
+ AbortF("expected reference of kind %s but found %s: %p",
+ ToStr<IndirectRefKind>(expected_kind).c_str(),
+ ToStr<IndirectRefKind>(GetIndirectRefKind(obj)).c_str(),
+ obj);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckInstantiableNonArray(ScopedObjectAccess& soa, jclass jc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+ if (!c->IsInstantiableNonArray()) {
+ AbortF("can't make objects of type %s: %p", PrettyDescriptor(c).c_str(), c);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckPrimitiveArrayType(ScopedObjectAccess& soa, jarray array, Primitive::Type type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (!CheckArray(soa, array)) {
+ return false;
+ }
+ mirror::Array* a = soa.Decode<mirror::Array*>(array);
+ if (a->GetClass()->GetComponentType()->GetPrimitiveType() != type) {
+ AbortF("incompatible array type %s expected %s[]: %p",
+ PrettyDescriptor(a->GetClass()).c_str(), PrettyDescriptor(type).c_str(), array);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckFieldAccess(ScopedObjectAccess& soa, jobject obj, jfieldID fid, bool is_static,
+ Primitive::Type type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (is_static && !CheckStaticFieldID(soa, down_cast<jclass>(obj), fid)) {
+ return false;
+ }
+ if (!is_static && !CheckInstanceFieldID(soa, obj, fid)) {
+ return false;
+ }
+ mirror::ArtField* field = soa.DecodeField(fid);
+ DCHECK(field != nullptr); // Already checked by Check.
+ if (is_static != field->IsStatic()) {
+ AbortF("attempt to access %s field %s: %p",
+ field->IsStatic() ? "static" : "non-static", PrettyField(field).c_str(), fid);
+ return false;
+ }
+ if (type != field->GetTypeAsPrimitiveType()) {
+ AbortF("attempt to access field %s of type %s with the wrong type %s: %p",
+ PrettyField(field).c_str(), PrettyDescriptor(field->GetTypeDescriptor()).c_str(),
+ PrettyDescriptor(type).c_str(), fid);
+ return false;
+ }
+ if (is_static) {
+ mirror::Object* o = soa.Decode<mirror::Object*>(obj);
+ if (o == nullptr || !o->IsClass()) {
+ AbortF("attempt to access static field %s with a class argument of type %s: %p",
+ PrettyField(field).c_str(), PrettyTypeOf(o).c_str(), fid);
+ return false;
+ }
+ mirror::Class* c = o->AsClass();
+ if (field->GetDeclaringClass() != c) {
+ AbortF("attempt to access static field %s with an incompatible class argument of %s: %p",
+ PrettyField(field).c_str(), PrettyDescriptor(c).c_str(), fid);
+ return false;
+ }
+ } else {
+ mirror::Object* o = soa.Decode<mirror::Object*>(obj);
+ if (o == nullptr || !field->GetDeclaringClass()->IsAssignableFrom(o->GetClass())) {
+ AbortF("attempt to access field %s from an object argument of type %s: %p",
+ PrettyField(field).c_str(), PrettyTypeOf(o).c_str(), fid);
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private:
enum InstanceKind {
kClass,
kDirectByteBuffer,
@@ -598,7 +611,7 @@ class ScopedCheck {
* Because we're looking at an object on the GC heap, we have to switch
* to "running" mode before doing the checks.
*/
- bool CheckInstance(InstanceKind kind, jobject java_object)
+ bool CheckInstance(ScopedObjectAccess& soa, InstanceKind kind, jobject java_object, bool null_ok)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const char* what = nullptr;
switch (kind) {
@@ -622,15 +635,20 @@ class ScopedCheck {
}
if (java_object == nullptr) {
- JniAbortF(function_name_, "%s received null %s", function_name_, what);
- return false;
+ if (null_ok) {
+ return true;
+ } else {
+ AbortF("%s received NULL %s", function_name_, what);
+ return false;
+ }
}
- mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
+ mirror::Object* obj = soa.Decode<mirror::Object*>(java_object);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "%s is an invalid %s: %p (%p)",
- what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object, obj);
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ AbortF("%s is an invalid %s: %p (%p)",
+ what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
+ java_object, obj);
return false;
}
@@ -652,114 +670,333 @@ class ScopedCheck {
break;
}
if (!okay) {
- JniAbortF(function_name_, "%s has wrong type: %s", what, PrettyTypeOf(obj).c_str());
+ AbortF("%s has wrong type: %s", what, PrettyTypeOf(obj).c_str());
return false;
}
return true;
}
- private:
- // Set "has_method" to true if we have a valid thread with a method pointer.
- // We won't have one before attaching a thread, after detaching a thread, or
- // when shutting down the runtime.
- void Init(int flags, const char* functionName, bool has_method) {
- flags_ = flags;
- function_name_ = functionName;
- has_method_ = has_method;
+ /*
+ * Verify that the "mode" argument passed to a primitive array Release
+ * function is one of the valid values.
+ */
+ bool CheckReleaseMode(jint mode) {
+ if (mode != 0 && mode != JNI_COMMIT && mode != JNI_ABORT) {
+ AbortF("unknown value for release mode: %d", mode);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckPossibleHeapValue(ScopedObjectAccess& soa, char fmt, JniValueType arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ switch (fmt) {
+ case 'a': // jarray
+ return CheckArray(soa, arg.a);
+ case 'c': // jclass
+ return CheckInstance(soa, kClass, arg.c, false);
+ case 'f': // jfieldID
+ return CheckFieldID(soa, arg.f) != nullptr;
+ case 'm': // jmethodID
+ return CheckMethodID(soa, arg.m) != nullptr;
+ case 'r': // release int
+ return CheckReleaseMode(arg.r);
+ case 's': // jstring
+ return CheckInstance(soa, kString, arg.s, false);
+ case 't': // jthrowable
+ return CheckInstance(soa, kThrowable, arg.t, false);
+ case 'E': // JNIEnv*
+ return CheckThread(arg.E);
+ case 'L': // jobject
+ return CheckInstance(soa, kObject, arg.L, true);
+ default:
+ return CheckNonHeapValue(fmt, arg);
+ }
+ }
+
+ bool CheckNonHeapValue(char fmt, JniValueType arg) {
+ switch (fmt) {
+ case '.': // ...
+ case 'p': // TODO: pointer - null or readable?
+ case 'v': // JavaVM*
+ case 'B': // jbyte
+ case 'C': // jchar
+ case 'D': // jdouble
+ case 'F': // jfloat
+ case 'I': // jint
+ case 'J': // jlong
+ case 'S': // jshort
+ break; // Ignored.
+ case 'b': // jboolean, why two? Fall-through.
+ case 'Z':
+ return CheckBoolean(arg.Z);
+ case 'u': // utf8
+ if ((flags_ & kFlag_Release) != 0) {
+ return CheckNonNull(arg.u);
+ } else {
+ bool nullable = ((flags_ & kFlag_NullableUtf) != 0);
+ return CheckUtfString(arg.u, nullable);
+ }
+ case 'w': // jobjectRefType
+ switch (arg.w) {
+ case JNIInvalidRefType:
+ case JNILocalRefType:
+ case JNIGlobalRefType:
+ case JNIWeakGlobalRefType:
+ break;
+ default:
+ AbortF("Unknown reference type");
+ return false;
+ }
+ break;
+ case 'z': // jsize
+ return CheckLengthPositive(arg.z);
+ default:
+ AbortF("unknown format specifier: '%c'", fmt);
+ return false;
+ }
+ return true;
}
+ void TracePossibleHeapValue(ScopedObjectAccess& soa, bool entry, char fmt, JniValueType arg,
+ std::string* msg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ switch (fmt) {
+ case 'L': // jobject fall-through.
+ case 'a': // jarray fall-through.
+ case 's': // jstring fall-through.
+ case 't': // jthrowable fall-through.
+ if (arg.L == nullptr) {
+ *msg += "NULL";
+ } else {
+ StringAppendF(msg, "%p", arg.L);
+ }
+ break;
+ case 'c': { // jclass
+ jclass jc = arg.c;
+ mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+ if (c == nullptr) {
+ *msg += "NULL";
+ } else if (c == kInvalidIndirectRefObject ||
+ !Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
+ StringAppendF(msg, "INVALID POINTER:%p", jc);
+ } else if (!c->IsClass()) {
+ *msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
+ } else {
+ *msg += PrettyClass(c);
+ if (!entry) {
+ StringAppendF(msg, " (%p)", jc);
+ }
+ }
+ break;
+ }
+ case 'f': { // jfieldID
+ jfieldID fid = arg.f;
+ mirror::ArtField* f = soa.DecodeField(fid);
+ *msg += PrettyField(f);
+ if (!entry) {
+ StringAppendF(msg, " (%p)", fid);
+ }
+ break;
+ }
+ case 'm': { // jmethodID
+ jmethodID mid = arg.m;
+ mirror::ArtMethod* m = soa.DecodeMethod(mid);
+ *msg += PrettyMethod(m);
+ if (!entry) {
+ StringAppendF(msg, " (%p)", mid);
+ }
+ break;
+ }
+ default:
+ TraceNonHeapValue(fmt, arg, msg);
+ break;
+ }
+ }
+
+ void TraceNonHeapValue(char fmt, JniValueType arg, std::string* msg) {
+ switch (fmt) {
+ case 'B': // jbyte
+ if (arg.B >= 0 && arg.B < 10) {
+ StringAppendF(msg, "%d", arg.B);
+ } else {
+ StringAppendF(msg, "%#x (%d)", arg.B, arg.B);
+ }
+ break;
+ case 'C': // jchar
+ if (arg.C < 0x7f && arg.C >= ' ') {
+ StringAppendF(msg, "U+%x ('%c')", arg.C, arg.C);
+ } else {
+ StringAppendF(msg, "U+%x", arg.C);
+ }
+ break;
+ case 'F': // jfloat
+ StringAppendF(msg, "%g", arg.F);
+ break;
+ case 'D': // jdouble
+ StringAppendF(msg, "%g", arg.D);
+ break;
+ case 'S': // jshort
+ StringAppendF(msg, "%d", arg.S);
+ break;
+ case 'i': // jint - fall-through.
+ case 'I': // jint
+ StringAppendF(msg, "%d", arg.I);
+ break;
+ case 'J': // jlong
+ StringAppendF(msg, "%" PRId64, arg.J);
+ break;
+ case 'Z': // jboolean
+ case 'b': // jboolean (JNI-style)
+ *msg += arg.b == JNI_TRUE ? "true" : "false";
+ break;
+ case 'V': // void
+ DCHECK(arg.V == nullptr);
+ *msg += "void";
+ break;
+ case 'v': // JavaVM*
+ StringAppendF(msg, "(JavaVM*)%p", arg.v);
+ break;
+ case 'E':
+ StringAppendF(msg, "(JNIEnv*)%p", arg.E);
+ break;
+ case 'z': // non-negative jsize
+ // You might expect jsize to be size_t, but it's not; it's the same as jint.
+ // We only treat this specially so we can do the non-negative check.
+ // TODO: maybe this wasn't worth it?
+ StringAppendF(msg, "%d", arg.z);
+ break;
+ case 'p': // void* ("pointer")
+ if (arg.p == nullptr) {
+ *msg += "NULL";
+ } else {
+ StringAppendF(msg, "(void*) %p", arg.p);
+ }
+ break;
+ case 'r': { // jint (release mode)
+ jint releaseMode = arg.r;
+ if (releaseMode == 0) {
+ *msg += "0";
+ } else if (releaseMode == JNI_ABORT) {
+ *msg += "JNI_ABORT";
+ } else if (releaseMode == JNI_COMMIT) {
+ *msg += "JNI_COMMIT";
+ } else {
+ StringAppendF(msg, "invalid release mode %d", releaseMode);
+ }
+ break;
+ }
+ case 'u': // const char* (Modified UTF-8)
+ if (arg.u == nullptr) {
+ *msg += "NULL";
+ } else {
+ StringAppendF(msg, "\"%s\"", arg.u);
+ }
+ break;
+ case 'w': // jobjectRefType
+ switch (arg.w) {
+ case JNIInvalidRefType:
+ *msg += "invalid reference type";
+ break;
+ case JNILocalRefType:
+ *msg += "local ref type";
+ break;
+ case JNIGlobalRefType:
+ *msg += "global ref type";
+ break;
+ case JNIWeakGlobalRefType:
+ *msg += "weak global ref type";
+ break;
+ default:
+ *msg += "unknown ref type";
+ break;
+ }
+ break;
+ case '.':
+ *msg += "...";
+ break;
+ default:
+ LOG(FATAL) << function_name_ << ": unknown trace format specifier: '" << fmt << "'";
+ }
+ }
/*
* Verify that "array" is non-NULL and points to an Array object.
*
* Since we're dealing with objects, switch to "running" mode.
*/
- void CheckArray(jarray java_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (java_array == nullptr) {
- JniAbortF(function_name_, "jarray was NULL");
- return;
+ bool CheckArray(ScopedObjectAccess& soa, jarray java_array)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (UNLIKELY(java_array == nullptr)) {
+ AbortF("jarray was NULL");
+ return false;
}
- mirror::Array* a = soa_.Decode<mirror::Array*>(java_array);
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(a)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(), java_array, a);
+ mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
+ if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a))) {
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ AbortF("jarray is an invalid %s: %p (%p)",
+ ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(),
+ java_array, a);
+ return false;
} else if (!a->IsArrayInstance()) {
- JniAbortF(function_name_, "jarray argument has non-array type: %s", PrettyTypeOf(a).c_str());
+ AbortF("jarray argument has non-array type: %s", PrettyTypeOf(a).c_str());
+ return false;
}
+ return true;
}
- void CheckLengthPositive(jsize length) {
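+  // A jboolean coming in from native code must be exactly JNI_TRUE or JNI_FALSE.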
+ bool CheckBoolean(jboolean z) {
+ if (z != JNI_TRUE && z != JNI_FALSE) {
+ AbortF("unexpected jboolean value: %d", z);
+ return false;
+ }
+ return true;
+ }
+
+ bool CheckLengthPositive(jsize length) {
if (length < 0) {
- JniAbortF(function_name_, "negative jsize: %d", length);
+ AbortF("negative jsize: %d", length);
+ return false;
}
+ return true;
}
- mirror::ArtField* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (fid == nullptr) {
- JniAbortF(function_name_, "jfieldID was NULL");
+ AbortF("jfieldID was NULL");
return nullptr;
}
- mirror::ArtField* f = soa_.DecodeField(fid);
+ mirror::ArtField* f = soa.DecodeField(fid);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f) || !f->IsArtField()) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "invalid jfieldID: %p", fid);
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ AbortF("invalid jfieldID: %p", fid);
return nullptr;
}
return f;
}
- mirror::ArtMethod* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (mid == nullptr) {
- JniAbortF(function_name_, "jmethodID was NULL");
+ AbortF("jmethodID was NULL");
return nullptr;
}
- mirror::ArtMethod* m = soa_.DecodeMethod(mid);
+ mirror::ArtMethod* m = soa.DecodeMethod(mid);
if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- JniAbortF(function_name_, "invalid jmethodID: %p", mid);
+ Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+ AbortF("invalid jmethodID: %p", mid);
return nullptr;
}
return m;
}
- /*
- * Verify that "jobj" is a valid object, and that it's an object that JNI
- * is allowed to know about. We allow NULL references.
- *
- * Switches to "running" mode before performing checks.
- */
- void CheckObject(jobject java_object)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (java_object == nullptr) {
- return;
- }
-
- mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
- Runtime::Current()->GetHeap()->DumpSpaces();
- // TODO: when we remove work_around_app_jni_bugs, this should be impossible.
- JniAbortF(function_name_, "native code passing in reference to invalid %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
- }
- }
-
- /*
- * Verify that the "mode" argument passed to a primitive array Release
- * function is one of the valid values.
- */
- void CheckReleaseMode(jint mode) {
- if (mode != 0 && mode != JNI_COMMIT && mode != JNI_ABORT) {
- JniAbortF(function_name_, "unknown value for release mode: %d", mode);
- }
- }
-
- void CheckThread(int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool CheckThread(JNIEnv* env) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
if (self == nullptr) {
- JniAbortF(function_name_, "a thread (tid %d) is making JNI calls without being attached", GetTid());
- return;
+ AbortF("a thread (tid %d) is making JNI calls without being attached", GetTid());
+ return false;
}
// Get the *correct* JNIEnv by going through our TLS pointer.
@@ -767,21 +1004,22 @@ class ScopedCheck {
// Verify that the current thread is (a) attached and (b) associated with
// this particular instance of JNIEnv.
- if (soa_.Env() != threadEnv) {
- JniAbortF(function_name_, "thread %s using JNIEnv* from thread %s",
- ToStr<Thread>(*self).c_str(), ToStr<Thread>(*soa_.Self()).c_str());
- return;
+ if (env != threadEnv) {
+ AbortF("thread %s using JNIEnv* from thread %s",
+ ToStr<Thread>(*self).c_str(), ToStr<Thread>(*self).c_str());
+ return false;
}
// Verify that, if this thread previously made a critical "get" call, we
// do the corresponding "release" call before we try anything else.
- switch (flags & kFlag_CritMask) {
+ switch (flags_ & kFlag_CritMask) {
case kFlag_CritOkay: // okay to call this method
break;
case kFlag_CritBad: // not okay to call
if (threadEnv->critical) {
- JniAbortF(function_name_, "thread %s using JNI after critical get", ToStr<Thread>(*self).c_str());
- return;
+ AbortF("thread %s using JNI after critical get",
+ ToStr<Thread>(*self).c_str());
+ return false;
}
break;
case kFlag_CritGet: // this is a "get" call
@@ -791,44 +1029,46 @@ class ScopedCheck {
case kFlag_CritRelease: // this is a "release" call
threadEnv->critical--;
if (threadEnv->critical < 0) {
- JniAbortF(function_name_, "thread %s called too many critical releases", ToStr<Thread>(*self).c_str());
- return;
+ AbortF("thread %s called too many critical releases",
+ ToStr<Thread>(*self).c_str());
+ return false;
}
break;
default:
- LOG(FATAL) << "Bad flags (internal error): " << flags;
+ LOG(FATAL) << "Bad flags (internal error): " << flags_;
}
// Verify that, if an exception has been raised, the native code doesn't
// make any JNI calls other than the Exception* methods.
- if ((flags & kFlag_ExcepOkay) == 0 && self->IsExceptionPending()) {
+ if ((flags_ & kFlag_ExcepOkay) == 0 && self->IsExceptionPending()) {
ThrowLocation throw_location;
mirror::Throwable* exception = self->GetException(&throw_location);
std::string type(PrettyTypeOf(exception));
- JniAbortF(function_name_, "JNI %s called with pending exception '%s' thrown in %s",
- function_name_, type.c_str(), throw_location.Dump().c_str());
- return;
+ AbortF("JNI %s called with pending exception '%s' thrown in %s",
+ function_name_, type.c_str(), throw_location.Dump().c_str());
+ return false;
}
+ return true;
}
// Verifies that "bytes" points to valid Modified UTF-8 data.
- void CheckUtfString(const char* bytes, bool nullable) {
+ bool CheckUtfString(const char* bytes, bool nullable) {
if (bytes == nullptr) {
if (!nullable) {
- JniAbortF(function_name_, "non-nullable const char* was NULL");
- return;
+ AbortF("non-nullable const char* was NULL");
+ return false;
}
- return;
+ return true;
}
const char* errorKind = nullptr;
uint8_t utf8 = CheckUtfBytes(bytes, &errorKind);
if (errorKind != nullptr) {
- JniAbortF(function_name_,
- "input is not valid Modified UTF-8: illegal %s byte %#x\n"
- " string: '%s'", errorKind, utf8, bytes);
- return;
+ AbortF("input is not valid Modified UTF-8: illegal %s byte %#x\n"
+ " string: '%s'", errorKind, utf8, bytes);
+ return false;
}
+ return true;
}
static uint8_t CheckUtfBytes(const char* bytes, const char** errorKind) {
@@ -880,92 +1120,120 @@ class ScopedCheck {
return 0;
}
- const ScopedObjectAccess soa_;
- const char* function_name_;
- int flags_;
- bool has_method_;
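+  // Formats the failure message and reports it via JavaVMExt::JniAbortV.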
+ void AbortF(const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) {
+ va_list args;
+ va_start(args, fmt);
+ Runtime::Current()->GetJavaVM()->JniAbortV(function_name_, fmt, args);
+ va_end(args);
+ }
+
+ // The name of the JNI function being checked.
+ const char* const function_name_;
+
+ const int flags_;
int indent_;
+ const bool has_method_;
+
DISALLOW_COPY_AND_ASSIGN(ScopedCheck);
};
-#define CHECK_JNI_ENTRY(flags, types, args...) \
- ScopedCheck sc(env, flags, __FUNCTION__); \
- sc.Check(true, types, ##args)
-
-#define CHECK_JNI_EXIT(type, exp) ({ \
- auto _rc = (exp); \
- sc.Check(false, type, _rc); \
- _rc; })
-#define CHECK_JNI_EXIT_VOID() \
- sc.Check(false, "V")
-
/*
* ===========================================================================
* Guarded arrays
* ===========================================================================
*/
-#define kGuardLen 512 /* must be multiple of 2 */
-#define kGuardPattern 0xd5e3 /* uncommon values; d5e3d5e3 invalid addr */
-#define kGuardMagic 0xffd5aa96
-
/* this gets tucked in at the start of the buffer; struct size must be even */
-struct GuardedCopy {
- uint32_t magic;
- uLong adler;
- size_t original_length;
- const void* original_ptr;
-
- /* find the GuardedCopy given the pointer into the "live" data */
- static inline const GuardedCopy* FromData(const void* dataBuf) {
- return reinterpret_cast<const GuardedCopy*>(ActualBuffer(dataBuf));
- }
-
+class GuardedCopy {
+ public:
/*
* Create an over-sized buffer to hold the contents of "buf". Copy it in,
* filling in the area around it with guard data.
- *
- * We use a 16-bit pattern to make a rogue memset less likely to elude us.
*/
- static void* Create(const void* buf, size_t len, bool modOkay) {
- size_t newLen = ActualLength(len);
- uint8_t* newBuf = DebugAlloc(newLen);
+ static void* Create(const void* original_buf, size_t len, bool mod_okay) {
+ const size_t new_len = LengthIncludingRedZones(len);
+ uint8_t* const new_buf = DebugAlloc(new_len);
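+    // Resulting layout: [GuardedCopy header | start canary | user data | end canary],
+    // with kRedZoneSize / 2 bytes of red zone on each side of the user data.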
- // Fill it in with a pattern.
- uint16_t* pat = reinterpret_cast<uint16_t*>(newBuf);
- for (size_t i = 0; i < newLen / 2; i++) {
- *pat++ = kGuardPattern;
+ // If modification is not expected, grab a checksum.
+ uLong adler = 0;
+ if (!mod_okay) {
+ adler = adler32(adler32(0L, Z_NULL, 0), reinterpret_cast<const Bytef*>(original_buf), len);
+ }
+
+ GuardedCopy* copy = new (new_buf) GuardedCopy(original_buf, len, adler);
+
+ // Fill begin region with canary pattern.
+ const size_t kStartCanaryLength = (GuardedCopy::kRedZoneSize / 2) - sizeof(GuardedCopy);
+ for (size_t i = 0, j = 0; i < kStartCanaryLength; ++i) {
+ const_cast<char*>(copy->StartRedZone())[i] = kCanary[j];
+      if (kCanary[j] == '\0') {
+        j = 0;
+      } else {
+        j++;
+      }
}
// Copy the data in; note "len" could be zero.
- memcpy(newBuf + kGuardLen / 2, buf, len);
+ memcpy(const_cast<uint8_t*>(copy->BufferWithinRedZones()), original_buf, len);
- // If modification is not expected, grab a checksum.
- uLong adler = 0;
- if (!modOkay) {
- adler = adler32(0L, Z_NULL, 0);
- adler = adler32(adler, reinterpret_cast<const Bytef*>(buf), len);
- *reinterpret_cast<uLong*>(newBuf) = adler;
+ // Fill end region with canary pattern.
+ for (size_t i = 0, j = 0; i < kEndCanaryLength; ++i) {
+ const_cast<char*>(copy->EndRedZone())[i] = kCanary[j];
+      if (kCanary[j] == '\0') {
+        j = 0;
+      } else {
+        j++;
+      }
}
- GuardedCopy* pExtra = reinterpret_cast<GuardedCopy*>(newBuf);
- pExtra->magic = kGuardMagic;
- pExtra->adler = adler;
- pExtra->original_ptr = buf;
- pExtra->original_length = len;
+ return const_cast<uint8_t*>(copy->BufferWithinRedZones());
+ }
+
+ /*
+ * Create a guarded copy of a primitive array. Modifications to the copied
+ * data are allowed. Returns a pointer to the copied data.
+ */
+ static void* CreateGuardedPACopy(JNIEnv* env, const jarray java_array, jboolean* is_copy) {
+ ScopedObjectAccess soa(env);
+
+ mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
+ size_t component_size = a->GetClass()->GetComponentSize();
+ size_t byte_count = a->GetLength() * component_size;
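+    // The copy is always modifiable (mod_okay == true), so Create() records no checksum.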
+ void* result = Create(a->GetRawData(component_size, 0), byte_count, true);
+ if (is_copy != nullptr) {
+ *is_copy = JNI_TRUE;
+ }
+ return result;
+ }
+
+ /*
+ * Perform the array "release" operation, which may or may not copy data
+ * back into the managed heap, and may or may not release the underlying storage.
+ */
+ static void* ReleaseGuardedPACopy(const char* function_name, JNIEnv* env, jarray java_array,
+ void* embedded_buf, int mode) {
+ ScopedObjectAccess soa(env);
+ mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
- return newBuf + kGuardLen / 2;
+ if (!GuardedCopy::Check(function_name, embedded_buf, true)) {
+ return nullptr;
+ }
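+    // Release semantics: 0 copies the data back and frees the guarded copy, JNI_COMMIT
+    // copies back but keeps it, and JNI_ABORT frees it without copying back.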
+ if (mode != JNI_ABORT) {
+ size_t len = FromEmbedded(embedded_buf)->original_length_;
+ memcpy(a->GetRawData(a->GetClass()->GetComponentSize(), 0), embedded_buf, len);
+ }
+ if (mode != JNI_COMMIT) {
+ return Destroy(embedded_buf);
+ }
+ return embedded_buf;
}
+
/*
* Free up the guard buffer, scrub it, and return the original pointer.
*/
- static void* Destroy(void* dataBuf) {
- const GuardedCopy* pExtra = GuardedCopy::FromData(dataBuf);
- void* original_ptr = const_cast<void*>(pExtra->original_ptr);
- size_t len = pExtra->original_length;
- DebugFree(dataBuf, len);
+ static void* Destroy(void* embedded_buf) {
+ GuardedCopy* copy = FromEmbedded(embedded_buf);
+ void* original_ptr = const_cast<void*>(copy->original_ptr_);
+ size_t len = LengthIncludingRedZones(copy->original_length_);
+ DebugFree(copy, len);
return original_ptr;
}
@@ -975,137 +1243,144 @@ struct GuardedCopy {
*
* The caller has already checked that "dataBuf" is non-NULL.
*/
- static void Check(const char* functionName, const void* dataBuf, bool modOkay) {
+ static bool Check(const char* function_name, const void* embedded_buf, bool mod_okay) {
+ const GuardedCopy* copy = FromEmbedded(embedded_buf);
+ return copy->CheckHeader(function_name, mod_okay) && copy->CheckRedZones(function_name);
+ }
+
+ private:
+ GuardedCopy(const void* original_buf, size_t len, uLong adler) :
+ magic_(kGuardMagic), adler_(adler), original_ptr_(original_buf), original_length_(len) {
+ }
+
+ static uint8_t* DebugAlloc(size_t len) {
+ void* result = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
+ if (result == MAP_FAILED) {
+ PLOG(FATAL) << "GuardedCopy::create mmap(" << len << ") failed";
+ }
+ return reinterpret_cast<uint8_t*>(result);
+ }
+
+ static void DebugFree(void* buf, size_t len) {
+ if (munmap(buf, len) != 0) {
+ PLOG(FATAL) << "munmap(" << buf << ", " << len << ") failed";
+ }
+ }
+
+ static size_t LengthIncludingRedZones(size_t len) {
+ return len + kRedZoneSize;
+ }
+
+ // Get the GuardedCopy from the interior pointer.
+ static GuardedCopy* FromEmbedded(void* embedded_buf) {
+ return reinterpret_cast<GuardedCopy*>(
+ reinterpret_cast<uint8_t*>(embedded_buf) - (kRedZoneSize / 2));
+ }
+
+ static const GuardedCopy* FromEmbedded(const void* embedded_buf) {
+ return reinterpret_cast<const GuardedCopy*>(
+ reinterpret_cast<const uint8_t*>(embedded_buf) - (kRedZoneSize / 2));
+ }
+
+ static void AbortF(const char* jni_function_name, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ Runtime::Current()->GetJavaVM()->JniAbortV(jni_function_name, fmt, args);
+ va_end(args);
+ }
+
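+  // Validates the magic word and, when modification was not allowed, the Adler-32
+  // checksum recorded at Create() time.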
+ bool CheckHeader(const char* function_name, bool mod_okay) const {
static const uint32_t kMagicCmp = kGuardMagic;
- const uint8_t* fullBuf = ActualBuffer(dataBuf);
- const GuardedCopy* pExtra = GuardedCopy::FromData(dataBuf);
// Before we do anything with "pExtra", check the magic number. We
// do the check with memcmp rather than "==" in case the pointer is
// unaligned. If it points to completely bogus memory we're going
// to crash, but there's no easy way around that.
- if (memcmp(&pExtra->magic, &kMagicCmp, 4) != 0) {
+ if (UNLIKELY(memcmp(&magic_, &kMagicCmp, 4) != 0)) {
uint8_t buf[4];
- memcpy(buf, &pExtra->magic, 4);
- JniAbortF(functionName,
- "guard magic does not match (found 0x%02x%02x%02x%02x) -- incorrect data pointer %p?",
- buf[3], buf[2], buf[1], buf[0], dataBuf); // Assumes little-endian.
+ memcpy(buf, &magic_, 4);
+ AbortF(function_name,
+ "guard magic does not match (found 0x%02x%02x%02x%02x) -- incorrect data pointer %p?",
+ buf[3], buf[2], buf[1], buf[0], this); // Assumes little-endian.
+ return false;
}
- size_t len = pExtra->original_length;
-
- // Check bottom half of guard; skip over optional checksum storage.
- const uint16_t* pat = reinterpret_cast<const uint16_t*>(fullBuf);
- for (size_t i = sizeof(GuardedCopy) / 2; i < (kGuardLen / 2 - sizeof(GuardedCopy)) / 2; i++) {
- if (pat[i] != kGuardPattern) {
- JniAbortF(functionName, "guard pattern(1) disturbed at %p +%zd", fullBuf, i*2);
+ // If modification is not expected, verify checksum. Strictly speaking this is wrong: if we
+ // told the client that we made a copy, there's no reason they can't alter the buffer.
+ if (!mod_okay) {
+ uLong computed_adler =
+ adler32(adler32(0L, Z_NULL, 0), BufferWithinRedZones(), original_length_);
+ if (computed_adler != adler_) {
+ AbortF(function_name, "buffer modified (0x%08lx vs 0x%08lx) at address %p",
+ computed_adler, adler_, this);
+ return false;
}
}
+ return true;
+ }
- int offset = kGuardLen / 2 + len;
- if (offset & 0x01) {
- // Odd byte; expected value depends on endian.
- const uint16_t patSample = kGuardPattern;
- uint8_t expected_byte = reinterpret_cast<const uint8_t*>(&patSample)[1];
- if (fullBuf[offset] != expected_byte) {
- JniAbortF(functionName, "guard pattern disturbed in odd byte after %p +%d 0x%02x 0x%02x",
- fullBuf, offset, fullBuf[offset], expected_byte);
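+  // Verifies that neither canary region surrounding the user data has been overwritten.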
+ bool CheckRedZones(const char* function_name) const {
+ // Check the begin red zone.
+ const size_t kStartCanaryLength = (GuardedCopy::kRedZoneSize / 2) - sizeof(GuardedCopy);
+ for (size_t i = 0, j = 0; i < kStartCanaryLength; ++i) {
+ if (UNLIKELY(StartRedZone()[i] != kCanary[j])) {
+ AbortF(function_name, "guard pattern before buffer disturbed at %p +%zd", this, i);
+ return false;
}
- offset++;
- }
-
- // Check top half of guard.
- pat = reinterpret_cast<const uint16_t*>(fullBuf + offset);
- for (size_t i = 0; i < kGuardLen / 4; i++) {
- if (pat[i] != kGuardPattern) {
- JniAbortF(functionName, "guard pattern(2) disturbed at %p +%zd", fullBuf, offset + i*2);
+      if (kCanary[j] == '\0') {
+        j = 0;
+      } else {
+        j++;
+      }
}
- // If modification is not expected, verify checksum. Strictly speaking
- // this is wrong: if we told the client that we made a copy, there's no
- // reason they can't alter the buffer.
- if (!modOkay) {
- uLong adler = adler32(0L, Z_NULL, 0);
- adler = adler32(adler, (const Bytef*)dataBuf, len);
- if (pExtra->adler != adler) {
- JniAbortF(functionName, "buffer modified (0x%08lx vs 0x%08lx) at address %p",
- pExtra->adler, adler, dataBuf);
+ // Check end region.
+ for (size_t i = 0, j = 0; i < kEndCanaryLength; ++i) {
+ if (UNLIKELY(EndRedZone()[i] != kCanary[j])) {
+ size_t offset_from_buffer_start =
+ &(EndRedZone()[i]) - &(StartRedZone()[kStartCanaryLength]);
+ AbortF(function_name, "guard pattern after buffer disturbed at %p +%zd", this,
+ offset_from_buffer_start);
+ return false;
+ }
+      if (kCanary[j] == '\0') {
+        j = 0;
+      } else {
+        j++;
+      }
}
+ return true;
}
- private:
- static uint8_t* DebugAlloc(size_t len) {
- void* result = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
- if (result == MAP_FAILED) {
- PLOG(FATAL) << "GuardedCopy::create mmap(" << len << ") failed";
- }
- return reinterpret_cast<uint8_t*>(result);
- }
-
- static void DebugFree(void* dataBuf, size_t len) {
- uint8_t* fullBuf = ActualBuffer(dataBuf);
- size_t totalByteCount = ActualLength(len);
- // TODO: we could mprotect instead, and keep the allocation around for a while.
- // This would be even more expensive, but it might catch more errors.
- // if (mprotect(fullBuf, totalByteCount, PROT_NONE) != 0) {
- // PLOG(WARNING) << "mprotect(PROT_NONE) failed";
- // }
- if (munmap(fullBuf, totalByteCount) != 0) {
- PLOG(FATAL) << "munmap(" << reinterpret_cast<void*>(fullBuf) << ", " << totalByteCount << ") failed";
- }
+ // Location that canary value will be written before the guarded region.
+ const char* StartRedZone() const {
+ const uint8_t* buf = reinterpret_cast<const uint8_t*>(this);
+ return reinterpret_cast<const char*>(buf + sizeof(GuardedCopy));
}
- static const uint8_t* ActualBuffer(const void* dataBuf) {
- return reinterpret_cast<const uint8_t*>(dataBuf) - kGuardLen / 2;
+ // Return the interior embedded buffer.
+ const uint8_t* BufferWithinRedZones() const {
+ const uint8_t* embedded_buf = reinterpret_cast<const uint8_t*>(this) + (kRedZoneSize / 2);
+ return embedded_buf;
}
- static uint8_t* ActualBuffer(void* dataBuf) {
- return reinterpret_cast<uint8_t*>(dataBuf) - kGuardLen / 2;
+ // Location that canary value will be written after the guarded region.
+ const char* EndRedZone() const {
+ const uint8_t* buf = reinterpret_cast<const uint8_t*>(this);
+ size_t buf_len = LengthIncludingRedZones(original_length_);
+ return reinterpret_cast<const char*>(buf + (buf_len - (kRedZoneSize / 2)));
}
- // Underlying length of a user allocation of 'length' bytes.
- static size_t ActualLength(size_t length) {
- return (length + kGuardLen + 1) & ~0x01;
- }
-};
+ static constexpr size_t kRedZoneSize = 512;
+ static constexpr size_t kEndCanaryLength = kRedZoneSize / 2;
-/*
- * Create a guarded copy of a primitive array. Modifications to the copied
- * data are allowed. Returns a pointer to the copied data.
- */
-static void* CreateGuardedPACopy(JNIEnv* env, const jarray java_array, jboolean* isCopy) {
- ScopedObjectAccess soa(env);
-
- mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
- size_t component_size = a->GetClass()->GetComponentSize();
- size_t byte_count = a->GetLength() * component_size;
- void* result = GuardedCopy::Create(a->GetRawData(component_size, 0), byte_count, true);
- if (isCopy != nullptr) {
- *isCopy = JNI_TRUE;
- }
- return result;
-}
+ // Value written before and after the guarded array.
+ static const char* const kCanary;
-/*
- * Perform the array "release" operation, which may or may not copy data
- * back into the managed heap, and may or may not release the underlying storage.
- */
-static void ReleaseGuardedPACopy(JNIEnv* env, jarray java_array, void* dataBuf, int mode) {
- ScopedObjectAccess soa(env);
- mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
+ static constexpr uint32_t kGuardMagic = 0xffd5aa96;
- GuardedCopy::Check(__FUNCTION__, dataBuf, true);
-
- if (mode != JNI_ABORT) {
- size_t len = GuardedCopy::FromData(dataBuf)->original_length;
- memcpy(a->GetRawData(a->GetClass()->GetComponentSize(), 0), dataBuf, len);
- }
- if (mode != JNI_COMMIT) {
- GuardedCopy::Destroy(dataBuf);
- }
-}
+ const uint32_t magic_;
+ const uLong adler_;
+ const void* const original_ptr_;
+ const size_t original_length_;
+};
+const char* const GuardedCopy::kCanary = "JNI BUFFER RED ZONE";
/*
* ===========================================================================
@@ -1116,667 +1391,1953 @@ static void ReleaseGuardedPACopy(JNIEnv* env, jarray java_array, void* dataBuf,
class CheckJNI {
public:
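+  // Each wrapper below follows the same pattern: validate the arguments on entry,
+  // call through to the unchecked implementation, then validate the result on exit.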
static jint GetVersion(JNIEnv* env) {
- CHECK_JNI_ENTRY(kFlag_Default, "E", env);
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetVersion(env));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[1] = {{.E = env}};
+ if (sc.Check(soa, true, "E", args)) {
+ JniValueType result;
+ result.I = baseEnv(env)->GetVersion(env);
+ if (sc.Check(soa, false, "I", &result)) {
+ return result.I;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jint GetJavaVM(JNIEnv *env, JavaVM **vm) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.p = vm}};
+ if (sc.Check(soa, true, "Ep", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->GetJavaVM(env, vm);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jint RegisterNatives(JNIEnv* env, jclass c, const JNINativeMethod* methods, jint nMethods) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[4] = {{.E = env}, {.c = c}, {.p = methods}, {.I = nMethods}};
+ if (sc.Check(soa, true, "EcpI", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->RegisterNatives(env, c, methods, nMethods);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jint UnregisterNatives(JNIEnv* env, jclass c) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.c = c}};
+ if (sc.Check(soa, true, "Ec", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->UnregisterNatives(env, c);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jobjectRefType GetObjectRefType(JNIEnv* env, jobject obj) {
+ // Note: we use "Ep" rather than "EL" because this is the one JNI function that it's okay to
+ // pass an invalid reference to.
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.p = obj}};
+ if (sc.Check(soa, true, "Ep", args)) {
+ JniValueType result;
+ result.w = baseEnv(env)->GetObjectRefType(env, obj);
+ if (sc.Check(soa, false, "w", &result)) {
+ return result.w;
+ }
+ }
+ return JNIInvalidRefType;
}
- static jclass DefineClass(JNIEnv* env, const char* name, jobject loader, const jbyte* buf, jsize bufLen) {
- CHECK_JNI_ENTRY(kFlag_Default, "EuLpz", env, name, loader, buf, bufLen);
- sc.CheckClassName(name);
- return CHECK_JNI_EXIT("c", baseEnv(env)->DefineClass(env, name, loader, buf, bufLen));
+ static jclass DefineClass(JNIEnv* env, const char* name, jobject loader, const jbyte* buf,
+ jsize bufLen) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[5] = {{.E = env}, {.u = name}, {.L = loader}, {.p = buf}, {.z = bufLen}};
+ if (sc.Check(soa, true, "EuLpz", args) && sc.CheckClassName(name)) {
+ JniValueType result;
+ result.c = baseEnv(env)->DefineClass(env, name, loader, buf, bufLen);
+ if (sc.Check(soa, false, "c", &result)) {
+ return result.c;
+ }
+ }
+ return nullptr;
}
static jclass FindClass(JNIEnv* env, const char* name) {
- CHECK_JNI_ENTRY(kFlag_Default, "Eu", env, name);
- sc.CheckClassName(name);
- return CHECK_JNI_EXIT("c", baseEnv(env)->FindClass(env, name));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.u = name}};
+ if (sc.Check(soa, true, "Eu", args) && sc.CheckClassName(name)) {
+ JniValueType result;
+ result.c = baseEnv(env)->FindClass(env, name);
+ if (sc.Check(soa, false, "c", &result)) {
+ return result.c;
+ }
+ }
+ return nullptr;
}
static jclass GetSuperclass(JNIEnv* env, jclass c) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ec", env, c);
- return CHECK_JNI_EXIT("c", baseEnv(env)->GetSuperclass(env, c));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.c = c}};
+ if (sc.Check(soa, true, "Ec", args)) {
+ JniValueType result;
+ result.c = baseEnv(env)->GetSuperclass(env, c);
+ if (sc.Check(soa, false, "c", &result)) {
+ return result.c;
+ }
+ }
+ return nullptr;
}
static jboolean IsAssignableFrom(JNIEnv* env, jclass c1, jclass c2) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecc", env, c1, c2);
- return CHECK_JNI_EXIT("b", baseEnv(env)->IsAssignableFrom(env, c1, c2));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.c = c1}, {.c = c2}};
+ if (sc.Check(soa, true, "Ecc", args)) {
+ JniValueType result;
+ result.b = baseEnv(env)->IsAssignableFrom(env, c1, c2);
+ if (sc.Check(soa, false, "b", &result)) {
+ return result.b;
+ }
+ }
+ return JNI_FALSE;
}
static jmethodID FromReflectedMethod(JNIEnv* env, jobject method) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, method);
- // TODO: check that 'field' is a java.lang.reflect.Method.
- return CHECK_JNI_EXIT("m", baseEnv(env)->FromReflectedMethod(env, method));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = method}};
+ if (sc.Check(soa, true, "EL", args) && sc.CheckReflectedMethod(soa, method)) {
+ JniValueType result;
+ result.m = baseEnv(env)->FromReflectedMethod(env, method);
+ if (sc.Check(soa, false, "m", &result)) {
+ return result.m;
+ }
+ }
+ return nullptr;
}
static jfieldID FromReflectedField(JNIEnv* env, jobject field) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, field);
- // TODO: check that 'field' is a java.lang.reflect.Field.
- return CHECK_JNI_EXIT("f", baseEnv(env)->FromReflectedField(env, field));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = field}};
+ if (sc.Check(soa, true, "EL", args) && sc.CheckReflectedField(soa, field)) {
+ JniValueType result;
+ result.f = baseEnv(env)->FromReflectedField(env, field);
+ if (sc.Check(soa, false, "f", &result)) {
+ return result.f;
+ }
+ }
+ return nullptr;
}
static jobject ToReflectedMethod(JNIEnv* env, jclass cls, jmethodID mid, jboolean isStatic) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecmb", env, cls, mid, isStatic);
- return CHECK_JNI_EXIT("L", baseEnv(env)->ToReflectedMethod(env, cls, mid, isStatic));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[4] = {{.E = env}, {.c = cls}, {.m = mid}, {.b = isStatic}};
+ if (sc.Check(soa, true, "Ecmb", args)) {
+ JniValueType result;
+ result.L = baseEnv(env)->ToReflectedMethod(env, cls, mid, isStatic);
+ if (sc.Check(soa, false, "L", &result) && (result.L != nullptr)) {
+ DCHECK(sc.CheckReflectedMethod(soa, result.L));
+ return result.L;
+ }
+ }
+ return nullptr;
}
static jobject ToReflectedField(JNIEnv* env, jclass cls, jfieldID fid, jboolean isStatic) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecfb", env, cls, fid, isStatic);
- return CHECK_JNI_EXIT("L", baseEnv(env)->ToReflectedField(env, cls, fid, isStatic));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[4] = {{.E = env}, {.c = cls}, {.f = fid}, {.b = isStatic}};
+ if (sc.Check(soa, true, "Ecfb", args)) {
+ JniValueType result;
+ result.L = baseEnv(env)->ToReflectedField(env, cls, fid, isStatic);
+ if (sc.Check(soa, false, "L", &result) && (result.L != nullptr)) {
+ DCHECK(sc.CheckReflectedField(soa, result.L));
+ return result.L;
+ }
+ }
+ return nullptr;
}
static jint Throw(JNIEnv* env, jthrowable obj) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
- // TODO: check that 'obj' is a java.lang.Throwable.
- return CHECK_JNI_EXIT("I", baseEnv(env)->Throw(env, obj));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.t = obj}};
+ if (sc.Check(soa, true, "Et", args) && sc.CheckThrowable(soa, obj)) {
+ JniValueType result;
+ result.i = baseEnv(env)->Throw(env, obj);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
}
static jint ThrowNew(JNIEnv* env, jclass c, const char* message) {
- CHECK_JNI_ENTRY(kFlag_NullableUtf, "Ecu", env, c, message);
- return CHECK_JNI_EXIT("I", baseEnv(env)->ThrowNew(env, c, message));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_NullableUtf, __FUNCTION__);
+    JniValueType args[3] = {{.E = env}, {.c = c}, {.u = message}};
+ if (sc.Check(soa, true, "Ecu", args) && sc.CheckThrowableClass(soa, c)) {
+ JniValueType result;
+ result.i = baseEnv(env)->ThrowNew(env, c, message);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
}
static jthrowable ExceptionOccurred(JNIEnv* env) {
- CHECK_JNI_ENTRY(kFlag_ExcepOkay, "E", env);
- return CHECK_JNI_EXIT("L", baseEnv(env)->ExceptionOccurred(env));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[1] = {{.E = env}};
+ if (sc.Check(soa, true, "E", args)) {
+ JniValueType result;
+ result.t = baseEnv(env)->ExceptionOccurred(env);
+ if (sc.Check(soa, false, "t", &result)) {
+ return result.t;
+ }
+ }
+ return nullptr;
}
static void ExceptionDescribe(JNIEnv* env) {
- CHECK_JNI_ENTRY(kFlag_ExcepOkay, "E", env);
- baseEnv(env)->ExceptionDescribe(env);
- CHECK_JNI_EXIT_VOID();
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[1] = {{.E = env}};
+ if (sc.Check(soa, true, "E", args)) {
+ JniValueType result;
+ baseEnv(env)->ExceptionDescribe(env);
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
}
static void ExceptionClear(JNIEnv* env) {
- CHECK_JNI_ENTRY(kFlag_ExcepOkay, "E", env);
- baseEnv(env)->ExceptionClear(env);
- CHECK_JNI_EXIT_VOID();
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[1] = {{.E = env}};
+ if (sc.Check(soa, true, "E", args)) {
+ JniValueType result;
+ baseEnv(env)->ExceptionClear(env);
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+
+ static jboolean ExceptionCheck(JNIEnv* env) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay | kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[1] = {{.E = env}};
+ if (sc.Check(soa, true, "E", args)) {
+ JniValueType result;
+ result.b = baseEnv(env)->ExceptionCheck(env);
+ if (sc.Check(soa, false, "b", &result)) {
+ return result.b;
+ }
+ }
+ return JNI_FALSE;
}
static void FatalError(JNIEnv* env, const char* msg) {
// The JNI specification doesn't say it's okay to call FatalError with a pending exception,
// but you're about to abort anyway, and it's quite likely that you have a pending exception,
// and it's not unimaginable that you don't know that you do. So we allow it.
- CHECK_JNI_ENTRY(kFlag_ExcepOkay | kFlag_NullableUtf, "Eu", env, msg);
- baseEnv(env)->FatalError(env, msg);
- CHECK_JNI_EXIT_VOID();
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay | kFlag_NullableUtf, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.u = msg}};
+ if (sc.Check(soa, true, "Eu", args)) {
+ JniValueType result;
+ baseEnv(env)->FatalError(env, msg);
+ // Unreachable.
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
}
static jint PushLocalFrame(JNIEnv* env, jint capacity) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EI", env, capacity);
- return CHECK_JNI_EXIT("I", baseEnv(env)->PushLocalFrame(env, capacity));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.I = capacity}};
+ if (sc.Check(soa, true, "EI", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->PushLocalFrame(env, capacity);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
}
static jobject PopLocalFrame(JNIEnv* env, jobject res) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, res);
- return CHECK_JNI_EXIT("L", baseEnv(env)->PopLocalFrame(env, res));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = res}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ result.L = baseEnv(env)->PopLocalFrame(env, res);
+ sc.Check(soa, false, "L", &result);
+ return result.L;
+ }
+ return nullptr;
}
static jobject NewGlobalRef(JNIEnv* env, jobject obj) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewGlobalRef(env, obj));
+ return NewRef(__FUNCTION__, env, obj, kGlobal);
}
- static jobject NewLocalRef(JNIEnv* env, jobject ref) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, ref);
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewLocalRef(env, ref));
+ static jobject NewLocalRef(JNIEnv* env, jobject obj) {
+ return NewRef(__FUNCTION__, env, obj, kLocal);
}
- static void DeleteGlobalRef(JNIEnv* env, jobject globalRef) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, globalRef);
- if (globalRef != nullptr && GetIndirectRefKind(globalRef) != kGlobal) {
- JniAbortF(__FUNCTION__, "DeleteGlobalRef on %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(globalRef)).c_str(), globalRef);
- } else {
- baseEnv(env)->DeleteGlobalRef(env, globalRef);
- CHECK_JNI_EXIT_VOID();
- }
+ static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj) {
+ return NewRef(__FUNCTION__, env, obj, kWeakGlobal);
}
- static void DeleteWeakGlobalRef(JNIEnv* env, jweak weakGlobalRef) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, weakGlobalRef);
- if (weakGlobalRef != nullptr && GetIndirectRefKind(weakGlobalRef) != kWeakGlobal) {
- JniAbortF(__FUNCTION__, "DeleteWeakGlobalRef on %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(weakGlobalRef)).c_str(), weakGlobalRef);
- } else {
- baseEnv(env)->DeleteWeakGlobalRef(env, weakGlobalRef);
- CHECK_JNI_EXIT_VOID();
- }
+ static void DeleteGlobalRef(JNIEnv* env, jobject obj) {
+ DeleteRef(__FUNCTION__, env, obj, kGlobal);
}
- static void DeleteLocalRef(JNIEnv* env, jobject localRef) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, localRef);
- if (localRef != nullptr && GetIndirectRefKind(localRef) != kLocal && !IsHandleScopeLocalRef(env, localRef)) {
- JniAbortF(__FUNCTION__, "DeleteLocalRef on %s: %p",
- ToStr<IndirectRefKind>(GetIndirectRefKind(localRef)).c_str(), localRef);
- } else {
- baseEnv(env)->DeleteLocalRef(env, localRef);
- CHECK_JNI_EXIT_VOID();
- }
+ static void DeleteWeakGlobalRef(JNIEnv* env, jweak obj) {
+ DeleteRef(__FUNCTION__, env, obj, kWeakGlobal);
+ }
+
+ static void DeleteLocalRef(JNIEnv* env, jobject obj) {
+ DeleteRef(__FUNCTION__, env, obj, kLocal);
}
static jint EnsureLocalCapacity(JNIEnv *env, jint capacity) {
- CHECK_JNI_ENTRY(kFlag_Default, "EI", env, capacity);
- return CHECK_JNI_EXIT("I", baseEnv(env)->EnsureLocalCapacity(env, capacity));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.I = capacity}};
+ if (sc.Check(soa, true, "EI", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->EnsureLocalCapacity(env, capacity);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
+ }
+ return JNI_ERR;
}
static jboolean IsSameObject(JNIEnv* env, jobject ref1, jobject ref2) {
- CHECK_JNI_ENTRY(kFlag_Default, "ELL", env, ref1, ref2);
- return CHECK_JNI_EXIT("b", baseEnv(env)->IsSameObject(env, ref1, ref2));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.L = ref1}, {.L = ref2}};
+ if (sc.Check(soa, true, "ELL", args)) {
+ JniValueType result;
+ result.b = baseEnv(env)->IsSameObject(env, ref1, ref2);
+ if (sc.Check(soa, false, "b", &result)) {
+ return result.b;
+ }
+ }
+ return JNI_FALSE;
}
static jobject AllocObject(JNIEnv* env, jclass c) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ec", env, c);
- return CHECK_JNI_EXIT("L", baseEnv(env)->AllocObject(env, c));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.c = c}};
+ if (sc.Check(soa, true, "Ec", args) && sc.CheckInstantiableNonArray(soa, c)) {
+ JniValueType result;
+ result.L = baseEnv(env)->AllocObject(env, c);
+ if (sc.Check(soa, false, "L", &result)) {
+ return result.L;
+ }
+ }
+ return nullptr;
+ }
+
+ static jobject NewObjectV(JNIEnv* env, jclass c, jmethodID mid, va_list vargs) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
+ if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
+ sc.CheckConstructor(soa, mid)) {
+ JniValueType result;
+ result.L = baseEnv(env)->NewObjectV(env, c, mid, vargs);
+ if (sc.Check(soa, false, "L", &result)) {
+ return result.L;
+ }
+ }
+ return nullptr;
}
static jobject NewObject(JNIEnv* env, jclass c, jmethodID mid, ...) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid);
va_list args;
va_start(args, mid);
- jobject result = baseEnv(env)->NewObjectV(env, c, mid, args);
+ jobject result = NewObjectV(env, c, mid, args);
va_end(args);
- return CHECK_JNI_EXIT("L", result);
- }
-
- static jobject NewObjectV(JNIEnv* env, jclass c, jmethodID mid, va_list args) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid);
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewObjectV(env, c, mid, args));
+ return result;
}
- static jobject NewObjectA(JNIEnv* env, jclass c, jmethodID mid, jvalue* args) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid);
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewObjectA(env, c, mid, args));
+ static jobject NewObjectA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
+ if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
+ sc.CheckConstructor(soa, mid)) {
+ JniValueType result;
+ result.L = baseEnv(env)->NewObjectA(env, c, mid, vargs);
+ if (sc.Check(soa, false, "L", &result)) {
+ return result.L;
+ }
+ }
+ return nullptr;
}
static jclass GetObjectClass(JNIEnv* env, jobject obj) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
- return CHECK_JNI_EXIT("c", baseEnv(env)->GetObjectClass(env, obj));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = obj}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ result.c = baseEnv(env)->GetObjectClass(env, obj);
+ if (sc.Check(soa, false, "c", &result)) {
+ return result.c;
+ }
+ }
+ return nullptr;
}
static jboolean IsInstanceOf(JNIEnv* env, jobject obj, jclass c) {
- CHECK_JNI_ENTRY(kFlag_Default, "ELc", env, obj, c);
- return CHECK_JNI_EXIT("b", baseEnv(env)->IsInstanceOf(env, obj, c));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.L = obj}, {.c = c}};
+ if (sc.Check(soa, true, "ELc", args)) {
+ JniValueType result;
+ result.b = baseEnv(env)->IsInstanceOf(env, obj, c);
+ if (sc.Check(soa, false, "b", &result)) {
+ return result.b;
+ }
+ }
+ return JNI_FALSE;
}
static jmethodID GetMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
- return CHECK_JNI_EXIT("m", baseEnv(env)->GetMethodID(env, c, name, sig));
+ return GetMethodIDInternal(__FUNCTION__, env, c, name, sig, false);
}
- static jfieldID GetFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
- return CHECK_JNI_EXIT("f", baseEnv(env)->GetFieldID(env, c, name, sig));
+ static jmethodID GetStaticMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
+ return GetMethodIDInternal(__FUNCTION__, env, c, name, sig, true);
}
- static jmethodID GetStaticMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
- return CHECK_JNI_EXIT("m", baseEnv(env)->GetStaticMethodID(env, c, name, sig));
+ static jfieldID GetFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
+ return GetFieldIDInternal(__FUNCTION__, env, c, name, sig, false);
}
static jfieldID GetStaticFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
- return CHECK_JNI_EXIT("f", baseEnv(env)->GetStaticFieldID(env, c, name, sig));
- }
-
-#define FIELD_ACCESSORS(_ctype, _jname, _jvalue_type, _type) \
- static _ctype GetStatic##_jname##Field(JNIEnv* env, jclass c, jfieldID fid) { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ecf", env, c, fid); \
- sc.CheckStaticFieldID(c, fid); \
- return CHECK_JNI_EXIT(_type, baseEnv(env)->GetStatic##_jname##Field(env, c, fid)); \
- } \
- static _ctype Get##_jname##Field(JNIEnv* env, jobject obj, jfieldID fid) { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELf", env, obj, fid); \
- sc.CheckInstanceFieldID(obj, fid); \
- return CHECK_JNI_EXIT(_type, baseEnv(env)->Get##_jname##Field(env, obj, fid)); \
- } \
- static void SetStatic##_jname##Field(JNIEnv* env, jclass c, jfieldID fid, _ctype value) { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ecf" _type, env, c, fid, value); \
- sc.CheckStaticFieldID(c, fid); \
- /* "value" arg only used when type == ref */ \
- jvalue java_type_value; \
- java_type_value._jvalue_type = value; \
- sc.CheckFieldType(java_type_value, fid, _type[0], true); \
- baseEnv(env)->SetStatic##_jname##Field(env, c, fid, value); \
- CHECK_JNI_EXIT_VOID(); \
- } \
- static void Set##_jname##Field(JNIEnv* env, jobject obj, jfieldID fid, _ctype value) { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELf" _type, env, obj, fid, value); \
- sc.CheckInstanceFieldID(obj, fid); \
- /* "value" arg only used when type == ref */ \
- jvalue java_type_value; \
- java_type_value._jvalue_type = value; \
- sc.CheckFieldType(java_type_value, fid, _type[0], false); \
- baseEnv(env)->Set##_jname##Field(env, obj, fid, value); \
- CHECK_JNI_EXIT_VOID(); \
- }
-
-FIELD_ACCESSORS(jobject, Object, l, "L");
-FIELD_ACCESSORS(jboolean, Boolean, z, "Z");
-FIELD_ACCESSORS(jbyte, Byte, b, "B");
-FIELD_ACCESSORS(jchar, Char, c, "C");
-FIELD_ACCESSORS(jshort, Short, s, "S");
-FIELD_ACCESSORS(jint, Int, i, "I");
-FIELD_ACCESSORS(jlong, Long, j, "J");
-FIELD_ACCESSORS(jfloat, Float, f, "F");
-FIELD_ACCESSORS(jdouble, Double, d, "D");
-
-#define CALL(_ctype, _jname, _retdecl, _retasgn, _retok, _retsig) \
- /* Virtual... */ \
- static _ctype Call##_jname##Method(JNIEnv* env, jobject obj, \
- jmethodID mid, ...) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELm.", env, obj, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- va_list args; \
- va_start(args, mid); \
- _retasgn(baseEnv(env)->Call##_jname##MethodV(env, obj, mid, args)); \
- va_end(args); \
- _retok; \
- } \
- static _ctype Call##_jname##MethodV(JNIEnv* env, jobject obj, \
- jmethodID mid, va_list args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELm.", env, obj, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->Call##_jname##MethodV(env, obj, mid, args)); \
- _retok; \
- } \
- static _ctype Call##_jname##MethodA(JNIEnv* env, jobject obj, \
- jmethodID mid, jvalue* args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELm.", env, obj, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->Call##_jname##MethodA(env, obj, mid, args)); \
- _retok; \
- } \
- /* Non-virtual... */ \
- static _ctype CallNonvirtual##_jname##Method(JNIEnv* env, \
- jobject obj, jclass c, jmethodID mid, ...) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELcm.", env, obj, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- va_list args; \
- va_start(args, mid); \
- _retasgn(baseEnv(env)->CallNonvirtual##_jname##MethodV(env, obj, c, mid, args)); \
- va_end(args); \
- _retok; \
- } \
- static _ctype CallNonvirtual##_jname##MethodV(JNIEnv* env, \
- jobject obj, jclass c, jmethodID mid, va_list args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELcm.", env, obj, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->CallNonvirtual##_jname##MethodV(env, obj, c, mid, args)); \
- _retok; \
- } \
- static _ctype CallNonvirtual##_jname##MethodA(JNIEnv* env, \
- jobject obj, jclass c, jmethodID mid, jvalue* args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "ELcm.", env, obj, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, false); \
- sc.CheckVirtualMethod(obj, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->CallNonvirtual##_jname##MethodA(env, obj, c, mid, args)); \
- _retok; \
- } \
- /* Static... */ \
- static _ctype CallStatic##_jname##Method(JNIEnv* env, jclass c, jmethodID mid, ...) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, true); \
- sc.CheckStaticMethod(c, mid); \
- _retdecl; \
- va_list args; \
- va_start(args, mid); \
- _retasgn(baseEnv(env)->CallStatic##_jname##MethodV(env, c, mid, args)); \
- va_end(args); \
- _retok; \
- } \
- static _ctype CallStatic##_jname##MethodV(JNIEnv* env, jclass c, jmethodID mid, va_list args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, true); \
- sc.CheckStaticMethod(c, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->CallStatic##_jname##MethodV(env, c, mid, args)); \
- _retok; \
- } \
- static _ctype CallStatic##_jname##MethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* args) \
- { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid); /* TODO: args! */ \
- sc.CheckSig(mid, _retsig, true); \
- sc.CheckStaticMethod(c, mid); \
- _retdecl; \
- _retasgn(baseEnv(env)->CallStatic##_jname##MethodA(env, c, mid, args)); \
- _retok; \
- }
-
-#define NON_VOID_RETURN(_retsig, _ctype) return CHECK_JNI_EXIT(_retsig, (_ctype) result)
-#define VOID_RETURN CHECK_JNI_EXIT_VOID()
-
-CALL(jobject, Object, mirror::Object* result, result = reinterpret_cast<mirror::Object*>, NON_VOID_RETURN("L", jobject), "L");
-CALL(jboolean, Boolean, jboolean result, result =, NON_VOID_RETURN("Z", jboolean), "Z");
-CALL(jbyte, Byte, jbyte result, result =, NON_VOID_RETURN("B", jbyte), "B");
-CALL(jchar, Char, jchar result, result =, NON_VOID_RETURN("C", jchar), "C");
-CALL(jshort, Short, jshort result, result =, NON_VOID_RETURN("S", jshort), "S");
-CALL(jint, Int, jint result, result =, NON_VOID_RETURN("I", jint), "I");
-CALL(jlong, Long, jlong result, result =, NON_VOID_RETURN("J", jlong), "J");
-CALL(jfloat, Float, jfloat result, result =, NON_VOID_RETURN("F", jfloat), "F");
-CALL(jdouble, Double, jdouble result, result =, NON_VOID_RETURN("D", jdouble), "D");
-CALL(void, Void, , , VOID_RETURN, "V");
-
- static jstring NewString(JNIEnv* env, const jchar* unicodeChars, jsize len) {
- CHECK_JNI_ENTRY(kFlag_Default, "Epz", env, unicodeChars, len);
- return CHECK_JNI_EXIT("s", baseEnv(env)->NewString(env, unicodeChars, len));
+ return GetFieldIDInternal(__FUNCTION__, env, c, name, sig, true);
+ }
+
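+// Expands to the four checked field accessors (static and instance get/set) for one type.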
+#define FIELD_ACCESSORS(jtype, name, ptype, shorty) \
+ static jtype GetStatic##name##Field(JNIEnv* env, jclass c, jfieldID fid) { \
+ return GetField(__FUNCTION__, env, c, fid, true, ptype).shorty; \
+ } \
+ \
+ static jtype Get##name##Field(JNIEnv* env, jobject obj, jfieldID fid) { \
+ return GetField(__FUNCTION__, env, obj, fid, false, ptype).shorty; \
+ } \
+ \
+ static void SetStatic##name##Field(JNIEnv* env, jclass c, jfieldID fid, jtype v) { \
+ JniValueType value; \
+ value.shorty = v; \
+ SetField(__FUNCTION__, env, c, fid, true, ptype, value); \
+ } \
+ \
+ static void Set##name##Field(JNIEnv* env, jobject obj, jfieldID fid, jtype v) { \
+ JniValueType value; \
+ value.shorty = v; \
+ SetField(__FUNCTION__, env, obj, fid, false, ptype, value); \
+ }
+
+ FIELD_ACCESSORS(jobject, Object, Primitive::kPrimNot, L)
+ FIELD_ACCESSORS(jboolean, Boolean, Primitive::kPrimBoolean, Z)
+ FIELD_ACCESSORS(jbyte, Byte, Primitive::kPrimByte, B)
+ FIELD_ACCESSORS(jchar, Char, Primitive::kPrimChar, C)
+ FIELD_ACCESSORS(jshort, Short, Primitive::kPrimShort, S)
+ FIELD_ACCESSORS(jint, Int, Primitive::kPrimInt, I)
+ FIELD_ACCESSORS(jlong, Long, Primitive::kPrimLong, J)
+ FIELD_ACCESSORS(jfloat, Float, Primitive::kPrimFloat, F)
+ FIELD_ACCESSORS(jdouble, Double, Primitive::kPrimDouble, D)
+#undef FIELD_ACCESSORS
+
+ static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* vargs) {
+ CallMethodA(__FUNCTION__, env, obj, nullptr, mid, vargs, Primitive::kPrimVoid, kVirtual);
+ }
+
+ static void CallNonvirtualVoidMethodA(JNIEnv* env, jobject obj, jclass c, jmethodID mid,
+ jvalue* vargs) {
+ CallMethodA(__FUNCTION__, env, obj, c, mid, vargs, Primitive::kPrimVoid, kDirect);
+ }
+
+ static void CallStaticVoidMethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) {
+    CallMethodA(__FUNCTION__, env, nullptr, c, mid, vargs, Primitive::kPrimVoid, kStatic);
+ }
+
+ static void CallVoidMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list vargs) {
+ CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, Primitive::kPrimVoid, kVirtual);
+ }
+
+ static void CallNonvirtualVoidMethodV(JNIEnv* env, jobject obj, jclass c, jmethodID mid,
+ va_list vargs) {
+ CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, Primitive::kPrimVoid, kDirect);
+ }
+
+ static void CallStaticVoidMethodV(JNIEnv* env, jclass c, jmethodID mid, va_list vargs) {
+ CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, Primitive::kPrimVoid, kStatic);
+ }
+
+ static void CallVoidMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
+ va_list vargs;
+ va_start(vargs, mid);
+ CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, Primitive::kPrimVoid, kVirtual);
+ va_end(vargs);
+ }
+
+ static void CallNonvirtualVoidMethod(JNIEnv* env, jobject obj, jclass c, jmethodID mid, ...) {
+ va_list vargs;
+ va_start(vargs, mid);
+ CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, Primitive::kPrimVoid, kDirect);
+ va_end(vargs);
+ }
+
+ static void CallStaticVoidMethod(JNIEnv* env, jclass c, jmethodID mid, ...) {
+ va_list vargs;
+ va_start(vargs, mid);
+ CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, Primitive::kPrimVoid, kStatic);
+ va_end(vargs);
+ }
+
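+// Expands to the nine checked call wrappers (virtual, non-virtual and static, each in
+// varargs, va_list and jvalue* form) for one return type.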
+#define CALL(rtype, name, ptype, shorty) \
+ static rtype Call##name##MethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* vargs) { \
+ return CallMethodA(__FUNCTION__, env, obj, nullptr, mid, vargs, ptype, kVirtual).shorty; \
+ } \
+ \
+ static rtype CallNonvirtual##name##MethodA(JNIEnv* env, jobject obj, jclass c, jmethodID mid, \
+ jvalue* vargs) { \
+ return CallMethodA(__FUNCTION__, env, obj, c, mid, vargs, ptype, kDirect).shorty; \
+ } \
+ \
+ static rtype CallStatic##name##MethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) { \
+ return CallMethodA(__FUNCTION__, env, nullptr, c, mid, vargs, ptype, kStatic).shorty; \
+ } \
+ \
+ static rtype Call##name##MethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list vargs) { \
+ return CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, ptype, kVirtual).shorty; \
+ } \
+ \
+ static rtype CallNonvirtual##name##MethodV(JNIEnv* env, jobject obj, jclass c, jmethodID mid, \
+ va_list vargs) { \
+ return CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, ptype, kDirect).shorty; \
+ } \
+ \
+ static rtype CallStatic##name##MethodV(JNIEnv* env, jclass c, jmethodID mid, va_list vargs) { \
+ return CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, ptype, kStatic).shorty; \
+ } \
+ \
+ static rtype Call##name##Method(JNIEnv* env, jobject obj, jmethodID mid, ...) { \
+ va_list vargs; \
+ va_start(vargs, mid); \
+ rtype result = \
+ CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, ptype, kVirtual).shorty; \
+ va_end(vargs); \
+ return result; \
+ } \
+ \
+ static rtype CallNonvirtual##name##Method(JNIEnv* env, jobject obj, jclass c, jmethodID mid, \
+ ...) { \
+ va_list vargs; \
+ va_start(vargs, mid); \
+ rtype result = \
+ CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, ptype, kDirect).shorty; \
+ va_end(vargs); \
+ return result; \
+ } \
+ \
+ static rtype CallStatic##name##Method(JNIEnv* env, jclass c, jmethodID mid, ...) { \
+ va_list vargs; \
+ va_start(vargs, mid); \
+ rtype result = \
+ CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, ptype, kStatic).shorty; \
+ va_end(vargs); \
+ return result; \
+ }
+
+ CALL(jobject, Object, Primitive::kPrimNot, L)
+ CALL(jboolean, Boolean, Primitive::kPrimBoolean, Z)
+ CALL(jbyte, Byte, Primitive::kPrimByte, B)
+ CALL(jchar, Char, Primitive::kPrimChar, C)
+ CALL(jshort, Short, Primitive::kPrimShort, S)
+ CALL(jint, Int, Primitive::kPrimInt, I)
+ CALL(jlong, Long, Primitive::kPrimLong, J)
+ CALL(jfloat, Float, Primitive::kPrimFloat, F)
+ CALL(jdouble, Double, Primitive::kPrimDouble, D)
+#undef CALL
+
+ static jstring NewString(JNIEnv* env, const jchar* unicode_chars, jsize len) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.p = unicode_chars}, {.z = len}};
+ if (sc.Check(soa, true, "Epz", args)) {
+ JniValueType result;
+ result.s = baseEnv(env)->NewString(env, unicode_chars, len);
+ if (sc.Check(soa, false, "s", &result)) {
+ return result.s;
+ }
+ }
+ return nullptr;
}
- static jsize GetStringLength(JNIEnv* env, jstring string) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "Es", env, string);
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetStringLength(env, string));
+ static jstring NewStringUTF(JNIEnv* env, const char* chars) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_NullableUtf, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.u = chars}};
+ if (sc.Check(soa, true, "Eu", args)) {
+ JniValueType result;
+ // TODO: stale? show pointer and truncate string.
+ result.s = baseEnv(env)->NewStringUTF(env, chars);
+ if (sc.Check(soa, false, "s", &result)) {
+ return result.s;
+ }
+ }
+ return nullptr;
}
- static const jchar* GetStringChars(JNIEnv* env, jstring java_string, jboolean* isCopy) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "Esp", env, java_string, isCopy);
- const jchar* result = baseEnv(env)->GetStringChars(env, java_string, isCopy);
- if (sc.ForceCopy() && result != nullptr) {
- mirror::String* s = sc.soa().Decode<mirror::String*>(java_string);
- int byteCount = s->GetLength() * 2;
- result = (const jchar*) GuardedCopy::Create(result, byteCount, false);
- if (isCopy != nullptr) {
- *isCopy = JNI_TRUE;
+ static jsize GetStringLength(JNIEnv* env, jstring string) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.s = string}};
+ if (sc.Check(soa, true, "Es", args)) {
+ JniValueType result;
+ result.z = baseEnv(env)->GetStringLength(env, string);
+ if (sc.Check(soa, false, "z", &result)) {
+ return result.z;
}
}
- return CHECK_JNI_EXIT("p", result);
+ return JNI_ERR;
}
- static void ReleaseStringChars(JNIEnv* env, jstring string, const jchar* chars) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "Esp", env, string, chars);
- sc.CheckNonNull(chars);
- if (sc.ForceCopy()) {
- GuardedCopy::Check(__FUNCTION__, chars, false);
- chars = reinterpret_cast<const jchar*>(GuardedCopy::Destroy(const_cast<jchar*>(chars)));
+ static jsize GetStringUTFLength(JNIEnv* env, jstring string) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.s = string}};
+ if (sc.Check(soa, true, "Es", args)) {
+ JniValueType result;
+ result.z = baseEnv(env)->GetStringUTFLength(env, string);
+ if (sc.Check(soa, false, "z", &result)) {
+ return result.z;
+ }
}
- baseEnv(env)->ReleaseStringChars(env, string, chars);
- CHECK_JNI_EXIT_VOID();
+ return JNI_ERR;
}
- static jstring NewStringUTF(JNIEnv* env, const char* bytes) {
- CHECK_JNI_ENTRY(kFlag_NullableUtf, "Eu", env, bytes); // TODO: show pointer and truncate string.
- return CHECK_JNI_EXIT("s", baseEnv(env)->NewStringUTF(env, bytes));
+ static const jchar* GetStringChars(JNIEnv* env, jstring string, jboolean* is_copy) {
+ return reinterpret_cast<const jchar*>(GetStringCharsInternal(__FUNCTION__, env, string,
+ is_copy, false, false));
}
- static jsize GetStringUTFLength(JNIEnv* env, jstring string) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "Es", env, string);
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetStringUTFLength(env, string));
+ static const char* GetStringUTFChars(JNIEnv* env, jstring string, jboolean* is_copy) {
+ return reinterpret_cast<const char*>(GetStringCharsInternal(__FUNCTION__, env, string,
+ is_copy, true, false));
}
- static const char* GetStringUTFChars(JNIEnv* env, jstring string, jboolean* isCopy) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "Esp", env, string, isCopy);
- const char* result = baseEnv(env)->GetStringUTFChars(env, string, isCopy);
- if (sc.ForceCopy() && result != nullptr) {
- result = (const char*) GuardedCopy::Create(result, strlen(result) + 1, false);
- if (isCopy != nullptr) {
- *isCopy = JNI_TRUE;
- }
- }
- return CHECK_JNI_EXIT("u", result); // TODO: show pointer and truncate string.
+ static const jchar* GetStringCritical(JNIEnv* env, jstring string, jboolean* is_copy) {
+ return reinterpret_cast<const jchar*>(GetStringCharsInternal(__FUNCTION__, env, string,
+ is_copy, false, true));
+ }
+
+ static void ReleaseStringChars(JNIEnv* env, jstring string, const jchar* chars) {
+ ReleaseStringCharsInternal(__FUNCTION__, env, string, chars, false, false);
}
static void ReleaseStringUTFChars(JNIEnv* env, jstring string, const char* utf) {
- CHECK_JNI_ENTRY(kFlag_ExcepOkay | kFlag_Release, "Esu", env, string, utf); // TODO: show pointer and truncate string.
- if (sc.ForceCopy()) {
- GuardedCopy::Check(__FUNCTION__, utf, false);
- utf = reinterpret_cast<const char*>(GuardedCopy::Destroy(const_cast<char*>(utf)));
+ ReleaseStringCharsInternal(__FUNCTION__, env, string, utf, true, false);
+ }
+
+ static void ReleaseStringCritical(JNIEnv* env, jstring string, const jchar* chars) {
+ ReleaseStringCharsInternal(__FUNCTION__, env, string, chars, false, true);
+ }
+
+ static void GetStringRegion(JNIEnv* env, jstring string, jsize start, jsize len, jchar* buf) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+ JniValueType args[5] = {{.E = env}, {.s = string}, {.z = start}, {.z = len}, {.p = buf}};
+ // Note: the start and len arguments are checked as 'I' rather than 'z' as invalid indices
+ // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+ if (sc.Check(soa, true, "EsIIp", args)) {
+ baseEnv(env)->GetStringRegion(env, string, start, len, buf);
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
}
- baseEnv(env)->ReleaseStringUTFChars(env, string, utf);
- CHECK_JNI_EXIT_VOID();
}
- static jsize GetArrayLength(JNIEnv* env, jarray array) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "Ea", env, array);
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetArrayLength(env, array));
+ static void GetStringUTFRegion(JNIEnv* env, jstring string, jsize start, jsize len, char* buf) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+ JniValueType args[5] = {{.E = env}, {.s = string}, {.z = start}, {.z = len}, {.p = buf}};
+ // Note: the start and len arguments are checked as 'I' rather than 'z' as invalid indices
+ // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+ if (sc.Check(soa, true, "EsIIp", args)) {
+ baseEnv(env)->GetStringUTFRegion(env, string, start, len, buf);
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
}
- static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass elementClass, jobject initialElement) {
- CHECK_JNI_ENTRY(kFlag_Default, "EzcL", env, length, elementClass, initialElement);
- return CHECK_JNI_EXIT("a", baseEnv(env)->NewObjectArray(env, length, elementClass, initialElement));
+ static jsize GetArrayLength(JNIEnv* env, jarray array) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.a = array}};
+ if (sc.Check(soa, true, "Ea", args)) {
+ JniValueType result;
+ result.z = baseEnv(env)->GetArrayLength(env, array);
+ if (sc.Check(soa, false, "z", &result)) {
+ return result.z;
+ }
+ }
+ return JNI_ERR;
+ }
+
+ static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass element_class,
+ jobject initial_element) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[4] =
+ {{.E = env}, {.z = length}, {.c = element_class}, {.L = initial_element}};
+ if (sc.Check(soa, true, "EzcL", args)) {
+ JniValueType result;
+ // Note: assignability tests of initial_element are done in the base implementation.
+ result.a = baseEnv(env)->NewObjectArray(env, length, element_class, initial_element);
+ if (sc.Check(soa, false, "a", &result)) {
+ return down_cast<jobjectArray>(result.a);
+ }
+ }
+ return nullptr;
}
static jobject GetObjectArrayElement(JNIEnv* env, jobjectArray array, jsize index) {
- CHECK_JNI_ENTRY(kFlag_Default, "EaI", env, array, index);
- return CHECK_JNI_EXIT("L", baseEnv(env)->GetObjectArrayElement(env, array, index));
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.a = array}, {.z = index}};
+ if (sc.Check(soa, true, "Eaz", args)) {
+ JniValueType result;
+ result.L = baseEnv(env)->GetObjectArrayElement(env, array, index);
+ if (sc.Check(soa, false, "L", &result)) {
+ return result.L;
+ }
+ }
+ return nullptr;
}
static void SetObjectArrayElement(JNIEnv* env, jobjectArray array, jsize index, jobject value) {
- CHECK_JNI_ENTRY(kFlag_Default, "EaIL", env, array, index, value);
- baseEnv(env)->SetObjectArrayElement(env, array, index, value);
- CHECK_JNI_EXIT_VOID();
- }
-
-#define NEW_PRIMITIVE_ARRAY(_artype, _jname) \
- static _artype New##_jname##Array(JNIEnv* env, jsize length) { \
- CHECK_JNI_ENTRY(kFlag_Default, "Ez", env, length); \
- return CHECK_JNI_EXIT("a", baseEnv(env)->New##_jname##Array(env, length)); \
- }
-NEW_PRIMITIVE_ARRAY(jbooleanArray, Boolean);
-NEW_PRIMITIVE_ARRAY(jbyteArray, Byte);
-NEW_PRIMITIVE_ARRAY(jcharArray, Char);
-NEW_PRIMITIVE_ARRAY(jshortArray, Short);
-NEW_PRIMITIVE_ARRAY(jintArray, Int);
-NEW_PRIMITIVE_ARRAY(jlongArray, Long);
-NEW_PRIMITIVE_ARRAY(jfloatArray, Float);
-NEW_PRIMITIVE_ARRAY(jdoubleArray, Double);
-
-struct ForceCopyGetChecker {
- public:
- ForceCopyGetChecker(ScopedCheck& sc, jboolean* isCopy) {
- force_copy = sc.ForceCopy();
- no_copy = 0;
- if (force_copy && isCopy != nullptr) {
- // Capture this before the base call tramples on it.
- no_copy = *reinterpret_cast<uint32_t*>(isCopy);
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[4] = {{.E = env}, {.a = array}, {.z = index}, {.L = value}};
+ // Note: the index argument is checked as 'I' rather than 'z' as invalid indices result in
+ // ArrayIndexOutOfBoundsExceptions in the base implementation. Similarly, invalid stores result
+ // in ArrayStoreExceptions.
+ if (sc.Check(soa, true, "EaIL", args)) {
+ baseEnv(env)->SetObjectArrayElement(env, array, index, value);
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
}
}
- template<typename ResultT>
- ResultT Check(JNIEnv* env, jarray array, jboolean* isCopy, ResultT result) {
- if (force_copy && result != nullptr) {
- result = reinterpret_cast<ResultT>(CreateGuardedPACopy(env, array, isCopy));
- }
- return result;
+ static jbooleanArray NewBooleanArray(JNIEnv* env, jsize length) {
+ return down_cast<jbooleanArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimBoolean));
}
- uint32_t no_copy;
- bool force_copy;
-};
+ static jbyteArray NewByteArray(JNIEnv* env, jsize length) {
+ return down_cast<jbyteArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimByte));
+ }
-#define GET_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname) \
- static _ctype* Get##_jname##ArrayElements(JNIEnv* env, _ctype##Array array, jboolean* isCopy) { \
- CHECK_JNI_ENTRY(kFlag_Default, "Eap", env, array, isCopy); \
- _ctype* result = ForceCopyGetChecker(sc, isCopy).Check(env, array, isCopy, baseEnv(env)->Get##_jname##ArrayElements(env, array, isCopy)); \
- return CHECK_JNI_EXIT("p", result); \
- }
-
-#define RELEASE_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname) \
- static void Release##_jname##ArrayElements(JNIEnv* env, _ctype##Array array, _ctype* elems, jint mode) { \
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "Eapr", env, array, elems, mode); \
- sc.CheckNonNull(elems); \
- if (sc.ForceCopy()) { \
- ReleaseGuardedPACopy(env, array, elems, mode); \
- } \
- baseEnv(env)->Release##_jname##ArrayElements(env, array, elems, mode); \
- CHECK_JNI_EXIT_VOID(); \
- }
-
-#define GET_PRIMITIVE_ARRAY_REGION(_ctype, _jname) \
- static void Get##_jname##ArrayRegion(JNIEnv* env, _ctype##Array array, jsize start, jsize len, _ctype* buf) { \
- CHECK_JNI_ENTRY(kFlag_Default, "EaIIp", env, array, start, len, buf); \
- baseEnv(env)->Get##_jname##ArrayRegion(env, array, start, len, buf); \
- CHECK_JNI_EXIT_VOID(); \
- }
-
-#define SET_PRIMITIVE_ARRAY_REGION(_ctype, _jname) \
- static void Set##_jname##ArrayRegion(JNIEnv* env, _ctype##Array array, jsize start, jsize len, const _ctype* buf) { \
- CHECK_JNI_ENTRY(kFlag_Default, "EaIIp", env, array, start, len, buf); \
- baseEnv(env)->Set##_jname##ArrayRegion(env, array, start, len, buf); \
- CHECK_JNI_EXIT_VOID(); \
- }
-
-#define PRIMITIVE_ARRAY_FUNCTIONS(_ctype, _jname, _typechar) \
- GET_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname); \
- RELEASE_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname); \
- GET_PRIMITIVE_ARRAY_REGION(_ctype, _jname); \
- SET_PRIMITIVE_ARRAY_REGION(_ctype, _jname);
-
-// TODO: verify primitive array type matches call type.
-PRIMITIVE_ARRAY_FUNCTIONS(jboolean, Boolean, 'Z');
-PRIMITIVE_ARRAY_FUNCTIONS(jbyte, Byte, 'B');
-PRIMITIVE_ARRAY_FUNCTIONS(jchar, Char, 'C');
-PRIMITIVE_ARRAY_FUNCTIONS(jshort, Short, 'S');
-PRIMITIVE_ARRAY_FUNCTIONS(jint, Int, 'I');
-PRIMITIVE_ARRAY_FUNCTIONS(jlong, Long, 'J');
-PRIMITIVE_ARRAY_FUNCTIONS(jfloat, Float, 'F');
-PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double, 'D');
+ static jcharArray NewCharArray(JNIEnv* env, jsize length) {
+ return down_cast<jcharArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimChar));
+ }
- static jint RegisterNatives(JNIEnv* env, jclass c, const JNINativeMethod* methods, jint nMethods) {
- CHECK_JNI_ENTRY(kFlag_Default, "EcpI", env, c, methods, nMethods);
- return CHECK_JNI_EXIT("I", baseEnv(env)->RegisterNatives(env, c, methods, nMethods));
+ static jshortArray NewShortArray(JNIEnv* env, jsize length) {
+ return down_cast<jshortArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimShort));
}
- static jint UnregisterNatives(JNIEnv* env, jclass c) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ec", env, c);
- return CHECK_JNI_EXIT("I", baseEnv(env)->UnregisterNatives(env, c));
+ static jintArray NewIntArray(JNIEnv* env, jsize length) {
+ return down_cast<jintArray>(NewPrimitiveArray(__FUNCTION__, env, length, Primitive::kPrimInt));
}
+ static jlongArray NewLongArray(JNIEnv* env, jsize length) {
+ return down_cast<jlongArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimLong));
+ }
+
+ static jfloatArray NewFloatArray(JNIEnv* env, jsize length) {
+ return down_cast<jfloatArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimFloat));
+ }
+
+ static jdoubleArray NewDoubleArray(JNIEnv* env, jsize length) {
+ return down_cast<jdoubleArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+ Primitive::kPrimDouble));
+ }
+
+#define PRIMITIVE_ARRAY_FUNCTIONS(ctype, name, ptype) \
+ static ctype* Get##name##ArrayElements(JNIEnv* env, ctype##Array array, jboolean* is_copy) { \
+ return reinterpret_cast<ctype*>( \
+ GetPrimitiveArrayElements(__FUNCTION__, ptype, env, array, is_copy)); \
+ } \
+ \
+ static void Release##name##ArrayElements(JNIEnv* env, ctype##Array array, ctype* elems, \
+ jint mode) { \
+ ReleasePrimitiveArrayElements(__FUNCTION__, ptype, env, array, elems, mode); \
+ } \
+ \
+ static void Get##name##ArrayRegion(JNIEnv* env, ctype##Array array, jsize start, jsize len, \
+ ctype* buf) { \
+ GetPrimitiveArrayRegion(__FUNCTION__, ptype, env, array, start, len, buf); \
+ } \
+ \
+ static void Set##name##ArrayRegion(JNIEnv* env, ctype##Array array, jsize start, jsize len, \
+ const ctype* buf) { \
+ SetPrimitiveArrayRegion(__FUNCTION__, ptype, env, array, start, len, buf); \
+ }
+
+ PRIMITIVE_ARRAY_FUNCTIONS(jboolean, Boolean, Primitive::kPrimBoolean)
+ PRIMITIVE_ARRAY_FUNCTIONS(jbyte, Byte, Primitive::kPrimByte)
+ PRIMITIVE_ARRAY_FUNCTIONS(jchar, Char, Primitive::kPrimChar)
+ PRIMITIVE_ARRAY_FUNCTIONS(jshort, Short, Primitive::kPrimShort)
+ PRIMITIVE_ARRAY_FUNCTIONS(jint, Int, Primitive::kPrimInt)
+ PRIMITIVE_ARRAY_FUNCTIONS(jlong, Long, Primitive::kPrimLong)
+ PRIMITIVE_ARRAY_FUNCTIONS(jfloat, Float, Primitive::kPrimFloat)
+ PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double, Primitive::kPrimDouble)
+#undef PRIMITIVE_ARRAY_FUNCTIONS
+
static jint MonitorEnter(JNIEnv* env, jobject obj) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
- if (!sc.CheckInstance(ScopedCheck::kObject, obj)) {
- return JNI_ERR; // Only for jni_internal_test. Real code will have aborted already.
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = obj}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->MonitorEnter(env, obj);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
}
- return CHECK_JNI_EXIT("I", baseEnv(env)->MonitorEnter(env, obj));
+ return JNI_ERR;
}
static jint MonitorExit(JNIEnv* env, jobject obj) {
- CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, obj);
- if (!sc.CheckInstance(ScopedCheck::kObject, obj)) {
- return JNI_ERR; // Only for jni_internal_test. Real code will have aborted already.
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = obj}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ result.i = baseEnv(env)->MonitorExit(env, obj);
+ if (sc.Check(soa, false, "i", &result)) {
+ return result.i;
+ }
}
- return CHECK_JNI_EXIT("I", baseEnv(env)->MonitorExit(env, obj));
+ return JNI_ERR;
}
- static jint GetJavaVM(JNIEnv *env, JavaVM **vm) {
- CHECK_JNI_ENTRY(kFlag_Default, "Ep", env, vm);
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetJavaVM(env, vm));
+ static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray array, jboolean* is_copy) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritGet, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.a = array}, {.p = is_copy}};
+ if (sc.Check(soa, true, "Eap", args)) {
+ JniValueType result;
+ result.p = baseEnv(env)->GetPrimitiveArrayCritical(env, array, is_copy);
+ if (result.p != nullptr && soa.ForceCopy()) {
+ result.p = GuardedCopy::CreateGuardedPACopy(env, array, is_copy);
+ }
+ if (sc.Check(soa, false, "p", &result)) {
+ return const_cast<void*>(result.p);
+ }
+ }
+ return nullptr;
}
- static void GetStringRegion(JNIEnv* env, jstring str, jsize start, jsize len, jchar* buf) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "EsIIp", env, str, start, len, buf);
- baseEnv(env)->GetStringRegion(env, str, start, len, buf);
- CHECK_JNI_EXIT_VOID();
+ static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void* carray, jint mode) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_CritRelease | kFlag_ExcepOkay, __FUNCTION__);
+ sc.CheckNonNull(carray);
+ JniValueType args[4] = {{.E = env}, {.a = array}, {.p = carray}, {.r = mode}};
+ if (sc.Check(soa, true, "Eapr", args)) {
+ if (soa.ForceCopy()) {
+ GuardedCopy::ReleaseGuardedPACopy(__FUNCTION__, env, array, carray, mode);
+ }
+ baseEnv(env)->ReleasePrimitiveArrayCritical(env, array, carray, mode);
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
}
- static void GetStringUTFRegion(JNIEnv* env, jstring str, jsize start, jsize len, char* buf) {
- CHECK_JNI_ENTRY(kFlag_CritOkay, "EsIIp", env, str, start, len, buf);
- baseEnv(env)->GetStringUTFRegion(env, str, start, len, buf);
- CHECK_JNI_EXIT_VOID();
+ static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[3] = {{.E = env}, {.p = address}, {.J = capacity}};
+ if (sc.Check(soa, true, "EpJ", args)) {
+ JniValueType result;
+ // Note: the validity of address and capacity is checked in the base implementation.
+ result.L = baseEnv(env)->NewDirectByteBuffer(env, address, capacity);
+ if (sc.Check(soa, false, "L", &result)) {
+ return result.L;
+ }
+ }
+ return nullptr;
}
- static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray array, jboolean* isCopy) {
- CHECK_JNI_ENTRY(kFlag_CritGet, "Eap", env, array, isCopy);
- void* result = baseEnv(env)->GetPrimitiveArrayCritical(env, array, isCopy);
- if (sc.ForceCopy() && result != nullptr) {
- result = CreateGuardedPACopy(env, array, isCopy);
+ static void* GetDirectBufferAddress(JNIEnv* env, jobject buf) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = buf}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ // Note: the base environment implements this via GetLongField, so the type of buf is
+ // sanity checked by the GetLongField wrapper above.
+ result.p = baseEnv(env)->GetDirectBufferAddress(env, buf);
+ if (sc.Check(soa, false, "p", &result)) {
+ return const_cast<void*>(result.p);
+ }
}
- return CHECK_JNI_EXIT("p", result);
+ return nullptr;
}
- static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void* carray, jint mode) {
- CHECK_JNI_ENTRY(kFlag_CritRelease | kFlag_ExcepOkay, "Eapr", env, array, carray, mode);
- sc.CheckNonNull(carray);
- if (sc.ForceCopy()) {
- ReleaseGuardedPACopy(env, array, carray, mode);
+ static jlong GetDirectBufferCapacity(JNIEnv* env, jobject buf) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ JniValueType args[2] = {{.E = env}, {.L = buf}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ // Note: the base environment implements this via GetIntField, so the type of buf is
+ // sanity checked by the GetIntField wrapper above.
+ result.J = baseEnv(env)->GetDirectBufferCapacity(env, buf);
+ if (sc.Check(soa, false, "J", &result)) {
+ return result.J;
+ }
}
- baseEnv(env)->ReleasePrimitiveArrayCritical(env, array, carray, mode);
- CHECK_JNI_EXIT_VOID();
+ return JNI_ERR;
}
- static const jchar* GetStringCritical(JNIEnv* env, jstring java_string, jboolean* isCopy) {
- CHECK_JNI_ENTRY(kFlag_CritGet, "Esp", env, java_string, isCopy);
- const jchar* result = baseEnv(env)->GetStringCritical(env, java_string, isCopy);
- if (sc.ForceCopy() && result != nullptr) {
- mirror::String* s = sc.soa().Decode<mirror::String*>(java_string);
- int byteCount = s->GetLength() * 2;
- result = (const jchar*) GuardedCopy::Create(result, byteCount, false);
- if (isCopy != nullptr) {
- *isCopy = JNI_TRUE;
+ private:
+ static JavaVMExt* GetJavaVMExt(JNIEnv* env) {
+ return reinterpret_cast<JNIEnvExt*>(env)->vm;
+ }
+
+ static const JNINativeInterface* baseEnv(JNIEnv* env) {
+ return reinterpret_cast<JNIEnvExt*>(env)->unchecked_functions;
+ }
+
+ static jobject NewRef(const char* function_name, JNIEnv* env, jobject obj, IndirectRefKind kind) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[2] = {{.E = env}, {.L = obj}};
+ if (sc.Check(soa, true, "EL", args)) {
+ JniValueType result;
+ switch (kind) {
+ case kGlobal:
+ result.L = baseEnv(env)->NewGlobalRef(env, obj);
+ break;
+ case kLocal:
+ result.L = baseEnv(env)->NewLocalRef(env, obj);
+ break;
+ case kWeakGlobal:
+ result.L = baseEnv(env)->NewWeakGlobalRef(env, obj);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected reference kind: " << kind;
+ }
+ if (sc.Check(soa, false, "L", &result)) {
+ DCHECK_EQ(IsSameObject(env, obj, result.L), JNI_TRUE);
+ DCHECK(sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), result.L));
+ return result.L;
}
}
- return CHECK_JNI_EXIT("p", result);
+ return nullptr;
+ }
+
+ static void DeleteRef(const char* function_name, JNIEnv* env, jobject obj, IndirectRefKind kind) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, function_name);
+ JniValueType args[2] = {{.E = env}, {.L = obj}};
+ sc.Check(soa, true, "EL", args);
+ if (sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), obj)) {
+ JniValueType result;
+ switch (kind) {
+ case kGlobal:
+ baseEnv(env)->DeleteGlobalRef(env, obj);
+ break;
+ case kLocal:
+ baseEnv(env)->DeleteLocalRef(env, obj);
+ break;
+ case kWeakGlobal:
+ baseEnv(env)->DeleteWeakGlobalRef(env, obj);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected reference kind: " << kind;
+ }
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
}
- static void ReleaseStringCritical(JNIEnv* env, jstring string, const jchar* carray) {
- CHECK_JNI_ENTRY(kFlag_CritRelease | kFlag_ExcepOkay, "Esp", env, string, carray);
- sc.CheckNonNull(carray);
- if (sc.ForceCopy()) {
- GuardedCopy::Check(__FUNCTION__, carray, false);
- carray = reinterpret_cast<const jchar*>(GuardedCopy::Destroy(const_cast<jchar*>(carray)));
+ static jmethodID GetMethodIDInternal(const char* function_name, JNIEnv* env, jclass c,
+ const char* name, const char* sig, bool is_static) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[4] = {{.E = env}, {.c = c}, {.u = name}, {.u = sig}};
+ if (sc.Check(soa, true, "Ecuu", args)) {
+ JniValueType result;
+ if (is_static) {
+ result.m = baseEnv(env)->GetStaticMethodID(env, c, name, sig);
+ } else {
+ result.m = baseEnv(env)->GetMethodID(env, c, name, sig);
+ }
+ if (sc.Check(soa, false, "m", &result)) {
+ return result.m;
+ }
}
- baseEnv(env)->ReleaseStringCritical(env, string, carray);
- CHECK_JNI_EXIT_VOID();
+ return nullptr;
}
- static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewWeakGlobalRef(env, obj));
+ static jfieldID GetFieldIDInternal(const char* function_name, JNIEnv* env, jclass c,
+ const char* name, const char* sig, bool is_static) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[4] = {{.E = env}, {.c = c}, {.u = name}, {.u = sig}};
+ if (sc.Check(soa, true, "Ecuu", args)) {
+ JniValueType result;
+ if (is_static) {
+ result.f = baseEnv(env)->GetStaticFieldID(env, c, name, sig);
+ } else {
+ result.f = baseEnv(env)->GetFieldID(env, c, name, sig);
+ }
+ if (sc.Check(soa, false, "f", &result)) {
+ return result.f;
+ }
+ }
+ return nullptr;
+ }
+
+ static JniValueType GetField(const char* function_name, JNIEnv* env, jobject obj, jfieldID fid,
+ bool is_static, Primitive::Type type) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[3] = {{.E = env}, {.L = obj}, {.f = fid}};
+ JniValueType result;
+ if (sc.Check(soa, true, is_static ? "Ecf" : "ELf", args) &&
+ sc.CheckFieldAccess(soa, obj, fid, is_static, type)) {
+ const char* result_check = nullptr;
+ switch (type) {
+ case Primitive::kPrimNot:
+ if (is_static) {
+ result.L = baseEnv(env)->GetStaticObjectField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.L = baseEnv(env)->GetObjectField(env, obj, fid);
+ }
+ result_check = "L";
+ break;
+ case Primitive::kPrimBoolean:
+ if (is_static) {
+ result.Z = baseEnv(env)->GetStaticBooleanField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.Z = baseEnv(env)->GetBooleanField(env, obj, fid);
+ }
+ result_check = "Z";
+ break;
+ case Primitive::kPrimByte:
+ if (is_static) {
+ result.B = baseEnv(env)->GetStaticByteField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.B = baseEnv(env)->GetByteField(env, obj, fid);
+ }
+ result_check = "B";
+ break;
+ case Primitive::kPrimChar:
+ if (is_static) {
+ result.C = baseEnv(env)->GetStaticCharField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.C = baseEnv(env)->GetCharField(env, obj, fid);
+ }
+ result_check = "C";
+ break;
+ case Primitive::kPrimShort:
+ if (is_static) {
+ result.S = baseEnv(env)->GetStaticShortField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.S = baseEnv(env)->GetShortField(env, obj, fid);
+ }
+ result_check = "S";
+ break;
+ case Primitive::kPrimInt:
+ if (is_static) {
+ result.I = baseEnv(env)->GetStaticIntField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.I = baseEnv(env)->GetIntField(env, obj, fid);
+ }
+ result_check = "I";
+ break;
+ case Primitive::kPrimLong:
+ if (is_static) {
+ result.J = baseEnv(env)->GetStaticLongField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.J = baseEnv(env)->GetLongField(env, obj, fid);
+ }
+ result_check = "J";
+ break;
+ case Primitive::kPrimFloat:
+ if (is_static) {
+ result.F = baseEnv(env)->GetStaticFloatField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.F = baseEnv(env)->GetFloatField(env, obj, fid);
+ }
+ result_check = "F";
+ break;
+ case Primitive::kPrimDouble:
+ if (is_static) {
+ result.D = baseEnv(env)->GetStaticDoubleField(env, down_cast<jclass>(obj), fid);
+ } else {
+ result.D = baseEnv(env)->GetDoubleField(env, obj, fid);
+ }
+ result_check = "D";
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type: " << type;
+ break;
+ }
+ if (sc.Check(soa, false, result_check, &result)) {
+ return result;
+ }
+ }
+ result.J = 0;
+ return result;
}
- static jboolean ExceptionCheck(JNIEnv* env) {
- CHECK_JNI_ENTRY(kFlag_CritOkay | kFlag_ExcepOkay, "E", env);
- return CHECK_JNI_EXIT("b", baseEnv(env)->ExceptionCheck(env));
+ static void SetField(const char* function_name, JNIEnv* env, jobject obj, jfieldID fid,
+ bool is_static, Primitive::Type type, JniValueType value) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[4] = {{.E = env}, {.L = obj}, {.f = fid}, value};
+ char sig[5] = { 'E', is_static ? 'c' : 'L', 'f',
+ type == Primitive::kPrimNot ? 'L' : Primitive::Descriptor(type)[0], '\0'};
+ if (sc.Check(soa, true, sig, args) &&
+ sc.CheckFieldAccess(soa, obj, fid, is_static, type)) {
+ switch (type) {
+ case Primitive::kPrimNot:
+ if (is_static) {
+ baseEnv(env)->SetStaticObjectField(env, down_cast<jclass>(obj), fid, value.L);
+ } else {
+ baseEnv(env)->SetObjectField(env, obj, fid, value.L);
+ }
+ break;
+ case Primitive::kPrimBoolean:
+ if (is_static) {
+ baseEnv(env)->SetStaticBooleanField(env, down_cast<jclass>(obj), fid, value.Z);
+ } else {
+ baseEnv(env)->SetBooleanField(env, obj, fid, value.Z);
+ }
+ break;
+ case Primitive::kPrimByte:
+ if (is_static) {
+ baseEnv(env)->SetStaticByteField(env, down_cast<jclass>(obj), fid, value.B);
+ } else {
+ baseEnv(env)->SetByteField(env, obj, fid, value.B);
+ }
+ break;
+ case Primitive::kPrimChar:
+ if (is_static) {
+ baseEnv(env)->SetStaticCharField(env, down_cast<jclass>(obj), fid, value.C);
+ } else {
+ baseEnv(env)->SetCharField(env, obj, fid, value.C);
+ }
+ break;
+ case Primitive::kPrimShort:
+ if (is_static) {
+ baseEnv(env)->SetStaticShortField(env, down_cast<jclass>(obj), fid, value.S);
+ } else {
+ baseEnv(env)->SetShortField(env, obj, fid, value.S);
+ }
+ break;
+ case Primitive::kPrimInt:
+ if (is_static) {
+ baseEnv(env)->SetStaticIntField(env, down_cast<jclass>(obj), fid, value.I);
+ } else {
+ baseEnv(env)->SetIntField(env, obj, fid, value.I);
+ }
+ break;
+ case Primitive::kPrimLong:
+ if (is_static) {
+ baseEnv(env)->SetStaticLongField(env, down_cast<jclass>(obj), fid, value.J);
+ } else {
+ baseEnv(env)->SetLongField(env, obj, fid, value.J);
+ }
+ break;
+ case Primitive::kPrimFloat:
+ if (is_static) {
+ baseEnv(env)->SetStaticFloatField(env, down_cast<jclass>(obj), fid, value.F);
+ } else {
+ baseEnv(env)->SetFloatField(env, obj, fid, value.F);
+ }
+ break;
+ case Primitive::kPrimDouble:
+ if (is_static) {
+ baseEnv(env)->SetStaticDoubleField(env, down_cast<jclass>(obj), fid, value.D);
+ } else {
+ baseEnv(env)->SetDoubleField(env, obj, fid, value.D);
+ }
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type: " << type;
+ break;
+ }
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
}
- static jobjectRefType GetObjectRefType(JNIEnv* env, jobject obj) {
- // Note: we use "Ep" rather than "EL" because this is the one JNI function
- // that it's okay to pass an invalid reference to.
- CHECK_JNI_ENTRY(kFlag_Default, "Ep", env, obj);
- // TODO: proper decoding of jobjectRefType!
- return CHECK_JNI_EXIT("I", baseEnv(env)->GetObjectRefType(env, obj));
+ static bool CheckCallArgs(ScopedObjectAccess& soa, ScopedCheck& sc, JNIEnv* env, jobject obj,
+ jclass c, jmethodID mid, InvokeType invoke)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool checked;
+ switch (invoke) {
+ case kVirtual: {
+ DCHECK(c == nullptr);
+ JniValueType args[3] = {{.E = env}, {.L = obj}, {.m = mid}};
+ checked = sc.Check(soa, true, "ELm.", args);
+ break;
+ }
+ case kDirect: {
+ JniValueType args[4] = {{.E = env}, {.L = obj}, {.c = c}, {.m = mid}};
+ checked = sc.Check(soa, true, "ELcm.", args);
+ break;
+ }
+ case kStatic: {
+ DCHECK(obj == nullptr);
+ JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
+ checked = sc.Check(soa, true, "Ecm.", args);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ checked = false;
+ break;
+ }
+ return checked;
+ }
+
+ static JniValueType CallMethodA(const char* function_name, JNIEnv* env, jobject obj, jclass c,
+ jmethodID mid, jvalue* vargs, Primitive::Type type,
+ InvokeType invoke) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType result;
+ if (CheckCallArgs(soa, sc, env, obj, c, mid, invoke) &&
+ sc.CheckMethodAndSig(soa, obj, c, mid, type, invoke)) {
+ const char* result_check;
+ switch (type) {
+ case Primitive::kPrimNot:
+ result_check = "L";
+ switch (invoke) {
+ case kVirtual:
+ result.L = baseEnv(env)->CallObjectMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.L = baseEnv(env)->CallNonvirtualObjectMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.L = baseEnv(env)->CallStaticObjectMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimBoolean:
+ result_check = "Z";
+ switch (invoke) {
+ case kVirtual:
+ result.Z = baseEnv(env)->CallBooleanMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.Z = baseEnv(env)->CallNonvirtualBooleanMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.Z = baseEnv(env)->CallStaticBooleanMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimByte:
+ result_check = "B";
+ switch (invoke) {
+ case kVirtual:
+ result.B = baseEnv(env)->CallByteMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.B = baseEnv(env)->CallNonvirtualByteMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.B = baseEnv(env)->CallStaticByteMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimChar:
+ result_check = "C";
+ switch (invoke) {
+ case kVirtual:
+ result.C = baseEnv(env)->CallCharMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.C = baseEnv(env)->CallNonvirtualCharMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.C = baseEnv(env)->CallStaticCharMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimShort:
+ result_check = "S";
+ switch (invoke) {
+ case kVirtual:
+ result.S = baseEnv(env)->CallShortMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.S = baseEnv(env)->CallNonvirtualShortMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.S = baseEnv(env)->CallStaticShortMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimInt:
+ result_check = "I";
+ switch (invoke) {
+ case kVirtual:
+ result.I = baseEnv(env)->CallIntMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.I = baseEnv(env)->CallNonvirtualIntMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.I = baseEnv(env)->CallStaticIntMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimLong:
+ result_check = "J";
+ switch (invoke) {
+ case kVirtual:
+ result.J = baseEnv(env)->CallLongMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.J = baseEnv(env)->CallNonvirtualLongMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.J = baseEnv(env)->CallStaticLongMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimFloat:
+ result_check = "F";
+ switch (invoke) {
+ case kVirtual:
+ result.F = baseEnv(env)->CallFloatMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.F = baseEnv(env)->CallNonvirtualFloatMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.F = baseEnv(env)->CallStaticFloatMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimDouble:
+ result_check = "D";
+ switch (invoke) {
+ case kVirtual:
+ result.D = baseEnv(env)->CallDoubleMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.D = baseEnv(env)->CallNonvirtualDoubleMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.D = baseEnv(env)->CallStaticDoubleMethodA(env, c, mid, vargs);
+ break;
+ default:
+ break;
+ }
+ break;
+ case Primitive::kPrimVoid:
+ result_check = "V";
+ result.V = nullptr;
+ switch (invoke) {
+ case kVirtual:
+ baseEnv(env)->CallVoidMethodA(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ baseEnv(env)->CallNonvirtualVoidMethodA(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ baseEnv(env)->CallStaticVoidMethodA(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected return type: " << type;
+ result_check = nullptr;
+ }
+ if (sc.Check(soa, false, result_check, &result)) {
+ return result;
+ }
+ }
+ result.J = 0;
+ return result;
}
- static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) {
- CHECK_JNI_ENTRY(kFlag_Default, "EpJ", env, address, capacity);
- if (address == nullptr) {
- JniAbortF(__FUNCTION__, "non-nullable address is NULL");
- return nullptr;
+ static JniValueType CallMethodV(const char* function_name, JNIEnv* env, jobject obj, jclass c,
+ jmethodID mid, va_list vargs, Primitive::Type type,
+ InvokeType invoke) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType result;
+ if (CheckCallArgs(soa, sc, env, obj, c, mid, invoke) &&
+ sc.CheckMethodAndSig(soa, obj, c, mid, type, invoke)) {
+ const char* result_check;
+ switch (type) {
+ case Primitive::kPrimNot:
+ result_check = "L";
+ switch (invoke) {
+ case kVirtual:
+ result.L = baseEnv(env)->CallObjectMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.L = baseEnv(env)->CallNonvirtualObjectMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.L = baseEnv(env)->CallStaticObjectMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimBoolean:
+ result_check = "Z";
+ switch (invoke) {
+ case kVirtual:
+ result.Z = baseEnv(env)->CallBooleanMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.Z = baseEnv(env)->CallNonvirtualBooleanMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.Z = baseEnv(env)->CallStaticBooleanMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimByte:
+ result_check = "B";
+ switch (invoke) {
+ case kVirtual:
+ result.B = baseEnv(env)->CallByteMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.B = baseEnv(env)->CallNonvirtualByteMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.B = baseEnv(env)->CallStaticByteMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimChar:
+ result_check = "C";
+ switch (invoke) {
+ case kVirtual:
+ result.C = baseEnv(env)->CallCharMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.C = baseEnv(env)->CallNonvirtualCharMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.C = baseEnv(env)->CallStaticCharMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimShort:
+ result_check = "S";
+ switch (invoke) {
+ case kVirtual:
+ result.S = baseEnv(env)->CallShortMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.S = baseEnv(env)->CallNonvirtualShortMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.S = baseEnv(env)->CallStaticShortMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimInt:
+ result_check = "I";
+ switch (invoke) {
+ case kVirtual:
+ result.I = baseEnv(env)->CallIntMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.I = baseEnv(env)->CallNonvirtualIntMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.I = baseEnv(env)->CallStaticIntMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimLong:
+ result_check = "J";
+ switch (invoke) {
+ case kVirtual:
+ result.J = baseEnv(env)->CallLongMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.J = baseEnv(env)->CallNonvirtualLongMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.J = baseEnv(env)->CallStaticLongMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimFloat:
+ result_check = "F";
+ switch (invoke) {
+ case kVirtual:
+ result.F = baseEnv(env)->CallFloatMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.F = baseEnv(env)->CallNonvirtualFloatMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.F = baseEnv(env)->CallStaticFloatMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimDouble:
+ result_check = "D";
+ switch (invoke) {
+ case kVirtual:
+ result.D = baseEnv(env)->CallDoubleMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ result.D = baseEnv(env)->CallNonvirtualDoubleMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ result.D = baseEnv(env)->CallStaticDoubleMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ case Primitive::kPrimVoid:
+ result_check = "V";
+ result.V = nullptr;
+ switch (invoke) {
+ case kVirtual:
+ baseEnv(env)->CallVoidMethodV(env, obj, mid, vargs);
+ break;
+ case kDirect:
+ baseEnv(env)->CallNonvirtualVoidMethodV(env, obj, c, mid, vargs);
+ break;
+ case kStatic:
+ baseEnv(env)->CallStaticVoidMethodV(env, c, mid, vargs);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke: " << invoke;
+ }
+ break;
+ default:
+ LOG(FATAL) << "Unexpected return type: " << type;
+ result_check = nullptr;
+ }
+ if (sc.Check(soa, false, result_check, &result)) {
+ return result;
+ }
}
- return CHECK_JNI_EXIT("L", baseEnv(env)->NewDirectByteBuffer(env, address, capacity));
+ result.J = 0;
+ return result;
}
- static void* GetDirectBufferAddress(JNIEnv* env, jobject buf) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, buf);
- // TODO: check that 'buf' is a java.nio.Buffer.
- return CHECK_JNI_EXIT("p", baseEnv(env)->GetDirectBufferAddress(env, buf));
+ static const void* GetStringCharsInternal(const char* function_name, JNIEnv* env, jstring string,
+ jboolean* is_copy, bool utf, bool critical) {
+ ScopedObjectAccess soa(env);
+ int flags = critical ? kFlag_CritGet : kFlag_CritOkay;
+ ScopedCheck sc(flags, function_name);
+ JniValueType args[3] = {{.E = env}, {.s = string}, {.p = is_copy}};
+ if (sc.Check(soa, true, "Esp", args)) {
+ JniValueType result;
+ if (utf) {
+ CHECK(!critical);
+ result.u = baseEnv(env)->GetStringUTFChars(env, string, is_copy);
+ } else {
+ if (critical) {
+ result.p = baseEnv(env)->GetStringCritical(env, string, is_copy);
+ } else {
+ result.p = baseEnv(env)->GetStringChars(env, string, is_copy);
+ }
+ }
+ // TODO: could we be smarter about not copying when local_is_copy?
+ if (result.p != nullptr && soa.ForceCopy()) {
+ if (utf) {
+ size_t length_in_bytes = strlen(result.u) + 1;
+ result.u =
+ reinterpret_cast<const char*>(GuardedCopy::Create(result.u, length_in_bytes, false));
+ } else {
+ size_t length_in_bytes = baseEnv(env)->GetStringLength(env, string) * 2;
+ result.p =
+ reinterpret_cast<const jchar*>(GuardedCopy::Create(result.p, length_in_bytes, false));
+ }
+ if (is_copy != nullptr) {
+ *is_copy = JNI_TRUE;
+ }
+ }
+ if (sc.Check(soa, false, utf ? "u" : "p", &result)) {
+ return utf ? result.u : result.p;
+ }
+ }
+ return nullptr;
}
- static jlong GetDirectBufferCapacity(JNIEnv* env, jobject buf) {
- CHECK_JNI_ENTRY(kFlag_Default, "EL", env, buf);
- // TODO: check that 'buf' is a java.nio.Buffer.
- return CHECK_JNI_EXIT("J", baseEnv(env)->GetDirectBufferCapacity(env, buf));
+ static void ReleaseStringCharsInternal(const char* function_name, JNIEnv* env, jstring string,
+ const void* chars, bool utf, bool critical) {
+ ScopedObjectAccess soa(env);
+ int flags = kFlag_ExcepOkay | kFlag_Release;
+ if (critical) {
+ flags |= kFlag_CritRelease;
+ }
+ ScopedCheck sc(flags, function_name);
+ sc.CheckNonNull(chars);
+ bool force_copy_ok = !soa.ForceCopy() || GuardedCopy::Check(function_name, chars, false);
+ if (force_copy_ok && soa.ForceCopy()) {
+ chars = reinterpret_cast<const jchar*>(GuardedCopy::Destroy(const_cast<void*>(chars)));
+ }
+ if (force_copy_ok) {
+ JniValueType args[3] = {{.E = env}, {.s = string}, {.p = chars}};
+ if (sc.Check(soa, true, utf ? "Esu" : "Esp", args)) {
+ if (utf) {
+ CHECK(!critical);
+ baseEnv(env)->ReleaseStringUTFChars(env, string, reinterpret_cast<const char*>(chars));
+ } else {
+ if (critical) {
+ baseEnv(env)->ReleaseStringCritical(env, string, reinterpret_cast<const jchar*>(chars));
+ } else {
+ baseEnv(env)->ReleaseStringChars(env, string, reinterpret_cast<const jchar*>(chars));
+ }
+ }
+ JniValueType result;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
}
- private:
- static inline const JNINativeInterface* baseEnv(JNIEnv* env) {
- return reinterpret_cast<JNIEnvExt*>(env)->unchecked_functions;
+ static jarray NewPrimitiveArray(const char* function_name, JNIEnv* env, jsize length,
+ Primitive::Type type) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[2] = {{.E = env}, {.z = length}};
+ if (sc.Check(soa, true, "Ez", args)) {
+ JniValueType result;
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ result.a = baseEnv(env)->NewBooleanArray(env, length);
+ break;
+ case Primitive::kPrimByte:
+ result.a = baseEnv(env)->NewByteArray(env, length);
+ break;
+ case Primitive::kPrimChar:
+ result.a = baseEnv(env)->NewCharArray(env, length);
+ break;
+ case Primitive::kPrimShort:
+ result.a = baseEnv(env)->NewShortArray(env, length);
+ break;
+ case Primitive::kPrimInt:
+ result.a = baseEnv(env)->NewIntArray(env, length);
+ break;
+ case Primitive::kPrimLong:
+ result.a = baseEnv(env)->NewLongArray(env, length);
+ break;
+ case Primitive::kPrimFloat:
+ result.a = baseEnv(env)->NewFloatArray(env, length);
+ break;
+ case Primitive::kPrimDouble:
+ result.a = baseEnv(env)->NewDoubleArray(env, length);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected primitive type: " << type;
+ }
+ if (sc.Check(soa, false, "a", &result)) {
+ return result.a;
+ }
+ }
+ return nullptr;
+ }
+
+ static void* GetPrimitiveArrayElements(const char* function_name, Primitive::Type type,
+ JNIEnv* env, jarray array, jboolean* is_copy) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[3] = {{.E = env}, {.a = array}, {.p = is_copy}};
+ if (sc.Check(soa, true, "Eap", args) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+ JniValueType result;
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ result.p = baseEnv(env)->GetBooleanArrayElements(env, down_cast<jbooleanArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimByte:
+ result.p = baseEnv(env)->GetByteArrayElements(env, down_cast<jbyteArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimChar:
+ result.p = baseEnv(env)->GetCharArrayElements(env, down_cast<jcharArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimShort:
+ result.p = baseEnv(env)->GetShortArrayElements(env, down_cast<jshortArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimInt:
+ result.p = baseEnv(env)->GetIntArrayElements(env, down_cast<jintArray>(array), is_copy);
+ break;
+ case Primitive::kPrimLong:
+ result.p = baseEnv(env)->GetLongArrayElements(env, down_cast<jlongArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimFloat:
+ result.p = baseEnv(env)->GetFloatArrayElements(env, down_cast<jfloatArray>(array),
+ is_copy);
+ break;
+ case Primitive::kPrimDouble:
+ result.p = baseEnv(env)->GetDoubleArrayElements(env, down_cast<jdoubleArray>(array),
+ is_copy);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected primitive type: " << type;
+ }
+ if (result.p != nullptr && soa.ForceCopy()) {
+ result.p = GuardedCopy::CreateGuardedPACopy(env, array, is_copy);
+ if (is_copy != nullptr) {
+ *is_copy = JNI_TRUE;
+ }
+ }
+ if (sc.Check(soa, false, "p", &result)) {
+ return const_cast<void*>(result.p);
+ }
+ }
+ return nullptr;
+ }
+
+ static void ReleasePrimitiveArrayElements(const char* function_name, Primitive::Type type,
+ JNIEnv* env, jarray array, void* elems, jint mode) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_ExcepOkay, function_name);
+ if (sc.CheckNonNull(elems) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+ if (soa.ForceCopy()) {
+ elems = GuardedCopy::ReleaseGuardedPACopy(function_name, env, array, elems, mode);
+ }
+ if (!soa.ForceCopy() || elems != nullptr) {
+ JniValueType args[4] = {{.E = env}, {.a = array}, {.p = elems}, {.r = mode}};
+ if (sc.Check(soa, true, "Eapr", args)) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ baseEnv(env)->ReleaseBooleanArrayElements(env, down_cast<jbooleanArray>(array),
+ reinterpret_cast<jboolean*>(elems), mode);
+ break;
+ case Primitive::kPrimByte:
+ baseEnv(env)->ReleaseByteArrayElements(env, down_cast<jbyteArray>(array),
+ reinterpret_cast<jbyte*>(elems), mode);
+ break;
+ case Primitive::kPrimChar:
+ baseEnv(env)->ReleaseCharArrayElements(env, down_cast<jcharArray>(array),
+ reinterpret_cast<jchar*>(elems), mode);
+ break;
+ case Primitive::kPrimShort:
+ baseEnv(env)->ReleaseShortArrayElements(env, down_cast<jshortArray>(array),
+ reinterpret_cast<jshort*>(elems), mode);
+ break;
+ case Primitive::kPrimInt:
+ baseEnv(env)->ReleaseIntArrayElements(env, down_cast<jintArray>(array),
+ reinterpret_cast<jint*>(elems), mode);
+ break;
+ case Primitive::kPrimLong:
+ baseEnv(env)->ReleaseLongArrayElements(env, down_cast<jlongArray>(array),
+ reinterpret_cast<jlong*>(elems), mode);
+ break;
+ case Primitive::kPrimFloat:
+ baseEnv(env)->ReleaseFloatArrayElements(env, down_cast<jfloatArray>(array),
+ reinterpret_cast<jfloat*>(elems), mode);
+ break;
+ case Primitive::kPrimDouble:
+ baseEnv(env)->ReleaseDoubleArrayElements(env, down_cast<jdoubleArray>(array),
+ reinterpret_cast<jdouble*>(elems), mode);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected primitive type: " << type;
+ }
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+ }
+ }
+
+ static void GetPrimitiveArrayRegion(const char* function_name, Primitive::Type type, JNIEnv* env,
+ jarray array, jsize start, jsize len, void* buf) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[5] = {{.E = env}, {.a = array}, {.z = start}, {.z = len}, {.p = buf}};
+ // Note: the start and len arguments are checked as 'I' rather than 'z' as invalid indices
+ // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+ if (sc.Check(soa, true, "EaIIp", args) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ baseEnv(env)->GetBooleanArrayRegion(env, down_cast<jbooleanArray>(array), start, len,
+ reinterpret_cast<jboolean*>(buf));
+ break;
+ case Primitive::kPrimByte:
+ baseEnv(env)->GetByteArrayRegion(env, down_cast<jbyteArray>(array), start, len,
+ reinterpret_cast<jbyte*>(buf));
+ break;
+ case Primitive::kPrimChar:
+ baseEnv(env)->GetCharArrayRegion(env, down_cast<jcharArray>(array), start, len,
+ reinterpret_cast<jchar*>(buf));
+ break;
+ case Primitive::kPrimShort:
+ baseEnv(env)->GetShortArrayRegion(env, down_cast<jshortArray>(array), start, len,
+ reinterpret_cast<jshort*>(buf));
+ break;
+ case Primitive::kPrimInt:
+ baseEnv(env)->GetIntArrayRegion(env, down_cast<jintArray>(array), start, len,
+ reinterpret_cast<jint*>(buf));
+ break;
+ case Primitive::kPrimLong:
+ baseEnv(env)->GetLongArrayRegion(env, down_cast<jlongArray>(array), start, len,
+ reinterpret_cast<jlong*>(buf));
+ break;
+ case Primitive::kPrimFloat:
+ baseEnv(env)->GetFloatArrayRegion(env, down_cast<jfloatArray>(array), start, len,
+ reinterpret_cast<jfloat*>(buf));
+ break;
+ case Primitive::kPrimDouble:
+ baseEnv(env)->GetDoubleArrayRegion(env, down_cast<jdoubleArray>(array), start, len,
+ reinterpret_cast<jdouble*>(buf));
+ break;
+ default:
+ LOG(FATAL) << "Unexpected primitive type: " << type;
+ }
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
+ }
+
+ static void SetPrimitiveArrayRegion(const char* function_name, Primitive::Type type, JNIEnv* env,
+ jarray array, jsize start, jsize len, const void* buf) {
+ ScopedObjectAccess soa(env);
+ ScopedCheck sc(kFlag_Default, function_name);
+ JniValueType args[5] = {{.E = env}, {.a = array}, {.z = start}, {.z = len}, {.p = buf}};
+ // Note: the start and len arguments are checked as 'I' rather than 'z' as invalid indices
+ // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+ if (sc.Check(soa, true, "EaIIp", args) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+ switch (type) {
+ case Primitive::kPrimBoolean:
+ baseEnv(env)->SetBooleanArrayRegion(env, down_cast<jbooleanArray>(array), start, len,
+ reinterpret_cast<const jboolean*>(buf));
+ break;
+ case Primitive::kPrimByte:
+ baseEnv(env)->SetByteArrayRegion(env, down_cast<jbyteArray>(array), start, len,
+ reinterpret_cast<const jbyte*>(buf));
+ break;
+ case Primitive::kPrimChar:
+ baseEnv(env)->SetCharArrayRegion(env, down_cast<jcharArray>(array), start, len,
+ reinterpret_cast<const jchar*>(buf));
+ break;
+ case Primitive::kPrimShort:
+ baseEnv(env)->SetShortArrayRegion(env, down_cast<jshortArray>(array), start, len,
+ reinterpret_cast<const jshort*>(buf));
+ break;
+ case Primitive::kPrimInt:
+ baseEnv(env)->SetIntArrayRegion(env, down_cast<jintArray>(array), start, len,
+ reinterpret_cast<const jint*>(buf));
+ break;
+ case Primitive::kPrimLong:
+ baseEnv(env)->SetLongArrayRegion(env, down_cast<jlongArray>(array), start, len,
+ reinterpret_cast<const jlong*>(buf));
+ break;
+ case Primitive::kPrimFloat:
+ baseEnv(env)->SetFloatArrayRegion(env, down_cast<jfloatArray>(array), start, len,
+ reinterpret_cast<const jfloat*>(buf));
+ break;
+ case Primitive::kPrimDouble:
+ baseEnv(env)->SetDoubleArrayRegion(env, down_cast<jdoubleArray>(array), start, len,
+ reinterpret_cast<const jdouble*>(buf));
+ break;
+ default:
+ LOG(FATAL) << "Unexpected primitive type: " << type;
+ }
+ JniValueType result;
+ result.V = nullptr;
+ sc.Check(soa, false, "V", &result);
+ }
}
};
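Every wrapper in the CheckJNI class above follows the same entry-check / base-call / exit-check shape. The following is a condensed sketch of that flow, not part of the patch: SketchCheck is a hypothetical stand-in for ScopedCheck, the format strings reuse the same shorthand ('E' = JNIEnv, 'L' = object reference, 'i' = jint result), and only the JNINativeInterface call is real JNI API.

#include <jni.h>

// Hypothetical stand-in for ScopedCheck: validate arguments on entry, results on exit.
struct SketchCheck {
  bool Enter(const char* fmt) { (void)fmt; return true; }  // argument checks
  bool Exit(const char* fmt) { (void)fmt; return true; }   // pending-exception and result checks
};

static jint CheckedMonitorEnterSketch(const JNINativeInterface* base, JNIEnv* env, jobject obj) {
  SketchCheck sc;
  if (sc.Enter("EL")) {                          // pre-call: env and a valid object reference
    jint result = base->MonitorEnter(env, obj);  // forward to the unchecked function table
    if (sc.Exit("i")) {                          // post-call: check the jint result
      return result;
    }
  }
  return JNI_ERR;  // in the real code a failed check normally aborts before reaching this
}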
@@ -2023,38 +3584,58 @@ const JNINativeInterface* GetCheckJniNativeInterface() {
class CheckJII {
public:
static jint DestroyJavaVM(JavaVM* vm) {
- ScopedCheck sc(vm, false, __FUNCTION__);
- sc.Check(true, "v", vm);
- return CHECK_JNI_EXIT("I", BaseVm(vm)->DestroyJavaVM(vm));
+ ScopedCheck sc(kFlag_Invocation, __FUNCTION__, false);
+ JniValueType args[1] = {{.v = vm}};
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "v", args);
+ JniValueType result;
+ result.i = BaseVm(vm)->DestroyJavaVM(vm);
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ return result.i;
}
static jint AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
- ScopedCheck sc(vm, false, __FUNCTION__);
- sc.Check(true, "vpp", vm, p_env, thr_args);
- return CHECK_JNI_EXIT("I", BaseVm(vm)->AttachCurrentThread(vm, p_env, thr_args));
+ ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+ JniValueType args[3] = {{.v = vm}, {.p = p_env}, {.p = thr_args}};
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "vpp", args);
+ JniValueType result;
+ result.i = BaseVm(vm)->AttachCurrentThread(vm, p_env, thr_args);
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ return result.i;
}
static jint AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
- ScopedCheck sc(vm, false, __FUNCTION__);
- sc.Check(true, "vpp", vm, p_env, thr_args);
- return CHECK_JNI_EXIT("I", BaseVm(vm)->AttachCurrentThreadAsDaemon(vm, p_env, thr_args));
+ ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+ JniValueType args[3] = {{.v = vm}, {.p = p_env}, {.p = thr_args}};
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "vpp", args);
+ JniValueType result;
+ result.i = BaseVm(vm)->AttachCurrentThreadAsDaemon(vm, p_env, thr_args);
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ return result.i;
}
static jint DetachCurrentThread(JavaVM* vm) {
- ScopedCheck sc(vm, true, __FUNCTION__);
- sc.Check(true, "v", vm);
- return CHECK_JNI_EXIT("I", BaseVm(vm)->DetachCurrentThread(vm));
+ ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+ JniValueType args[1] = {{.v = vm}};
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "v", args);
+ JniValueType result;
+ result.i = BaseVm(vm)->DetachCurrentThread(vm);
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ return result.i;
}
- static jint GetEnv(JavaVM* vm, void** env, jint version) {
- ScopedCheck sc(vm, true, __FUNCTION__);
- sc.Check(true, "vpI", vm);
- return CHECK_JNI_EXIT("I", BaseVm(vm)->GetEnv(vm, env, version));
+ static jint GetEnv(JavaVM* vm, void** p_env, jint version) {
+ ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+ JniValueType args[3] = {{.v = vm}, {.p = p_env}, {.I = version}};
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "vpI", args);
+ JniValueType result;
+ result.i = BaseVm(vm)->GetEnv(vm, p_env, version);
+ sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ return result.i;
}
private:
- static inline const JNIInvokeInterface* BaseVm(JavaVM* vm) {
- return reinterpret_cast<JavaVMExt*>(vm)->unchecked_functions;
+ static const JNIInvokeInterface* BaseVm(JavaVM* vm) {
+ return reinterpret_cast<JavaVMExt*>(vm)->GetUncheckedFunctions();
}
};
diff --git a/runtime/check_jni.h b/runtime/check_jni.h
new file mode 100644
index 0000000000..f41abf81ce
--- /dev/null
+++ b/runtime/check_jni.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CHECK_JNI_H_
+#define ART_RUNTIME_CHECK_JNI_H_
+
+#include <jni.h>
+
+namespace art {
+
+const JNINativeInterface* GetCheckJniNativeInterface();
+const JNIInvokeInterface* GetCheckJniInvokeInterface();
+
+} // namespace art
+
+#endif // ART_RUNTIME_CHECK_JNI_H_
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 25eb3a342d..9921bddad8 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_CLASS_LINKER_INL_H_
#include "class_linker.h"
+#include "gc_root-inl.h"
#include "gc/heap-inl.h"
#include "mirror/art_field.h"
#include "mirror/class_loader.h"
@@ -40,8 +41,7 @@ inline mirror::Class* ClassLinker::FindSystemClass(Thread* self, const char* des
inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class** element_class) {
for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
// Read the cached array class once to avoid races with other threads setting it.
- mirror::Class* array_class = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &find_array_class_cache_[i]);
+ mirror::Class* array_class = find_array_class_cache_[i].Read();
if (array_class != nullptr && array_class->GetComponentType() == *element_class) {
return array_class;
}
@@ -54,7 +54,7 @@ inline mirror::Class* ClassLinker::FindArrayClass(Thread* self, mirror::Class**
mirror::Class* array_class = FindClass(self, descriptor.c_str(), class_loader);
// Benign races in storing array class and incrementing index.
size_t victim_index = find_array_class_cache_next_victim_;
- find_array_class_cache_[victim_index] = array_class;
+ find_array_class_cache_[victim_index] = GcRoot<mirror::Class>(array_class);
find_array_class_cache_next_victim_ = (victim_index + 1) % kFindArrayCacheSize;
return array_class;
}
@@ -77,7 +77,7 @@ inline mirror::String* ClassLinker::ResolveString(uint32_t string_idx,
inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx,
mirror::ArtMethod* referrer) {
- mirror::Class* resolved_type = referrer->GetDexCacheResolvedTypes()->Get(type_idx);
+ mirror::Class* resolved_type = referrer->GetDexCacheResolvedType(type_idx);
if (UNLIKELY(resolved_type == nullptr)) {
mirror::Class* declaring_class = referrer->GetDeclaringClass();
StackHandleScope<2> hs(Thread::Current());
@@ -85,9 +85,8 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx,
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- if (resolved_type != nullptr) {
- DCHECK_EQ(dex_cache->GetResolvedType(type_idx), resolved_type);
- }
+ // Note: We cannot check here to see whether we added the type to the cache. The type
+ // might be an erroneous class, which results in it being hidden from us.
}
return resolved_type;
}
@@ -102,9 +101,8 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, mirror::ArtFie
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_type = ResolveType(dex_file, type_idx, dex_cache, class_loader);
- if (resolved_type != nullptr) {
- DCHECK_EQ(dex_cache->GetResolvedType(type_idx), resolved_type);
- }
+ // Note: We cannot check here to see whether we added the type to the cache. The type
+ // might be an erroneous class, which results in it being hidden from us.
}
return resolved_type;
}
@@ -112,8 +110,7 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, mirror::ArtFie
inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx,
mirror::ArtMethod* referrer,
InvokeType type) {
- mirror::ArtMethod* resolved_method =
- referrer->GetDexCacheResolvedMethods()->Get(method_idx);
+ mirror::ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx);
if (resolved_method == nullptr || resolved_method->IsRuntimeMethod()) {
return nullptr;
}
@@ -135,9 +132,8 @@ inline mirror::ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t meth
const DexFile* dex_file = h_dex_cache->GetDexFile();
resolved_method = ResolveMethod(*dex_file, method_idx, h_dex_cache, h_class_loader, h_referrer,
type);
- if (resolved_method != nullptr) {
- DCHECK_EQ(h_dex_cache->GetResolvedMethod(method_idx), resolved_method);
- }
+ // Note: We cannot check here to see whether we added the method to the cache. It
+ // might be an erroneous class, which results in it being hidden from us.
return resolved_method;
}
@@ -156,9 +152,8 @@ inline mirror::ArtField* ClassLinker::ResolveField(uint32_t field_idx, mirror::A
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
const DexFile& dex_file = *dex_cache->GetDexFile();
resolved_field = ResolveField(dex_file, field_idx, dex_cache, class_loader, is_static);
- if (resolved_field != nullptr) {
- DCHECK_EQ(dex_cache->GetResolvedField(field_idx), resolved_field);
- }
+ // Note: We cannot check here to see whether we added the field to the cache. The type
+ // might be an erroneous class, which results in it being hidden from us.
}
return resolved_field;
}
@@ -204,10 +199,8 @@ inline mirror::ObjectArray<mirror::ArtField>* ClassLinker::AllocArtFieldArray(Th
inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(class_roots_ != NULL);
- mirror::ObjectArray<mirror::Class>* class_roots =
- ReadBarrier::BarrierForRoot<mirror::ObjectArray<mirror::Class>, kWithReadBarrier>(
- &class_roots_);
+ DCHECK(!class_roots_.IsNull());
+ mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
mirror::Class* klass = class_roots->Get(class_root);
DCHECK(klass != NULL);
return klass;
@@ -216,7 +209,7 @@ inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
inline mirror::DexCache* ClassLinker::GetDexCache(size_t idx) {
dex_lock_.AssertSharedHeld(Thread::Current());
DCHECK(idx < dex_caches_.size());
- return ReadBarrier::BarrierForRoot<mirror::DexCache, kWithReadBarrier>(&dex_caches_[idx]);
+ return dex_caches_[idx].Read();
}
} // namespace art
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 2c11f8b89c..3b4976f2a0 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -16,9 +16,6 @@
#include "class_linker.h"
-#include <fcntl.h>
-#include <sys/file.h>
-#include <sys/stat.h>
#include <deque>
#include <memory>
#include <string>
@@ -34,6 +31,7 @@
#include "compiler_callbacks.h"
#include "debugger.h"
#include "dex_file-inl.h"
+#include "gc_root-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/heap.h"
@@ -267,9 +265,10 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
java_lang_ref_Reference->SetStatus(mirror::Class::kStatusResolved, self);
// Create storage for root classes, save away our work so far (requires descriptors).
- class_roots_ = mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.Get(),
- kClassRootsMax);
- CHECK(class_roots_ != NULL);
+ class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class> >(
+ mirror::ObjectArray<mirror::Class>::Alloc(self, object_array_class.Get(),
+ kClassRootsMax));
+ CHECK(!class_roots_.IsNull());
SetClassRoot(kJavaLangClass, java_lang_Class.Get());
SetClassRoot(kJavaLangObject, java_lang_Object.Get());
SetClassRoot(kClassArrayClass, class_array_class.Get());
@@ -289,7 +288,7 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
SetClassRoot(kPrimitiveVoid, CreatePrimitiveClass(self, Primitive::kPrimVoid));
// Create array interface entries to populate once we can load system classes.
- array_iftable_ = AllocIfTable(self, 2);
+ array_iftable_ = GcRoot<mirror::IfTable>(AllocIfTable(self, 2));
// Create int array type for AllocDexCache (done in AppendToBootClassPath).
Handle<mirror::Class> int_array_class(hs.NewHandle(
@@ -428,8 +427,7 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class
// We assume that Cloneable/Serializable don't have superinterfaces -- normally we'd have to
// crawl up and explicitly list all of the supers as well.
{
- mirror::IfTable* array_iftable =
- ReadBarrier::BarrierForRoot<mirror::IfTable, kWithReadBarrier>(&array_iftable_);
+ mirror::IfTable* array_iftable = array_iftable_.Read();
array_iftable->SetInterface(0, java_lang_Cloneable);
array_iftable->SetInterface(1, java_io_Serializable);
}
@@ -559,7 +557,7 @@ void ClassLinker::FinishInit(Thread* self) {
// if possible add new checks there to catch errors early
}
- CHECK(array_iftable_ != NULL);
+ CHECK(!array_iftable_.IsNull());
// disable the slow paths in FindClass and CreatePrimitiveClass now
// that Object, Class, and Object[] are setup
@@ -614,6 +612,14 @@ bool ClassLinker::GenerateOatFile(const char* dex_filename,
argv.push_back("--compiler-filter=verify-none");
}
+ if (Runtime::Current()->MustRelocateIfPossible()) {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xrelocate");
+ } else {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xnorelocate");
+ }
+
if (!kIsTargetBuild) {
argv.push_back("--host");
}
@@ -679,14 +685,6 @@ const OatFile* ClassLinker::FindOpenedOatFile(const char* oat_location, const ch
return NULL;
}
-static std::string GetMultiDexClassesDexName(size_t number, const char* dex_location) {
- if (number == 0) {
- return dex_location;
- } else {
- return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, number + 1);
- }
-}
-
static bool LoadMultiDexFilesFromOatFile(const OatFile* oat_file, const char* dex_location,
bool generated,
std::vector<std::string>* error_msgs,
@@ -699,7 +697,7 @@ static bool LoadMultiDexFilesFromOatFile(const OatFile* oat_file, const char* de
bool success = true;
for (size_t i = 0; success; ++i) {
- std::string next_name_str = GetMultiDexClassesDexName(i, dex_location);
+ std::string next_name_str = DexFile::GetMultiDexClassesDexName(i, dex_location);
const char* next_name = next_name_str.c_str();
uint32_t dex_location_checksum;
@@ -814,6 +812,7 @@ bool ClassLinker::OpenDexFilesFromOat(const char* dex_location, const char* oat_
return false;
}
+ // TODO Caller specifically asks for this oat_location. We should honor it. Probably?
open_oat_file.reset(FindOatFileInOatLocationForDexFile(dex_location, dex_location_checksum,
oat_location, &error_msg));
@@ -833,8 +832,6 @@ bool ClassLinker::OpenDexFilesFromOat(const char* dex_location, const char* oat_
// There's no point in going forward and eventually try to regenerate the
// file if we couldn't remove the obsolete one. Mostly likely we will fail
// with the same error when trying to write the new file.
- // In case the clean up failure is due to permission issues it's *mandatory*
- // to stop to avoid regenerating under the wrong user.
// TODO: should we maybe do this only when we get permission issues? (i.e. EACCESS).
if (obsolete_file_cleanup_failed) {
return false;
@@ -940,6 +937,13 @@ const OatFile* ClassLinker::FindOatFileInOatLocationForDexFile(const char* dex_l
actual_image_oat_offset);
return nullptr;
}
+ int32_t expected_patch_delta = image_header.GetPatchDelta();
+ int32_t actual_patch_delta = oat_file->GetOatHeader().GetImagePatchDelta();
+ if (expected_patch_delta != actual_patch_delta) {
+ *error_msg = StringPrintf("Failed to find oat file at '%s' with expected patch delta %d, "
+                              "found %d", oat_location, expected_patch_delta, actual_patch_delta);
+ return nullptr;
+ }
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
&dex_location_checksum);
if (oat_dex_file == nullptr) {
@@ -987,11 +991,25 @@ const OatFile* ClassLinker::CreateOatFileForDexLocation(const char* dex_location
return oat_file.release();
}
-bool ClassLinker::VerifyOatFileChecksums(const OatFile* oat_file,
- const char* dex_location,
- uint32_t dex_location_checksum,
- const InstructionSet instruction_set,
- std::string* error_msg) {
+bool ClassLinker::VerifyOatImageChecksum(const OatFile* oat_file,
+ const InstructionSet instruction_set) {
+ Runtime* runtime = Runtime::Current();
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ uint32_t image_oat_checksum = 0;
+ if (instruction_set == kRuntimeISA) {
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ image_oat_checksum = image_header.GetOatChecksum();
+ } else {
+ std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
+ image_space->GetImageLocation().c_str(), instruction_set));
+ image_oat_checksum = image_header->GetOatChecksum();
+ }
+ return oat_file->GetOatHeader().GetImageFileLocationOatChecksum() == image_oat_checksum;
+}
+
+bool ClassLinker::VerifyOatChecksums(const OatFile* oat_file,
+ const InstructionSet instruction_set,
+ std::string* error_msg) {
Runtime* runtime = Runtime::Current();
const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
@@ -1000,19 +1018,42 @@ bool ClassLinker::VerifyOatFileChecksums(const OatFile* oat_file,
// image header from the image for the right instruction set.
uint32_t image_oat_checksum = 0;
uintptr_t image_oat_data_begin = 0;
- if (instruction_set == kRuntimeISA) {
+ int32_t image_patch_delta = 0;
+ if (instruction_set == Runtime::Current()->GetInstructionSet()) {
const ImageHeader& image_header = image_space->GetImageHeader();
image_oat_checksum = image_header.GetOatChecksum();
image_oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
+ image_patch_delta = image_header.GetPatchDelta();
} else {
std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
image_space->GetImageLocation().c_str(), instruction_set));
image_oat_checksum = image_header->GetOatChecksum();
image_oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
+ image_patch_delta = image_header->GetPatchDelta();
}
const OatHeader& oat_header = oat_file->GetOatHeader();
- bool image_check = ((oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum)
- && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin));
+ bool ret = ((oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum)
+ && (oat_header.GetImagePatchDelta() == image_patch_delta)
+ && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin));
+ if (!ret) {
+ *error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d, %d) with (0x%x, %" PRIdPTR ", %d)",
+ oat_file->GetLocation().c_str(),
+ oat_file->GetOatHeader().GetImageFileLocationOatChecksum(),
+ oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(),
+ oat_file->GetOatHeader().GetImagePatchDelta(),
+ image_oat_checksum, image_oat_data_begin, image_patch_delta);
+ }
+ return ret;
+}
+
+bool ClassLinker::VerifyOatAndDexFileChecksums(const OatFile* oat_file,
+ const char* dex_location,
+ uint32_t dex_location_checksum,
+ const InstructionSet instruction_set,
+ std::string* error_msg) {
+ if (!VerifyOatChecksums(oat_file, instruction_set, error_msg)) {
+ return false;
+ }
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
&dex_location_checksum);
@@ -1028,39 +1069,22 @@ bool ClassLinker::VerifyOatFileChecksums(const OatFile* oat_file,
}
return false;
}
- bool dex_check = dex_location_checksum == oat_dex_file->GetDexFileLocationChecksum();
-
- if (image_check && dex_check) {
- return true;
- }
- if (!image_check) {
- ScopedObjectAccess soa(Thread::Current());
- *error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d) with (0x%x, %" PRIdPTR ")",
- oat_file->GetLocation().c_str(),
- oat_file->GetOatHeader().GetImageFileLocationOatChecksum(),
- oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(),
- image_oat_checksum, image_oat_data_begin);
- }
- if (!dex_check) {
+ if (dex_location_checksum != oat_dex_file->GetDexFileLocationChecksum()) {
*error_msg = StringPrintf("oat file '%s' mismatch (0x%x) with '%s' (0x%x)",
oat_file->GetLocation().c_str(),
oat_dex_file->GetDexFileLocationChecksum(),
dex_location, dex_location_checksum);
+ return false;
}
- return false;
+ return true;
}
-const OatFile* ClassLinker::LoadOatFileAndVerifyDexFile(const std::string& oat_file_location,
- const char* dex_location,
- std::string* error_msg,
- bool* open_failed) {
- std::unique_ptr<const OatFile> oat_file(FindOatFileFromOatLocation(oat_file_location, error_msg));
- if (oat_file.get() == nullptr) {
- *open_failed = true;
- return nullptr;
- }
- *open_failed = false;
+bool ClassLinker::VerifyOatWithDexFile(const OatFile* oat_file,
+ const char* dex_location,
+ std::string* error_msg) {
+ CHECK(oat_file != nullptr);
+ CHECK(dex_location != nullptr);
std::unique_ptr<const DexFile> dex_file;
uint32_t dex_location_checksum;
if (!DexFile::GetChecksum(dex_location, &dex_location_checksum, error_msg)) {
@@ -1070,26 +1094,21 @@ const OatFile* ClassLinker::LoadOatFileAndVerifyDexFile(const std::string& oat_f
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, NULL);
if (oat_dex_file == nullptr) {
*error_msg = StringPrintf("Dex checksum mismatch for location '%s' and failed to find oat "
- "dex file '%s': %s", oat_file_location.c_str(), dex_location,
+ "dex file '%s': %s", oat_file->GetLocation().c_str(), dex_location,
error_msg->c_str());
- return nullptr;
+ return false;
}
dex_file.reset(oat_dex_file->OpenDexFile(error_msg));
} else {
- bool verified = VerifyOatFileChecksums(oat_file.get(), dex_location, dex_location_checksum,
- kRuntimeISA, error_msg);
+ bool verified = VerifyOatAndDexFileChecksums(oat_file, dex_location, dex_location_checksum,
+ kRuntimeISA, error_msg);
if (!verified) {
- return nullptr;
+ return false;
}
dex_file.reset(oat_file->GetOatDexFile(dex_location,
&dex_location_checksum)->OpenDexFile(error_msg));
}
-
- if (dex_file.get() != nullptr) {
- return oat_file.release();
- } else {
- return nullptr;
- }
+ return dex_file.get() != nullptr;
}
const OatFile* ClassLinker::FindOatFileContainingDexFileFromDexLocation(
@@ -1099,51 +1118,25 @@ const OatFile* ClassLinker::FindOatFileContainingDexFileFromDexLocation(
std::vector<std::string>* error_msgs,
bool* obsolete_file_cleanup_failed) {
*obsolete_file_cleanup_failed = false;
- // Look for an existing file next to dex. for example, for
- // /foo/bar/baz.jar, look for /foo/bar/<isa>/baz.odex.
- std::string odex_filename(DexFilenameToOdexFilename(dex_location, isa));
- bool open_failed;
+ bool already_opened = false;
+ std::string dex_location_str(dex_location);
+ std::unique_ptr<const OatFile> oat_file(OpenOatFileFromDexLocation(dex_location_str, isa,
+ &already_opened,
+ obsolete_file_cleanup_failed,
+ error_msgs));
std::string error_msg;
- const OatFile* oat_file = LoadOatFileAndVerifyDexFile(odex_filename, dex_location, &error_msg,
- &open_failed);
- if (oat_file != nullptr) {
- return oat_file;
- }
- if (dex_location_checksum == nullptr) {
- error_msgs->push_back(StringPrintf("Failed to open oat file from %s and no classes.dex found in"
- "%s: %s", odex_filename.c_str(), dex_location,
+ if (oat_file.get() == nullptr) {
+ error_msgs->push_back(StringPrintf("Failed to open oat file from dex location '%s'",
+ dex_location));
+ return nullptr;
+ } else if (!VerifyOatWithDexFile(oat_file.get(), dex_location, &error_msg)) {
+ error_msgs->push_back(StringPrintf("Failed to verify oat file '%s' found for dex location "
+ "'%s': %s", oat_file->GetLocation().c_str(), dex_location,
error_msg.c_str()));
return nullptr;
+ } else {
+ return oat_file.release();
}
-
- std::string cache_error_msg;
- const std::string dalvik_cache(GetDalvikCacheOrDie(GetInstructionSetString(kRuntimeISA)));
- std::string cache_location(GetDalvikCacheFilenameOrDie(dex_location,
- dalvik_cache.c_str()));
- oat_file = LoadOatFileAndVerifyDexFile(cache_location, dex_location, &cache_error_msg,
- &open_failed);
- if (oat_file != nullptr) {
- return oat_file;
- }
-
- if (!open_failed && TEMP_FAILURE_RETRY(unlink(cache_location.c_str())) != 0) {
- std::string error_msg = StringPrintf("Failed to remove obsolete file from %s when searching"
- "for dex file %s: %s",
- cache_location.c_str(), dex_location, strerror(errno));
- error_msgs->push_back(error_msg);
- VLOG(class_linker) << error_msg;
- // Let the caller know that we couldn't remove the obsolete file.
- // This is a good indication that further writes may fail as well.
- *obsolete_file_cleanup_failed = true;
- }
-
- std::string compound_msg = StringPrintf("Failed to open oat file from %s (error '%s') or %s "
- "(error '%s').", odex_filename.c_str(), error_msg.c_str(),
- cache_location.c_str(), cache_error_msg.c_str());
- VLOG(class_linker) << compound_msg;
- error_msgs->push_back(compound_msg);
-
- return nullptr;
}
const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& oat_location) {
@@ -1158,6 +1151,277 @@ const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string&
return nullptr;
}
+const OatFile* ClassLinker::OpenOatFileFromDexLocation(const std::string& dex_location,
+ InstructionSet isa,
+ bool *already_opened,
+ bool *obsolete_file_cleanup_failed,
+ std::vector<std::string>* error_msgs) {
+ // Find out if we've already opened the file
+ const OatFile* ret = nullptr;
+ std::string odex_filename(DexFilenameToOdexFilename(dex_location, isa));
+ ret = FindOpenedOatFileFromOatLocation(odex_filename);
+ if (ret != nullptr) {
+ *already_opened = true;
+ return ret;
+ }
+
+ std::string dalvik_cache;
+ bool have_android_data = false;
+ bool have_dalvik_cache = false;
+ GetDalvikCache(GetInstructionSetString(kRuntimeISA), false, &dalvik_cache,
+ &have_android_data, &have_dalvik_cache);
+ std::string cache_filename;
+ if (have_dalvik_cache) {
+ cache_filename = GetDalvikCacheFilenameOrDie(dex_location.c_str(), dalvik_cache.c_str());
+ ret = FindOpenedOatFileFromOatLocation(cache_filename);
+ if (ret != nullptr) {
+ *already_opened = true;
+ return ret;
+ }
+ } else {
+ // If we need to relocate we should just place odex back where it started.
+ cache_filename = odex_filename;
+ }
+
+ ret = nullptr;
+
+  // We know that neither the odex nor the cached version is already in use, if it even exists.
+ //
+ // Now we do the following:
+ // 1) Try and open the odex version
+  // 2) If present, checksum-verified & relocated correctly, return it
+ // 3) Close the odex version to free up its address space.
+ // 4) Try and open the cache version
+  // 5) If present, checksum-verified & relocated correctly, return it
+ // 6) Close the cache version to free up its address space.
+ // 7) If we should relocate:
+  //    a) If we have opened and checksum-verified the odex version, relocate it to
+  //       'cache_filename' and return it
+  //    b) If we have opened and checksum-verified the cache version, relocate it in place and
+  //       return it. This should not happen often (I think only the run-tests will hit this case).
+ // 8) If the cache-version was present we should delete it since it must be obsolete if we get to
+ // this point.
+ // 9) Return nullptr
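+  //
+  // Example (hypothetical paths): for a dex_location of /data/app/foo.apk on arm, the odex
+  // candidate would be /data/app/arm/foo.odex and the dalvik-cache candidate something like
+  // /data/dalvik-cache/arm/data@app@foo.apk@classes.dex.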
+
+ *already_opened = false;
+ const Runtime* runtime = Runtime::Current();
+ CHECK(runtime != nullptr);
+ bool executable = !runtime->IsCompiler();
+
+ std::string odex_error_msg;
+ bool should_patch_system = false;
+ bool odex_checksum_verified = false;
+ {
+    // There is a high probability that both these oat files map similar/the same address
+    // spaces, so we must scope them like this so each gets its turn.
+ std::unique_ptr<OatFile> odex_oat_file(OatFile::Open(odex_filename, odex_filename, NULL,
+ executable, &odex_error_msg));
+ if (odex_oat_file.get() != nullptr && CheckOatFile(odex_oat_file.get(), isa,
+ &odex_checksum_verified,
+ &odex_error_msg)) {
+ error_msgs->push_back(odex_error_msg);
+ return odex_oat_file.release();
+ } else if (odex_checksum_verified) {
+ // We can just relocate
+ should_patch_system = true;
+ odex_error_msg = "Image Patches are incorrect";
+ }
+ }
+
+
+ std::string cache_error_msg;
+ bool should_patch_cache = false;
+ bool cache_checksum_verified = false;
+ if (have_dalvik_cache) {
+ std::unique_ptr<OatFile> cache_oat_file(OatFile::Open(cache_filename, cache_filename, NULL,
+ executable, &cache_error_msg));
+ if (cache_oat_file.get() != nullptr && CheckOatFile(cache_oat_file.get(), isa,
+ &cache_checksum_verified,
+ &cache_error_msg)) {
+ error_msgs->push_back(cache_error_msg);
+ return cache_oat_file.release();
+ } else if (cache_checksum_verified) {
+ // We can just relocate
+ should_patch_cache = true;
+ cache_error_msg = "Image Patches are incorrect";
+ }
+ } else if (have_android_data) {
+ // dalvik_cache does not exist but android data does. This means we should be able to create
+ // it, so we should try.
+ GetDalvikCacheOrDie(GetInstructionSetString(kRuntimeISA), true);
+ }
+
+ ret = nullptr;
+ std::string error_msg;
+ if (runtime->CanRelocate()) {
+ // Run relocation
+ const std::string& image_location =
+ Runtime::Current()->GetHeap()->GetImageSpace()->GetImageLocation();
+ if (odex_checksum_verified && should_patch_system) {
+ ret = PatchAndRetrieveOat(odex_filename, cache_filename, image_location, isa, &error_msg);
+ } else if (cache_checksum_verified && should_patch_cache) {
+ CHECK(have_dalvik_cache);
+ ret = PatchAndRetrieveOat(cache_filename, cache_filename, image_location, isa, &error_msg);
+ }
+ }
+ if (ret == nullptr && have_dalvik_cache && OS::FileExists(cache_filename.c_str())) {
+    // Implicitly: we were able to find where the cached version is, but we were unable to use
+    // it, either as a destination for relocation or to open a file. We should delete it if it
+    // is there.
+ if (TEMP_FAILURE_RETRY(unlink(cache_filename.c_str())) != 0) {
+ std::string rm_error_msg = StringPrintf("Failed to remove obsolete file from %s when "
+ "searching for dex file %s: %s",
+ cache_filename.c_str(), dex_location.c_str(),
+ strerror(errno));
+ error_msgs->push_back(rm_error_msg);
+ VLOG(class_linker) << rm_error_msg;
+ // Let the caller know that we couldn't remove the obsolete file.
+ // This is a good indication that further writes may fail as well.
+ *obsolete_file_cleanup_failed = true;
+ }
+ }
+ if (ret == nullptr) {
+ VLOG(class_linker) << error_msg;
+ error_msgs->push_back(error_msg);
+ std::string relocation_msg;
+ if (runtime->CanRelocate()) {
+ relocation_msg = StringPrintf(" and relocation failed");
+ }
+ if (have_dalvik_cache && cache_checksum_verified) {
+ error_msg = StringPrintf("Failed to open oat file from %s (error %s) or %s "
+ "(error %s)%s.", odex_filename.c_str(), odex_error_msg.c_str(),
+ cache_filename.c_str(), cache_error_msg.c_str(),
+ relocation_msg.c_str());
+ } else {
+ error_msg = StringPrintf("Failed to open oat file from %s (error %s) (no "
+                               "dalvik_cache available)%s.", odex_filename.c_str(),
+ odex_error_msg.c_str(), relocation_msg.c_str());
+ }
+ VLOG(class_linker) << error_msg;
+ error_msgs->push_back(error_msg);
+ }
+ return ret;
+}
+
+const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
+ const std::string& output_oat,
+ const std::string& image_location,
+ InstructionSet isa,
+ std::string* error_msg) {
+ Locks::mutator_lock_->AssertNotHeld(Thread::Current()); // Avoid starving GC.
+ std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
+
+ std::string isa_arg("--instruction-set=");
+ isa_arg += GetInstructionSetString(isa);
+ std::string input_oat_filename_arg("--input-oat-file=");
+ input_oat_filename_arg += input_oat;
+ std::string output_oat_filename_arg("--output-oat-file=");
+ output_oat_filename_arg += output_oat;
+ std::string patched_image_arg("--patched-image-location=");
+ patched_image_arg += image_location;
+
+ std::vector<std::string> argv;
+ argv.push_back(patchoat);
+ argv.push_back(isa_arg);
+ argv.push_back(input_oat_filename_arg);
+ argv.push_back(output_oat_filename_arg);
+ argv.push_back(patched_image_arg);
+
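+  // The assembled command line (also logged below) looks roughly like this, with hypothetical
+  // paths:
+  //   patchoat --instruction-set=arm --input-oat-file=/data/app/arm/foo.odex
+  //            --output-oat-file=/data/dalvik-cache/arm/data@app@foo.apk@classes.dex
+  //            --patched-image-location=/system/framework/boot.art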
+ std::string command_line(Join(argv, ' '));
+ LOG(INFO) << "Relocate Oat File: " << command_line;
+ bool success = Exec(argv, error_msg);
+ if (success) {
+ std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, NULL,
+ !Runtime::Current()->IsCompiler(), error_msg));
+ bool checksum_verified = false;
+ if (output.get() != nullptr && CheckOatFile(output.get(), isa, &checksum_verified, error_msg)) {
+ return output.release();
+ } else if (output.get() != nullptr) {
+ *error_msg = StringPrintf("Patching of oat file '%s' succeeded "
+                                "but output file '%s' failed verification: %s",
+ input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
+ } else {
+ *error_msg = StringPrintf("Patching of oat file '%s' succeeded "
+ "but was unable to open output file '%s': %s",
+ input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
+ }
+ } else {
+    *error_msg = StringPrintf("Patching of oat file '%s' to '%s' "
+ "failed: %s", input_oat.c_str(), output_oat.c_str(),
+ error_msg->c_str());
+ }
+ return nullptr;
+}
+
+int32_t ClassLinker::GetRequiredDelta(const OatFile* oat_file, InstructionSet isa) {
+ Runtime* runtime = Runtime::Current();
+ int32_t real_patch_delta;
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ if (isa == Runtime::Current()->GetInstructionSet()) {
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ real_patch_delta = image_header.GetPatchDelta();
+ } else {
+ std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
+ image_space->GetImageLocation().c_str(), isa));
+ real_patch_delta = image_header->GetPatchDelta();
+ }
+ const OatHeader& oat_header = oat_file->GetOatHeader();
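+  // E.g. (hypothetical values): if the loaded image was patched by 0x5000 but this oat file was
+  // compiled against a patch delta of 0x1000, a further relocation of 0x4000 is required.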
+ return real_patch_delta - oat_header.GetImagePatchDelta();
+}
+
+bool ClassLinker::CheckOatFile(const OatFile* oat_file, InstructionSet isa,
+ bool* checksum_verified,
+ std::string* error_msg) {
+ std::string compound_msg("Oat file failed to verify: ");
+ Runtime* runtime = Runtime::Current();
+ uint32_t real_image_checksum;
+ void* real_image_oat_offset;
+ int32_t real_patch_delta;
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ if (isa == Runtime::Current()->GetInstructionSet()) {
+ const ImageHeader& image_header = image_space->GetImageHeader();
+ real_image_checksum = image_header.GetOatChecksum();
+ real_image_oat_offset = image_header.GetOatDataBegin();
+ real_patch_delta = image_header.GetPatchDelta();
+ } else {
+ std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
+ image_space->GetImageLocation().c_str(), isa));
+ real_image_checksum = image_header->GetOatChecksum();
+ real_image_oat_offset = image_header->GetOatDataBegin();
+ real_patch_delta = image_header->GetPatchDelta();
+ }
+
+ const OatHeader& oat_header = oat_file->GetOatHeader();
+
+ uint32_t oat_image_checksum = oat_header.GetImageFileLocationOatChecksum();
+ *checksum_verified = oat_image_checksum == real_image_checksum;
+ if (!*checksum_verified) {
+    compound_msg += StringPrintf(" Oat Image Checksum Incorrect (expected 0x%x, received 0x%x)",
+ real_image_checksum, oat_image_checksum);
+ }
+
+ void* oat_image_oat_offset =
+ reinterpret_cast<void*>(oat_header.GetImageFileLocationOatDataBegin());
+ bool offset_verified = oat_image_oat_offset == real_image_oat_offset;
+ if (!offset_verified) {
+    compound_msg += StringPrintf(" Oat Image oat offset incorrect (expected %p, received %p)",
+ real_image_oat_offset, oat_image_oat_offset);
+ }
+
+ int32_t oat_patch_delta = oat_header.GetImagePatchDelta();
+ bool patch_delta_verified = oat_patch_delta == real_patch_delta;
+ if (!patch_delta_verified) {
+    compound_msg += StringPrintf(" Oat image patch delta incorrect (expected 0x%x, received 0x%x)",
+ real_patch_delta, oat_patch_delta);
+ }
+
+ bool ret = (*checksum_verified && offset_verified && patch_delta_verified);
+  if (!ret) {
+ *error_msg = compound_msg;
+ }
+ return ret;
+}
+
const OatFile* ClassLinker::FindOatFileFromOatLocation(const std::string& oat_location,
std::string* error_msg) {
const OatFile* oat_file = FindOpenedOatFileFromOatLocation(oat_location);
@@ -1221,7 +1485,7 @@ void ClassLinker::InitFromImage() {
Handle<mirror::ObjectArray<mirror::Class>> class_roots(hs.NewHandle(
space->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)->
AsObjectArray<mirror::Class>()));
- class_roots_ = class_roots.Get();
+ class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(class_roots.Get());
// Special case of setting up the String class early so that we can test arbitrary objects
// as being Strings or not
@@ -1261,11 +1525,11 @@ void ClassLinker::InitFromImage() {
// reinit class_roots_
mirror::Class::SetClassClass(class_roots->Get(kJavaLangClass));
- class_roots_ = class_roots.Get();
+ class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(class_roots.Get());
// reinit array_iftable_ from any array class instance, they should be ==
- array_iftable_ = GetClassRoot(kObjectArrayClass)->GetIfTable();
- DCHECK(array_iftable_ == GetClassRoot(kBooleanArrayClass)->GetIfTable());
+ array_iftable_ = GcRoot<mirror::IfTable>(GetClassRoot(kObjectArrayClass)->GetIfTable());
+ DCHECK(array_iftable_.Read() == GetClassRoot(kBooleanArrayClass)->GetIfTable());
// String class root was set above
mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField));
@@ -1288,22 +1552,23 @@ void ClassLinker::InitFromImage() {
void ClassLinker::VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
- for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
- callback(reinterpret_cast<mirror::Object**>(&it.second), arg, 0, kRootStickyClass);
+ for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
+ it.second.VisitRoot(callback, arg, 0, kRootStickyClass);
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& pair : new_class_roots_) {
- mirror::Object* old_ref = pair.second;
- callback(reinterpret_cast<mirror::Object**>(&pair.second), arg, 0, kRootStickyClass);
- if (UNLIKELY(pair.second != old_ref)) {
+ mirror::Class* old_ref = pair.second.Read<kWithoutReadBarrier>();
+ pair.second.VisitRoot(callback, arg, 0, kRootStickyClass);
+ mirror::Class* new_ref = pair.second.Read<kWithoutReadBarrier>();
+ if (UNLIKELY(new_ref != old_ref)) {
// Uh ohes, GC moved a root in the log. Need to search the class_table and update the
// corresponding object. This is slow, but luckily for us, this may only happen with a
// concurrent moving GC.
for (auto it = class_table_.lower_bound(pair.first), end = class_table_.end();
it != end && it->first == pair.first; ++it) {
// If the class stored matches the old class, update it to the new value.
- if (old_ref == it->second) {
- it->second = pair.second;
+ if (old_ref == it->second.Read<kWithoutReadBarrier>()) {
+ it->second = GcRoot<mirror::Class>(new_ref);
}
}
}
@@ -1325,17 +1590,17 @@ void ClassLinker::VisitClassRoots(RootCallback* callback, void* arg, VisitRootFl
// reinit references to when reinitializing a ClassLinker from a
// mapped image.
void ClassLinker::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
- callback(reinterpret_cast<mirror::Object**>(&class_roots_), arg, 0, kRootVMInternal);
+ class_roots_.VisitRoot(callback, arg, 0, kRootVMInternal);
Thread* self = Thread::Current();
{
ReaderMutexLock mu(self, dex_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
- for (mirror::DexCache*& dex_cache : dex_caches_) {
- callback(reinterpret_cast<mirror::Object**>(&dex_cache), arg, 0, kRootVMInternal);
+ for (GcRoot<mirror::DexCache>& dex_cache : dex_caches_) {
+ dex_cache.VisitRoot(callback, arg, 0, kRootVMInternal);
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (size_t index : new_dex_cache_roots_) {
- callback(reinterpret_cast<mirror::Object**>(&dex_caches_[index]), arg, 0, kRootVMInternal);
+ dex_caches_[index].VisitRoot(callback, arg, 0, kRootVMInternal);
}
}
if ((flags & kVisitRootFlagClearRootLog) != 0) {
@@ -1348,12 +1613,11 @@ void ClassLinker::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags f
}
}
VisitClassRoots(callback, arg, flags);
- callback(reinterpret_cast<mirror::Object**>(&array_iftable_), arg, 0, kRootVMInternal);
- DCHECK(array_iftable_ != nullptr);
+ array_iftable_.VisitRoot(callback, arg, 0, kRootVMInternal);
+ DCHECK(!array_iftable_.IsNull());
for (size_t i = 0; i < kFindArrayCacheSize; ++i) {
- if (find_array_class_cache_[i] != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&find_array_class_cache_[i]), arg, 0,
- kRootVMInternal);
+ if (!find_array_class_cache_[i].IsNull()) {
+ find_array_class_cache_[i].VisitRoot(callback, arg, 0, kRootVMInternal);
}
}
}
@@ -1363,9 +1627,8 @@ void ClassLinker::VisitClasses(ClassVisitor* visitor, void* arg) {
MoveImageClassesToClassTable();
}
WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
- mirror::Class** root = &it.second;
- mirror::Class* c = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root);
+ for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
+ mirror::Class* c = it.second.Read();
if (!visitor(c, arg)) {
return;
}
@@ -1660,23 +1923,26 @@ mirror::Class* ClassLinker::DefineClass(const char* descriptor,
// size when the class becomes resolved.
klass.Assign(AllocClass(self, SizeOfClassWithoutEmbeddedTables(dex_file, dex_class_def)));
}
- if (UNLIKELY(klass.Get() == NULL)) {
+ if (UNLIKELY(klass.Get() == nullptr)) {
CHECK(self->IsExceptionPending()); // Expect an OOME.
- return NULL;
+ return nullptr;
}
klass->SetDexCache(FindDexCache(dex_file));
LoadClass(dex_file, dex_class_def, klass, class_loader.Get());
- // Check for a pending exception during load
+ ObjectLock<mirror::Class> lock(self, klass);
if (self->IsExceptionPending()) {
- klass->SetStatus(mirror::Class::kStatusError, self);
- return NULL;
+    // An exception occurred during load, set status to erroneous while holding klass' lock in case
+ // notification is necessary.
+ if (!klass->IsErroneous()) {
+ klass->SetStatus(mirror::Class::kStatusError, self);
+ }
+ return nullptr;
}
- ObjectLock<mirror::Class> lock(self, klass);
klass->SetClinitThreadId(self->GetTid());
// Add the newly loaded class to the loaded classes table.
mirror::Class* existing = InsertClass(descriptor, klass.Get(), Hash(descriptor));
- if (existing != NULL) {
+ if (existing != nullptr) {
// We failed to insert because we raced with another thread. Calling EnsureResolved may cause
// this thread to block.
return EnsureResolved(self, descriptor, existing);
@@ -1686,8 +1952,10 @@ mirror::Class* ClassLinker::DefineClass(const char* descriptor,
CHECK(!klass->IsLoaded());
if (!LoadSuperAndInterfaces(klass, dex_file)) {
// Loading failed.
- klass->SetStatus(mirror::Class::kStatusError, self);
- return NULL;
+ if (!klass->IsErroneous()) {
+ klass->SetStatus(mirror::Class::kStatusError, self);
+ }
+ return nullptr;
}
CHECK(klass->IsLoaded());
// Link the class (if necessary)
@@ -1698,8 +1966,10 @@ mirror::Class* ClassLinker::DefineClass(const char* descriptor,
mirror::Class* new_class = nullptr;
if (!LinkClass(self, descriptor, klass, interfaces, &new_class)) {
// Linking failed.
- klass->SetStatus(mirror::Class::kStatusError, self);
- return NULL;
+ if (!klass->IsErroneous()) {
+ klass->SetStatus(mirror::Class::kStatusError, self);
+ }
+ return nullptr;
}
CHECK(new_class != nullptr) << descriptor;
CHECK(new_class->IsResolved()) << descriptor;
@@ -2284,7 +2554,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()))
<< dex_cache->GetLocation()->ToModifiedUtf8() << " " << dex_file.GetLocation();
- dex_caches_.push_back(dex_cache.Get());
+ dex_caches_.push_back(GcRoot<mirror::DexCache>(dex_cache.Get()));
dex_cache->SetDexFile(&dex_file);
if (log_new_dex_caches_roots_) {
// TODO: This is not safe if we can remove dex caches.
@@ -2401,7 +2671,14 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
Handle<mirror::Class> component_type(hs.NewHandle(FindClass(self, descriptor + 1, class_loader)));
if (component_type.Get() == nullptr) {
DCHECK(self->IsExceptionPending());
- return nullptr;
+ // We need to accept erroneous classes as component types.
+ component_type.Assign(LookupClass(descriptor + 1, class_loader.Get()));
+ if (component_type.Get() == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ } else {
+ self->ClearException();
+ }
}
if (UNLIKELY(component_type->IsPrimitiveVoid())) {
ThrowNoClassDefFoundError("Attempt to create array of void primitive type");
@@ -2494,8 +2771,7 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto
// Use the single, global copies of "interfaces" and "iftable"
// (remember not to free them for arrays).
{
- mirror::IfTable* array_iftable =
- ReadBarrier::BarrierForRoot<mirror::IfTable, kWithReadBarrier>(&array_iftable_);
+ mirror::IfTable* array_iftable = array_iftable_.Read();
CHECK(array_iftable != nullptr);
new_class->SetIfTable(array_iftable);
}
@@ -2579,9 +2855,9 @@ mirror::Class* ClassLinker::InsertClass(const char* descriptor, mirror::Class* k
}
}
VerifyObject(klass);
- class_table_.insert(std::make_pair(hash, klass));
+ class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
if (log_new_class_table_roots_) {
- new_class_roots_.push_back(std::make_pair(hash, klass));
+ new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
}
return NULL;
}
@@ -2603,8 +2879,8 @@ mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* k
for (auto it = class_table_.lower_bound(hash), end = class_table_.end(); it != end && it->first == hash;
++it) {
- mirror::Class* entry = it->second;
- if (entry == existing) {
+ mirror::Class* klass = it->second.Read();
+ if (klass == existing) {
class_table_.erase(it);
break;
}
@@ -2622,9 +2898,9 @@ mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* k
}
VerifyObject(klass);
- class_table_.insert(std::make_pair(hash, klass));
+ class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
if (log_new_class_table_roots_) {
- new_class_roots_.push_back(std::make_pair(hash, klass));
+ new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
}
return existing;
@@ -2636,8 +2912,7 @@ bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader*
for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
it != end && it->first == hash;
++it) {
- mirror::Class** root = &it->second;
- mirror::Class* klass = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root);
+ mirror::Class* klass = it->second.Read();
if (klass->GetClassLoader() == class_loader && klass->DescriptorEquals(descriptor)) {
class_table_.erase(it);
return true;
@@ -2681,14 +2956,12 @@ mirror::Class* ClassLinker::LookupClassFromTableLocked(const char* descriptor,
size_t hash) {
auto end = class_table_.end();
for (auto it = class_table_.lower_bound(hash); it != end && it->first == hash; ++it) {
- mirror::Class** root = &it->second;
- mirror::Class* klass = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root);
+ mirror::Class* klass = it->second.Read();
if (klass->GetClassLoader() == class_loader && klass->DescriptorEquals(descriptor)) {
if (kIsDebugBuild) {
// Check for duplicates in the table.
for (++it; it != end && it->first == hash; ++it) {
- mirror::Class** root2 = &it->second;
- mirror::Class* klass2 = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root2);
+ mirror::Class* klass2 = it->second.Read();
CHECK(!(klass2->GetClassLoader() == class_loader &&
klass2->DescriptorEquals(descriptor)))
<< PrettyClass(klass) << " " << klass << " " << klass->GetClassLoader() << " "
@@ -2732,9 +3005,9 @@ void ClassLinker::MoveImageClassesToClassTable() {
CHECK(existing == klass) << PrettyClassAndClassLoader(existing) << " != "
<< PrettyClassAndClassLoader(klass);
} else {
- class_table_.insert(std::make_pair(hash, klass));
+ class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
if (log_new_class_table_roots_) {
- new_class_roots_.push_back(std::make_pair(hash, klass));
+ new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
}
}
}
@@ -2780,8 +3053,7 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Clas
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
it != end && it->first == hash; ++it) {
- mirror::Class** root = &it->second;
- mirror::Class* klass = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root);
+ mirror::Class* klass = it->second.Read();
if (klass->DescriptorEquals(descriptor)) {
result.push_back(klass);
}
@@ -3213,22 +3485,21 @@ mirror::ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class,
DCHECK(proxy_class->IsProxyClass());
DCHECK(proxy_method->IsProxyMethod());
// Locate the dex cache of the original interface/Object
- mirror::DexCache* dex_cache = NULL;
+ mirror::DexCache* dex_cache = nullptr;
{
- mirror::ObjectArray<mirror::Class>* resolved_types = proxy_method->GetDexCacheResolvedTypes();
ReaderMutexLock mu(Thread::Current(), dex_lock_);
for (size_t i = 0; i != dex_caches_.size(); ++i) {
mirror::DexCache* a_dex_cache = GetDexCache(i);
- if (a_dex_cache->GetResolvedTypes() == resolved_types) {
+ if (proxy_method->HasSameDexCacheResolvedTypes(a_dex_cache->GetResolvedTypes())) {
dex_cache = a_dex_cache;
break;
}
}
}
- CHECK(dex_cache != NULL);
+ CHECK(dex_cache != nullptr);
uint32_t method_idx = proxy_method->GetDexMethodIndex();
mirror::ArtMethod* resolved_method = dex_cache->GetResolvedMethod(method_idx);
- CHECK(resolved_method != NULL);
+ CHECK(resolved_method != nullptr);
return resolved_method;
}
@@ -3241,14 +3512,19 @@ mirror::ArtMethod* ClassLinker::CreateProxyConstructor(Thread* self,
proxy_class->GetDirectMethods();
CHECK_EQ(proxy_direct_methods->GetLength(), 16);
mirror::ArtMethod* proxy_constructor = proxy_direct_methods->Get(2);
- // Clone the existing constructor of Proxy (our constructor would just invoke it so steal its
- // code_ too)
- mirror::ArtMethod* constructor =
- down_cast<mirror::ArtMethod*>(proxy_constructor->Clone(self));
- if (constructor == NULL) {
+ mirror::ArtMethod* constructor = down_cast<mirror::ArtMethod*>(proxy_constructor->Clone(self));
+ if (constructor == nullptr) {
CHECK(self->IsExceptionPending()); // OOME.
- return NULL;
+ return nullptr;
}
+ // Make the proxy constructor's code always point to the uninstrumented code. This avoids
+ // getting a method enter event for the proxy constructor as the proxy constructor doesn't
+ // have an activation.
+ bool have_portable_code;
+ constructor->SetEntryPointFromQuickCompiledCode(GetQuickOatCodeFor(proxy_constructor));
+ constructor->SetEntryPointFromPortableCompiledCode(GetPortableOatCodeFor(proxy_constructor,
+ &have_portable_code));
+
// Make this constructor public and fix the class to be our Proxy version
constructor->SetAccessFlags((constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic);
constructor->SetDeclaringClass(klass.Get());
@@ -3303,8 +3579,8 @@ static void CheckProxyMethod(Handle<mirror::ArtMethod> method, Handle<mirror::Ar
// The proxy method doesn't have its own dex cache or dex file and so it steals those of its
// interface prototype. The exception to this are Constructors and the Class of the Proxy itself.
CHECK_EQ(prototype->GetDexCacheStrings(), method->GetDexCacheStrings());
- CHECK_EQ(prototype->GetDexCacheResolvedMethods(), method->GetDexCacheResolvedMethods());
- CHECK_EQ(prototype->GetDexCacheResolvedTypes(), method->GetDexCacheResolvedTypes());
+ CHECK(prototype->HasSameDexCacheResolvedMethods(method.Get()));
+ CHECK(prototype->HasSameDexCacheResolvedTypes(method.Get()));
CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
MethodHelper mh(method);
@@ -3570,9 +3846,9 @@ bool ClassLinker::ValidateSuperClassDescriptors(Handle<mirror::Class> klass) {
MethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
if (klass->HasSuperClass() &&
klass->GetClassLoader() != klass->GetSuperClass()->GetClassLoader()) {
- for (int i = klass->GetSuperClass()->GetVTable()->GetLength() - 1; i >= 0; --i) {
- mh.ChangeMethod(klass->GetVTable()->GetWithoutChecks(i));
- super_mh.ChangeMethod(klass->GetSuperClass()->GetVTable()->GetWithoutChecks(i));
+ for (int i = klass->GetSuperClass()->GetVTableLength() - 1; i >= 0; --i) {
+ mh.ChangeMethod(klass->GetVTableEntry(i));
+ super_mh.ChangeMethod(klass->GetSuperClass()->GetVTableEntry(i));
if (mh.GetMethod() != super_mh.GetMethod() &&
!mh.HasSameSignatureWithDifferentClassLoaders(&super_mh)) {
ThrowLinkageError(klass.Get(),
@@ -3730,10 +4006,6 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror:
// This will notify waiters on new_class that saw the not yet resolved
// class in the class_table_ during EnsureResolved.
new_class_h->SetStatus(mirror::Class::kStatusResolved, self);
-
- // Only embedded imt should be used from this point.
- new_class_h->SetImTable(NULL);
- // TODO: remove vtable and only use embedded vtable.
}
return true;
}
@@ -3866,17 +4138,31 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) {
if (klass->HasSuperClass()) {
uint32_t max_count = klass->NumVirtualMethods() +
- klass->GetSuperClass()->GetVTable()->GetLength();
- size_t actual_count = klass->GetSuperClass()->GetVTable()->GetLength();
+ klass->GetSuperClass()->GetVTableLength();
+ size_t actual_count = klass->GetSuperClass()->GetVTableLength();
CHECK_LE(actual_count, max_count);
- // TODO: do not assign to the vtable field until it is fully constructed.
StackHandleScope<3> hs(self);
- Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
- hs.NewHandle(klass->GetSuperClass()->GetVTable()->CopyOf(self, max_count)));
- if (UNLIKELY(vtable.Get() == NULL)) {
- CHECK(self->IsExceptionPending()); // OOME.
- return false;
+ Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable;
+ mirror::Class* super_class = klass->GetSuperClass();
+ if (super_class->ShouldHaveEmbeddedImtAndVTable()) {
+ vtable = hs.NewHandle(AllocArtMethodArray(self, max_count));
+ if (UNLIKELY(vtable.Get() == nullptr)) {
+ CHECK(self->IsExceptionPending()); // OOME.
+ return false;
+ }
+ int len = super_class->GetVTableLength();
+ for (int i = 0; i < len; i++) {
+ vtable->Set<false>(i, super_class->GetVTableEntry(i));
+ }
+ } else {
+ CHECK(super_class->GetVTable() != nullptr) << PrettyClass(super_class);
+ vtable = hs.NewHandle(super_class->GetVTable()->CopyOf(self, max_count));
+ if (UNLIKELY(vtable.Get() == nullptr)) {
+ CHECK(self->IsExceptionPending()); // OOME.
+ return false;
+ }
}
+
// See if any of our virtual methods override the superclass.
MethodHelper local_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
MethodHelper super_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
@@ -4662,7 +4948,7 @@ mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t fi
bool is_static) {
DCHECK(dex_cache.Get() != nullptr);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
- if (resolved != NULL) {
+ if (resolved != nullptr) {
return resolved;
}
const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
@@ -4670,9 +4956,9 @@ mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t fi
StackHandleScope<1> hs(self);
Handle<mirror::Class> klass(
hs.NewHandle(ResolveType(dex_file, field_id.class_idx_, dex_cache, class_loader)));
- if (klass.Get() == NULL) {
+ if (klass.Get() == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
if (is_static) {
@@ -4681,7 +4967,7 @@ mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t fi
resolved = klass->FindInstanceField(dex_cache.Get(), field_idx);
}
- if (resolved == NULL) {
+ if (resolved == nullptr) {
const char* name = dex_file.GetFieldName(field_id);
const char* type = dex_file.GetFieldTypeDescriptor(field_id);
if (is_static) {
@@ -4689,7 +4975,7 @@ mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t fi
} else {
resolved = klass->FindInstanceField(name, type);
}
- if (resolved == NULL) {
+ if (resolved == nullptr) {
ThrowNoSuchFieldError(is_static ? "static " : "instance ", klass.Get(), type, name);
return NULL;
}
@@ -4704,7 +4990,7 @@ mirror::ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
Handle<mirror::ClassLoader> class_loader) {
DCHECK(dex_cache.Get() != nullptr);
mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
- if (resolved != NULL) {
+ if (resolved != nullptr) {
return resolved;
}
const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
@@ -4747,9 +5033,8 @@ void ClassLinker::DumpAllClasses(int flags) {
std::vector<mirror::Class*> all_classes;
{
ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- for (std::pair<const size_t, mirror::Class*>& it : class_table_) {
- mirror::Class** root = &it.second;
- mirror::Class* klass = ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(root);
+ for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
+ mirror::Class* klass = it.second.Read();
all_classes.push_back(klass);
}
}
@@ -4789,9 +5074,7 @@ void ClassLinker::SetClassRoot(ClassRoot class_root, mirror::Class* klass) {
DCHECK(klass != NULL);
DCHECK(klass->GetClassLoader() == NULL);
- mirror::ObjectArray<mirror::Class>* class_roots =
- ReadBarrier::BarrierForRoot<mirror::ObjectArray<mirror::Class>, kWithReadBarrier>(
- &class_roots_);
+ mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
DCHECK(class_roots != NULL);
DCHECK(class_roots->Get(class_root) == NULL);
class_roots->Set<false>(class_root, klass);
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 64bffc9bf3..8c0904203b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -24,11 +24,11 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "dex_file.h"
+#include "gc_root.h"
#include "gtest/gtest.h"
#include "jni.h"
#include "oat_file.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
namespace art {
@@ -245,9 +245,11 @@ class ClassLinker {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
- LOCKS_EXCLUDED(Locks::classlinker_classes_lock_);
+ LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
- LOCKS_EXCLUDED(dex_lock_);
+ LOCKS_EXCLUDED(dex_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::DexCache* FindDexCache(const DexFile& dex_file)
LOCKS_EXCLUDED(dex_lock_)
@@ -265,10 +267,6 @@ class ClassLinker {
std::string* error_msg)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- const OatFile* FindOatFileFromOatLocation(const std::string& location,
- std::string* error_msg)
- LOCKS_EXCLUDED(dex_lock_);
-
// Find or create the oat file holding dex_location. Then load all corresponding dex files
// (if multidex) into the given vector.
bool OpenDexFilesFromOat(const char* dex_location, const char* oat_location,
@@ -276,12 +274,18 @@ class ClassLinker {
std::vector<const DexFile*>* dex_files)
LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
+ // Returns true if the given oat file has the same image checksum as the image it is paired with.
+ static bool VerifyOatImageChecksum(const OatFile* oat_file, const InstructionSet instruction_set);
+ // Returns true if the oat file checksums match with the image and the offsets are such that it
+ // could be loaded with it.
+ static bool VerifyOatChecksums(const OatFile* oat_file, const InstructionSet instruction_set,
+ std::string* error_msg);
// Returns true if oat file contains the dex file with the given location and checksum.
- static bool VerifyOatFileChecksums(const OatFile* oat_file,
- const char* dex_location,
- uint32_t dex_location_checksum,
- InstructionSet instruction_set,
- std::string* error_msg);
+ static bool VerifyOatAndDexFileChecksums(const OatFile* oat_file,
+ const char* dex_location,
+ uint32_t dex_location_checksum,
+ InstructionSet instruction_set,
+ std::string* error_msg);
// TODO: replace this with multiple methods that allocate the correct managed type.
template <class T>
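The three checksum helpers declared in the hunk above split oat-file validation into an image-level check and a dex-entry-level check. The sketch below shows how a caller might chain them, assuming they remain reachable as static members of ClassLinker; the wrapper function name and error strings are illustrative and not part of this change.

  // Illustrative caller (not part of the patch): validate an oat file before trusting
  // one specific dex entry inside it.
  bool IsOatUsableFor(const OatFile* oat_file, const char* dex_location,
                      uint32_t dex_location_checksum, InstructionSet isa,
                      std::string* error_msg) {
    // Quick check: does the oat file agree with the paired boot image checksum?
    if (!ClassLinker::VerifyOatImageChecksum(oat_file, isa)) {
      *error_msg = "oat/image checksum mismatch";
      return false;
    }
    // Full check: image checksum plus load offsets that allow it to be used with the image.
    if (!ClassLinker::VerifyOatChecksums(oat_file, isa, error_msg)) {
      return false;
    }
    // Finally, verify the one dex file we actually intend to load from it.
    return ClassLinker::VerifyOatAndDexFileChecksums(oat_file, dex_location,
                                                     dex_location_checksum, isa, error_msg);
  }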
@@ -382,9 +386,7 @@ class ClassLinker {
mirror::ArtMethod* AllocArtMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ObjectArray<mirror::Class>* class_roots =
- ReadBarrier::BarrierForRoot<mirror::ObjectArray<mirror::Class>, kWithReadBarrier>(
- &class_roots_);
+ mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
DCHECK(class_roots != NULL);
return class_roots;
}
@@ -546,9 +548,31 @@ class ClassLinker {
const OatFile* FindOpenedOatFile(const char* oat_location, const char* dex_location,
const uint32_t* const dex_location_checksum)
LOCKS_EXCLUDED(dex_lock_);
+
+ // Will open the oat file directly without relocating, even if we could/should do relocation.
+ const OatFile* FindOatFileFromOatLocation(const std::string& oat_location,
+ std::string* error_msg)
+ LOCKS_EXCLUDED(dex_lock_);
+
const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
LOCKS_EXCLUDED(dex_lock_);
+ const OatFile* OpenOatFileFromDexLocation(const std::string& dex_location,
+ InstructionSet isa,
+ bool* already_opened,
+ bool* obsolete_file_cleanup_failed,
+ std::vector<std::string>* error_msg)
+ LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
+
+ const OatFile* PatchAndRetrieveOat(const std::string& input, const std::string& output,
+ const std::string& image_location, InstructionSet isa,
+ std::string* error_msg)
+ LOCKS_EXCLUDED(Locks::mutator_lock_);
+
+ bool CheckOatFile(const OatFile* oat_file, InstructionSet isa,
+ bool* checksum_verified, std::string* error_msg);
+ int32_t GetRequiredDelta(const OatFile* oat_file, InstructionSet isa);
+
// Note: will not register the oat file.
const OatFile* FindOatFileInOatLocationForDexFile(const char* dex_location,
uint32_t dex_location_checksum,
@@ -575,14 +599,10 @@ class ClassLinker {
bool* obsolete_file_cleanup_failed)
LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
- // Find a verify an oat file with the given dex file. Will return nullptr when the oat file
- // was not found or the dex file could not be verified.
- // Note: Does not register the oat file.
- const OatFile* LoadOatFileAndVerifyDexFile(const std::string& oat_file_location,
- const char* dex_location,
- std::string* error_msg,
- bool* open_failed)
- LOCKS_EXCLUDED(dex_lock_);
+  // Verify an oat file with the given dex file. Returns false when the dex file could not be
+  // verified; returns true otherwise.
+ bool VerifyOatWithDexFile(const OatFile* oat_file, const char* dex_location,
+ std::string* error_msg);
mirror::ArtMethod* CreateProxyConstructor(Thread* self, Handle<mirror::Class> klass,
mirror::Class* proxy_class)
@@ -595,18 +615,18 @@ class ClassLinker {
mutable ReaderWriterMutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<size_t> new_dex_cache_roots_ GUARDED_BY(dex_lock_);
- std::vector<mirror::DexCache*> dex_caches_ GUARDED_BY(dex_lock_);
+ std::vector<GcRoot<mirror::DexCache>> dex_caches_ GUARDED_BY(dex_lock_);
std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
// multimap from a string hash code of a class descriptor to
// mirror::Class* instances. Results should be compared for a matching
// Class::descriptor_ and Class::class_loader_.
- typedef std::multimap<size_t, mirror::Class*> Table;
+ typedef std::multimap<size_t, GcRoot<mirror::Class>> Table;
// This contains strong roots. To enable concurrent root scanning of
// the class table, be careful to use a read barrier when accessing this.
Table class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
- std::vector<std::pair<size_t, mirror::Class*>> new_class_roots_;
+ std::vector<std::pair<size_t, GcRoot<mirror::Class>>> new_class_roots_;
// Do we need to search dex caches to find image classes?
bool dex_cache_image_class_lookup_required_;
@@ -635,7 +655,7 @@ class ClassLinker {
// retire a class, the version of the class in the table is returned and this may differ from
// the class passed in.
mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
- __attribute__((warn_unused_result)) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ WARN_UNUSED SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -680,7 +700,7 @@ class ClassLinker {
kJavaLangStackTraceElementArrayClass,
kClassRootsMax,
};
- mirror::ObjectArray<mirror::Class>* class_roots_;
+ GcRoot<mirror::ObjectArray<mirror::Class>> class_roots_;
mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -696,12 +716,12 @@ class ClassLinker {
}
// The interface table used by all arrays.
- mirror::IfTable* array_iftable_;
+ GcRoot<mirror::IfTable> array_iftable_;
// A cache of the last FindArrayClass results. The cache serves to avoid creating array class
// descriptors for the sake of performing FindClass.
static constexpr size_t kFindArrayCacheSize = 16;
- mirror::Class* find_array_class_cache_[kFindArrayCacheSize];
+ GcRoot<mirror::Class> find_array_class_cache_[kFindArrayCacheSize];
size_t find_array_class_cache_next_victim_;
bool init_done_;
@@ -720,6 +740,8 @@ class ClassLinker {
const void* quick_to_interpreter_bridge_trampoline_;
friend class ImageWriter; // for GetClassRoots
+ friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
+ friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
FRIEND_TEST(mirror::DexCacheTest, Open);
FRIEND_TEST(ExceptionTest, FindExceptionHandler);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 21fe0067ed..8d9326583d 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -91,7 +91,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_EQ(0U, primitive->NumInstanceFields());
EXPECT_EQ(0U, primitive->NumStaticFields());
EXPECT_EQ(0U, primitive->NumDirectInterfaces());
- EXPECT_TRUE(primitive->GetVTable() == NULL);
+ EXPECT_FALSE(primitive->HasVTable());
EXPECT_EQ(0, primitive->GetIfTableCount());
EXPECT_TRUE(primitive->GetIfTable() == NULL);
EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract, primitive->GetAccessFlags());
@@ -143,7 +143,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_EQ(0U, array->NumInstanceFields());
EXPECT_EQ(0U, array->NumStaticFields());
EXPECT_EQ(2U, array->NumDirectInterfaces());
- EXPECT_TRUE(array->GetVTable() != NULL);
+ EXPECT_TRUE(array->ShouldHaveEmbeddedImtAndVTable());
EXPECT_EQ(2, array->GetIfTableCount());
ASSERT_TRUE(array->GetIfTable() != NULL);
mirror::Class* direct_interface0 = mirror::Class::GetDirectInterface(self, array, 0);
@@ -156,20 +156,20 @@ class ClassLinkerTest : public CommonRuntimeTest {
}
void AssertMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- EXPECT_TRUE(method != NULL);
- EXPECT_TRUE(method->GetClass() != NULL);
- EXPECT_TRUE(method->GetName() != NULL);
+ EXPECT_TRUE(method != nullptr);
+ EXPECT_TRUE(method->GetClass() != nullptr);
+ EXPECT_TRUE(method->GetName() != nullptr);
EXPECT_TRUE(method->GetSignature() != Signature::NoSignature());
- EXPECT_TRUE(method->GetDexCacheStrings() != NULL);
- EXPECT_TRUE(method->GetDexCacheResolvedMethods() != NULL);
- EXPECT_TRUE(method->GetDexCacheResolvedTypes() != NULL);
+ EXPECT_TRUE(method->GetDexCacheStrings() != nullptr);
+ EXPECT_TRUE(method->HasDexCacheResolvedMethods());
+ EXPECT_TRUE(method->HasDexCacheResolvedTypes());
EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetStrings(),
method->GetDexCacheStrings());
- EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetResolvedMethods(),
- method->GetDexCacheResolvedMethods());
- EXPECT_EQ(method->GetDeclaringClass()->GetDexCache()->GetResolvedTypes(),
- method->GetDexCacheResolvedTypes());
+ EXPECT_TRUE(method->HasSameDexCacheResolvedMethods(
+ method->GetDeclaringClass()->GetDexCache()->GetResolvedMethods()));
+ EXPECT_TRUE(method->HasSameDexCacheResolvedTypes(
+ method->GetDeclaringClass()->GetDexCache()->GetResolvedTypes()));
}
void AssertField(mirror::Class* klass, mirror::ArtField* field)
@@ -216,7 +216,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
EXPECT_NE(0U, klass->NumDirectMethods());
}
}
- EXPECT_EQ(klass->IsInterface(), klass->GetVTable() == NULL);
+ EXPECT_EQ(klass->IsInterface(), !klass->HasVTable());
mirror::IfTable* iftable = klass->GetIfTable();
for (int i = 0; i < klass->GetIfTableCount(); i++) {
mirror::Class* interface = iftable->GetInterface(i);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index f47f13d4b5..ab4a2bbdf7 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -22,6 +22,7 @@
#include <ScopedLocalRef.h>
#include "../../external/icu/icu4c/source/common/unicode/uvernum.h"
+#include "base/macros.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
@@ -29,6 +30,7 @@
#include "class_linker.h"
#include "compiler_callbacks.h"
#include "dex_file.h"
+#include "gc_root-inl.h"
#include "gc/heap.h"
#include "gtest/gtest.h"
#include "jni_internal.h"
@@ -93,7 +95,7 @@ void ScratchFile::Unlink() {
CommonRuntimeTest::CommonRuntimeTest() {}
CommonRuntimeTest::~CommonRuntimeTest() {}
-void CommonRuntimeTest::SetEnvironmentVariables(std::string& android_data) {
+void CommonRuntimeTest::SetUpAndroidRoot() {
if (IsHost()) {
// $ANDROID_ROOT is set on the device, but not necessarily on the host.
// But it needs to be set so that icu4c can find its locale data.
@@ -133,15 +135,36 @@ void CommonRuntimeTest::SetEnvironmentVariables(std::string& android_data) {
setenv("ANDROID_HOST_OUT", getenv("ANDROID_ROOT"), 1);
}
}
+}
+void CommonRuntimeTest::SetUpAndroidData(std::string& android_data) {
  // On target, cannot use /mnt/sdcard because it is mounted noexec, so use subdir of dalvik-cache
- android_data = (IsHost() ? "/tmp/art-data-XXXXXX" : "/data/dalvik-cache/art-data-XXXXXX");
+ if (IsHost()) {
+ const char* tmpdir = getenv("TMPDIR");
+ if (tmpdir != nullptr && tmpdir[0] != 0) {
+ android_data = tmpdir;
+ } else {
+ android_data = "/tmp";
+ }
+ } else {
+ android_data = "/data/dalvik-cache";
+ }
+ android_data += "/art-data-XXXXXX";
if (mkdtemp(&android_data[0]) == nullptr) {
PLOG(FATAL) << "mkdtemp(\"" << &android_data[0] << "\") failed";
}
setenv("ANDROID_DATA", android_data.c_str(), 1);
}
+void CommonRuntimeTest::TearDownAndroidData(const std::string& android_data, bool fail_on_error) {
+ if (fail_on_error) {
+ ASSERT_EQ(rmdir(android_data.c_str()), 0);
+ } else {
+ rmdir(android_data.c_str());
+ }
+}
+
+
const DexFile* CommonRuntimeTest::LoadExpectSingleDexFile(const char* location) {
std::vector<const DexFile*> dex_files;
std::string error_msg;
@@ -155,7 +178,8 @@ const DexFile* CommonRuntimeTest::LoadExpectSingleDexFile(const char* location)
}
void CommonRuntimeTest::SetUp() {
- SetEnvironmentVariables(android_data_);
+ SetUpAndroidRoot();
+ SetUpAndroidData(android_data_);
dalvik_cache_.append(android_data_.c_str());
dalvik_cache_.append("/dalvik-cache");
int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
@@ -199,27 +223,40 @@ void CommonRuntimeTest::SetUp() {
runtime_->GetHeap()->VerifyHeap(); // Check for heap corruption before the test
}
-void CommonRuntimeTest::TearDown() {
- const char* android_data = getenv("ANDROID_DATA");
- ASSERT_TRUE(android_data != nullptr);
- DIR* dir = opendir(dalvik_cache_.c_str());
+void CommonRuntimeTest::ClearDirectory(const char* dirpath) {
+ ASSERT_TRUE(dirpath != nullptr);
+ DIR* dir = opendir(dirpath);
ASSERT_TRUE(dir != nullptr);
dirent* e;
+ struct stat s;
while ((e = readdir(dir)) != nullptr) {
if ((strcmp(e->d_name, ".") == 0) || (strcmp(e->d_name, "..") == 0)) {
continue;
}
- std::string filename(dalvik_cache_);
+ std::string filename(dirpath);
filename.push_back('/');
filename.append(e->d_name);
- int unlink_result = unlink(filename.c_str());
- ASSERT_EQ(0, unlink_result);
+ int stat_result = lstat(filename.c_str(), &s);
+ ASSERT_EQ(0, stat_result) << "unable to stat " << filename;
+ if (S_ISDIR(s.st_mode)) {
+ ClearDirectory(filename.c_str());
+ int rmdir_result = rmdir(filename.c_str());
+ ASSERT_EQ(0, rmdir_result) << filename;
+ } else {
+ int unlink_result = unlink(filename.c_str());
+ ASSERT_EQ(0, unlink_result) << filename;
+ }
}
closedir(dir);
+}
+
+void CommonRuntimeTest::TearDown() {
+ const char* android_data = getenv("ANDROID_DATA");
+ ASSERT_TRUE(android_data != nullptr);
+ ClearDirectory(dalvik_cache_.c_str());
int rmdir_cache_result = rmdir(dalvik_cache_.c_str());
ASSERT_EQ(0, rmdir_cache_result);
- int rmdir_data_result = rmdir(android_data_.c_str());
- ASSERT_EQ(0, rmdir_data_result);
+ TearDownAndroidData(android_data_, true);
// icu4c has a fixed 10-element array "gCommonICUDataArray".
// If we run > 10 tests, we fill that array and u_setCommonData fails.
@@ -248,6 +285,19 @@ std::string CommonRuntimeTest::GetDexFileName(const std::string& jar_prefix) {
return StringPrintf("%s/framework/%s.jar", GetAndroidRoot(), jar_prefix.c_str());
}
+std::string CommonRuntimeTest::GetLibCoreOatFileName() {
+ return GetOatFileName("core");
+}
+
+std::string CommonRuntimeTest::GetOatFileName(const std::string& oat_prefix) {
+ if (IsHost()) {
+ const char* host_dir = getenv("ANDROID_HOST_OUT");
+ CHECK(host_dir != nullptr);
+ return StringPrintf("%s/framework/%s.art", host_dir, oat_prefix.c_str());
+ }
+ return StringPrintf("%s/framework/%s.art", GetAndroidRoot(), oat_prefix.c_str());
+}
+
std::string CommonRuntimeTest::GetTestAndroidRoot() {
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
@@ -257,6 +307,17 @@ std::string CommonRuntimeTest::GetTestAndroidRoot() {
return GetAndroidRoot();
}
+// Check that for target builds we have ART_TARGET_NATIVETEST_DIR set.
+#ifdef ART_TARGET
+#ifndef ART_TARGET_NATIVETEST_DIR
+#error "ART_TARGET_NATIVETEST_DIR not set."
+#endif
+// Wrap it as a string literal.
+#define ART_TARGET_NATIVETEST_DIR_STRING STRINGIFY(ART_TARGET_NATIVETEST_DIR) "/"
+#else
+#define ART_TARGET_NATIVETEST_DIR_STRING ""
+#endif
+
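Since the macro above relies on two-level stringification of a build flag, a standalone illustration may help; the directory value is hypothetical and the stringify helpers are re-derived locally rather than taken from base/macros.h.

  #include <cstdio>

  #define EXAMPLE_STRINGIFY_HELPER(x) #x
  #define EXAMPLE_STRINGIFY(x) EXAMPLE_STRINGIFY_HELPER(x)  // same two-step trick as STRINGIFY
  // Stands in for a build flag such as -DART_TARGET_NATIVETEST_DIR=/data/nativetest/art.
  #define EXAMPLE_NATIVETEST_DIR /data/nativetest/art

  int main() {
    // Prints "/data/nativetest/art/": the stringified directory plus the appended slash,
    // which is how ART_TARGET_NATIVETEST_DIR_STRING is consumed by OpenTestDexFiles below.
    std::printf("%s\n", EXAMPLE_STRINGIFY(EXAMPLE_NATIVETEST_DIR) "/");
    return 0;
  }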
std::vector<const DexFile*> CommonRuntimeTest::OpenTestDexFiles(const char* name) {
CHECK(name != nullptr);
std::string filename;
@@ -264,7 +325,7 @@ std::vector<const DexFile*> CommonRuntimeTest::OpenTestDexFiles(const char* name
filename += getenv("ANDROID_HOST_OUT");
filename += "/framework/";
} else {
- filename += "/data/nativetest/art/";
+ filename += ART_TARGET_NATIVETEST_DIR_STRING;
}
filename += "art-gtest-";
filename += name;
@@ -293,23 +354,22 @@ jobject CommonRuntimeTest::LoadDex(const char* dex_name) {
for (const DexFile* dex_file : dex_files) {
class_linker_->RegisterDexFile(*dex_file);
}
- ScopedObjectAccessUnchecked soa(Thread::Current());
- ScopedLocalRef<jobject> class_loader_local(soa.Env(),
- soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
- jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
- soa.Self()->SetClassLoaderOverride(soa.Decode<mirror::ClassLoader*>(class_loader_local.get()));
+ Thread* self = Thread::Current();
+ JNIEnvExt* env = self->GetJniEnv();
+ ScopedLocalRef<jobject> class_loader_local(env,
+ env->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
+ jobject class_loader = env->NewGlobalRef(class_loader_local.get());
+ self->SetClassLoaderOverride(class_loader_local.get());
Runtime::Current()->SetCompileTimeClassPath(class_loader, dex_files);
return class_loader;
}
CheckJniAbortCatcher::CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
- vm_->check_jni_abort_hook = Hook;
- vm_->check_jni_abort_hook_data = &actual_;
+ vm_->SetCheckJniAbortHook(Hook, &actual_);
}
CheckJniAbortCatcher::~CheckJniAbortCatcher() {
- vm_->check_jni_abort_hook = nullptr;
- vm_->check_jni_abort_hook_data = nullptr;
+ vm_->SetCheckJniAbortHook(nullptr, nullptr);
EXPECT_TRUE(actual_.empty()) << actual_;
}
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index d0450317a3..12c1241270 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -64,7 +64,13 @@ class ScratchFile {
class CommonRuntimeTest : public testing::Test {
public:
- static void SetEnvironmentVariables(std::string& android_data);
+ static void SetUpAndroidRoot();
+
+ // Note: setting up ANDROID_DATA may create a temporary directory. If this is used in a
+ // non-derived class, be sure to also call the corresponding tear-down below.
+ static void SetUpAndroidData(std::string& android_data);
+
+ static void TearDownAndroidData(const std::string& android_data, bool fail_on_error);
CommonRuntimeTest();
~CommonRuntimeTest();
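A minimal sketch of the pairing the note above asks for, as it might look in a standalone gtest that is not derived from CommonRuntimeTest; the test name and body are illustrative only.

  TEST(ScratchAndroidDataExample, CreatesAndRemovesDirectory) {
    std::string android_data;
    CommonRuntimeTest::SetUpAndroidRoot();
    CommonRuntimeTest::SetUpAndroidData(android_data);  // creates a scratch ANDROID_DATA dir
    // ... exercise code that reads ANDROID_DATA ...
    CommonRuntimeTest::TearDownAndroidData(android_data, /* fail_on_error */ true);
  }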
@@ -81,12 +87,22 @@ class CommonRuntimeTest : public testing::Test {
  // Allow subclasses such as CommonCompilerTest to add extra options.
virtual void SetUpRuntimeOptions(RuntimeOptions* options) {}
+ void ClearDirectory(const char* dirpath);
+
virtual void TearDown();
+ // Gets the path of the libcore dex file.
std::string GetLibCoreDexFileName();
+ // Gets the path of the specified dex file for host or target.
std::string GetDexFileName(const std::string& jar_prefix);
+ // Gets the path of the libcore oat file.
+ std::string GetLibCoreOatFileName();
+
+ // Gets the path of the specified oat file for host or target.
+ std::string GetOatFileName(const std::string& oat_prefix);
+
std::string GetTestAndroidRoot();
std::vector<const DexFile*> OpenTestDexFiles(const char* name)
@@ -122,7 +138,7 @@ class CheckJniAbortCatcher {
private:
static void Hook(void* data, const std::string& reason);
- JavaVMExt* vm_;
+ JavaVMExt* const vm_;
std::string actual_;
DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index b07043f5d7..d1a68615b1 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -36,6 +36,10 @@ class CompilerCallbacks {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
virtual void ClassRejected(ClassReference ref) = 0;
+  // Returns true if we should attempt to relocate to a random base address, provided we have not
+  // already done so. Returns false if relocating in this way would be problematic.
+ virtual bool IsRelocationPossible() = 0;
+
protected:
CompilerCallbacks() { }
};
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 4cf4c099b2..bc13379f14 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2250,15 +2250,18 @@ void Dbg::ResumeVM() {
}
JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
- ScopedLocalRef<jobject> peer(Thread::Current()->GetJniEnv(), NULL);
+ Thread* self = Thread::Current();
+ ScopedLocalRef<jobject> peer(self->GetJniEnv(), NULL);
{
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccess soa(self);
peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
}
if (peer.get() == NULL) {
return JDWP::ERR_THREAD_NOT_ALIVE;
}
- // Suspend thread to build stack trace.
+ // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
+ // trying to suspend this one.
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
bool timed_out;
Thread* thread = ThreadList::SuspendThreadByPeer(peer.get(), request_suspension, true,
&timed_out);
@@ -2450,12 +2453,9 @@ JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame
}
case JDWP::JT_DOUBLE: {
CHECK_EQ(width_, 8U);
- uint32_t lo;
- uint32_t hi;
- if (GetVReg(m, reg, kDoubleLoVReg, &lo) && GetVReg(m, reg + 1, kDoubleHiVReg, &hi)) {
- uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo;
- VLOG(jdwp) << "get double local " << reg << " = "
- << hi << ":" << lo << " = " << longVal;
+ uint64_t longVal;
+ if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
+ VLOG(jdwp) << "get double local " << reg << " = " << longVal;
JDWP::Set8BE(buf_+1, longVal);
} else {
VLOG(jdwp) << "failed to get double local " << reg;
@@ -2465,12 +2465,9 @@ JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame
}
case JDWP::JT_LONG: {
CHECK_EQ(width_, 8U);
- uint32_t lo;
- uint32_t hi;
- if (GetVReg(m, reg, kLongLoVReg, &lo) && GetVReg(m, reg + 1, kLongHiVReg, &hi)) {
- uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo;
- VLOG(jdwp) << "get long local " << reg << " = "
- << hi << ":" << lo << " = " << longVal;
+ uint64_t longVal;
+ if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
+ VLOG(jdwp) << "get long local " << reg << " = " << longVal;
JDWP::Set8BE(buf_+1, longVal);
} else {
VLOG(jdwp) << "failed to get long local " << reg;
@@ -2593,28 +2590,18 @@ JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame
}
case JDWP::JT_DOUBLE: {
CHECK_EQ(width_, 8U);
- const uint32_t lo = static_cast<uint32_t>(value_);
- const uint32_t hi = static_cast<uint32_t>(value_ >> 32);
- bool success = SetVReg(m, reg, lo, kDoubleLoVReg);
- success &= SetVReg(m, reg + 1, hi, kDoubleHiVReg);
+ bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
if (!success) {
- uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo;
- VLOG(jdwp) << "failed to set double local " << reg << " = "
- << hi << ":" << lo << " = " << longVal;
+ VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
error_ = kFailureErrorCode;
}
break;
}
case JDWP::JT_LONG: {
CHECK_EQ(width_, 8U);
- const uint32_t lo = static_cast<uint32_t>(value_);
- const uint32_t hi = static_cast<uint32_t>(value_ >> 32);
- bool success = SetVReg(m, reg, lo, kLongLoVReg);
- success &= SetVReg(m, reg + 1, hi, kLongHiVReg);
+ bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
if (!success) {
- uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo;
- VLOG(jdwp) << "failed to set double local " << reg << " = "
- << hi << ":" << lo << " = " << longVal;
+ VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
error_ = kFailureErrorCode;
}
break;
@@ -3048,7 +3035,7 @@ static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
// Sanity checks all existing breakpoints on the same method.
static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
for (const Breakpoint& breakpoint : gBreakpoints) {
CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
@@ -3144,7 +3131,7 @@ class ScopedThreadSuspension {
ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
- thread_(NULL),
+ thread_(nullptr),
error_(JDWP::ERR_NONE),
self_suspend_(false),
other_suspend_(false) {
@@ -3160,10 +3147,15 @@ class ScopedThreadSuspension {
soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
jobject thread_peer = gRegistry->GetJObject(thread_id);
bool timed_out;
- Thread* suspended_thread = ThreadList::SuspendThreadByPeer(thread_peer, true, true,
- &timed_out);
+ Thread* suspended_thread;
+ {
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
+ suspended_thread = ThreadList::SuspendThreadByPeer(thread_peer, true, true,
+ &timed_out);
+ }
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
- if (suspended_thread == NULL) {
+ if (suspended_thread == nullptr) {
// Thread terminated from under us while suspending.
error_ = JDWP::ERR_INVALID_THREAD;
} else {
@@ -3957,7 +3949,8 @@ class HeapChunkContext {
HeapChunkContext(bool merge, bool native)
: buf_(16384 - 16),
type_(0),
- merge_(merge) {
+ merge_(merge),
+ chunk_overhead_(0) {
Reset();
if (native) {
type_ = CHUNK_TYPE("NHSG");
@@ -3972,6 +3965,14 @@ class HeapChunkContext {
}
}
+ void SetChunkOverhead(size_t chunk_overhead) {
+ chunk_overhead_ = chunk_overhead;
+ }
+
+ void ResetStartOfNextChunk() {
+ startOfNextMemoryChunk_ = nullptr;
+ }
+
void EnsureHeader(const void* chunk_ptr) {
if (!needHeader_) {
return;
@@ -4016,7 +4017,7 @@ class HeapChunkContext {
void Reset() {
p_ = &buf_[0];
- startOfNextMemoryChunk_ = NULL;
+ ResetStartOfNextChunk();
totalAllocationUnits_ = 0;
needHeader_ = true;
pieceLenField_ = NULL;
@@ -4043,6 +4044,8 @@ class HeapChunkContext {
*/
bool native = type_ == CHUNK_TYPE("NHSG");
+ // TODO: I'm not sure using start of next chunk works well with multiple spaces. We shouldn't
+  // count gaps in between spaces as free memory.
if (startOfNextMemoryChunk_ != NULL) {
// Transmit any pending free memory. Native free memory of
// over kMaxFreeLen could be because of the use of mmaps, so
@@ -4069,11 +4072,8 @@ class HeapChunkContext {
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
// If it's the same, we should combine them.
uint8_t state = ExamineObject(obj, native);
- // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
- // allocation then the first sizeof(size_t) may belong to it.
- const size_t dlMallocOverhead = sizeof(size_t);
- AppendChunk(state, start, used_bytes + dlMallocOverhead);
- startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + dlMallocOverhead;
+ AppendChunk(state, start, used_bytes + chunk_overhead_);
+ startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
}
void AppendChunk(uint8_t state, void* ptr, size_t length)
@@ -4162,10 +4162,18 @@ class HeapChunkContext {
uint32_t type_;
bool merge_;
bool needHeader_;
+ size_t chunk_overhead_;
DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};
+static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
+ HeapChunkContext::HeapChunkCallback(
+ obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
+}
+
void Dbg::DdmSendHeapSegments(bool native) {
Dbg::HpsgWhen when;
Dbg::HpsgWhat what;
@@ -4206,14 +4214,27 @@ void Dbg::DdmSendHeapSegments(bool native) {
#endif
} else {
gc::Heap* heap = Runtime::Current()->GetHeap();
- const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
- typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
- for (It cur = spaces.begin(), end = spaces.end(); cur != end; ++cur) {
- if ((*cur)->IsMallocSpace()) {
- (*cur)->AsMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ for (const auto& space : heap->GetContinuousSpaces()) {
+ if (space->IsDlMallocSpace()) {
+ // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
+ // allocation then the first sizeof(size_t) may belong to it.
+ context.SetChunkOverhead(sizeof(size_t));
+ space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ } else if (space->IsRosAllocSpace()) {
+ context.SetChunkOverhead(0);
+ space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
+ } else if (space->IsBumpPointerSpace()) {
+ context.SetChunkOverhead(0);
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
+ space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
+ } else {
+ UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
}
+ context.ResetStartOfNextChunk();
}
// Walk the large objects, these are not in the AllocSpace.
+ context.SetChunkOverhead(0);
heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
}
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index e5bc7c8c86..e1a77714b9 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -951,6 +951,38 @@ std::pair<const char*, const char*> DexFile::SplitMultiDexLocation(
return std::make_pair(tmp, colon_ptr + 1);
}
+std::string DexFile::GetMultiDexClassesDexName(size_t number, const char* dex_location) {
+ if (number == 0) {
+ return dex_location;
+ } else {
+ return StringPrintf("%s" kMultiDexSeparatorString "classes%zu.dex", dex_location, number + 1);
+ }
+}
+
+std::string DexFile::GetDexCanonicalLocation(const char* dex_location) {
+ CHECK_NE(dex_location, static_cast<const char*>(nullptr));
+ char* path = nullptr;
+ if (!IsMultiDexLocation(dex_location)) {
+ path = realpath(dex_location, nullptr);
+ } else {
+ std::pair<const char*, const char*> pair = DexFile::SplitMultiDexLocation(dex_location);
+ const char* dex_real_location(realpath(pair.first, nullptr));
+    delete[] pair.first;
+ if (dex_real_location != nullptr) {
+ int length = strlen(dex_real_location) + strlen(pair.second) + strlen(kMultiDexSeparatorString) + 1;
+ char* multidex_canonical_location = reinterpret_cast<char*>(malloc(sizeof(char) * length));
+ snprintf(multidex_canonical_location, length, "%s" kMultiDexSeparatorString "%s", dex_real_location, pair.second);
+ free(const_cast<char*>(dex_real_location));
+ path = multidex_canonical_location;
+ }
+ }
+
+ // If realpath fails then we just copy the argument.
+ std::string result(path == nullptr ? dex_location : path);
+ free(path);
+ return result;
+}
+
std::ostream& operator<<(std::ostream& os, const DexFile& dex_file) {
os << StringPrintf("[DexFile: %s dex-checksum=%08x location-checksum=%08x %p-%p]",
dex_file.GetLocation().c_str(),
@@ -958,6 +990,7 @@ std::ostream& operator<<(std::ostream& os, const DexFile& dex_file) {
dex_file.Begin(), dex_file.Begin() + dex_file.Size());
return os;
}
+
std::string Signature::ToString() const {
if (dex_file_ == nullptr) {
CHECK(proto_id_ == nullptr);
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 04f1cc1417..2794af646a 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -382,6 +382,20 @@ class DexFile {
return location_;
}
+ // For normal dex files, location and base location coincide. If a dex file is part of a multidex
+ // archive, the base location is the name of the originating jar/apk, stripped of any internal
+ // classes*.dex path.
+ const std::string GetBaseLocation() const {
+ if (IsMultiDexLocation(location_.c_str())) {
+ std::pair<const char*, const char*> pair = SplitMultiDexLocation(location_.c_str());
+ std::string res(pair.first);
+ delete[] pair.first;
+ return res;
+ } else {
+ return location_;
+ }
+ }
+
// For DexFiles directly from .dex files, this is the checksum from the DexFile::Header.
// For DexFiles opened from a zip files, this will be the ZipEntry CRC32 of classes.dex.
uint32_t GetLocationChecksum() const {
@@ -827,6 +841,23 @@ class DexFile {
return size_;
}
+ static std::string GetMultiDexClassesDexName(size_t number, const char* dex_location);
+
+ // Returns the canonical form of the given dex location.
+ //
+ // There are different flavors of "dex locations" as follows:
+ // the file name of a dex file:
+ // The actual file path that the dex file has on disk.
+ // dex_location:
+ // This acts as a key for the class linker to know which dex file to load.
+ // It may correspond to either an old odex file or a particular dex file
+ // inside an oat file. In the first case it will also match the file name
+ // of the dex file. In the second case (oat) it will include the file name
+ // and possibly some multidex annotation to uniquely identify it.
+ // canonical_dex_location:
+  //     The dex_location where its file name part has been made canonical.
+ static std::string GetDexCanonicalLocation(const char* dex_location);
+
private:
// Opens a .dex file
static const DexFile* OpenFile(int fd, const char* location, bool verify, std::string* error_msg);
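A concrete reading of the dex-location flavors documented above, using hypothetical paths; the symlink case mirrors what the new GetDexCanonicalLocation test below exercises.

  // Hypothetical values, assuming /system/framework/foo.jar is a symlink to foo_real.jar
  // and the entry of interest is the second classes dex inside it:
  //   dex_location (class linker key) : "/system/framework/foo.jar:classes2.dex"
  //   base location                   : "/system/framework/foo.jar"
  //   canonical_dex_location          : "/system/framework/foo_real.jar:classes2.dex"
  //   (GetDexCanonicalLocation resolves only the file name part via realpath and keeps
  //    the multidex suffix intact.)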
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 284aa89d99..330d045b42 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -345,4 +345,34 @@ TEST_F(DexFileTest, FindFieldId) {
}
}
+TEST_F(DexFileTest, GetMultiDexClassesDexName) {
+ std::string dex_location_str = "/system/app/framework.jar";
+ const char* dex_location = dex_location_str.c_str();
+ ASSERT_EQ("/system/app/framework.jar", DexFile::GetMultiDexClassesDexName(0, dex_location));
+ ASSERT_EQ("/system/app/framework.jar:classes2.dex", DexFile::GetMultiDexClassesDexName(1, dex_location));
+ ASSERT_EQ("/system/app/framework.jar:classes101.dex", DexFile::GetMultiDexClassesDexName(100, dex_location));
+}
+
+TEST_F(DexFileTest, GetDexCanonicalLocation) {
+ ScratchFile file;
+ char* dex_location_real = realpath(file.GetFilename().c_str(), nullptr);
+ std::string dex_location(dex_location_real);
+
+ ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location.c_str()));
+ std::string multidex_location = DexFile::GetMultiDexClassesDexName(1, dex_location.c_str());
+ ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location.c_str()));
+
+ std::string dex_location_sym = dex_location + "symlink";
+ ASSERT_EQ(0, symlink(dex_location.c_str(), dex_location_sym.c_str()));
+
+ ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location_sym.c_str()));
+
+ std::string multidex_location_sym = DexFile::GetMultiDexClassesDexName(1, dex_location_sym.c_str());
+ ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location_sym.c_str()));
+
+ ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
+
+ free(dex_location_real);
+}
+
} // namespace art
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 291e2d0756..7e6bdfa4d4 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -170,13 +170,29 @@ bool DexFileVerifier::CheckShortyDescriptorMatch(char shorty_char, const char* d
return true;
}
-bool DexFileVerifier::CheckPointerRange(const void* start, const void* end, const char* label) {
+bool DexFileVerifier::CheckListSize(const void* start, size_t count, size_t elem_size,
+ const char* label) {
+ // Check that size is not 0.
+ CHECK_NE(elem_size, 0U);
+
const byte* range_start = reinterpret_cast<const byte*>(start);
- const byte* range_end = reinterpret_cast<const byte*>(end);
const byte* file_start = reinterpret_cast<const byte*>(begin_);
+
+ // Check for overflow.
+ uintptr_t max = 0 - 1;
+ size_t available_bytes_till_end_of_mem = max - reinterpret_cast<uintptr_t>(start);
+ size_t max_count = available_bytes_till_end_of_mem / elem_size;
+ if (max_count < count) {
+ ErrorStringPrintf("Overflow in range for %s: %zx for %zu@%zu", label,
+ static_cast<size_t>(range_start - file_start),
+ count, elem_size);
+ return false;
+ }
+
+ const byte* range_end = range_start + count * elem_size;
const byte* file_end = file_start + size_;
- if (UNLIKELY((range_start < file_start) || (range_start > file_end) ||
- (range_end < file_start) || (range_end > file_end))) {
+ if (UNLIKELY((range_start < file_start) || (range_end > file_end))) {
+ // Note: these two tests are enough as we make sure above that there's no overflow.
ErrorStringPrintf("Bad range for %s: %zx to %zx", label,
static_cast<size_t>(range_start - file_start),
static_cast<size_t>(range_end - file_start));
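The overflow guard added above bounds count by the number of elem_size-sized slots that fit between start and the top of the address space, so the later file-bounds comparison cannot be fooled by a wrapped end pointer. A standalone sketch of the same arithmetic, with illustrative names:

  #include <cstdint>
  #include <cstddef>

  // True if `count` elements of `elem_size` bytes starting at `start` can be addressed
  // without the end pointer wrapping around. `elem_size` must be non-zero, as the
  // CHECK_NE above enforces.
  static bool FitsInAddressSpace(const void* start, size_t count, size_t elem_size) {
    const uintptr_t max = UINTPTR_MAX;  // same value as the `0 - 1` idiom above
    const size_t available = max - reinterpret_cast<uintptr_t>(start);
    return count <= available / elem_size;  // mirrors the `max_count < count` rejection
  }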
@@ -185,12 +201,6 @@ bool DexFileVerifier::CheckPointerRange(const void* start, const void* end, cons
return true;
}
-bool DexFileVerifier::CheckListSize(const void* start, uint32_t count,
- uint32_t element_size, const char* label) {
- const byte* list_start = reinterpret_cast<const byte*>(start);
- return CheckPointerRange(list_start, list_start + (count * element_size), label);
-}
-
bool DexFileVerifier::CheckIndex(uint32_t field, uint32_t limit, const char* label) {
if (UNLIKELY(field >= limit)) {
ErrorStringPrintf("Bad index for %s: %x >= %x", label, field, limit);
@@ -329,7 +339,7 @@ bool DexFileVerifier::CheckMap() {
uint32_t DexFileVerifier::ReadUnsignedLittleEndian(uint32_t size) {
uint32_t result = 0;
- if (LIKELY(CheckPointerRange(ptr_, ptr_ + size, "encoded_value"))) {
+ if (LIKELY(CheckListSize(ptr_, size, sizeof(byte), "encoded_value"))) {
for (uint32_t i = 0; i < size; i++) {
result |= ((uint32_t) *(ptr_++)) << (i * 8);
}
@@ -447,7 +457,7 @@ bool DexFileVerifier::CheckClassDataItemMethod(uint32_t idx, uint32_t access_fla
bool DexFileVerifier::CheckPadding(size_t offset, uint32_t aligned_offset) {
if (offset < aligned_offset) {
- if (!CheckPointerRange(begin_ + offset, begin_ + aligned_offset, "section")) {
+ if (!CheckListSize(begin_ + offset, aligned_offset - offset, sizeof(byte), "section")) {
return false;
}
while (offset < aligned_offset) {
@@ -463,7 +473,7 @@ bool DexFileVerifier::CheckPadding(size_t offset, uint32_t aligned_offset) {
}
bool DexFileVerifier::CheckEncodedValue() {
- if (!CheckPointerRange(ptr_, ptr_ + 1, "encoded_value header")) {
+ if (!CheckListSize(ptr_, 1, sizeof(byte), "encoded_value header")) {
return false;
}
@@ -656,7 +666,7 @@ bool DexFileVerifier::CheckIntraClassDataItem() {
bool DexFileVerifier::CheckIntraCodeItem() {
const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(ptr_);
- if (!CheckPointerRange(code_item, code_item + 1, "code")) {
+ if (!CheckListSize(code_item, 1, sizeof(DexFile::CodeItem), "code")) {
return false;
}
@@ -945,7 +955,7 @@ bool DexFileVerifier::CheckIntraDebugInfoItem() {
}
bool DexFileVerifier::CheckIntraAnnotationItem() {
- if (!CheckPointerRange(ptr_, ptr_ + 1, "annotation visibility")) {
+ if (!CheckListSize(ptr_, 1, sizeof(byte), "annotation visibility")) {
return false;
}
@@ -970,7 +980,7 @@ bool DexFileVerifier::CheckIntraAnnotationItem() {
bool DexFileVerifier::CheckIntraAnnotationsDirectoryItem() {
const DexFile::AnnotationsDirectoryItem* item =
reinterpret_cast<const DexFile::AnnotationsDirectoryItem*>(ptr_);
- if (!CheckPointerRange(item, item + 1, "annotations_directory")) {
+ if (!CheckListSize(item, 1, sizeof(DexFile::AnnotationsDirectoryItem), "annotations_directory")) {
return false;
}
@@ -1064,42 +1074,42 @@ bool DexFileVerifier::CheckIntraSectionIterate(size_t offset, uint32_t count, ui
// Check depending on the section type.
switch (type) {
case DexFile::kDexTypeStringIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::StringId), "string_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::StringId), "string_ids")) {
return false;
}
ptr_ += sizeof(DexFile::StringId);
break;
}
case DexFile::kDexTypeTypeIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::TypeId), "type_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::TypeId), "type_ids")) {
return false;
}
ptr_ += sizeof(DexFile::TypeId);
break;
}
case DexFile::kDexTypeProtoIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::ProtoId), "proto_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::ProtoId), "proto_ids")) {
return false;
}
ptr_ += sizeof(DexFile::ProtoId);
break;
}
case DexFile::kDexTypeFieldIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::FieldId), "field_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::FieldId), "field_ids")) {
return false;
}
ptr_ += sizeof(DexFile::FieldId);
break;
}
case DexFile::kDexTypeMethodIdItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::MethodId), "method_ids")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::MethodId), "method_ids")) {
return false;
}
ptr_ += sizeof(DexFile::MethodId);
break;
}
case DexFile::kDexTypeClassDefItem: {
- if (!CheckPointerRange(ptr_, ptr_ + sizeof(DexFile::ClassDef), "class_defs")) {
+ if (!CheckListSize(ptr_, 1, sizeof(DexFile::ClassDef), "class_defs")) {
return false;
}
ptr_ += sizeof(DexFile::ClassDef);
@@ -1110,7 +1120,7 @@ bool DexFileVerifier::CheckIntraSectionIterate(size_t offset, uint32_t count, ui
const DexFile::TypeItem* item = &list->GetTypeItem(0);
uint32_t count = list->Size();
- if (!CheckPointerRange(list, list + 1, "type_list") ||
+ if (!CheckListSize(list, 1, sizeof(DexFile::TypeList), "type_list") ||
!CheckListSize(item, count, sizeof(DexFile::TypeItem), "type_list size")) {
return false;
}
@@ -1123,7 +1133,8 @@ bool DexFileVerifier::CheckIntraSectionIterate(size_t offset, uint32_t count, ui
const DexFile::AnnotationSetRefItem* item = list->list_;
uint32_t count = list->size_;
- if (!CheckPointerRange(list, list + 1, "annotation_set_ref_list") ||
+ if (!CheckListSize(list, 1, sizeof(DexFile::AnnotationSetRefList),
+ "annotation_set_ref_list") ||
!CheckListSize(item, count, sizeof(DexFile::AnnotationSetRefItem),
"annotation_set_ref_list size")) {
return false;
@@ -1137,7 +1148,7 @@ bool DexFileVerifier::CheckIntraSectionIterate(size_t offset, uint32_t count, ui
const uint32_t* item = set->entries_;
uint32_t count = set->size_;
- if (!CheckPointerRange(set, set + 1, "annotation_set_item") ||
+ if (!CheckListSize(set, 1, sizeof(DexFile::AnnotationSetItem), "annotation_set_item") ||
!CheckListSize(item, count, sizeof(uint32_t), "annotation_set_item size")) {
return false;
}
@@ -1644,12 +1655,25 @@ bool DexFileVerifier::CheckInterMethodIdItem() {
bool DexFileVerifier::CheckInterClassDefItem() {
const DexFile::ClassDef* item = reinterpret_cast<const DexFile::ClassDef*>(ptr_);
+ // Check for duplicate class def.
+ if (defined_classes_.find(item->class_idx_) != defined_classes_.end()) {
+ ErrorStringPrintf("Redefinition of class with type idx: '%d'", item->class_idx_);
+ return false;
+ }
+ defined_classes_.insert(item->class_idx_);
+
LOAD_STRING_BY_TYPE(class_descriptor, item->class_idx_, "inter_class_def_item class_idx")
if (UNLIKELY(!IsValidDescriptor(class_descriptor) || class_descriptor[0] != 'L')) {
ErrorStringPrintf("Invalid class descriptor: '%s'", class_descriptor);
return false;
}
+ // Only allow non-runtime modifiers.
+ if ((item->access_flags_ & ~kAccJavaFlagsMask) != 0) {
+ ErrorStringPrintf("Invalid class flags: '%d'", item->access_flags_);
+ return false;
+ }
+
if (item->interfaces_off_ != 0 &&
!CheckOffsetToTypeMap(item->interfaces_off_, DexFile::kDexTypeTypeList)) {
return false;
diff --git a/runtime/dex_file_verifier.h b/runtime/dex_file_verifier.h
index f845993113..0af3549eae 100644
--- a/runtime/dex_file_verifier.h
+++ b/runtime/dex_file_verifier.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_DEX_FILE_VERIFIER_H_
#define ART_RUNTIME_DEX_FILE_VERIFIER_H_
+#include <unordered_set>
+
#include "dex_file.h"
#include "safe_map.h"
@@ -40,8 +42,7 @@ class DexFileVerifier {
bool Verify();
bool CheckShortyDescriptorMatch(char shorty_char, const char* descriptor, bool is_return_type);
- bool CheckPointerRange(const void* start, const void* end, const char* label);
- bool CheckListSize(const void* start, uint32_t count, uint32_t element_size, const char* label);
+ bool CheckListSize(const void* start, size_t count, size_t element_size, const char* label);
bool CheckIndex(uint32_t field, uint32_t limit, const char* label);
bool CheckHeader();
@@ -115,6 +116,9 @@ class DexFileVerifier {
const void* previous_item_;
std::string failure_reason_;
+
+ // Set of type ids for which there are ClassDef elements in the dex file.
+ std::unordered_set<decltype(DexFile::ClassDef::class_idx_)> defined_classes_;
};
} // namespace art
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index e5402e1c66..6179b5e8d1 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -837,6 +837,7 @@ bool ElfFile::Load(bool executable, std::string* error_msg) {
}
}
+ bool reserved = false;
for (Elf32_Word i = 0; i < GetProgramHeaderNum(); i++) {
Elf32_Phdr& program_header = GetProgramHeader(i);
@@ -853,10 +854,8 @@ bool ElfFile::Load(bool executable, std::string* error_msg) {
// Found something to load.
- // If p_vaddr is zero, it must be the first loadable segment,
- // since they must be in order. Since it is zero, there isn't a
- // specific address requested, so first request a contiguous chunk
- // of required size for all segments, but with no
+      // Before loading the actual segments, reserve a contiguous chunk
+ // of required size and address for all segments, but with no
// permissions. We'll then carve that up with the proper
// permissions as we load the actual segments. If p_vaddr is
// non-zero, the segments require the specific address specified,
@@ -870,18 +869,24 @@ bool ElfFile::Load(bool executable, std::string* error_msg) {
return false;
}
size_t file_length = static_cast<size_t>(temp_file_length);
- if (program_header.p_vaddr == 0) {
+ if (!reserved) {
+ byte* reserve_base = ((program_header.p_vaddr != 0) ?
+ reinterpret_cast<byte*>(program_header.p_vaddr) : nullptr);
std::string reservation_name("ElfFile reservation for ");
reservation_name += file_->GetPath();
std::unique_ptr<MemMap> reserve(MemMap::MapAnonymous(reservation_name.c_str(),
- nullptr, GetLoadedSize(), PROT_NONE, false,
- error_msg));
+ reserve_base,
+ GetLoadedSize(), PROT_NONE, false,
+ error_msg));
if (reserve.get() == nullptr) {
*error_msg = StringPrintf("Failed to allocate %s: %s",
reservation_name.c_str(), error_msg->c_str());
return false;
}
- base_address_ = reserve->Begin();
+ reserved = true;
+ if (reserve_base == nullptr) {
+ base_address_ = reserve->Begin();
+ }
segments_.push_back(reserve.release());
}
// empty segment, nothing to map
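The reserve-then-carve scheme described by the rewritten comment above can be illustrated outside ART's MemMap wrapper. The sketch below uses raw POSIX mmap/mprotect with placeholder sizes and permissions; the real loader carves the reservation by mapping file contents over it with MAP_FIXED rather than calling mprotect.

  #include <sys/mman.h>
  #include <cstddef>

  // Reserve one PROT_NONE region covering every loadable segment, then give one
  // segment its real permissions inside that reservation.
  static void* ReserveThenCarve(void* requested_base, size_t total_size,
                                size_t seg_offset, size_t seg_size, int seg_prot) {
    void* base = mmap(requested_base, total_size, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
      return nullptr;
    }
    // Carve one segment out of the reservation with its final permissions.
    if (mprotect(static_cast<char*>(base) + seg_offset, seg_size, seg_prot) != 0) {
      munmap(base, total_size);
      return nullptr;
    }
    return base;
  }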
@@ -1033,18 +1038,13 @@ static FDE* NextFDE(FDE* frame) {
}
static bool IsFDE(FDE* frame) {
- // TODO This seems to be the constant everyone uses (for the .debug_frame
- // section at least), however we should investigate this further.
- const uint32_t kDwarfCIE_id = 0xffffffff;
- const uint32_t kReservedLengths[] = {0xffffffff, 0xfffffff0};
- return frame->CIE_pointer != kDwarfCIE_id &&
- frame->raw_length_ != kReservedLengths[0] && frame->raw_length_ != kReservedLengths[1];
+ return frame->CIE_pointer != 0;
}
// TODO This only works for 32-bit Elf Files.
-static bool FixupDebugFrame(uintptr_t text_start, byte* dbg_frame, size_t dbg_frame_size) {
- FDE* last_frame = reinterpret_cast<FDE*>(dbg_frame + dbg_frame_size);
- FDE* frame = NextFDE(reinterpret_cast<FDE*>(dbg_frame));
+static bool FixupEHFrame(uintptr_t text_start, byte* eh_frame, size_t eh_frame_size) {
+ FDE* last_frame = reinterpret_cast<FDE*>(eh_frame + eh_frame_size);
+ FDE* frame = NextFDE(reinterpret_cast<FDE*>(eh_frame));
for (; frame < last_frame; frame = NextFDE(frame)) {
if (!IsFDE(frame)) {
return false;
@@ -1301,7 +1301,7 @@ static bool FixupDebugInfo(uint32_t text_start, DebugInfoIterator* iter) {
static bool FixupDebugSections(const byte* dbg_abbrev, size_t dbg_abbrev_size,
uintptr_t text_start,
byte* dbg_info, size_t dbg_info_size,
- byte* dbg_frame, size_t dbg_frame_size) {
+ byte* eh_frame, size_t eh_frame_size) {
std::unique_ptr<DebugAbbrev> abbrev(DebugAbbrev::Create(dbg_abbrev, dbg_abbrev_size));
if (abbrev.get() == nullptr) {
return false;
@@ -1313,7 +1313,7 @@ static bool FixupDebugSections(const byte* dbg_abbrev, size_t dbg_abbrev_size,
return false;
}
return FixupDebugInfo(text_start, iter.get())
- && FixupDebugFrame(text_start, dbg_frame, dbg_frame_size);
+ && FixupEHFrame(text_start, eh_frame, eh_frame_size);
}
void ElfFile::GdbJITSupport() {
@@ -1334,20 +1334,16 @@ void ElfFile::GdbJITSupport() {
// Do we have interesting sections?
const Elf32_Shdr* debug_info = all.FindSectionByName(".debug_info");
const Elf32_Shdr* debug_abbrev = all.FindSectionByName(".debug_abbrev");
- const Elf32_Shdr* debug_frame = all.FindSectionByName(".debug_frame");
+ const Elf32_Shdr* eh_frame = all.FindSectionByName(".eh_frame");
const Elf32_Shdr* debug_str = all.FindSectionByName(".debug_str");
const Elf32_Shdr* strtab_sec = all.FindSectionByName(".strtab");
const Elf32_Shdr* symtab_sec = all.FindSectionByName(".symtab");
Elf32_Shdr* text_sec = all.FindSectionByName(".text");
- if (debug_info == nullptr || debug_abbrev == nullptr || debug_frame == nullptr ||
- debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr || symtab_sec == nullptr) {
+ if (debug_info == nullptr || debug_abbrev == nullptr || eh_frame == nullptr ||
+ debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr ||
+ symtab_sec == nullptr) {
return;
}
-#ifdef __LP64__
- if (true) {
- return; // No ELF debug support in 64bit.
- }
-#endif
// We need to add in a strtab and symtab to the image.
// all is MAP_PRIVATE so it can be written to freely.
// We also already have strtab and symtab so we are fine there.
@@ -1364,7 +1360,7 @@ void ElfFile::GdbJITSupport() {
if (!FixupDebugSections(
all.Begin() + debug_abbrev->sh_offset, debug_abbrev->sh_size, text_sec->sh_addr,
all.Begin() + debug_info->sh_offset, debug_info->sh_size,
- all.Begin() + debug_frame->sh_offset, debug_frame->sh_size)) {
+ all.Begin() + eh_frame->sh_offset, eh_frame->sh_size)) {
LOG(ERROR) << "Failed to load GDB data";
return;
}
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 542e1a9e2e..b874a74e7c 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -25,7 +25,6 @@
#include "indirect_reference_table.h"
#include "invoke_type.h"
#include "jni_internal.h"
-#include "method_helper.h"
#include "mirror/art_method.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
@@ -38,11 +37,11 @@ namespace art {
// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <const bool kAccessCheck>
-ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
- mirror::ArtMethod* method,
- Thread* self, bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS {
- mirror::Class* klass = method->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
+ALWAYS_INLINE
+static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ Thread* self, bool* slow_path) {
+ mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
if (UNLIKELY(klass == NULL)) {
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
*slow_path = true;
@@ -88,9 +87,10 @@ ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
}
// TODO: Fix no thread safety analysis when annotalysis is smarter.
-ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
- Thread* self, bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
+ Thread* self,
+ bool* slow_path) {
if (UNLIKELY(!klass->IsInitialized())) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_class(hs.NewHandle(klass));
@@ -118,11 +118,11 @@ ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(m
// check.
// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
- mirror::ArtMethod* method,
- Thread* self,
- gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
bool slow_path = false;
mirror::Class* klass = CheckObjectAlloc<kAccessCheck>(type_idx, method, self, &slow_path);
if (UNLIKELY(slow_path)) {
@@ -138,11 +138,11 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_id
// Given the context of a calling Method and a resolved class, create an instance.
// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
- Thread* self,
- gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
+ mirror::ArtMethod* method,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
bool slow_path = false;
klass = CheckClassInitializedForObjectAlloc(klass, self, &slow_path);
@@ -161,11 +161,11 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::
// Given the context of a calling Method and an initialized class, create an instance.
// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- mirror::ArtMethod* method,
- Thread* self,
- gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
+ mirror::ArtMethod* method,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
// Pass in false since the object can not be finalizable.
return klass->Alloc<kInstrumented, false>(self, allocator_type);
@@ -174,17 +174,17 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirro
// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck>
-ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
- mirror::ArtMethod* method,
- int32_t component_count,
- bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ bool* slow_path) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
*slow_path = true;
return nullptr; // Failure
}
- mirror::Class* klass = method->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
+ mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx);
if (UNLIKELY(klass == nullptr)) { // Not in dex cache so try to resolve
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
*slow_path = true;
@@ -211,12 +211,12 @@ ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
// check.
// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
- mirror::ArtMethod* method,
- int32_t component_count,
- Thread* self,
- gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
bool slow_path = false;
mirror::Class* klass = CheckArrayAlloc<kAccessCheck>(type_idx, method, component_count,
&slow_path);
@@ -234,12 +234,12 @@ ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
}
template <bool kAccessCheck, bool kInstrumented>
-ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
- int32_t component_count,
- Thread* self,
- gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS {
+ALWAYS_INLINE
+static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
+ mirror::ArtMethod* method,
+ int32_t component_count,
+ Thread* self,
+ gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
@@ -398,26 +398,26 @@ static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
case kDirect:
return resolved_method;
case kVirtual: {
- mirror::ObjectArray<mirror::ArtMethod>* vtable = (*this_object)->GetClass()->GetVTable();
+ mirror::Class* klass = (*this_object)->GetClass();
uint16_t vtable_index = resolved_method->GetMethodIndex();
if (access_check &&
- (vtable == nullptr || vtable_index >= static_cast<uint32_t>(vtable->GetLength()))) {
+ (!klass->HasVTable() ||
+ vtable_index >= static_cast<uint32_t>(klass->GetVTableLength()))) {
// Behavior to agree with that of the verifier.
ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(),
resolved_method->GetName(), resolved_method->GetSignature());
return nullptr; // Failure.
}
- DCHECK(vtable != nullptr);
- return vtable->GetWithoutChecks(vtable_index);
+ DCHECK(klass->HasVTable()) << PrettyClass(klass);
+ return klass->GetVTableEntry(vtable_index);
}
case kSuper: {
mirror::Class* super_class = (*referrer)->GetDeclaringClass()->GetSuperClass();
uint16_t vtable_index = resolved_method->GetMethodIndex();
- mirror::ObjectArray<mirror::ArtMethod>* vtable;
if (access_check) {
// Check existence of super class.
- vtable = (super_class != nullptr) ? super_class->GetVTable() : nullptr;
- if (vtable == nullptr || vtable_index >= static_cast<uint32_t>(vtable->GetLength())) {
+ if (super_class == nullptr || !super_class->HasVTable() ||
+ vtable_index >= static_cast<uint32_t>(super_class->GetVTableLength())) {
// Behavior to agree with that of the verifier.
ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(),
resolved_method->GetName(), resolved_method->GetSignature());
@@ -426,10 +426,9 @@ static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
} else {
// Super class must exist.
DCHECK(super_class != nullptr);
- vtable = super_class->GetVTable();
}
- DCHECK(vtable != nullptr);
- return vtable->GetWithoutChecks(vtable_index);
+ DCHECK(super_class->HasVTable());
+ return super_class->GetVTableEntry(vtable_index);
}
case kInterface: {
uint32_t imt_index = resolved_method->GetDexMethodIndex() % mirror::Class::kImtSize;
@@ -476,8 +475,7 @@ EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kInterface);
// Fast path field resolution that can't initialize classes or throw exceptions.
static inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
mirror::ArtMethod* referrer,
- FindFieldType type, size_t expected_size)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ FindFieldType type, size_t expected_size) {
mirror::ArtField* resolved_field =
referrer->GetDeclaringClass()->GetDexCache()->GetResolvedField(field_idx);
if (UNLIKELY(resolved_field == nullptr)) {
@@ -534,8 +532,7 @@ static inline mirror::ArtField* FindFieldFast(uint32_t field_idx,
static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
mirror::Object* this_object,
mirror::ArtMethod* referrer,
- bool access_check, InvokeType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool access_check, InvokeType type) {
bool is_direct = type == kStatic || type == kDirect;
if (UNLIKELY(this_object == NULL && !is_direct)) {
return NULL;
@@ -565,19 +562,18 @@ static inline mirror::ArtMethod* FindMethodFast(uint32_t method_idx,
} else if (is_direct) {
return resolved_method;
} else if (type == kSuper) {
- return referrer->GetDeclaringClass()->GetSuperClass()->GetVTable()->
- Get(resolved_method->GetMethodIndex());
+ return referrer->GetDeclaringClass()->GetSuperClass()
+ ->GetVTableEntry(resolved_method->GetMethodIndex());
} else {
DCHECK(type == kVirtual);
- return this_object->GetClass()->GetVTable()->Get(resolved_method->GetMethodIndex());
+ return this_object->GetClass()->GetVTableEntry(resolved_method->GetMethodIndex());
}
}
static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
mirror::ArtMethod* referrer,
Thread* self, bool can_run_clinit,
- bool verify_access)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ bool verify_access) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
mirror::Class* klass = class_linker->ResolveType(type_idx, referrer);
if (UNLIKELY(klass == nullptr)) {
@@ -611,14 +607,12 @@ static inline mirror::Class* ResolveVerifyAndClinit(uint32_t type_idx,
}
static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
- uint32_t string_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t string_idx) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
return class_linker->ResolveString(string_idx, referrer);
}
-static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
- NO_THREAD_SAFETY_ANALYSIS /* SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) */ {
+static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
// Save any pending exception over monitor exit call.
mirror::Throwable* saved_exception = NULL;
ThrowLocation saved_throw_location;
@@ -642,27 +636,7 @@ static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
}
}
-static inline void CheckReferenceResult(mirror::Object* o, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (o == NULL) {
- return;
- }
- mirror::ArtMethod* m = self->GetCurrentMethod(NULL);
- if (o == kInvalidIndirectRefObject) {
- JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str());
- }
- // Make sure that the result is an instance of the type this method was expected to return.
- StackHandleScope<1> hs(self);
- Handle<mirror::ArtMethod> h_m(hs.NewHandle(m));
- mirror::Class* return_type = MethodHelper(h_m).GetReturnType();
-
- if (!o->InstanceOf(return_type)) {
- JniAbortF(NULL, "attempt to return an instance of %s from %s", PrettyTypeOf(o).c_str(),
- PrettyMethod(h_m.Get()).c_str());
- }
-}
-
-static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+static inline void CheckSuspend(Thread* thread) {
for (;;) {
if (thread->ReadFlag(kCheckpointRequest)) {
thread->RunCheckpointFunction();
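
The vtable accesses rewritten above go through the new mirror::Class helpers (HasVTable(), GetVTableLength(), GetVTableEntry()) instead of fetching the backing ObjectArray directly. A minimal standalone sketch of that bounds-checked lookup pattern, with made-up Klass/Method types standing in for the mirror classes:

// Illustrative sketch only: stand-in types for mirror::Class and mirror::ArtMethod,
// showing the access-checked lookup the new helpers encapsulate.
#include <cstdint>
#include <iostream>
#include <vector>

struct Method { const char* name; };

struct Klass {
  std::vector<Method*> vtable;  // Assumption: a simple vector plays the role of the embedded table.
  bool HasVTable() const { return !vtable.empty(); }
  int32_t GetVTableLength() const { return static_cast<int32_t>(vtable.size()); }
  Method* GetVTableEntry(uint32_t i) const { return vtable[i]; }
};

// Mirrors the kVirtual path: with access checks, validate the table and index
// before dereferencing; without them, trust the verifier and index directly.
Method* FindVirtual(const Klass& klass, uint16_t vtable_index, bool access_check) {
  if (access_check &&
      (!klass.HasVTable() ||
       vtable_index >= static_cast<uint32_t>(klass.GetVTableLength()))) {
    return nullptr;  // The caller would throw NoSuchMethodError here.
  }
  return klass.GetVTableEntry(vtable_index);
}

int main() {
  Method to_string{"toString"};
  Klass klass{{&to_string}};
  Method* m = FindVirtual(klass, 0, /*access_check=*/true);
  std::cout << (m != nullptr ? m->name : "<missing>") << "\n";
}
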
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index d063dfb425..4755b9e9db 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -20,6 +20,7 @@
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "gc/accounting/card_table-inl.h"
+#include "method_helper-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -40,7 +41,7 @@ static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx, mirror:
ThrowNegativeArraySizeException(component_count);
return nullptr; // Failure
}
- mirror::Class* klass = referrer->GetDexCacheResolvedTypes()->GetWithoutChecks(type_idx);
+ mirror::Class* klass = referrer->GetDexCacheResolvedType<false>(type_idx);
if (UNLIKELY(klass == NULL)) { // Not in dex cache so try to resolve
klass = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, referrer);
if (klass == NULL) { // Error
@@ -109,8 +110,8 @@ mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, mirror:
void ThrowStackOverflowError(Thread* self) {
if (self->IsHandlingStackOverflow()) {
- LOG(ERROR) << "Recursive stack overflow.";
- // We don't fail here because SetStackEndForStackOverflow will print better diagnostics.
+ LOG(ERROR) << "Recursive stack overflow.";
+ // We don't fail here because SetStackEndForStackOverflow will print better diagnostics.
}
if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
@@ -122,15 +123,90 @@ void ThrowStackOverflowError(Thread* self) {
JNIEnvExt* env = self->GetJniEnv();
std::string msg("stack size ");
msg += PrettySize(self->GetStackSize());
- // Use low-level JNI routine and pre-baked error class to avoid class linking operations that
- // would consume more stack.
- int rc = ::art::ThrowNewException(env, WellKnownClasses::java_lang_StackOverflowError,
- msg.c_str(), NULL);
- if (rc != JNI_OK) {
- // TODO: ThrowNewException failed presumably because of an OOME, we continue to throw the OOME
- // or die in the CHECK below. We may want to throw a pre-baked StackOverflowError
- // instead.
- LOG(ERROR) << "Couldn't throw new StackOverflowError because JNI ThrowNew failed.";
+
+ // Avoid running Java code for exception initialization.
+  // TODO: Add checks to make this a bit less brittle.
+
+ std::string error_msg;
+
+ // Allocate an uninitialized object.
+ ScopedLocalRef<jobject> exc(env,
+ env->AllocObject(WellKnownClasses::java_lang_StackOverflowError));
+ if (exc.get() != nullptr) {
+ // "Initialize".
+ // StackOverflowError -> VirtualMachineError -> Error -> Throwable -> Object.
+ // Only Throwable has "custom" fields:
+ // String detailMessage.
+ // Throwable cause (= this).
+ // List<Throwable> suppressedExceptions (= Collections.emptyList()).
+ // Object stackState;
+ // StackTraceElement[] stackTrace;
+ // Only Throwable has a non-empty constructor:
+ // this.stackTrace = EmptyArray.STACK_TRACE_ELEMENT;
+ // fillInStackTrace();
+
+ // detailMessage.
+ // TODO: Use String::FromModifiedUTF...?
+ ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg.c_str()));
+ if (s.get() != nullptr) {
+ jfieldID detail_message_id = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
+ "detailMessage", "Ljava/lang/String;");
+ env->SetObjectField(exc.get(), detail_message_id, s.get());
+
+ // cause.
+ jfieldID cause_id = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
+ "cause", "Ljava/lang/Throwable;");
+ env->SetObjectField(exc.get(), cause_id, exc.get());
+
+ // suppressedExceptions.
+ jfieldID emptylist_id = env->GetStaticFieldID(WellKnownClasses::java_util_Collections,
+ "EMPTY_LIST", "Ljava/util/List;");
+ ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField(
+ WellKnownClasses::java_util_Collections, emptylist_id));
+ CHECK(emptylist.get() != nullptr);
+ jfieldID suppressed_id = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
+ "suppressedExceptions", "Ljava/util/List;");
+ env->SetObjectField(exc.get(), suppressed_id, emptylist.get());
+
+ // stackState is set as result of fillInStackTrace. fillInStackTrace calls
+ // nativeFillInStackTrace.
+ ScopedLocalRef<jobject> stack_state_val(env, nullptr);
+ {
+ ScopedObjectAccessUnchecked soa(env);
+ stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa));
+ }
+ if (stack_state_val.get() != nullptr) {
+ jfieldID stackstateID = env->GetFieldID(WellKnownClasses::java_lang_Throwable,
+ "stackState", "Ljava/lang/Object;");
+ env->SetObjectField(exc.get(), stackstateID, stack_state_val.get());
+
+ // stackTrace.
+ jfieldID stack_trace_elem_id = env->GetStaticFieldID(
+ WellKnownClasses::libcore_util_EmptyArray, "STACK_TRACE_ELEMENT",
+ "[Ljava/lang/StackTraceElement;");
+ ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField(
+ WellKnownClasses::libcore_util_EmptyArray, stack_trace_elem_id));
+ jfieldID stacktrace_id = env->GetFieldID(
+ WellKnownClasses::java_lang_Throwable, "stackTrace", "[Ljava/lang/StackTraceElement;");
+ env->SetObjectField(exc.get(), stacktrace_id, stack_trace_elem.get());
+
+ // Throw the exception.
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ self->SetException(throw_location,
+ reinterpret_cast<mirror::Throwable*>(self->DecodeJObject(exc.get())));
+ } else {
+ error_msg = "Could not create stack trace.";
+ }
+ } else {
+ // Could not allocate a string object.
+ error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed.";
+ }
+ } else {
+ error_msg = "Could not allocate StackOverflowError object.";
+ }
+
+ if (!error_msg.empty()) {
+ LOG(ERROR) << error_msg;
CHECK(self->IsExceptionPending());
}
@@ -138,6 +214,27 @@ void ThrowStackOverflowError(Thread* self) {
self->ResetDefaultStackEnd(!explicit_overflow_check); // Return to default stack size.
}
+void CheckReferenceResult(mirror::Object* o, Thread* self) {
+ if (o == NULL) {
+ return;
+ }
+ mirror::ArtMethod* m = self->GetCurrentMethod(NULL);
+ if (o == kInvalidIndirectRefObject) {
+ Runtime::Current()->GetJavaVM()->JniAbortF(NULL, "invalid reference returned from %s",
+ PrettyMethod(m).c_str());
+ }
+ // Make sure that the result is an instance of the type this method was expected to return.
+ StackHandleScope<1> hs(self);
+ Handle<mirror::ArtMethod> h_m(hs.NewHandle(m));
+ mirror::Class* return_type = MethodHelper(h_m).GetReturnType();
+
+ if (!o->InstanceOf(return_type)) {
+ Runtime::Current()->GetJavaVM()->JniAbortF(NULL, "attempt to return an instance of %s from %s",
+ PrettyTypeOf(o).c_str(),
+ PrettyMethod(h_m.Get()).c_str());
+ }
+}
+
JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
jobject rcvr_jobj, jobject interface_method_jobj,
std::vector<jvalue>& args) {
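
The reworked ThrowStackOverflowError above avoids running Java code by allocating the Throwable with AllocObject (which skips <init>) and populating its fields through plain JNI setters. A hedged sketch of that general pattern outside ART, with error handling trimmed; the detailMessage field name is taken from the patch, everything else is illustrative:

// Sketch only: construct and throw an exception object without running its constructor.
#include <jni.h>
#include <string>

void ThrowPrebuiltError(JNIEnv* env, const std::string& msg) {
  jclass soe = env->FindClass("java/lang/StackOverflowError");
  if (soe == nullptr) return;                        // ClassNotFoundError is now pending.
  jobject exc = env->AllocObject(soe);               // Allocates without invoking <init>.
  if (exc == nullptr) return;                        // OutOfMemoryError is now pending.
  jstring detail = env->NewStringUTF(msg.c_str());
  if (detail != nullptr) {
    jfieldID detail_message_id =
        env->GetFieldID(env->FindClass("java/lang/Throwable"),
                        "detailMessage", "Ljava/lang/String;");
    env->SetObjectField(exc, detail_message_id, detail);
  }
  env->Throw(static_cast<jthrowable>(exc));          // Make it the pending exception.
}
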
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 11a67ac5dd..44c89adada 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -40,67 +40,63 @@ namespace mirror {
class ScopedObjectAccessAlreadyRunnable;
class Thread;
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <const bool kAccessCheck>
ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
mirror::ArtMethod* method,
Thread* self, bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// TODO: Fix no thread safety analysis when annotalysis is smarter.
ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self, bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
mirror::ArtMethod* method,
Thread* self,
- gc::AllocatorType allocator_type);
+ gc::AllocatorType allocator_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method and a resolved class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method and an initialized class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
mirror::ArtMethod* method,
Thread* self,
- gc::AllocatorType allocator_type);
+ gc::AllocatorType allocator_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck>
ALWAYS_INLINE static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
mirror::ArtMethod* method,
int32_t component_count,
bool* slow_path)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
// it cannot be resolved, throw an error. If it can, use it to create an array.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
mirror::ArtMethod* method,
int32_t component_count,
Thread* self,
gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
@@ -108,7 +104,7 @@ ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Cl
int32_t component_count,
Thread* self,
gc::AllocatorType allocator_type)
- NO_THREAD_SAFETY_ANALYSIS;
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method,
int32_t component_count, Thread* self,
@@ -171,10 +167,11 @@ static inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
uint32_t string_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ NO_THREAD_SAFETY_ANALYSIS;
-static inline void CheckReferenceResult(mirror::Object* o, Thread* self)
+void CheckReferenceResult(mirror::Object* o, Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static inline void CheckSuspend(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
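
Swapping NO_THREAD_SAFETY_ANALYSIS for SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) re-enables Clang's -Wthread-safety checking on these declarations. A small self-contained sketch of how such annotations behave, using the legacy Clang attribute spellings that macros like these typically expand to; the ReaderWriterMutex and ReadHeapWord names are invented for the example:

#if defined(__clang__)
#define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__)))
#define SHARED_LOCK_FUNCTION(...)  __attribute__((shared_lock_function(__VA_ARGS__)))
#define UNLOCK_FUNCTION(...)       __attribute__((unlock_function(__VA_ARGS__)))
#define LOCKABLE                   __attribute__((lockable))
#else
#define SHARED_LOCKS_REQUIRED(...)
#define SHARED_LOCK_FUNCTION(...)
#define UNLOCK_FUNCTION(...)
#define LOCKABLE
#endif

struct LOCKABLE ReaderWriterMutex {
  void SharedLock() SHARED_LOCK_FUNCTION() {}
  void SharedUnlock() UNLOCK_FUNCTION() {}
};

ReaderWriterMutex mutator_lock;

// Callers must hold mutator_lock for reading; clang -Wthread-safety flags violations.
int ReadHeapWord(const int* p) SHARED_LOCKS_REQUIRED(mutator_lock) { return *p; }

int Caller(const int* p) {
  mutator_lock.SharedLock();
  int v = ReadHeapWord(p);    // OK: shared lock held across the call.
  mutator_lock.SharedUnlock();
  return v;
}

int main() {
  int word = 42;
  return Caller(&word) == 42 ? 0 : 1;
}
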
diff --git a/runtime/entrypoints/portable/portable_throw_entrypoints.cc b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
index be6231cae5..431735803d 100644
--- a/runtime/entrypoints/portable/portable_throw_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
@@ -98,7 +98,7 @@ extern "C" int32_t art_portable_find_catch_block_from_code(mirror::ArtMethod* cu
}
// Does this catch exception type apply?
mirror::Class* iter_exception_type =
- current_method->GetDexCacheResolvedTypes()->Get(iter_type_idx);
+ current_method->GetDexCacheResolvedType(iter_type_idx);
if (UNLIKELY(iter_exception_type == NULL)) {
// TODO: check, the verifier (class linker?) should take care of resolving all exception
// classes early.
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 1f2713a4f7..9d850c55bf 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -25,11 +25,34 @@
namespace art {
+static constexpr bool kUseTlabFastPath = true;
+
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+ mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx); \
+ if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \
+ size_t byte_count = klass->GetObjectSize(); \
+ byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
+ mirror::Object* obj; \
+ if (LIKELY(byte_count < self->TlabSize())) { \
+ obj = self->AllocTlab(byte_count); \
+ DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
+ obj->SetClass(klass); \
+ if (kUseBakerOrBrooksReadBarrier) { \
+ if (kUseBrooksReadBarrier) { \
+ obj->SetReadBarrierPointer(obj); \
+ } \
+ obj->AssertReadBarrierPointer(); \
+ } \
+ QuasiAtomic::ThreadFenceForConstructor(); \
+ return obj; \
+ } \
+ } \
+ } \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
@@ -37,6 +60,26 @@ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+ if (LIKELY(klass->IsInitialized())) { \
+ size_t byte_count = klass->GetObjectSize(); \
+ byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
+ mirror::Object* obj; \
+ if (LIKELY(byte_count < self->TlabSize())) { \
+ obj = self->AllocTlab(byte_count); \
+ DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
+ obj->SetClass(klass); \
+ if (kUseBakerOrBrooksReadBarrier) { \
+ if (kUseBrooksReadBarrier) { \
+ obj->SetReadBarrierPointer(obj); \
+ } \
+ obj->AssertReadBarrierPointer(); \
+ } \
+ QuasiAtomic::ThreadFenceForConstructor(); \
+ return obj; \
+ } \
+ } \
+ } \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
} \
@@ -44,6 +87,24 @@ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
StackReference<mirror::ArtMethod>* sp) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+ size_t byte_count = klass->GetObjectSize(); \
+ byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
+ mirror::Object* obj; \
+ if (LIKELY(byte_count < self->TlabSize())) { \
+ obj = self->AllocTlab(byte_count); \
+ DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
+ obj->SetClass(klass); \
+ if (kUseBakerOrBrooksReadBarrier) { \
+ if (kUseBrooksReadBarrier) { \
+ obj->SetReadBarrierPointer(obj); \
+ } \
+ obj->AssertReadBarrierPointer(); \
+ } \
+ QuasiAtomic::ThreadFenceForConstructor(); \
+ return obj; \
+ } \
+ } \
FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
} \
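
The three fast paths added above all follow the same shape: if the class is known, initialized and trivially allocatable, bump the thread-local allocation buffer and skip the runtime transition entirely. A standalone sketch of that bump-pointer fast path with a stand-in TLAB type and slow path; sizes and alignment are made up, and the read-barrier and constructor-fence details are omitted:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

constexpr size_t kAlignment = 8;  // Assumed allocation alignment for the sketch.

struct Tlab {
  uint8_t* pos;
  uint8_t* end;
  size_t Remaining() const { return static_cast<size_t>(end - pos); }
  void* Alloc(size_t bytes) {      // Caller has already checked that it fits.
    void* result = pos;
    pos += bytes;
    return result;
  }
};

void* SlowPathAlloc(size_t bytes) { return ::operator new(bytes); }  // Stand-in slow path.

void* AllocObject(Tlab* tlab, size_t byte_count) {
  byte_count = (byte_count + kAlignment - 1) & ~(kAlignment - 1);  // RoundUp.
  if (byte_count < tlab->Remaining()) {
    return tlab->Alloc(byte_count);  // Fast path: no locks, no heap bookkeeping.
  }
  return SlowPathAlloc(byte_count);  // Shared heap / GC path.
}

int main() {
  uint8_t buffer[256];
  Tlab tlab{buffer, buffer + sizeof(buffer)};
  void* obj = AllocObject(&tlab, 24);
  std::memset(obj, 0, 24);
  std::cout << "allocated at " << obj << "\n";
}
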
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 032f6bebad..8c108a816d 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -37,107 +37,11 @@ class Thread;
// Pointers to functions that are called by quick compiler generated code via thread-local storage.
struct PACKED(4) QuickEntryPoints {
- // Alloc
- void* (*pAllocArray)(uint32_t, void*, int32_t);
- void* (*pAllocArrayResolved)(void*, void*, int32_t);
- void* (*pAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
- void* (*pAllocObject)(uint32_t, void*);
- void* (*pAllocObjectResolved)(void*, void*);
- void* (*pAllocObjectInitialized)(void*, void*);
- void* (*pAllocObjectWithAccessCheck)(uint32_t, void*);
- void* (*pCheckAndAllocArray)(uint32_t, void*, int32_t);
- void* (*pCheckAndAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
-
- // Cast
- uint32_t (*pInstanceofNonTrivial)(const mirror::Class*, const mirror::Class*);
- void (*pCheckCast)(void*, void*);
-
- // DexCache
- void* (*pInitializeStaticStorage)(uint32_t, void*);
- void* (*pInitializeTypeAndVerifyAccess)(uint32_t, void*);
- void* (*pInitializeType)(uint32_t, void*);
- void* (*pResolveString)(void*, uint32_t);
-
- // Field
- int (*pSet32Instance)(uint32_t, void*, int32_t); // field_idx, obj, src
- int (*pSet32Static)(uint32_t, int32_t);
- int (*pSet64Instance)(uint32_t, void*, int64_t);
- int (*pSet64Static)(uint32_t, int64_t);
- int (*pSetObjInstance)(uint32_t, void*, void*);
- int (*pSetObjStatic)(uint32_t, void*);
- int32_t (*pGet32Instance)(uint32_t, void*);
- int32_t (*pGet32Static)(uint32_t);
- int64_t (*pGet64Instance)(uint32_t, void*);
- int64_t (*pGet64Static)(uint32_t);
- void* (*pGetObjInstance)(uint32_t, void*);
- void* (*pGetObjStatic)(uint32_t);
-
- // Array
- void (*pAputObjectWithNullAndBoundCheck)(void*, uint32_t, void*); // array, index, src
- void (*pAputObjectWithBoundCheck)(void*, uint32_t, void*); // array, index, src
- void (*pAputObject)(void*, uint32_t, void*); // array, index, src
- void (*pHandleFillArrayData)(void*, void*);
-
- // JNI
- uint32_t (*pJniMethodStart)(Thread*);
- uint32_t (*pJniMethodStartSynchronized)(jobject to_lock, Thread* self);
- void (*pJniMethodEnd)(uint32_t cookie, Thread* self);
- void (*pJniMethodEndSynchronized)(uint32_t cookie, jobject locked, Thread* self);
- mirror::Object* (*pJniMethodEndWithReference)(jobject result, uint32_t cookie, Thread* self);
- mirror::Object* (*pJniMethodEndWithReferenceSynchronized)(jobject result, uint32_t cookie,
- jobject locked, Thread* self);
- void (*pQuickGenericJniTrampoline)(mirror::ArtMethod*);
-
- // Locks
- void (*pLockObject)(void*);
- void (*pUnlockObject)(void*);
-
- // Math
- int32_t (*pCmpgDouble)(double, double);
- int32_t (*pCmpgFloat)(float, float);
- int32_t (*pCmplDouble)(double, double);
- int32_t (*pCmplFloat)(float, float);
- double (*pFmod)(double, double);
- double (*pL2d)(int64_t);
- float (*pFmodf)(float, float);
- float (*pL2f)(int64_t);
- int32_t (*pD2iz)(double);
- int32_t (*pF2iz)(float);
- int32_t (*pIdivmod)(int32_t, int32_t);
- int64_t (*pD2l)(double);
- int64_t (*pF2l)(float);
- int64_t (*pLdiv)(int64_t, int64_t);
- int64_t (*pLmod)(int64_t, int64_t);
- int64_t (*pLmul)(int64_t, int64_t);
- uint64_t (*pShlLong)(uint64_t, uint32_t);
- uint64_t (*pShrLong)(uint64_t, uint32_t);
- uint64_t (*pUshrLong)(uint64_t, uint32_t);
-
- // Intrinsics
- int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t);
- int32_t (*pStringCompareTo)(void*, void*);
- void* (*pMemcpy)(void*, const void*, size_t);
-
- // Invocation
- void (*pQuickImtConflictTrampoline)(mirror::ArtMethod*);
- void (*pQuickResolutionTrampoline)(mirror::ArtMethod*);
- void (*pQuickToInterpreterBridge)(mirror::ArtMethod*);
- void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
- void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*);
- void (*pInvokeStaticTrampolineWithAccessCheck)(uint32_t, void*);
- void (*pInvokeSuperTrampolineWithAccessCheck)(uint32_t, void*);
- void (*pInvokeVirtualTrampolineWithAccessCheck)(uint32_t, void*);
-
- // Thread
- void (*pTestSuspend)(); // Stub that is periodically called to test the suspend count
-
- // Throws
- void (*pDeliverException)(void*);
- void (*pThrowArrayBounds)(int32_t, int32_t);
- void (*pThrowDivZero)();
- void (*pThrowNoSuchMethod)(int32_t);
- void (*pThrowNullPointer)();
- void (*pThrowStackOverflow)(void*);
+#define ENTRYPOINT_ENUM(name, rettype, ...) rettype ( * p ## name )( __VA_ARGS__ );
+#include "quick_entrypoints_list.h"
+ QUICK_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
+#undef QUICK_ENTRYPOINT_LIST
+#undef ENTRYPOINT_ENUM
};
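
The replacement above is the classic X-macro pattern: the member list lives once in quick_entrypoints_list.h and is expanded wherever a per-entrypoint declaration is needed, so the struct and the enum cannot drift apart. A toy version with an invented three-entry list:

#include <iostream>
#include <cstdint>

#define TOY_ENTRYPOINT_LIST(V)      \
  V(AllocObject, void*, uint32_t)   \
  V(LockObject, void, void*)        \
  V(TestSuspend, void, void)

// Expansion 1: struct of function pointers (mirrors QuickEntryPoints).
struct ToyEntryPoints {
#define ENTRYPOINT_MEMBER(name, rettype, ...) rettype (*p##name)(__VA_ARGS__);
  TOY_ENTRYPOINT_LIST(ENTRYPOINT_MEMBER)
#undef ENTRYPOINT_MEMBER
};

// Expansion 2: matching enum (mirrors QuickEntrypointEnum).
enum ToyEntrypointEnum {
#define ENTRYPOINT_ENUM(name, rettype, ...) kToy##name,
  TOY_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
#undef ENTRYPOINT_ENUM
};

int main() {
  std::cout << "struct size: " << sizeof(ToyEntryPoints)
            << ", kToyTestSuspend = " << kToyTestSuspend << "\n";
}
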
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.h b/runtime/entrypoints/quick/quick_entrypoints_enum.h
new file mode 100644
index 0000000000..84158cd8b0
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_ENUM_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_ENUM_H_
+
+#include "quick_entrypoints.h"
+#include "thread.h"
+
+namespace art {
+
+// Define an enum for the entrypoints. Names are prefixed with 'kQuick'.
+enum QuickEntrypointEnum
+{ // NOLINT(whitespace/braces)
+#define ENTRYPOINT_ENUM(name, rettype, ...) kQuick ## name,
+#include "quick_entrypoints_list.h"
+ QUICK_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
+#undef QUICK_ENTRYPOINT_LIST
+#undef ENTRYPOINT_ENUM
+};
+
+std::ostream& operator<<(std::ostream& os, const QuickEntrypointEnum& kind);
+
+// Translate a QuickEntrypointEnum value to the corresponding ThreadOffset.
+template <size_t pointer_size>
+static ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
+ switch (trampoline)
+ { // NOLINT(whitespace/braces)
+ #define ENTRYPOINT_ENUM(name, rettype, ...) case kQuick ## name : \
+ return QUICK_ENTRYPOINT_OFFSET(pointer_size, p ## name);
+ #include "quick_entrypoints_list.h"
+ QUICK_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
+ #undef QUICK_ENTRYPOINT_LIST
+ #undef ENTRYPOINT_ENUM
+ };
+ LOG(FATAL) << "Unexpected trampoline " << static_cast<int>(trampoline);
+ return ThreadOffset<pointer_size>(-1);
+}
+
+} // namespace art
+
+
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_ENUM_H_
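
GetThreadOffset() above uses the same list to generate one switch case per entrypoint. A sketch of that second expansion against the toy list from the previous example, returning offsetof() of the matching struct member in place of ART's QUICK_ENTRYPOINT_OFFSET:

#include <cstddef>
#include <cstdint>
#include <iostream>

#define TOY_ENTRYPOINT_LIST(V)      \
  V(AllocObject, void*, uint32_t)   \
  V(LockObject, void, void*)        \
  V(TestSuspend, void, void)

struct ToyEntryPoints {
#define ENTRYPOINT_MEMBER(name, rettype, ...) rettype (*p##name)(__VA_ARGS__);
  TOY_ENTRYPOINT_LIST(ENTRYPOINT_MEMBER)
#undef ENTRYPOINT_MEMBER
};

enum ToyEntrypointEnum {
#define ENTRYPOINT_ENUM(name, rettype, ...) kToy##name,
  TOY_ENTRYPOINT_LIST(ENTRYPOINT_ENUM)
#undef ENTRYPOINT_ENUM
};

// One case per entrypoint: adding an entry to the list updates the struct,
// the enum and this lookup in a single place.
size_t GetEntrypointOffset(ToyEntrypointEnum which) {
  switch (which) {
#define ENTRYPOINT_CASE(name, rettype, ...) \
    case kToy##name: return offsetof(ToyEntryPoints, p##name);
    TOY_ENTRYPOINT_LIST(ENTRYPOINT_CASE)
#undef ENTRYPOINT_CASE
  }
  return static_cast<size_t>(-1);
}

int main() {
  std::cout << "pLockObject offset: " << GetEntrypointOffset(kToyLockObject) << "\n";
}
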
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
new file mode 100644
index 0000000000..f858743314
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
+
+// All quick entrypoints. Format is name, return type, argument types.
+
+#define QUICK_ENTRYPOINT_LIST(V) \
+ V(AllocArray, void*, uint32_t, void*, int32_t) \
+ V(AllocArrayResolved, void*, void*, void*, int32_t) \
+ V(AllocArrayWithAccessCheck, void*, uint32_t, void*, int32_t) \
+ V(AllocObject, void*, uint32_t, void*) \
+ V(AllocObjectResolved, void*, void*, void*) \
+ V(AllocObjectInitialized, void*, void*, void*) \
+ V(AllocObjectWithAccessCheck, void*, uint32_t, void*) \
+ V(CheckAndAllocArray, void*, uint32_t, void*, int32_t) \
+ V(CheckAndAllocArrayWithAccessCheck, void*, uint32_t, void*, int32_t) \
+\
+ V(InstanceofNonTrivial, uint32_t, const mirror::Class*, const mirror::Class*) \
+ V(CheckCast, void , void*, void*) \
+\
+ V(InitializeStaticStorage, void*, uint32_t, void*) \
+ V(InitializeTypeAndVerifyAccess, void*, uint32_t, void*) \
+ V(InitializeType, void*, uint32_t, void*) \
+ V(ResolveString, void*, void*, uint32_t) \
+\
+ V(Set32Instance, int, uint32_t, void*, int32_t) \
+ V(Set32Static, int, uint32_t, int32_t) \
+ V(Set64Instance, int, uint32_t, void*, int64_t) \
+ V(Set64Static, int, uint32_t, int64_t) \
+ V(SetObjInstance, int, uint32_t, void*, void*) \
+ V(SetObjStatic, int, uint32_t, void*) \
+ V(Get32Instance, int32_t, uint32_t, void*) \
+ V(Get32Static, int32_t, uint32_t) \
+ V(Get64Instance, int64_t, uint32_t, void*) \
+ V(Get64Static, int64_t, uint32_t) \
+ V(GetObjInstance, void*, uint32_t, void*) \
+ V(GetObjStatic, void*, uint32_t) \
+\
+ V(AputObjectWithNullAndBoundCheck, void, void*, uint32_t, void*) \
+ V(AputObjectWithBoundCheck, void, void*, uint32_t, void*) \
+ V(AputObject, void, void*, uint32_t, void*) \
+ V(HandleFillArrayData, void, void*, void*) \
+\
+ V(JniMethodStart, uint32_t, Thread*) \
+ V(JniMethodStartSynchronized, uint32_t, jobject to_lock, Thread* self) \
+ V(JniMethodEnd, void, uint32_t cookie, Thread* self) \
+ V(JniMethodEndSynchronized, void, uint32_t cookie, jobject locked, Thread* self) \
+ V(JniMethodEndWithReference, mirror::Object*, jobject result, uint32_t cookie, Thread* self) \
+ V(JniMethodEndWithReferenceSynchronized, mirror::Object*, jobject result, uint32_t cookie, jobject locked, Thread* self) \
+ V(QuickGenericJniTrampoline, void, mirror::ArtMethod*) \
+\
+ V(LockObject, void, void*) \
+ V(UnlockObject, void, void*) \
+\
+ V(CmpgDouble, int32_t, double, double) \
+ V(CmpgFloat, int32_t, float, float) \
+ V(CmplDouble, int32_t, double, double) \
+ V(CmplFloat, int32_t, float, float) \
+ V(Fmod, double, double, double) \
+ V(L2d, double, int64_t) \
+ V(Fmodf, float, float, float) \
+ V(L2f, float, int64_t) \
+ V(D2iz, int32_t, double) \
+ V(F2iz, int32_t, float) \
+ V(Idivmod, int32_t, int32_t, int32_t) \
+ V(D2l, int64_t, double) \
+ V(F2l, int64_t, float) \
+ V(Ldiv, int64_t, int64_t, int64_t) \
+ V(Lmod, int64_t, int64_t, int64_t) \
+ V(Lmul, int64_t, int64_t, int64_t) \
+ V(ShlLong, uint64_t, uint64_t, uint32_t) \
+ V(ShrLong, uint64_t, uint64_t, uint32_t) \
+ V(UshrLong, uint64_t, uint64_t, uint32_t) \
+\
+ V(IndexOf, int32_t, void*, uint32_t, uint32_t, uint32_t) \
+ V(StringCompareTo, int32_t, void*, void*) \
+ V(Memcpy, void*, void*, const void*, size_t) \
+\
+ V(QuickImtConflictTrampoline, void, mirror::ArtMethod*) \
+ V(QuickResolutionTrampoline, void, mirror::ArtMethod*) \
+ V(QuickToInterpreterBridge, void, mirror::ArtMethod*) \
+ V(InvokeDirectTrampolineWithAccessCheck, void, uint32_t, void*) \
+ V(InvokeInterfaceTrampolineWithAccessCheck, void, uint32_t, void*) \
+ V(InvokeStaticTrampolineWithAccessCheck, void, uint32_t, void*) \
+ V(InvokeSuperTrampolineWithAccessCheck, void, uint32_t, void*) \
+ V(InvokeVirtualTrampolineWithAccessCheck, void, uint32_t, void*) \
+\
+ V(TestSuspend, void, void) \
+\
+ V(DeliverException, void, void*) \
+ V(ThrowArrayBounds, void, int32_t, int32_t) \
+ V(ThrowDivZero, void, void) \
+ V(ThrowNoSuchMethod, void, int32_t) \
+ V(ThrowNullPointer, void, void) \
+ V(ThrowStackOverflow, void, void*) \
+\
+ V(A64Load, int64_t, volatile const int64_t *) \
+ V(A64Store, void, volatile int64_t *, int64_t)
+
+
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_
+#undef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_LIST_H_ // #define is only for lint.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 338bd06f7c..4730701f2c 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -58,9 +58,12 @@ class QuickArgumentVisitor {
static constexpr bool kQuickSoftFloatAbi = true; // This is a soft float ABI.
static constexpr size_t kNumQuickGprArgs = 3; // 3 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 0; // 0 arguments passed in FPRs.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 0; // Offset of first FPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 8; // Offset of first GPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 44; // Offset of return address.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
+ arm::ArmCalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
+ arm::ArmCalleeSaveGpr1Offset(Runtime::kRefsAndArgs); // Offset of first GPR arg.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
+ arm::ArmCalleeSaveLrOffset(Runtime::kRefsAndArgs); // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -74,13 +77,13 @@ class QuickArgumentVisitor {
// | arg1 spill | |
// | Method* | ---
// | LR |
- // | X28 |
+ // | X29 |
// | : |
- // | X19 |
+ // | X20 |
// | X7 |
// | : |
// | X1 |
- // | D15 |
+ // | D7 |
// | : |
// | D0 |
// | | padding
@@ -88,9 +91,12 @@ class QuickArgumentVisitor {
static constexpr bool kQuickSoftFloatAbi = false; // This is a hard float ABI.
static constexpr size_t kNumQuickGprArgs = 7; // 7 arguments passed in GPRs.
static constexpr size_t kNumQuickFprArgs = 8; // 8 arguments passed in FPRs.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset = 16; // Offset of first FPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset = 144; // Offset of first GPR arg.
- static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset = 296; // Offset of return address.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset =
+ arm64::Arm64CalleeSaveFpr1Offset(Runtime::kRefsAndArgs); // Offset of first FPR arg.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset =
+ arm64::Arm64CalleeSaveGpr1Offset(Runtime::kRefsAndArgs); // Offset of first GPR arg.
+ static constexpr size_t kQuickCalleeSaveFrame_RefAndArgs_LrOffset =
+ arm64::Arm64CalleeSaveLrOffset(Runtime::kRefsAndArgs); // Offset of return address.
static size_t GprIndexToGprOffset(uint32_t gpr_index) {
return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
}
@@ -586,8 +592,7 @@ extern "C" uint64_t artQuickProxyInvokeHandler(mirror::ArtMethod* proxy_method,
const char* old_cause =
self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
// Register the top of the managed stack, making stack crawlable.
- DCHECK_EQ(sp->AsMirrorPtr(), proxy_method)
- << PrettyMethod(proxy_method);
+ DCHECK_EQ(sp->AsMirrorPtr(), proxy_method) << PrettyMethod(proxy_method);
self->SetTopOfStack(sp, 0);
DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes())
@@ -785,8 +790,8 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
// We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
// of the sharpened method.
- if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
- caller->GetDexCacheResolvedMethods()->Set<false>(called->GetDexMethodIndex(), called);
+ if (called->HasSameDexCacheResolvedMethods(caller)) {
+ caller->SetDexCacheResolvedMethod(called->GetDexMethodIndex(), called);
} else {
// Calling from one dex file to another, need to compute the method index appropriate to
// the caller's dex file. Since we get here only if the original called was a runtime
@@ -796,7 +801,7 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
MethodHelper mh(hs.NewHandle(called));
uint32_t method_index = mh.FindDexMethodIndexInOtherDexFile(*dex_file, dex_method_idx);
if (method_index != DexFile::kDexNoIndex) {
- caller->GetDexCacheResolvedMethods()->Set<false>(method_index, called);
+ caller->SetDexCacheResolvedMethod(method_index, called);
}
}
}
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 79c68a24e0..ae1b94fa90 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -259,8 +259,10 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowDivZero, pThrowNoSuchMethod, kPointerSize);
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNoSuchMethod, pThrowNullPointer, kPointerSize);
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowNullPointer, pThrowStackOverflow, kPointerSize);
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pThrowStackOverflow, pA64Load, kPointerSize);
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pA64Load, pA64Store, kPointerSize);
- CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pThrowStackOverflow)
+ CHECKED(OFFSETOF_MEMBER(QuickEntryPoints, pA64Store)
+ kPointerSize == sizeof(QuickEntryPoints), QuickEntryPoints_all);
}
};
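
The adjacency assertions extended above pin the struct layout that hand-written assembly indexes by constant offsets. Outside gtest, the same guarantee can be expressed at compile time; a sketch with a cut-down, illustrative struct covering just the three members the test now ends on:

#include <cstddef>

struct EntryPoints {
  void (*pThrowStackOverflow)(void*);
  long long (*pA64Load)(volatile const long long*);
  void (*pA64Store)(volatile long long*, long long);
};

// Each member must immediately follow the previous one, and the last member
// must end the struct, or constant-offset loads from assembly would break.
static_assert(offsetof(EntryPoints, pA64Load) ==
              offsetof(EntryPoints, pThrowStackOverflow) + sizeof(void*),
              "pA64Load must follow pThrowStackOverflow");
static_assert(offsetof(EntryPoints, pA64Store) ==
              offsetof(EntryPoints, pA64Load) + sizeof(void*),
              "pA64Store must follow pA64Load");
static_assert(offsetof(EntryPoints, pA64Store) + sizeof(void*) == sizeof(EntryPoints),
              "pA64Store must be the last member");

int main() { return 0; }
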
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index b4355cad5c..8ddaf5cf24 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -18,12 +18,9 @@
#include <sys/mman.h>
#include <sys/ucontext.h>
-
#include "mirror/art_method.h"
#include "mirror/class.h"
-#ifdef HAVE_ANDROID_OS
#include "sigchain.h"
-#endif
#include "thread-inl.h"
#include "verify_object-inl.h"
@@ -40,6 +37,7 @@ void art_sigsegv_fault() {
// Signal handler called on SIGSEGV.
static void art_fault_handler(int sig, siginfo_t* info, void* context) {
+ // std::cout << "handling fault in ART handler\n";
fault_manager.HandleFault(sig, info, context);
}
@@ -48,10 +46,6 @@ FaultManager::FaultManager() {
}
FaultManager::~FaultManager() {
-#ifdef HAVE_ANDROID_OS
- UnclaimSignalChain(SIGSEGV);
-#endif
- sigaction(SIGSEGV, &oldaction_, nullptr); // Restore old handler.
}
@@ -65,11 +59,12 @@ void FaultManager::Init() {
#endif
// Set our signal handler now.
- sigaction(SIGSEGV, &action, &oldaction_);
-#ifdef HAVE_ANDROID_OS
+ int e = sigaction(SIGSEGV, &action, &oldaction_);
+ if (e != 0) {
+ VLOG(signals) << "Failed to claim SEGV: " << strerror(errno);
+ }
// Make sure our signal handler is called before any user handlers.
ClaimSignalChain(SIGSEGV, &oldaction_);
-#endif
}
void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
@@ -77,8 +72,13 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
//
// If malloc calls abort, it will be holding its lock.
// If the handler tries to call malloc, it will deadlock.
+
+  // Also, there is only an 8K stack available here, so logging can cause memory
+  // overwrite issues if you are unlucky. If you want to enable logging and
+  // are getting crashes, allocate more space for the alternate signal stack.
+
VLOG(signals) << "Handling fault";
- if (IsInGeneratedCode(context, true)) {
+ if (IsInGeneratedCode(info, context, true)) {
VLOG(signals) << "in generated code, looking for handler";
for (const auto& handler : generated_code_handlers_) {
VLOG(signals) << "invoking Action on handler " << handler;
@@ -92,13 +92,11 @@ void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
return;
}
}
+
art_sigsegv_fault();
-#ifdef HAVE_ANDROID_OS
+ // Pass this on to the next handler in the chain, or the default if none.
InvokeUserSignalHandler(sig, info, context);
-#else
- oldaction_.sa_sigaction(sig, info, context);
-#endif
}
void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
@@ -125,7 +123,7 @@ void FaultManager::RemoveHandler(FaultHandler* handler) {
// This function is called within the signal handler. It checks that
// the mutator_lock is held (shared). No annotalysis is done.
-bool FaultManager::IsInGeneratedCode(void* context, bool check_dex_pc) {
+bool FaultManager::IsInGeneratedCode(siginfo_t* siginfo, void* context, bool check_dex_pc) {
// We can only be running Java code in the current thread if it
// is in Runnable state.
VLOG(signals) << "Checking for generated code";
@@ -154,7 +152,7 @@ bool FaultManager::IsInGeneratedCode(void* context, bool check_dex_pc) {
// Get the architecture specific method address and return address. These
// are in architecture specific files in arch/<arch>/fault_handler_<arch>.
- GetMethodAndReturnPCAndSP(context, &method_obj, &return_pc, &sp);
+ GetMethodAndReturnPcAndSp(siginfo, context, &method_obj, &return_pc, &sp);
// If we don't have a potential method, we're outta here.
VLOG(signals) << "potential method: " << method_obj;
@@ -235,12 +233,12 @@ JavaStackTraceHandler::JavaStackTraceHandler(FaultManager* manager) : FaultHandl
bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
- if (manager_->IsInGeneratedCode(context, false)) {
+ if (manager_->IsInGeneratedCode(siginfo, context, false)) {
LOG(ERROR) << "Dumping java stack trace for crash in generated code";
mirror::ArtMethod* method = nullptr;
uintptr_t return_pc = 0;
uintptr_t sp = 0;
- manager_->GetMethodAndReturnPCAndSP(context, &method, &return_pc, &sp);
+ manager_->GetMethodAndReturnPcAndSp(siginfo, context, &method, &return_pc, &sp);
Thread* self = Thread::Current();
// Inside of generated code, sp[0] is the method, so sp is the frame.
StackReference<mirror::ArtMethod>* frame =
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 026f5b9c4a..1acd0247e8 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -43,9 +43,16 @@ class FaultManager {
void HandleFault(int sig, siginfo_t* info, void* context);
void AddHandler(FaultHandler* handler, bool generated_code);
void RemoveHandler(FaultHandler* handler);
- void GetMethodAndReturnPCAndSP(void* context, mirror::ArtMethod** out_method,
- uintptr_t* out_return_pc, uintptr_t* out_sp);
- bool IsInGeneratedCode(void *context, bool check_dex_pc) NO_THREAD_SAFETY_ANALYSIS;
+
+ // Note that the following two functions are called in the context of a signal handler.
+ // The IsInGeneratedCode() function checks that the mutator lock is held before it
+  // calls GetMethodAndReturnPcAndSp().
+ // TODO: think about adding lock assertions and fake lock and unlock functions.
+ void GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, mirror::ArtMethod** out_method,
+ uintptr_t* out_return_pc, uintptr_t* out_sp)
+ NO_THREAD_SAFETY_ANALYSIS;
+ bool IsInGeneratedCode(siginfo_t* siginfo, void *context, bool check_dex_pc)
+ NO_THREAD_SAFETY_ANALYSIS;
private:
std::vector<FaultHandler*> generated_code_handlers_;
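
With sigchain now used unconditionally, FaultManager installs its SIGSEGV handler up front and defers to the previously installed handler when the fault is not in generated code. A hedged sketch of the plain-sigaction version of that claim-and-chain pattern; IsInGeneratedCode is stubbed out, and ART's real path goes through ClaimSignalChain/InvokeUserSignalHandler instead of calling the old action directly:

#include <csignal>
#include <cstring>

static struct sigaction g_old_action;

static bool IsInGeneratedCode(siginfo_t*, void*) { return false; }  // Stub for the sketch.

static void FaultHandler(int sig, siginfo_t* info, void* context) {
  if (IsInGeneratedCode(info, context)) {
    // Patch the context here so execution resumes in an exception-delivery stub.
    return;
  }
  // Not our fault: defer to whoever was installed before us, or fall back to the default.
  if (g_old_action.sa_flags & SA_SIGINFO) {
    g_old_action.sa_sigaction(sig, info, context);
  } else if (g_old_action.sa_handler != SIG_IGN && g_old_action.sa_handler != SIG_DFL) {
    g_old_action.sa_handler(sig);
  } else {
    signal(sig, SIG_DFL);
    raise(sig);
  }
}

void InstallFaultHandler() {
  struct sigaction action;
  memset(&action, 0, sizeof(action));
  action.sa_sigaction = FaultHandler;
  sigemptyset(&action.sa_mask);
  action.sa_flags = SA_SIGINFO | SA_ONSTACK;  // Run on the alternate signal stack.
  if (sigaction(SIGSEGV, &action, &g_old_action) != 0) {
    // In the patched code this failure is only logged; the old handler stays in place.
  }
}

int main() {
  InstallFaultHandler();
  return 0;
}
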
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 46b9363b9a..217360f21d 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -37,12 +37,13 @@ static inline bool byte_cas(byte old_value, byte new_value, byte* address) {
// Align the address down.
address -= shift_in_bytes;
const size_t shift_in_bits = shift_in_bytes * kBitsPerByte;
- AtomicInteger* word_atomic = reinterpret_cast<AtomicInteger*>(address);
+ Atomic<uintptr_t>* word_atomic = reinterpret_cast<Atomic<uintptr_t>*>(address);
// Word with the byte we are trying to cas cleared.
- const int32_t cur_word = word_atomic->LoadRelaxed() & ~(0xFF << shift_in_bits);
- const int32_t old_word = cur_word | (static_cast<int32_t>(old_value) << shift_in_bits);
- const int32_t new_word = cur_word | (static_cast<int32_t>(new_value) << shift_in_bits);
+ const uintptr_t cur_word = word_atomic->LoadRelaxed() &
+ ~(static_cast<uintptr_t>(0xFF) << shift_in_bits);
+ const uintptr_t old_word = cur_word | (static_cast<uintptr_t>(old_value) << shift_in_bits);
+ const uintptr_t new_word = cur_word | (static_cast<uintptr_t>(new_value) << shift_in_bits);
return word_atomic->CompareExchangeWeakRelaxed(old_word, new_word);
#endif
}
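
The widened CAS above exists because not every target offers a byte-sized compare-and-swap: the byte is edited inside the aligned word that contains it. A standalone rendering with std::atomic in place of ART's Atomic<> wrapper; the byte-within-word shift assumes a little-endian target, and the pointer reinterpretation mirrors the original's type punning rather than strictly portable C++:

#include <atomic>
#include <cstdint>
#include <iostream>

bool ByteCas(uint8_t old_value, uint8_t new_value, uint8_t* address) {
  const uintptr_t shift_in_bytes = reinterpret_cast<uintptr_t>(address) % sizeof(uintptr_t);
  address -= shift_in_bytes;                        // Align the address down to a word.
  const size_t shift_in_bits = shift_in_bytes * 8;
  auto* word_atomic = reinterpret_cast<std::atomic<uintptr_t>*>(address);
  // Current word with the target byte cleared, then the expected and desired words.
  const uintptr_t cur_word = word_atomic->load(std::memory_order_relaxed) &
                             ~(static_cast<uintptr_t>(0xFF) << shift_in_bits);
  uintptr_t old_word = cur_word | (static_cast<uintptr_t>(old_value) << shift_in_bits);
  const uintptr_t new_word = cur_word | (static_cast<uintptr_t>(new_value) << shift_in_bits);
  return word_atomic->compare_exchange_weak(old_word, new_word, std::memory_order_relaxed);
}

int main() {
  alignas(sizeof(uintptr_t)) uint8_t cards[sizeof(uintptr_t)] = {0x70, 0, 0, 0};
  std::cout << std::boolalpha << ByteCas(0x70, 0x00, &cards[0]) << "\n";  // true on little-endian.
}
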
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index ceb42e5936..049855000b 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -28,6 +28,11 @@ namespace art {
namespace gc {
namespace accounting {
+constexpr size_t CardTable::kCardShift;
+constexpr size_t CardTable::kCardSize;
+constexpr uint8_t CardTable::kCardClean;
+constexpr uint8_t CardTable::kCardDirty;
+
/*
* Maintain a card table from the write barrier. All writes of
* non-NULL values to heap addresses should go through an entry in
@@ -55,9 +60,9 @@ CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) {
size_t capacity = heap_capacity / kCardSize;
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
std::string error_msg;
- std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous("card table", NULL,
- capacity + 256, PROT_READ | PROT_WRITE,
- false, &error_msg));
+ std::unique_ptr<MemMap> mem_map(
+ MemMap::MapAnonymous("card table", nullptr, capacity + 256, PROT_READ | PROT_WRITE,
+ false, &error_msg));
CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
// All zeros is the correct initial value; all clean. Anonymous mmaps are initialized to zero, we
// don't clear the card table to avoid unnecessary pages being allocated
@@ -67,17 +72,17 @@ CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) {
CHECK(cardtable_begin != NULL);
// We allocated up to a bytes worth of extra space to allow biased_begin's byte value to equal
- // GC_CARD_DIRTY, compute a offset value to make this the case
+  // kCardDirty, compute an offset value to make this the case
size_t offset = 0;
byte* biased_begin = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(cardtable_begin) -
(reinterpret_cast<uintptr_t>(heap_begin) >> kCardShift));
- if (((uintptr_t)biased_begin & 0xff) != kCardDirty) {
- int delta = kCardDirty - (reinterpret_cast<uintptr_t>(biased_begin) & 0xff);
+ uintptr_t biased_byte = reinterpret_cast<uintptr_t>(biased_begin) & 0xff;
+ if (biased_byte != kCardDirty) {
+ int delta = kCardDirty - biased_byte;
offset = delta + (delta < 0 ? 0x100 : 0);
biased_begin += offset;
}
CHECK_EQ(reinterpret_cast<uintptr_t>(biased_begin) & 0xff, kCardDirty);
-
return new CardTable(mem_map.release(), biased_begin, offset);
}
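
The bias computed above rebases the card table so that card_for(addr) = biased_begin + (addr >> kCardShift) and the low byte of biased_begin equals kCardDirty, which lets the write barrier store a single constant register. A worked example of that adjustment with made-up addresses:

#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr size_t kCardShift = 7;
constexpr uint8_t kCardDirty = 0x70;

int main() {
  const uintptr_t heap_begin = 0x12345000;       // Assumed addresses for the example.
  const uintptr_t cardtable_begin = 0x700000;
  uintptr_t biased_begin = cardtable_begin - (heap_begin >> kCardShift);
  const uintptr_t biased_byte = biased_begin & 0xff;
  size_t offset = 0;
  if (biased_byte != kCardDirty) {
    const int delta = static_cast<int>(kCardDirty) - static_cast<int>(biased_byte);
    offset = delta + (delta < 0 ? 0x100 : 0);    // Spend some of the 256 spare bytes mapped above.
    biased_begin += offset;
  }
  std::cout << std::hex << "biased_begin=0x" << biased_begin
            << " offset=0x" << offset
            << " low byte=0x" << (biased_begin & 0xff) << "\n";
  return (biased_begin & 0xff) == kCardDirty ? 0 : 1;
}
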
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 7934974081..fbeea85554 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -46,10 +46,10 @@ template<size_t kAlignment> class SpaceBitmap;
// WriteBarrier, and from there to here.
class CardTable {
public:
- static const size_t kCardShift = 7;
- static const size_t kCardSize = (1 << kCardShift);
- static const uint8_t kCardClean = 0x0;
- static const uint8_t kCardDirty = 0x70;
+ static constexpr size_t kCardShift = 7;
+ static constexpr size_t kCardSize = 1 << kCardShift;
+ static constexpr uint8_t kCardClean = 0x0;
+ static constexpr uint8_t kCardDirty = 0x70;
static CardTable* Create(const byte* heap_begin, size_t heap_capacity);
diff --git a/runtime/gc/accounting/card_table_test.cc b/runtime/gc/accounting/card_table_test.cc
new file mode 100644
index 0000000000..433855a755
--- /dev/null
+++ b/runtime/gc/accounting/card_table_test.cc
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "card_table-inl.h"
+
+#include <string>
+
+#include "atomic.h"
+#include "common_runtime_test.h"
+#include "handle_scope-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/string-inl.h" // Strings are easiest to allocate
+#include "scoped_thread_state_change.h"
+#include "thread_pool.h"
+#include "utils.h"
+
+namespace art {
+
+namespace mirror {
+ class Object;
+} // namespace mirror
+
+namespace gc {
+namespace accounting {
+
+class CardTableTest : public CommonRuntimeTest {
+ public:
+ std::unique_ptr<CardTable> card_table_;
+
+ void CommonSetup() {
+ if (card_table_.get() == nullptr) {
+ card_table_.reset(CardTable::Create(heap_begin_, heap_size_));
+ EXPECT_TRUE(card_table_.get() != nullptr);
+ } else {
+ ClearCardTable();
+ }
+ }
+  // Default values for the test, not random to avoid nondeterministic behaviour.
+ CardTableTest() : heap_begin_(reinterpret_cast<byte*>(0x2000000)), heap_size_(2 * MB) {
+ }
+ void ClearCardTable() {
+ card_table_->ClearCardTable();
+ }
+ byte* HeapBegin() const {
+ return heap_begin_;
+ }
+ byte* HeapLimit() const {
+ return HeapBegin() + heap_size_;
+ }
+ // Return a pseudo random card for an address.
+ byte PseudoRandomCard(const byte* addr) const {
+ size_t offset = RoundDown(addr - heap_begin_, CardTable::kCardSize);
+ return 1 + offset % 254;
+ }
+ void FillRandom() {
+ for (const byte* addr = HeapBegin(); addr != HeapLimit(); addr += CardTable::kCardSize) {
+ EXPECT_TRUE(card_table_->AddrIsInCardTable(addr));
+ byte* card = card_table_->CardFromAddr(addr);
+ *card = PseudoRandomCard(addr);
+ }
+ }
+
+ private:
+ byte* const heap_begin_;
+ const size_t heap_size_;
+};
+
+TEST_F(CardTableTest, TestMarkCard) {
+ CommonSetup();
+ for (const byte* addr = HeapBegin(); addr < HeapLimit(); addr += kObjectAlignment) {
+ auto obj = reinterpret_cast<const mirror::Object*>(addr);
+ EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardClean);
+ EXPECT_TRUE(!card_table_->IsDirty(obj));
+ card_table_->MarkCard(addr);
+ EXPECT_TRUE(card_table_->IsDirty(obj));
+ EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardDirty);
+ byte* card_addr = card_table_->CardFromAddr(addr);
+ EXPECT_EQ(*card_addr, CardTable::kCardDirty);
+ *card_addr = CardTable::kCardClean;
+ EXPECT_EQ(*card_addr, CardTable::kCardClean);
+ }
+}
+
+class UpdateVisitor {
+ public:
+ byte operator()(byte c) const {
+ return c * 93 + 123;
+ }
+ void operator()(byte* /*card*/, byte /*expected_value*/, byte /*new_value*/) const {
+ }
+};
+
+TEST_F(CardTableTest, TestModifyCardsAtomic) {
+ CommonSetup();
+ FillRandom();
+ const size_t delta = std::min(static_cast<size_t>(HeapLimit() - HeapBegin()),
+ 8U * CardTable::kCardSize);
+ UpdateVisitor visitor;
+ size_t start_offset = 0;
+ for (byte* cstart = HeapBegin(); cstart < HeapBegin() + delta; cstart += CardTable::kCardSize) {
+ start_offset = (start_offset + kObjectAlignment) % CardTable::kCardSize;
+ size_t end_offset = 0;
+ for (byte* cend = HeapLimit() - delta; cend < HeapLimit(); cend += CardTable::kCardSize) {
+ // Don't always start at a card boundary.
+ byte* start = cstart + start_offset;
+ byte* end = cend - end_offset;
+ end_offset = (end_offset + kObjectAlignment) % CardTable::kCardSize;
+ // Modify cards.
+ card_table_->ModifyCardsAtomic(start, end, visitor, visitor);
+ // Check adjacent cards not modified.
+ for (byte* cur = start - CardTable::kCardSize; cur >= HeapBegin();
+ cur -= CardTable::kCardSize) {
+ EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
+ PseudoRandomCard(cur));
+ }
+ for (byte* cur = end + CardTable::kCardSize; cur < HeapLimit();
+ cur += CardTable::kCardSize) {
+ EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
+ PseudoRandomCard(cur));
+ }
+ // Verify Range.
+ for (byte* cur = start; cur < AlignUp(end, CardTable::kCardSize);
+ cur += CardTable::kCardSize) {
+ byte* card = card_table_->CardFromAddr(cur);
+ byte value = PseudoRandomCard(cur);
+ EXPECT_EQ(visitor(value), *card);
+ // Restore for next iteration.
+ *card = value;
+ }
+ }
+ }
+}
+
+// TODO: Add test for CardTable::Scan.
+} // namespace accounting
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 228d1dc668..2686af0529 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -185,7 +185,7 @@ class CheckReferenceVisitor {
<< from_space->GetGcRetentionPolicy();
LOG(INFO) << "ToSpace " << to_space->GetName() << " type "
<< to_space->GetGcRetentionPolicy();
- heap->DumpSpaces();
+ heap->DumpSpaces(LOG(INFO));
LOG(FATAL) << "FATAL ERROR";
}
}
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 46d79bf796..07b61e6cf0 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -55,7 +55,8 @@ GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
: heap_(heap),
name_(name),
pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
- cumulative_timings_(name) {
+ cumulative_timings_(name),
+ pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true) {
ResetCumulativeStatistics();
}
@@ -65,10 +66,11 @@ void GarbageCollector::RegisterPause(uint64_t nano_length) {
void GarbageCollector::ResetCumulativeStatistics() {
cumulative_timings_.Reset();
- pause_histogram_.Reset();
total_time_ns_ = 0;
total_freed_objects_ = 0;
total_freed_bytes_ = 0;
+ MutexLock mu(Thread::Current(), pause_histogram_lock_);
+ pause_histogram_.Reset();
}
void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
@@ -95,6 +97,7 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
}
total_time_ns_ += current_iteration->GetDurationNs();
for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
+ MutexLock mu(self, pause_histogram_lock_);
pause_histogram_.AddValue(pause_time / 1000);
}
ATRACE_END();
@@ -137,8 +140,11 @@ uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
}
void GarbageCollector::ResetMeasurements() {
+ {
+ MutexLock mu(Thread::Current(), pause_histogram_lock_);
+ pause_histogram_.Reset();
+ }
cumulative_timings_.Reset();
- pause_histogram_.Reset();
total_time_ns_ = 0;
total_freed_objects_ = 0;
total_freed_bytes_ = 0;
@@ -171,6 +177,38 @@ void GarbageCollector::RecordFreeLOS(const ObjectBytePair& freed) {
heap_->RecordFree(freed.objects, freed.bytes);
}
+uint64_t GarbageCollector::GetTotalPausedTimeNs() {
+ MutexLock mu(Thread::Current(), pause_histogram_lock_);
+ return pause_histogram_.AdjustedSum();
+}
+
+void GarbageCollector::DumpPerformanceInfo(std::ostream& os) {
+ const CumulativeLogger& logger = GetCumulativeTimings();
+ const size_t iterations = logger.GetIterations();
+ if (iterations == 0) {
+ return;
+ }
+ os << ConstDumpable<CumulativeLogger>(logger);
+ const uint64_t total_ns = logger.GetTotalNs();
+ double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
+ const uint64_t freed_bytes = GetTotalFreedBytes();
+ const uint64_t freed_objects = GetTotalFreedObjects();
+ {
+ MutexLock mu(Thread::Current(), pause_histogram_lock_);
+ if (pause_histogram_.SampleSize() > 0) {
+ Histogram<uint64_t>::CumulativeData cumulative_data;
+ pause_histogram_.CreateHistogram(&cumulative_data);
+ pause_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
+ }
+ }
+ os << GetName() << " total time: " << PrettyDuration(total_ns)
+ << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
+ << GetName() << " freed: " << freed_objects
+ << " objects with total size " << PrettySize(freed_bytes) << "\n"
+ << GetName() << " throughput: " << freed_objects / seconds << "/s / "
+ << PrettySize(freed_bytes / seconds) << "/s\n";
+}
+
} // namespace collector
} // namespace gc
} // namespace art
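Editorial note: the pause histogram is now written by the GC thread and read by DumpPerformanceInfo() and GetTotalPausedTimeNs(), so every access is wrapped in a scoped lock on pause_histogram_lock_. A minimal sketch of the same pattern using the standard library rather than ART's Mutex/MutexLock types (the class and member names are illustrative):

    #include <cstdint>
    #include <mutex>
    #include <numeric>
    #include <vector>

    // All reads and writes of the pause samples go through one lock, mirroring
    // how pause_histogram_ is GUARDED_BY(pause_histogram_lock_).
    class PauseStats {
     public:
      void AddPause(uint64_t us) {
        std::lock_guard<std::mutex> lock(lock_);
        pauses_us_.push_back(us);
      }
      void Reset() {
        std::lock_guard<std::mutex> lock(lock_);
        pauses_us_.clear();
      }
      uint64_t TotalPausedUs() {
        std::lock_guard<std::mutex> lock(lock_);
        return std::accumulate(pauses_us_.begin(), pauses_us_.end(), uint64_t{0});
      }
     private:
      std::mutex lock_;
      std::vector<uint64_t> pauses_us_;  // stands in for Histogram<uint64_t>
    };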
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 885569efd9..b8094694b0 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -119,18 +119,13 @@ class GarbageCollector {
GarbageCollector(Heap* heap, const std::string& name);
virtual ~GarbageCollector() { }
-
const char* GetName() const {
return name_.c_str();
}
-
virtual GcType GetGcType() const = 0;
-
virtual CollectorType GetCollectorType() const = 0;
-
// Run the garbage collector.
void Run(GcCause gc_cause, bool clear_soft_references);
-
Heap* GetHeap() const {
return heap_;
}
@@ -138,24 +133,17 @@ class GarbageCollector {
const CumulativeLogger& GetCumulativeTimings() const {
return cumulative_timings_;
}
-
void ResetCumulativeStatistics();
-
// Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC,
// this is the allocation space, for full GC then we swap the zygote bitmaps too.
void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- uint64_t GetTotalPausedTimeNs() const {
- return pause_histogram_.AdjustedSum();
- }
+ uint64_t GetTotalPausedTimeNs() LOCKS_EXCLUDED(pause_histogram_lock_);
int64_t GetTotalFreedBytes() const {
return total_freed_bytes_;
}
uint64_t GetTotalFreedObjects() const {
return total_freed_objects_;
}
- const Histogram<uint64_t>& GetPauseHistogram() const {
- return pause_histogram_;
- }
// Reset the cumulative timings and pause histogram.
void ResetMeasurements();
// Returns the estimated throughput in bytes / second.
@@ -174,11 +162,11 @@ class GarbageCollector {
void RecordFree(const ObjectBytePair& freed);
// Record a free of large objects.
void RecordFreeLOS(const ObjectBytePair& freed);
+ void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_);
protected:
// Run all of the GC phases.
virtual void RunPhases() = 0;
-
// Revoke all the thread-local buffers.
virtual void RevokeAllThreadLocalBuffers() = 0;
@@ -188,11 +176,12 @@ class GarbageCollector {
Heap* const heap_;
std::string name_;
// Cumulative statistics.
- Histogram<uint64_t> pause_histogram_;
+ Histogram<uint64_t> pause_histogram_ GUARDED_BY(pause_histogram_lock_);
uint64_t total_time_ns_;
uint64_t total_freed_objects_;
int64_t total_freed_bytes_;
CumulativeLogger cumulative_timings_;
+ mutable Mutex pause_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
};
} // namespace collector
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index 974952d992..104ed36014 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -32,10 +32,7 @@ namespace collector {
template<typename MarkVisitor, typename ReferenceVisitor>
inline void MarkSweep::ScanObjectVisit(mirror::Object* obj, const MarkVisitor& visitor,
const ReferenceVisitor& ref_visitor) {
- if (kIsDebugBuild && !IsMarked(obj)) {
- heap_->DumpSpaces();
- LOG(FATAL) << "Scanning unmarked object " << obj;
- }
+ DCHECK(IsMarked(obj)) << "Scanning unmarked object " << obj << "\n" << heap_->DumpSpaces();
obj->VisitReferences<false>(visitor, ref_visitor);
if (kCountScannedTypes) {
mirror::Class* klass = obj->GetClass<kVerifyNone>();
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 7e97b3b16b..95530be202 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -313,10 +313,8 @@ void MarkSweep::FindDefaultSpaceBitmap() {
}
}
}
- if (current_space_bitmap_ == nullptr) {
- heap_->DumpSpaces();
- LOG(FATAL) << "Could not find a default mark bitmap";
- }
+ CHECK(current_space_bitmap_ != nullptr) << "Could not find a default mark bitmap\n"
+ << heap_->DumpSpaces();
}
void MarkSweep::ExpandMarkStack() {
@@ -943,12 +941,9 @@ mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg
void MarkSweep::VerifyIsLive(const Object* obj) {
if (!heap_->GetLiveBitmap()->Test(obj)) {
- if (std::find(heap_->allocation_stack_->Begin(), heap_->allocation_stack_->End(), obj) ==
- heap_->allocation_stack_->End()) {
- // Object not found!
- heap_->DumpSpaces();
- LOG(FATAL) << "Found dead object " << obj;
- }
+ accounting::ObjectStack* allocation_stack = heap_->allocation_stack_.get();
+ CHECK(std::find(allocation_stack->Begin(), allocation_stack->End(), obj) !=
+ allocation_stack->End()) << "Found dead object " << obj << "\n" << heap_->DumpSpaces();
}
}
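Editorial note: several call sites in this commit collapse "if (!cond) { DumpSpaces(); LOG(FATAL) << ...; }" into "CHECK(cond) << ... << heap_->DumpSpaces()". This is only free on the fast path because, in the usual glog-style CHECK, the streamed operands live inside the failure branch and are never evaluated when the condition holds. A minimal sketch of that shape (illustrative, not ART's actual logging macros):

    #include <cstdlib>
    #include <iostream>
    #include <sstream>

    // Accumulates a message and aborts when destroyed, i.e. at the end of the
    // full CHECK statement.
    class FatalMessageSketch {
     public:
      std::ostringstream& stream() { return stream_; }
      ~FatalMessageSketch() {
        std::cerr << stream_.str() << std::endl;
        std::abort();
      }
     private:
      std::ostringstream stream_;
    };

    // The streamed operands (e.g. an expensive DumpSpaces() call) sit inside the
    // if-body, so they only run when the condition is false.
    #define CHECK_SKETCH(cond) \
      if (!(cond)) FatalMessageSketch().stream() << "Check failed: " #cond " "

    // Usage: CHECK_SKETCH(IsMarked(obj)) << "unmarked object " << obj;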
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 47682cc584..922a71ceb2 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -64,34 +64,25 @@ inline void SemiSpace::MarkObject(
// Verify all the objects have the correct forward pointer installed.
obj->AssertReadBarrierPointer();
}
- if (!immune_region_.ContainsObject(obj)) {
- if (from_space_->HasAddress(obj)) {
- mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
- // If the object has already been moved, return the new forward address.
- if (UNLIKELY(forward_address == nullptr)) {
- forward_address = MarkNonForwardedObject(obj);
- DCHECK(forward_address != nullptr);
- // Make sure to only update the forwarding address AFTER you copy the object so that the
- // monitor word doesn't Get stomped over.
- obj->SetLockWord(
- LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
- // Push the object onto the mark stack for later processing.
- MarkStackPush(forward_address);
- }
- obj_ptr->Assign(forward_address);
- } else {
- BitmapSetSlowPathVisitor visitor(this);
- if (kIsDebugBuild && mark_bitmap_->GetContinuousSpaceBitmap(obj) != nullptr) {
- // If a bump pointer space only collection, we should not
- // reach here as we don't/won't mark the objects in the
- // non-moving space (except for the promoted objects.) Note
- // the non-moving space is added to the immune space.
- DCHECK(!generational_ || whole_heap_collection_);
- }
- if (!mark_bitmap_->Set(obj, visitor)) {
- // This object was not previously marked.
- MarkStackPush(obj);
- }
+ if (from_space_->HasAddress(obj)) {
+ mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+ // If the object has already been moved, return the new forward address.
+ if (UNLIKELY(forward_address == nullptr)) {
+ forward_address = MarkNonForwardedObject(obj);
+ DCHECK(forward_address != nullptr);
+ // Make sure to only update the forwarding address AFTER you copy the object so that the
+ // monitor word doesn't Get stomped over.
+ obj->SetLockWord(
+ LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)), false);
+ // Push the object onto the mark stack for later processing.
+ MarkStackPush(forward_address);
+ }
+ obj_ptr->Assign(forward_address);
+ } else if (!collect_from_space_only_ && !immune_region_.ContainsObject(obj)) {
+ BitmapSetSlowPathVisitor visitor(this);
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ // This object was not previously marked.
+ MarkStackPush(obj);
}
}
}
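Editorial note: MarkObject now forwards from-space objects first and only consults the immune region for objects outside the from-space, relying on the forwarding address stored in the object's lock word. A minimal sketch of that forwarding scheme (heavily simplified: a real lock word also encodes monitor, hash, and read barrier state):

    #include <cstdint>

    // Stand-in for a heap object header whose first word doubles as the
    // forwarding pointer once the object has been copied.
    struct ObjectSketch {
      uintptr_t lock_word;  // 0 means "not forwarded yet" (simplification)
    };

    // Copy first, then publish the forwarding address, so the copy still holds
    // the original header contents (the "monitor word doesn't get stomped" rule).
    ObjectSketch* ForwardSketch(ObjectSketch* obj,
                                ObjectSketch* (*copy_to_to_space)(ObjectSketch*)) {
      if (obj->lock_word != 0) {
        return reinterpret_cast<ObjectSketch*>(obj->lock_word);  // already moved
      }
      ObjectSketch* copy = copy_to_to_space(obj);
      obj->lock_word = reinterpret_cast<uintptr_t>(copy);
      return copy;
    }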
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index cabfe2176c..8fb33cec2f 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -63,23 +63,23 @@ void SemiSpace::BindBitmaps() {
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
// Mark all of the spaces we never collect as immune.
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->GetLiveBitmap() != nullptr) {
- if (space == to_space_) {
- CHECK(to_space_->IsContinuousMemMapAllocSpace());
- to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
- } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
- || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
- // Add the main free list space and the non-moving
- // space to the immune space if a bump pointer space
- // only collection.
- || (generational_ && !whole_heap_collection_ &&
- (space == GetHeap()->GetNonMovingSpace() ||
- space == GetHeap()->GetPrimaryFreeListSpace()))) {
- CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
+ space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+ CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
+ } else if (space->GetLiveBitmap() != nullptr) {
+ if (space == to_space_ || collect_from_space_only_) {
+ if (collect_from_space_only_) {
+ // Bind the bitmaps of the main free list space and the non-moving space if we are doing a
+ // bump pointer space only collection.
+ CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
+ space == GetHeap()->GetNonMovingSpace());
+ }
+ CHECK(space->IsContinuousMemMapAllocSpace());
+ space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
}
}
}
- if (generational_ && !whole_heap_collection_) {
+ if (collect_from_space_only_) {
// We won't collect the large object space if a bump pointer space only collection.
is_large_object_space_immune_ = true;
}
@@ -95,7 +95,7 @@ SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_pref
bytes_promoted_(0),
bytes_promoted_since_last_whole_heap_collection_(0),
large_object_bytes_allocated_at_last_whole_heap_collection_(0),
- whole_heap_collection_(true),
+ collect_from_space_only_(generational),
collector_name_(name_),
swap_semi_spaces_(true) {
}
@@ -147,6 +147,10 @@ void SemiSpace::InitializePhase() {
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
mark_bitmap_ = heap_->GetMarkBitmap();
}
+ if (generational_) {
+ promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
+ }
+ fallback_space_ = GetHeap()->GetNonMovingSpace();
}
void SemiSpace::ProcessReferences(Thread* self) {
@@ -180,9 +184,9 @@ void SemiSpace::MarkingPhase() {
GetCurrentIteration()->GetClearSoftReferences()) {
// If an explicit, native allocation-triggered, or last attempt
// collection, collect the whole heap.
- whole_heap_collection_ = true;
+ collect_from_space_only_ = false;
}
- if (whole_heap_collection_) {
+ if (!collect_from_space_only_) {
VLOG(heap) << "Whole heap collection";
name_ = collector_name_ + " whole";
} else {
@@ -191,7 +195,7 @@ void SemiSpace::MarkingPhase() {
}
}
- if (!generational_ || whole_heap_collection_) {
+ if (!collect_from_space_only_) {
// If non-generational, always clear soft references.
// If generational, clear soft references if a whole heap collection.
GetCurrentIteration()->SetClearSoftReferences(true);
@@ -218,7 +222,6 @@ void SemiSpace::MarkingPhase() {
heap_->GetCardTable()->ClearCardTable();
// Need to do this before the checkpoint since we don't want any threads to add references to
// the live stack during the recursive mark.
- t.NewTiming("SwapStacks");
if (kUseThreadLocalAllocationStack) {
TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings());
heap_->RevokeAllThreadLocalAllocationStacks(self_);
@@ -227,8 +230,6 @@ void SemiSpace::MarkingPhase() {
{
WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
MarkRoots();
- // Mark roots of immune spaces.
- UpdateAndMarkModUnion();
// Recursively mark remaining objects.
MarkReachableObjects();
}
@@ -259,46 +260,6 @@ void SemiSpace::MarkingPhase() {
}
}
-void SemiSpace::UpdateAndMarkModUnion() {
- for (auto& space : heap_->GetContinuousSpaces()) {
- // If the space is immune then we need to mark the references to other spaces.
- if (immune_region_.ContainsSpace(space)) {
- accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
- if (table != nullptr) {
- // TODO: Improve naming.
- TimingLogger::ScopedTiming t(
- space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
- "UpdateAndMarkImageModUnionTable",
- GetTimings());
- table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
- } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
- DCHECK(kUseRememberedSet);
- // If a bump pointer space only collection, the non-moving
- // space is added to the immune space. The non-moving space
- // doesn't have a mod union table, but has a remembered
- // set. Its dirty cards will be scanned later in
- // MarkReachableObjects().
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
- << "Space " << space->GetName() << " "
- << "generational_=" << generational_ << " "
- << "whole_heap_collection_=" << whole_heap_collection_ << " ";
- } else {
- DCHECK(!kUseRememberedSet);
- // If a bump pointer space only collection, the non-moving
- // space is added to the immune space. But the non-moving
- // space doesn't have a mod union table. Instead, its live
- // bitmap will be scanned later in MarkReachableObjects().
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
- << "Space " << space->GetName() << " "
- << "generational_=" << generational_ << " "
- << "whole_heap_collection_=" << whole_heap_collection_ << " ";
- }
- }
- }
-}
-
class SemiSpaceScanObjectVisitor {
public:
explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
@@ -355,20 +316,30 @@ void SemiSpace::MarkReachableObjects() {
heap_->MarkAllocStackAsLive(live_stack);
live_stack->Reset();
}
- t.NewTiming("UpdateAndMarkRememberedSets");
for (auto& space : heap_->GetContinuousSpaces()) {
- // If the space is immune and has no mod union table (the
- // non-moving space when the bump pointer space only collection is
- // enabled,) then we need to scan its live bitmap or dirty cards as roots
- // (including the objects on the live stack which have just marked
- // in the live bitmap above in MarkAllocStackAsLive().)
- if (immune_region_.ContainsSpace(space) &&
- heap_->FindModUnionTableFromSpace(space) == nullptr) {
- DCHECK(generational_ && !whole_heap_collection_ &&
- (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
- accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
- if (kUseRememberedSet) {
- DCHECK(rem_set != nullptr);
+ // If the space is immune then we need to mark the references to other spaces.
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ if (table != nullptr) {
+ // TODO: Improve naming.
+ TimingLogger::ScopedTiming t2(
+ space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+ "UpdateAndMarkImageModUnionTable",
+ GetTimings());
+ table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
+ } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
+ // If the space has no mod union table (the non-moving space and main spaces when the bump
+ // pointer space only collection is enabled,) then we need to scan its live bitmap or dirty
+ // cards as roots (including the objects on the live stack which have just been marked in the live
+ // bitmap above in MarkAllocStackAsLive().)
+ DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
+ << "Space " << space->GetName() << " "
+ << "generational_=" << generational_ << " "
+ << "collect_from_space_only_=" << collect_from_space_only_;
+ accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
+ CHECK_EQ(rem_set != nullptr, kUseRememberedSet);
+ if (rem_set != nullptr) {
+ TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
from_space_, this);
if (kIsDebugBuild) {
@@ -383,7 +354,7 @@ void SemiSpace::MarkReachableObjects() {
visitor);
}
} else {
- DCHECK(rem_set == nullptr);
+ TimingLogger::ScopedTiming t2("VisitLiveBits", GetTimings());
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
SemiSpaceScanObjectVisitor visitor(this);
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
@@ -393,9 +364,10 @@ void SemiSpace::MarkReachableObjects() {
}
}
+ CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
if (is_large_object_space_immune_) {
TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings());
- DCHECK(generational_ && !whole_heap_collection_);
+ DCHECK(collect_from_space_only_);
// Delay copying the live set to the marked set until here from
// BindBitmaps() as the large objects on the allocation stack may
// be newly added to the live set above in MarkAllocStackAsLive().
@@ -506,19 +478,20 @@ static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size
}
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
- size_t object_size = obj->SizeOf();
+ const size_t object_size = obj->SizeOf();
size_t bytes_allocated;
mirror::Object* forward_address = nullptr;
if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
- space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
- forward_address = promo_dest_space->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
- nullptr);
+ forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
+ nullptr);
if (UNLIKELY(forward_address == nullptr)) {
// If out of space, fall back to the to-space.
forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ // No logic for marking the bitmap, so it must be null.
+ DCHECK(to_space_live_bitmap_ == nullptr);
} else {
bytes_promoted_ += bytes_allocated;
// Dirty the card at the destination as it may contain
@@ -526,12 +499,12 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
// space.
GetHeap()->WriteBarrierEveryFieldOf(forward_address);
// Handle the bitmaps marking.
- accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
+ accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK(!live_bitmap->Test(forward_address));
- if (!whole_heap_collection_) {
+ if (collect_from_space_only_) {
// If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
DCHECK_EQ(live_bitmap, mark_bitmap);
@@ -559,12 +532,23 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
mark_bitmap->Set(forward_address);
}
}
- DCHECK(forward_address != nullptr);
} else {
// If it's allocated after the last GC (younger), copy it to the to-space.
forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
+ to_space_live_bitmap_->Set(forward_address);
+ }
+ }
+ // If it's still null, attempt to use the fallback space.
+ if (UNLIKELY(forward_address == nullptr)) {
+ forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
+ nullptr);
+ CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
+ accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
+ if (bitmap != nullptr) {
+ bitmap->Set(forward_address);
+ }
}
- CHECK(forward_address != nullptr) << "Out of memory in the to-space.";
++objects_moved_;
bytes_moved_ += bytes_allocated;
// Copy over the object and add it to the mark stack since we still need to update its
@@ -579,11 +563,10 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
}
forward_address->AssertReadBarrierPointer();
}
- if (to_space_live_bitmap_ != nullptr) {
- to_space_live_bitmap_->Set(forward_address);
- }
DCHECK(to_space_->HasAddress(forward_address) ||
- (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
+ fallback_space_->HasAddress(forward_address) ||
+ (generational_ && promo_dest_space_->HasAddress(forward_address)))
+ << forward_address << "\n" << GetHeap()->DumpSpaces();
return forward_address;
}
@@ -648,7 +631,7 @@ void SemiSpace::SweepSystemWeaks() {
}
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
- return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
+ return space != from_space_ && space != to_space_;
}
void SemiSpace::Sweep(bool swap_bitmaps) {
@@ -714,22 +697,20 @@ void SemiSpace::ScanObject(Object* obj) {
// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- space::MallocSpace* promo_dest_space = nullptr;
accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
- if (generational_ && !whole_heap_collection_) {
+ if (collect_from_space_only_) {
// If a bump pointer space only collection (and the promotion is
// enabled,) we delay the live-bitmap marking of promoted objects
// from MarkObject() until this function.
- promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
- live_bitmap = promo_dest_space->GetLiveBitmap();
+ live_bitmap = promo_dest_space_->GetLiveBitmap();
DCHECK(live_bitmap != nullptr);
- accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
+ accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK_EQ(live_bitmap, mark_bitmap);
}
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
- if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
+ if (collect_from_space_only_ && promo_dest_space_->HasAddress(obj)) {
// obj has just been promoted. Mark the live bitmap for it,
// which is delayed from MarkObject().
DCHECK(!live_bitmap->Test(obj));
@@ -742,16 +723,12 @@ void SemiSpace::ProcessMarkStack() {
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
// All immune objects are assumed marked.
- if (immune_region_.ContainsObject(obj)) {
- return obj;
- }
if (from_space_->HasAddress(obj)) {
// Returns either the forwarding address or nullptr.
return GetForwardingAddressInFromSpace(obj);
- } else if (to_space_->HasAddress(obj)) {
- // Should be unlikely.
- // Already forwarded, must be marked.
- return obj;
+ } else if (collect_from_space_only_ || immune_region_.ContainsObject(obj) ||
+ to_space_->HasAddress(obj)) {
+ return obj; // Already forwarded, must be marked.
}
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
@@ -777,9 +754,9 @@ void SemiSpace::FinishPhase() {
if (generational_) {
// Decide whether to do a whole heap collection or a bump pointer
// only space collection at the next collection by updating
- // whole_heap_collection.
- if (!whole_heap_collection_) {
- // Enable whole_heap_collection if the bytes promoted since the
+ // collect_from_space_only_.
+ if (collect_from_space_only_) {
+ // Disable collect_from_space_only_ if the bytes promoted since the
// last whole heap collection or the large object bytes
// allocated exceeds a threshold.
bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
@@ -792,14 +769,14 @@ void SemiSpace::FinishPhase() {
current_los_bytes_allocated >=
last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
- whole_heap_collection_ = true;
+ collect_from_space_only_ = false;
}
} else {
// Reset the counters.
bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
large_object_bytes_allocated_at_last_whole_heap_collection_ =
GetHeap()->GetLargeObjectsSpace()->GetBytesAllocated();
- whole_heap_collection_ = false;
+ collect_from_space_only_ = true;
}
}
// Clear all of the spaces' mark bitmaps.
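Editorial note: MarkNonForwardedObject now promotes old objects into promo_dest_space_, copies young objects into to_space_, and, if either allocation fails, tries the new fallback_space_ (the non-moving space) before declaring OOM. A minimal sketch of that decision chain (the space type and helper below are illustrative, not the ART classes):

    #include <cstddef>
    #include <cstdint>

    // Minimal bump-pointer space: returns nullptr once its buffer is exhausted.
    struct SpaceSketch {
      uint8_t* pos;
      uint8_t* end;
      void* Alloc(size_t bytes) {
        if (pos + bytes > end) return nullptr;
        void* result = pos;
        pos += bytes;
        return result;
      }
    };

    // Choose where a surviving object is copied: promote old objects, copy young
    // ones into the to-space, and fall back to the non-moving space on failure.
    void* ChooseCopyTarget(bool is_old, size_t bytes, SpaceSketch* promo_dest,
                           SpaceSketch* to_space, SpaceSketch* fallback) {
      void* addr = nullptr;
      if (is_old) {
        addr = promo_dest->Alloc(bytes);   // pseudo-promotion into the old generation
        if (addr == nullptr) {
          addr = to_space->Alloc(bytes);   // promotion failed, keep it young
        }
      } else {
        addr = to_space->Alloc(bytes);
      }
      if (addr == nullptr) {
        addr = fallback->Alloc(bytes);     // last resort before an OOM abort
      }
      return addr;
    }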
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 7f6d1dc01f..71a83f2624 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -244,9 +244,14 @@ class SemiSpace : public GarbageCollector {
// large objects were allocated at the last whole heap collection.
uint64_t large_object_bytes_allocated_at_last_whole_heap_collection_;
- // Used for the generational mode. When true, collect the whole
- // heap. When false, collect only the bump pointer spaces.
- bool whole_heap_collection_;
+ // Used for generational mode. When true, we only collect the from_space_.
+ bool collect_from_space_only_;
+
+ // The space which we are promoting into, only used for GSS.
+ space::ContinuousMemMapAllocSpace* promo_dest_space_;
+
+ // The space which we copy to if the to_space_ is full.
+ space::ContinuousMemMapAllocSpace* fallback_space_;
// How many objects and bytes we moved, used so that we don't need to Get the size of the
// to_space_ when calculating how many objects and bytes we freed.
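Editorial note: the renamed collect_from_space_only_ flag is flipped back to a whole-heap collection in FinishPhase once enough bytes have been promoted, or enough large-object bytes have been allocated, since the last whole-heap pass. A minimal sketch of that decision (the parameter names stand in for the thresholds used in the commit):

    #include <cstdint>

    // Decide whether the next generational collection should cover the whole heap
    // instead of only the bump pointer (from) space.
    bool ShouldCollectWholeHeapNext(uint64_t bytes_promoted_since_whole_heap,
                                    uint64_t los_bytes_now,
                                    uint64_t los_bytes_at_last_whole_heap,
                                    uint64_t promoted_threshold,
                                    uint64_t los_allocated_threshold) {
      const bool promoted_exceeded = bytes_promoted_since_whole_heap >= promoted_threshold;
      const bool los_exceeded =
          los_bytes_now >= los_bytes_at_last_whole_heap + los_allocated_threshold;
      return promoted_exceeded || los_exceeded;
    }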
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 292173da50..6d8190ed02 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -96,6 +96,7 @@ static const size_t kDefaultMarkStackSize = 64 * KB;
static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
+static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
@@ -179,16 +180,16 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
use_tlab_(use_tlab),
main_space_backup_(nullptr),
- min_interval_homogeneous_space_compaction_by_oom_(min_interval_homogeneous_space_compaction_by_oom),
+ min_interval_homogeneous_space_compaction_by_oom_(
+ min_interval_homogeneous_space_compaction_by_oom),
last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
- const bool is_zygote = Runtime::Current()->IsZygote();
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
// entrypoints.
- if (!is_zygote) {
+ if (!Runtime::Current()->IsZygote()) {
large_object_threshold_ = kDefaultLargeObjectThreshold;
// Background compaction is currently not supported for command line runs.
if (background_collector_type_ != foreground_collector_type_) {
@@ -197,7 +198,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
}
ChangeCollector(desired_collector_type_);
-
live_bitmap_.reset(new accounting::HeapBitmap(this));
mark_bitmap_.reset(new accounting::HeapBitmap(this));
// Requested begin for the alloc space, to follow the mapped image and oat files
@@ -213,130 +213,117 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
CHECK_GT(oat_file_end_addr, image_space->End());
requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
}
-
/*
requested_alloc_space_begin -> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+- nonmoving space (kNonMovingSpaceCapacity) +-
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
- +- main alloc space (capacity_) +-
+ +-main alloc space / bump space 1 (capacity_) +-
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
- +- main alloc space 1 (capacity_) +-
+ +-????????????????????????????????????????????+-
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+ +-main alloc space2 / bump space 2 (capacity_)+-
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
*/
- bool create_backup_main_space =
+ bool support_homogeneous_space_compaction =
background_collector_type == gc::kCollectorTypeHomogeneousSpaceCompact ||
use_homogeneous_space_compaction_for_oom;
- if (is_zygote) {
- // Reserve the address range before we create the non moving space to make sure bitmaps don't
- // take it.
- std::string error_str;
- MemMap* main_space_map = MemMap::MapAnonymous(
- kMemMapSpaceName[0], requested_alloc_space_begin + kNonMovingSpaceCapacity, capacity_,
- PROT_READ | PROT_WRITE, true, &error_str);
- CHECK(main_space_map != nullptr) << error_str;
- MemMap* main_space_1_map = nullptr;
- // Attempt to reserve an extra mem_map for homogeneous space compaction right after the main space map.
- if (create_backup_main_space) {
- main_space_1_map = MemMap::MapAnonymous(kMemMapSpaceName[1], main_space_map->End(), capacity_,
- PROT_READ | PROT_WRITE, true, &error_str);
- if (main_space_1_map == nullptr) {
- LOG(WARNING) << "Failed to create map " << kMemMapSpaceName[1] << " with error "
- << error_str;
- }
- }
+ // We may use the same space as the main space for the non moving space if we don't need to compact
+ // from the main space.
+ // This is not the case if we support homogeneous compaction or have a moving background
+ // collector type.
+ const bool is_zygote = Runtime::Current()->IsZygote();
+ bool separate_non_moving_space = is_zygote ||
+ support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
+ IsMovingGc(background_collector_type_);
+ if (foreground_collector_type == kCollectorTypeGSS) {
+ separate_non_moving_space = false;
+ }
+ std::unique_ptr<MemMap> main_mem_map_1;
+ std::unique_ptr<MemMap> main_mem_map_2;
+ byte* request_begin = requested_alloc_space_begin;
+ if (request_begin != nullptr && separate_non_moving_space) {
+ request_begin += kNonMovingSpaceCapacity;
+ }
+ std::string error_str;
+ std::unique_ptr<MemMap> non_moving_space_mem_map;
+ if (separate_non_moving_space) {
+ // Reserve the non moving mem map before the other two since it needs to be at a specific
+ // address.
+ non_moving_space_mem_map.reset(
+ MemMap::MapAnonymous("non moving space", requested_alloc_space_begin,
+ kNonMovingSpaceCapacity, PROT_READ | PROT_WRITE, true, &error_str));
+ CHECK(non_moving_space_mem_map != nullptr) << error_str;
+ }
+ // Attempt to create 2 mem maps at or after the requested begin.
+ main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
+ PROT_READ | PROT_WRITE, &error_str));
+ CHECK(main_mem_map_1.get() != nullptr) << error_str;
+ if (support_homogeneous_space_compaction ||
+ background_collector_type_ == kCollectorTypeSS ||
+ foreground_collector_type_ == kCollectorTypeSS) {
+ main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
+ capacity_, PROT_READ | PROT_WRITE,
+ &error_str));
+ CHECK(main_mem_map_2.get() != nullptr) << error_str;
+ }
+ // Create the non moving space first so that bitmaps don't take up the address range.
+ if (separate_non_moving_space) {
// Non moving space is always dlmalloc since we currently don't have support for multiple
// active rosalloc spaces.
- non_moving_space_ = space::DlMallocSpace::Create(
- "zygote / non moving space", initial_size, kNonMovingSpaceCapacity,
- kNonMovingSpaceCapacity, requested_alloc_space_begin, false);
+ const size_t size = non_moving_space_mem_map->Size();
+ non_moving_space_ = space::DlMallocSpace::CreateFromMemMap(
+ non_moving_space_mem_map.release(), "zygote / non moving space", initial_size,
+ initial_size, size, size, false);
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
- CreateMainMallocSpace(main_space_map, initial_size, growth_limit_, capacity_);
- if (main_space_1_map != nullptr) {
- const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
- main_space_backup_ = CreateMallocSpaceFromMemMap(main_space_1_map, initial_size,
- growth_limit_, capacity_, name, true);
- }
- } else {
- std::string error_str;
- byte* request_begin = requested_alloc_space_begin;
- if (request_begin == nullptr) {
- // Disable homogeneous space compaction since we don't have an image.
- create_backup_main_space = false;
- }
- MemMap* main_space_1_map = nullptr;
- if (create_backup_main_space) {
- request_begin += kNonMovingSpaceCapacity;
- // Attempt to reserve an extra mem_map for homogeneous space compaction right after the main space map.
- main_space_1_map = MemMap::MapAnonymous(kMemMapSpaceName[1], request_begin + capacity_,
- capacity_, PROT_READ | PROT_WRITE, true, &error_str);
- if (main_space_1_map == nullptr) {
- LOG(WARNING) << "Failed to create map " << kMemMapSpaceName[1] << " with error "
- << error_str;
- request_begin = requested_alloc_space_begin;
- }
- }
- MemMap* main_space_map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
- PROT_READ | PROT_WRITE, true, &error_str);
- CHECK(main_space_map != nullptr) << error_str;
- // Introduce a seperate non moving space.
- if (main_space_1_map != nullptr) {
- // Do this before creating the main malloc space to prevent bitmaps from being placed here.
- non_moving_space_ = space::DlMallocSpace::Create(
- "non moving space", kDefaultInitialSize, kNonMovingSpaceCapacity, kNonMovingSpaceCapacity,
- requested_alloc_space_begin, false);
- non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
- }
- // Create the main free list space, which doubles as the non moving space. We can do this since
- // non zygote means that we won't have any background compaction.
- CreateMainMallocSpace(main_space_map, initial_size, growth_limit_, capacity_);
- if (main_space_1_map != nullptr) {
- const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
- main_space_backup_ = CreateMallocSpaceFromMemMap(main_space_1_map, initial_size,
- growth_limit_, capacity_, name, true);
- CHECK(main_space_backup_ != nullptr);
- } else {
- non_moving_space_ = main_space_;
- }
+ CHECK(non_moving_space_ != nullptr) << "Failed creating non moving space "
+ << requested_alloc_space_begin;
+ AddSpace(non_moving_space_);
}
- CHECK(non_moving_space_ != nullptr);
-
- // We need to create the bump pointer if the foreground collector is a compacting GC. We only
- // create the bump pointer space if we are not a moving foreground collector but have a moving
- // background collector since the heap transition code will create the temp space by recycling
- // the bitmap from the main space.
- if (kMovingCollector &&
- (IsMovingGc(foreground_collector_type_) || IsMovingGc(background_collector_type_))) {
+ // Create other spaces based on whether or not we have a moving GC.
+ if (IsMovingGc(foreground_collector_type_) && foreground_collector_type_ != kCollectorTypeGSS) {
+ // Create bump pointer spaces.
+ // We only need to create the bump pointer spaces if the foreground collector is a compacting GC.
// TODO: Place bump-pointer spaces somewhere to minimize size of card table.
- // Divide by 2 for a temporary fix for reducing virtual memory usage.
- const size_t bump_pointer_space_capacity = capacity_ / 2;
- bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
- bump_pointer_space_capacity, nullptr);
+ bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 1",
+ main_mem_map_1.release());
CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(bump_pointer_space_);
- temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
- bump_pointer_space_capacity, nullptr);
+ temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
+ main_mem_map_2.release());
CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(temp_space_);
- }
- if (non_moving_space_ != main_space_) {
- AddSpace(non_moving_space_);
- }
- if (main_space_backup_ != nullptr) {
- AddSpace(main_space_backup_);
+ CHECK(separate_non_moving_space);
} else {
- const char* disable_msg = "Disabling homogenous space compact due to no backup main space";
- if (background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact) {
- background_collector_type_ = collector_type_;
- LOG(WARNING) << disable_msg;
- } else if (use_homogeneous_space_compaction_for_oom_) {
- LOG(WARNING) << disable_msg;
- }
- use_homogeneous_space_compaction_for_oom_ = false;
- }
- if (main_space_ != nullptr) {
+ CreateMainMallocSpace(main_mem_map_1.release(), initial_size, growth_limit_, capacity_);
+ CHECK(main_space_ != nullptr);
AddSpace(main_space_);
+ if (!separate_non_moving_space) {
+ non_moving_space_ = main_space_;
+ CHECK(!non_moving_space_->CanMoveObjects());
+ }
+ if (foreground_collector_type_ == kCollectorTypeGSS) {
+ CHECK_EQ(foreground_collector_type_, background_collector_type_);
+ // Create bump pointer spaces instead of a backup space.
+ main_mem_map_2.release();
+ bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space 1",
+ kGSSBumpPointerSpaceCapacity, nullptr);
+ CHECK(bump_pointer_space_ != nullptr);
+ AddSpace(bump_pointer_space_);
+ temp_space_ = space::BumpPointerSpace::Create("Bump pointer space 2",
+ kGSSBumpPointerSpaceCapacity, nullptr);
+ CHECK(temp_space_ != nullptr);
+ AddSpace(temp_space_);
+ } else if (main_mem_map_2.get() != nullptr) {
+ const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+ main_space_backup_.reset(CreateMallocSpaceFromMemMap(main_mem_map_2.release(), initial_size,
+ growth_limit_, capacity_, name, true));
+ CHECK(main_space_backup_.get() != nullptr);
+ // Add the space so it's accounted for in the heap_begin and heap_end.
+ AddSpace(main_space_backup_.get());
+ }
}
-
+ CHECK(non_moving_space_ != nullptr);
+ CHECK(!non_moving_space_->CanMoveObjects());
// Allocate the large object space.
if (kUseFreeListSpaceForLOS) {
large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
@@ -345,19 +332,19 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
CHECK(large_object_space_ != nullptr) << "Failed to create large object space";
AddSpace(large_object_space_);
-
// Compute heap capacity. Continuous spaces are sorted in order of Begin().
CHECK(!continuous_spaces_.empty());
-
// Relies on the spaces being sorted.
byte* heap_begin = continuous_spaces_.front()->Begin();
byte* heap_end = continuous_spaces_.back()->Limit();
size_t heap_capacity = heap_end - heap_begin;
-
+ // Remove the main backup space since it slows down the GC to have unused extra spaces.
+ if (main_space_backup_.get() != nullptr) {
+ RemoveSpace(main_space_backup_.get());
+ }
// Allocate the card table.
card_table_.reset(accounting::CardTable::Create(heap_begin, heap_capacity));
CHECK(card_table_.get() != NULL) << "Failed to create card table";
-
// Card cache for now since it makes it easier for us to update the references to the copying
// spaces.
accounting::ModUnionTable* mod_union_table =
@@ -365,17 +352,14 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
GetImageSpace());
CHECK(mod_union_table != nullptr) << "Failed to create image mod-union table";
AddModUnionTable(mod_union_table);
-
if (collector::SemiSpace::kUseRememberedSet && non_moving_space_ != main_space_) {
accounting::RememberedSet* non_moving_space_rem_set =
new accounting::RememberedSet("Non-moving space remembered set", this, non_moving_space_);
CHECK(non_moving_space_rem_set != nullptr) << "Failed to create non-moving space remembered set";
AddRememberedSet(non_moving_space_rem_set);
}
-
- // TODO: Count objects in the image space here.
+ // TODO: Count objects in the image space here?
num_bytes_allocated_.StoreRelaxed(0);
-
mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize,
kDefaultMarkStackSize));
const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize;
@@ -383,7 +367,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
"allocation stack", max_allocation_stack_size_, alloc_stack_capacity));
live_stack_.reset(accounting::ObjectStack::Create(
"live stack", max_allocation_stack_size_, alloc_stack_capacity));
-
// It's still too early to take a lock because there are no threads yet, but we can create locks
// now. We don't create it earlier to make it clear that you can't use locks during heap
// initialization.
@@ -392,13 +375,11 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
*gc_complete_lock_));
heap_trim_request_lock_ = new Mutex("Heap trim request lock");
last_gc_size_ = GetBytesAllocated();
-
if (ignore_max_footprint_) {
SetIdealFootprint(std::numeric_limits<size_t>::max());
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
CHECK_NE(max_allowed_footprint_, 0U);
-
// Create our garbage collectors.
for (size_t i = 0; i < 2; ++i) {
const bool concurrent = i != 0;
@@ -417,26 +398,38 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
mark_compact_collector_ = new collector::MarkCompact(this);
garbage_collectors_.push_back(mark_compact_collector_);
}
-
- if (GetImageSpace() != nullptr && main_space_ != nullptr) {
- // Check that there's no gap between the image space and the main space so that the immune
- // region won't break (eg. due to a large object allocated in the gap).
- bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(), main_space_->GetMemMap());
+ if (GetImageSpace() != nullptr && non_moving_space_ != nullptr) {
+ // Check that there's no gap between the image space and the non moving space so that the
+ // immune region won't break (eg. due to a large object allocated in the gap).
+ bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
+ non_moving_space_->GetMemMap());
if (!no_gap) {
MemMap::DumpMaps(LOG(ERROR));
LOG(FATAL) << "There's a gap between the image space and the main space";
}
}
-
if (running_on_valgrind_) {
Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
}
-
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() exiting";
}
}
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, byte* request_begin, size_t capacity,
+ int prot_flags, std::string* out_error_str) {
+ while (true) {
+ MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity, prot_flags, true,
+ out_error_str);
+ if (map != nullptr || request_begin == nullptr) {
+ return map;
+ }
+ // Retry a second time with no specified request begin.
+ request_begin = nullptr;
+ }
+ return nullptr;
+}
+
space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
size_t growth_limit, size_t capacity,
const char* name, bool can_move_objects) {
@@ -474,7 +467,8 @@ void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t gr
if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
// After the zygote we want this to be false if we don't have background compaction enabled so
// that getting primitive array elements is faster.
- can_move_objects = !have_zygote_space_;
+ // We never have homogeneous compaction with GSS and don't need a space with movable objects.
+ can_move_objects = !have_zygote_space_ && foreground_collector_type_ != kCollectorTypeGSS;
}
if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
RemoveRememberedSet(main_space_);
@@ -675,18 +669,11 @@ void Heap::VisitObjects(ObjectCallback callback, void* arg) {
}
void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
- space::ContinuousSpace* space1 = rosalloc_space_ != nullptr ? rosalloc_space_ : non_moving_space_;
- space::ContinuousSpace* space2 = dlmalloc_space_ != nullptr ? dlmalloc_space_ : non_moving_space_;
- // This is just logic to handle a case of either not having a rosalloc or dlmalloc space.
+ space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
+ space::ContinuousSpace* space2 = non_moving_space_;
// TODO: Generalize this to n bitmaps?
- if (space1 == nullptr) {
- DCHECK(space2 != nullptr);
- space1 = space2;
- }
- if (space2 == nullptr) {
- DCHECK(space1 != nullptr);
- space2 = space1;
- }
+ CHECK(space1 != nullptr);
+ CHECK(space2 != nullptr);
MarkAllocStack(space1->GetLiveBitmap(), space2->GetLiveBitmap(),
large_object_space_->GetLiveBitmap(), stack);
}
@@ -705,7 +692,7 @@ void Heap::AddSpace(space::Space* space) {
accounting::ContinuousSpaceBitmap* live_bitmap = continuous_space->GetLiveBitmap();
accounting::ContinuousSpaceBitmap* mark_bitmap = continuous_space->GetMarkBitmap();
if (live_bitmap != nullptr) {
- DCHECK(mark_bitmap != nullptr);
+ CHECK(mark_bitmap != nullptr);
live_bitmap_->AddContinuousSpaceBitmap(live_bitmap);
mark_bitmap_->AddContinuousSpaceBitmap(mark_bitmap);
}
@@ -716,7 +703,7 @@ void Heap::AddSpace(space::Space* space) {
return a->Begin() < b->Begin();
});
} else {
- DCHECK(space->IsDiscontinuousSpace());
+ CHECK(space->IsDiscontinuousSpace());
space::DiscontinuousSpace* discontinuous_space = space->AsDiscontinuousSpace();
live_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetLiveBitmap());
mark_bitmap_->AddLargeObjectBitmap(discontinuous_space->GetMarkBitmap());
@@ -771,15 +758,11 @@ void Heap::RemoveSpace(space::Space* space) {
}
void Heap::RegisterGCAllocation(size_t bytes) {
- if (this != nullptr) {
- gc_memory_overhead_.FetchAndAddSequentiallyConsistent(bytes);
- }
+ gc_memory_overhead_.FetchAndAddSequentiallyConsistent(bytes);
}
void Heap::RegisterGCDeAllocation(size_t bytes) {
- if (this != nullptr) {
- gc_memory_overhead_.FetchAndSubSequentiallyConsistent(bytes);
- }
+ gc_memory_overhead_.FetchAndSubSequentiallyConsistent(bytes);
}
void Heap::DumpGcPerformanceInfo(std::ostream& os) {
@@ -789,28 +772,9 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
// Dump cumulative loggers for each GC type.
uint64_t total_paused_time = 0;
for (auto& collector : garbage_collectors_) {
- const CumulativeLogger& logger = collector->GetCumulativeTimings();
- const size_t iterations = logger.GetIterations();
- const Histogram<uint64_t>& pause_histogram = collector->GetPauseHistogram();
- if (iterations != 0 && pause_histogram.SampleSize() != 0) {
- os << ConstDumpable<CumulativeLogger>(logger);
- const uint64_t total_ns = logger.GetTotalNs();
- const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
- double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
- const uint64_t freed_bytes = collector->GetTotalFreedBytes();
- const uint64_t freed_objects = collector->GetTotalFreedObjects();
- Histogram<uint64_t>::CumulativeData cumulative_data;
- pause_histogram.CreateHistogram(&cumulative_data);
- pause_histogram.PrintConfidenceIntervals(os, 0.99, cumulative_data);
- os << collector->GetName() << " total time: " << PrettyDuration(total_ns)
- << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
- << collector->GetName() << " freed: " << freed_objects
- << " objects with total size " << PrettySize(freed_bytes) << "\n"
- << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
- << PrettySize(freed_bytes / seconds) << "/s\n";
- total_duration += total_ns;
- total_paused_time += total_pause_ns;
- }
+ total_duration += collector->GetCumulativeTimings().GetTotalNs();
+ total_paused_time += collector->GetTotalPausedTimeNs();
+ collector->DumpPerformanceInfo(os);
collector->ResetMeasurements();
}
uint64_t allocation_time =
@@ -903,12 +867,15 @@ void Heap::ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType
<< " free bytes";
// If the allocation failed due to fragmentation, print out the largest continuous allocation.
if (total_bytes_free >= byte_count) {
- space::MallocSpace* space = nullptr;
+ space::AllocSpace* space = nullptr;
if (allocator_type == kAllocatorTypeNonMoving) {
space = non_moving_space_;
} else if (allocator_type == kAllocatorTypeRosAlloc ||
allocator_type == kAllocatorTypeDlMalloc) {
space = main_space_;
+ } else if (allocator_type == kAllocatorTypeBumpPointer ||
+ allocator_type == kAllocatorTypeTLAB) {
+ space = bump_pointer_space_;
}
if (space != nullptr) {
space->LogFragmentationAllocFailure(oss, byte_count);
@@ -1125,7 +1092,13 @@ bool Heap::IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack,
return false;
}
-void Heap::DumpSpaces(std::ostream& stream) {
+std::string Heap::DumpSpaces() const {
+ std::ostringstream oss;
+ DumpSpaces(oss);
+ return oss.str();
+}
+
+void Heap::DumpSpaces(std::ostream& stream) const {
for (const auto& space : continuous_spaces_) {
accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
@@ -1143,9 +1116,10 @@ void Heap::DumpSpaces(std::ostream& stream) {
}
void Heap::VerifyObjectBody(mirror::Object* obj) {
- if (this == nullptr && verify_object_mode_ == kVerifyObjectModeDisabled) {
+ if (verify_object_mode_ == kVerifyObjectModeDisabled) {
return;
}
+
// Ignore early dawn of the universe verifications.
if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
return;
@@ -1158,10 +1132,7 @@ void Heap::VerifyObjectBody(mirror::Object* obj) {
if (verify_object_mode_ > kVerifyObjectModeFast) {
// Note: the bitmap tests below are racy since we don't hold the heap bitmap lock.
- if (!IsLiveObjectLocked(obj)) {
- DumpSpaces();
- LOG(FATAL) << "Object is dead: " << obj;
- }
+ CHECK(IsLiveObjectLocked(obj)) << "Object is dead " << obj << "\n" << DumpSpaces();
}
}
@@ -1512,15 +1483,18 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
tl->SuspendAll();
uint64_t start_time = NanoTime();
// Launch compaction.
- space::MallocSpace* to_space = main_space_backup_;
+ space::MallocSpace* to_space = main_space_backup_.release();
space::MallocSpace* from_space = main_space_;
to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
const uint64_t space_size_before_compaction = from_space->Size();
+ AddSpace(to_space);
Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
// Leave as prot read so that we can still run ROSAlloc verification on this space.
from_space->GetMemMap()->Protect(PROT_READ);
const uint64_t space_size_after_compaction = to_space->Size();
- std::swap(main_space_, main_space_backup_);
+ main_space_ = to_space;
+ main_space_backup_.reset(from_space);
+ RemoveSpace(from_space);
SetSpaceAsDefault(main_space_); // Set as default to reset the proper dlmalloc space.
// Update performed homogeneous space compaction count.
count_performed_homogeneous_space_compaction_++;
@@ -1587,17 +1561,38 @@ void Heap::TransitionCollector(CollectorType collector_type) {
}
tl->SuspendAll();
switch (collector_type) {
- case kCollectorTypeSS:
- // Fall-through.
- case kCollectorTypeGSS: {
+ case kCollectorTypeSS: {
if (!IsMovingGc(collector_type_)) {
+ // Create the bump pointer space from the backup space.
+ CHECK(main_space_backup_ != nullptr);
+ std::unique_ptr<MemMap> mem_map(main_space_backup_->ReleaseMemMap());
// We are transitioning from non moving GC -> moving GC, since we copied from the bump
// pointer space last transition it will be protected.
- bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+ CHECK(mem_map != nullptr);
+ mem_map->Protect(PROT_READ | PROT_WRITE);
+ bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
+ mem_map.release());
+ AddSpace(bump_pointer_space_);
Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
+ // Use the now empty main space mem map for the bump pointer temp space.
+ mem_map.reset(main_space_->ReleaseMemMap());
+ // Unset the pointers just in case.
+ if (dlmalloc_space_ == main_space_) {
+ dlmalloc_space_ = nullptr;
+ } else if (rosalloc_space_ == main_space_) {
+ rosalloc_space_ = nullptr;
+ }
// Remove the main space so that we don't try to trim it; this doesn't work for debug
// builds since RosAlloc attempts to read the magic number from a protected page.
RemoveSpace(main_space_);
+ RemoveRememberedSet(main_space_);
+ delete main_space_; // Delete the space since it has been removed.
+ main_space_ = nullptr;
+ RemoveRememberedSet(main_space_backup_.get());
+ main_space_backup_.reset(nullptr); // Deletes the space.
+ temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
+ mem_map.release());
+ AddSpace(temp_space_);
}
break;
}
@@ -1605,10 +1600,32 @@ void Heap::TransitionCollector(CollectorType collector_type) {
// Fall through.
case kCollectorTypeCMS: {
if (IsMovingGc(collector_type_)) {
+ CHECK(temp_space_ != nullptr);
+ std::unique_ptr<MemMap> mem_map(temp_space_->ReleaseMemMap());
+ RemoveSpace(temp_space_);
+ temp_space_ = nullptr;
+ mem_map->Protect(PROT_READ | PROT_WRITE);
+ CreateMainMallocSpace(mem_map.get(), kDefaultInitialSize, mem_map->Size(),
+ mem_map->Size());
+ mem_map.release();
// Compact to the main space from the bump pointer space, don't need to swap semispaces.
AddSpace(main_space_);
- main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
+ mem_map.reset(bump_pointer_space_->ReleaseMemMap());
+ RemoveSpace(bump_pointer_space_);
+ bump_pointer_space_ = nullptr;
+ const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+ // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
+ if (kIsDebugBuild && kUseRosAlloc) {
+ mem_map->Protect(PROT_READ | PROT_WRITE);
+ }
+ main_space_backup_.reset(CreateMallocSpaceFromMemMap(mem_map.get(), kDefaultInitialSize,
+ mem_map->Size(), mem_map->Size(),
+ name, true));
+ if (kIsDebugBuild && kUseRosAlloc) {
+ mem_map->Protect(PROT_NONE);
+ }
+ mem_map.release();
}
break;
}
@@ -1811,6 +1828,7 @@ void Heap::PreZygoteFork() {
// there.
non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
// Change the collector to the post zygote one.
+ bool same_space = non_moving_space_ == main_space_;
if (kCompactZygote) {
DCHECK(semi_space_collector_ != nullptr);
// Temporarily disable rosalloc verification because the zygote
@@ -1877,6 +1895,11 @@ void Heap::PreZygoteFork() {
space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
low_memory_mode_,
&non_moving_space_);
+ CHECK(!non_moving_space_->CanMoveObjects());
+ if (same_space) {
+ main_space_ = non_moving_space_;
+ SetSpaceAsDefault(main_space_);
+ }
delete old_alloc_space;
CHECK(zygote_space != nullptr) << "Failed creating zygote space";
AddSpace(zygote_space);
@@ -2178,7 +2201,7 @@ class VerifyReferenceVisitor {
LOG(ERROR) << "Object " << obj << " class(" << obj->GetClass() << ") not a heap address";
}
- // Attmept to find the class inside of the recently freed objects.
+ // Attempt to find the class inside of the recently freed objects.
space::ContinuousSpace* ref_space = heap_->FindContinuousSpaceFromObject(ref, true);
if (ref_space != nullptr && ref_space->IsMallocSpace()) {
space::MallocSpace* space = ref_space->AsMallocSpace();
@@ -2353,7 +2376,7 @@ size_t Heap::VerifyHeapReferences(bool verify_referents) {
accounting::RememberedSet* remembered_set = table_pair.second;
remembered_set->Dump(LOG(ERROR) << remembered_set->GetName() << ": ");
}
- DumpSpaces();
+ DumpSpaces(LOG(ERROR));
}
return visitor.GetFailureCount();
}
@@ -2470,12 +2493,7 @@ bool Heap::VerifyMissingCardMarks() {
visitor(*it);
}
}
-
- if (visitor.Failed()) {
- DumpSpaces();
- return false;
- }
- return true;
+ return !visitor.Failed();
}
void Heap::SwapStacks(Thread* self) {
@@ -2496,6 +2514,17 @@ void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
}
}
+void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
+ if (kIsDebugBuild) {
+ if (rosalloc_space_ != nullptr) {
+ rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
+ }
+ if (bump_pointer_space_ != nullptr) {
+ bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
+ }
+ }
+}
+
void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
if (kIsDebugBuild) {
if (bump_pointer_space_ != nullptr) {
@@ -2573,9 +2602,8 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
SwapStacks(self);
// Sort the live stack so that we can quickly binary search it later.
- if (!VerifyMissingCardMarks()) {
- LOG(FATAL) << "Pre " << gc->GetName() << " missing card mark verification failed";
- }
+ CHECK(VerifyMissingCardMarks()) << "Pre " << gc->GetName()
+ << " missing card mark verification failed\n" << DumpSpaces();
SwapStacks(self);
}
if (verify_mod_union_table_) {
@@ -3089,6 +3117,7 @@ void Heap::RemoveRememberedSet(space::Space* space) {
CHECK(space != nullptr);
auto it = remembered_sets_.find(space);
CHECK(it != remembered_sets_.end());
+ delete it->second;
remembered_sets_.erase(it);
CHECK(remembered_sets_.find(space) == remembered_sets_.end());
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b20795369e..1851662669 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -444,8 +444,7 @@ class Heap {
bool fail_ok) const;
space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;
- void DumpForSigQuit(std::ostream& os);
-
+ void DumpForSigQuit(std::ostream& os) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Do a pending heap transition or trim.
void DoPendingTransitionOrTrim() LOCKS_EXCLUDED(heap_trim_request_lock_);
@@ -456,6 +455,7 @@ class Heap {
void RevokeThreadLocalBuffers(Thread* thread);
void RevokeRosAllocThreadLocalBuffers(Thread* thread);
void RevokeAllThreadLocalBuffers();
+ void AssertThreadLocalBuffersAreRevoked(Thread* thread);
void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
void RosAllocVerification(TimingLogger* timings, const char* name)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -539,7 +539,8 @@ class Heap {
}
}
- void DumpSpaces(std::ostream& stream = LOG(INFO));
+ std::string DumpSpaces() const WARN_UNUSED;
+ void DumpSpaces(std::ostream& stream) const;
// Dump object should only be used by the signal handler.
void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -572,6 +573,7 @@ class Heap {
accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
void AddRememberedSet(accounting::RememberedSet* remembered_set);
+  // Also deletes the remembered set.
void RemoveRememberedSet(space::Space* space);
bool IsCompilingBoot() const;
@@ -593,8 +595,13 @@ class Heap {
void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);
+ // Create a mem map with a preferred base address.
+ static MemMap* MapAnonymousPreferredAddress(const char* name, byte* request_begin,
+ size_t capacity, int prot_flags,
+ std::string* out_error_str);
+
bool SupportHSpaceCompaction() const {
- // Returns true if we can do hspace compaction.
+ // Returns true if we can do hspace compaction
return main_space_backup_ != nullptr;
}
@@ -656,7 +663,7 @@ class Heap {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kGrow>
- bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
+ ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
// Returns true if the address passed in is within the address range of a continuous space.
bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
@@ -1006,7 +1013,8 @@ class Heap {
const bool use_tlab_;
// Pointer to the space which becomes the new main space when we do homogeneous space compaction.
- space::MallocSpace* main_space_backup_;
+ // Use unique_ptr since the space is only added during the homogeneous compaction phase.
+ std::unique_ptr<space::MallocSpace> main_space_backup_;
// Minimal interval allowed between two homogeneous space compactions caused by OOM.
uint64_t min_interval_homogeneous_space_compaction_by_oom_;
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 8b3569232a..fb6bbac6d0 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -258,6 +258,14 @@ bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
return true;
}
+void BumpPointerSpace::LogFragmentationAllocFailure(std::ostream& os,
+ size_t /* failed_alloc_bytes */) {
+ size_t max_contiguous_allocation = Limit() - End();
+ os << "; failed due to fragmentation (largest possible contiguous allocation "
+ << max_contiguous_allocation << " bytes)";
+ // Caller's job to print failed_alloc_bytes.
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index feee34f8bb..71b15baff1 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -151,6 +151,9 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes);
}
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Object alignment within the space.
static constexpr size_t kAlignment = 8;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index a87aa890c8..1d10af2e63 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -16,6 +16,8 @@
#include "image_space.h"
+#include <random>
+
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "base/scoped_flock.h"
@@ -43,6 +45,26 @@ ImageSpace::ImageSpace(const std::string& image_filename, const char* image_loca
live_bitmap_.reset(live_bitmap);
}
+static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta) {
+ CHECK_ALIGNED(min_delta, kPageSize);
+ CHECK_ALIGNED(max_delta, kPageSize);
+ CHECK_LT(min_delta, max_delta);
+
+ std::default_random_engine generator;
+ generator.seed(NanoTime() * getpid());
+ std::uniform_int_distribution<int32_t> distribution(min_delta, max_delta);
+ int32_t r = distribution(generator);
+ if (r % 2 == 0) {
+ r = RoundUp(r, kPageSize);
+ } else {
+ r = RoundDown(r, kPageSize);
+ }
+ CHECK_LE(min_delta, r);
+ CHECK_GE(max_delta, r);
+ CHECK_ALIGNED(r, kPageSize);
+ return r;
+}
+
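
[Illustrative aside, not part of the patch] A rough sketch of the rounding step above, assuming a 4 KiB kPageSize: an even draw such as 0x123456 is rounded up to 0x124000, an odd draw such as 0x123457 is rounded down to 0x123000, so the returned delta is always page aligned and, because min_delta and max_delta are themselves page aligned, still inside the requested range.

    constexpr int32_t kPage = 4096;                       // stand-in for kPageSize
    int32_t r = 0x123456;                                 // pretend this came from the distribution
    r = (r % 2 == 0) ? ((r + kPage - 1) & ~(kPage - 1))   // even draw: round up
                     : (r & ~(kPage - 1));                // odd draw: round down
    // r == 0x124000 here: page aligned and within [min_delta, max_delta].
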
static bool GenerateImage(const std::string& image_filename, std::string* error_msg) {
const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
std::vector<std::string> boot_class_path;
@@ -73,12 +95,13 @@ static bool GenerateImage(const std::string& image_filename, std::string* error_
Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&arg_vector);
- arg_vector.push_back(StringPrintf("--base=0x%x", ART_BASE_ADDRESS));
+ int32_t base_offset = ChooseRelocationOffsetDelta(ART_BASE_ADDRESS_MIN_DELTA,
+ ART_BASE_ADDRESS_MAX_DELTA);
+ LOG(INFO) << "Using an offset of 0x" << std::hex << base_offset << " from default "
+ << "art base address of 0x" << std::hex << ART_BASE_ADDRESS;
+ arg_vector.push_back(StringPrintf("--base=0x%x", ART_BASE_ADDRESS + base_offset));
- if (kIsTargetBuild) {
- arg_vector.push_back("--image-classes-zip=/system/framework/framework.jar");
- arg_vector.push_back("--image-classes=preloaded-classes");
- } else {
+ if (!kIsTargetBuild) {
arg_vector.push_back("--host");
}
@@ -94,86 +117,284 @@ static bool GenerateImage(const std::string& image_filename, std::string* error_
bool ImageSpace::FindImageFilename(const char* image_location,
const InstructionSet image_isa,
- std::string* image_filename,
- bool *is_system) {
+ std::string* system_filename,
+ bool* has_system,
+ std::string* cache_filename,
+ bool* dalvik_cache_exists,
+ bool* has_cache) {
+ *has_system = false;
+ *has_cache = false;
// image_location = /system/framework/boot.art
// system_image_location = /system/framework/<image_isa>/boot.art
std::string system_image_filename(GetSystemImageFilename(image_location, image_isa));
if (OS::FileExists(system_image_filename.c_str())) {
- *image_filename = system_image_filename;
- *is_system = true;
- return true;
+ *system_filename = system_image_filename;
+ *has_system = true;
}
- const std::string dalvik_cache = GetDalvikCacheOrDie(GetInstructionSetString(image_isa));
+ bool have_android_data = false;
+ *dalvik_cache_exists = false;
+ std::string dalvik_cache;
+ GetDalvikCache(GetInstructionSetString(image_isa), true, &dalvik_cache,
+ &have_android_data, dalvik_cache_exists);
+
+ if (have_android_data && *dalvik_cache_exists) {
+ // Always set output location even if it does not exist,
+ // so that the caller knows where to create the image.
+ //
+ // image_location = /system/framework/boot.art
+ // *image_filename = /data/dalvik-cache/<image_isa>/boot.art
+ std::string error_msg;
+ if (!GetDalvikCacheFilename(image_location, dalvik_cache.c_str(), cache_filename, &error_msg)) {
+ LOG(WARNING) << error_msg;
+ return *has_system;
+ }
+ *has_cache = OS::FileExists(cache_filename->c_str());
+ }
+ return *has_system || *has_cache;
+}
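
[Illustrative aside, not part of the patch] A hypothetical caller of the widened FindImageFilename signature, mirroring how Create() and ReadImageHeaderOrDie() below use it (variable names are illustrative):

    std::string system_filename, cache_filename;
    bool has_system = false, has_cache = false, dalvik_cache_exists = false;
    if (ImageSpace::FindImageFilename(image_location, image_isa,
                                      &system_filename, &has_system,
                                      &cache_filename, &dalvik_cache_exists, &has_cache)) {
      // At least one of has_system / has_cache is true. cache_filename is filled in whenever
      // the dalvik-cache directory exists, even if no image has been generated there yet.
    }
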
- // Always set output location even if it does not exist,
- // so that the caller knows where to create the image.
- //
- // image_location = /system/framework/boot.art
- // *image_filename = /data/dalvik-cache/<image_isa>/boot.art
- *image_filename = GetDalvikCacheFilenameOrDie(image_location, dalvik_cache.c_str());
- *is_system = false;
- return OS::FileExists(image_filename->c_str());
+static bool ReadSpecificImageHeader(const char* filename, ImageHeader* image_header) {
+ std::unique_ptr<File> image_file(OS::OpenFileForReading(filename));
+ if (image_file.get() == nullptr) {
+ return false;
+ }
+ const bool success = image_file->ReadFully(image_header, sizeof(ImageHeader));
+ if (!success || !image_header->IsValid()) {
+ return false;
+ }
+ return true;
+}
+
+// Relocate the image at image_location to dest_filename and relocate it by a random amount.
+static bool RelocateImage(const char* image_location, const char* dest_filename,
+ InstructionSet isa, std::string* error_msg) {
+ std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
+
+ std::string input_image_location_arg("--input-image-location=");
+ input_image_location_arg += image_location;
+
+ std::string output_image_filename_arg("--output-image-file=");
+ output_image_filename_arg += dest_filename;
+
+ std::string input_oat_location_arg("--input-oat-location=");
+ input_oat_location_arg += ImageHeader::GetOatLocationFromImageLocation(image_location);
+
+ std::string output_oat_filename_arg("--output-oat-file=");
+ output_oat_filename_arg += ImageHeader::GetOatLocationFromImageLocation(dest_filename);
+
+ std::string instruction_set_arg("--instruction-set=");
+ instruction_set_arg += GetInstructionSetString(isa);
+
+ std::string base_offset_arg("--base-offset-delta=");
+ StringAppendF(&base_offset_arg, "%d", ChooseRelocationOffsetDelta(ART_BASE_ADDRESS_MIN_DELTA,
+ ART_BASE_ADDRESS_MAX_DELTA));
+
+ std::vector<std::string> argv;
+ argv.push_back(patchoat);
+
+ argv.push_back(input_image_location_arg);
+ argv.push_back(output_image_filename_arg);
+
+ argv.push_back(input_oat_location_arg);
+ argv.push_back(output_oat_filename_arg);
+
+ argv.push_back(instruction_set_arg);
+ argv.push_back(base_offset_arg);
+
+ std::string command_line(Join(argv, ' '));
+ LOG(INFO) << "RelocateImage: " << command_line;
+ return Exec(argv, error_msg);
+}
+
+static ImageHeader* ReadSpecificImageHeaderOrDie(const char* filename) {
+ std::unique_ptr<ImageHeader> hdr(new ImageHeader);
+ if (!ReadSpecificImageHeader(filename, hdr.get())) {
+ LOG(FATAL) << "Unable to read image header for " << filename;
+ return nullptr;
+ }
+ return hdr.release();
}
ImageHeader* ImageSpace::ReadImageHeaderOrDie(const char* image_location,
const InstructionSet image_isa) {
- std::string image_filename;
- bool is_system = false;
- if (FindImageFilename(image_location, image_isa, &image_filename, &is_system)) {
- std::unique_ptr<File> image_file(OS::OpenFileForReading(image_filename.c_str()));
- std::unique_ptr<ImageHeader> image_header(new ImageHeader);
- const bool success = image_file->ReadFully(image_header.get(), sizeof(ImageHeader));
- if (!success || !image_header->IsValid()) {
- LOG(FATAL) << "Invalid Image header for: " << image_filename;
- return nullptr;
+ std::string system_filename;
+ bool has_system = false;
+ std::string cache_filename;
+ bool has_cache = false;
+ bool dalvik_cache_exists = false;
+ if (FindImageFilename(image_location, image_isa, &system_filename, &has_system,
+ &cache_filename, &dalvik_cache_exists, &has_cache)) {
+ if (Runtime::Current()->ShouldRelocate()) {
+ if (has_system && has_cache) {
+ std::unique_ptr<ImageHeader> sys_hdr(new ImageHeader);
+ std::unique_ptr<ImageHeader> cache_hdr(new ImageHeader);
+ if (!ReadSpecificImageHeader(system_filename.c_str(), sys_hdr.get())) {
+ LOG(FATAL) << "Unable to read image header for " << image_location << " at "
+ << system_filename;
+ return nullptr;
+ }
+ if (!ReadSpecificImageHeader(cache_filename.c_str(), cache_hdr.get())) {
+ LOG(FATAL) << "Unable to read image header for " << image_location << " at "
+ << cache_filename;
+ return nullptr;
+ }
+ if (sys_hdr->GetOatChecksum() != cache_hdr->GetOatChecksum()) {
+ LOG(FATAL) << "Unable to find a relocated version of image file " << image_location;
+ return nullptr;
+ }
+ return cache_hdr.release();
+ } else if (!has_cache) {
+ LOG(FATAL) << "Unable to find a relocated version of image file " << image_location;
+ return nullptr;
+ } else if (!has_system && has_cache) {
+ // This can probably just use the cache one.
+ return ReadSpecificImageHeaderOrDie(cache_filename.c_str());
+ }
+ } else {
+      // We don't want to relocate; just pick the appropriate one if we have it and return.
+ if (has_system && has_cache) {
+ // We want the cache if the checksum matches, otherwise the system.
+ std::unique_ptr<ImageHeader> system(ReadSpecificImageHeaderOrDie(system_filename.c_str()));
+ std::unique_ptr<ImageHeader> cache(ReadSpecificImageHeaderOrDie(cache_filename.c_str()));
+ if (system.get() == nullptr ||
+ (cache.get() != nullptr && cache->GetOatChecksum() == system->GetOatChecksum())) {
+ return cache.release();
+ } else {
+ return system.release();
+ }
+ } else if (has_system) {
+ return ReadSpecificImageHeaderOrDie(system_filename.c_str());
+ } else if (has_cache) {
+ return ReadSpecificImageHeaderOrDie(cache_filename.c_str());
+ }
}
-
- return image_header.release();
}
LOG(FATAL) << "Unable to find image file for: " << image_location;
return nullptr;
}
+static bool ChecksumsMatch(const char* image_a, const char* image_b) {
+ ImageHeader hdr_a;
+ ImageHeader hdr_b;
+ return ReadSpecificImageHeader(image_a, &hdr_a) && ReadSpecificImageHeader(image_b, &hdr_b)
+ && hdr_a.GetOatChecksum() == hdr_b.GetOatChecksum();
+}
+
ImageSpace* ImageSpace::Create(const char* image_location,
const InstructionSet image_isa) {
- std::string image_filename;
std::string error_msg;
- bool is_system = false;
- const bool found_image = FindImageFilename(image_location, image_isa, &image_filename,
- &is_system);
-
- // Note that we must not use the file descriptor associated with
- // ScopedFlock::GetFile to Init the image file. We want the file
- // descriptor (and the associated exclusive lock) to be released when
- // we leave Create.
- ScopedFlock image_lock;
- image_lock.Init(image_filename.c_str(), &error_msg);
-
+ std::string system_filename;
+ bool has_system = false;
+ std::string cache_filename;
+ bool has_cache = false;
+ bool dalvik_cache_exists = false;
+ const bool found_image = FindImageFilename(image_location, image_isa, &system_filename,
+ &has_system, &cache_filename, &dalvik_cache_exists,
+ &has_cache);
+
+ ImageSpace* space;
+ bool relocate = Runtime::Current()->ShouldRelocate();
if (found_image) {
- ImageSpace* space = ImageSpace::Init(image_filename.c_str(), image_location, !is_system,
- &error_msg);
+ const std::string* image_filename;
+ bool is_system = false;
+ bool relocated_version_used = false;
+ if (relocate) {
+ CHECK(dalvik_cache_exists) << "Requiring relocation for image " << image_location << " "
+ << "at " << system_filename << " but we do not have any "
+ << "dalvik_cache to find/place it in.";
+ if (has_system) {
+ if (has_cache && ChecksumsMatch(system_filename.c_str(), cache_filename.c_str())) {
+          // We already have a relocated version.
+ image_filename = &cache_filename;
+ relocated_version_used = true;
+ } else {
+          // We do not have a relocated version; relocate the system one and use it.
+ if (RelocateImage(image_location, cache_filename.c_str(), image_isa,
+ &error_msg)) {
+ relocated_version_used = true;
+ image_filename = &cache_filename;
+ } else {
+ LOG(FATAL) << "Unable to relocate image " << image_location << " "
+ << "from " << system_filename << " to " << cache_filename << ": "
+ << error_msg;
+ return nullptr;
+ }
+ }
+ } else {
+ CHECK(has_cache);
+        // We can just use the cache copy since it should be fine. This might or might not be relocated.
+ image_filename = &cache_filename;
+ }
+ } else {
+ if (has_system && has_cache) {
+        // Check that they have the same checksum. If they do, use the cache; otherwise use the system copy.
+ if (ChecksumsMatch(system_filename.c_str(), cache_filename.c_str())) {
+ image_filename = &cache_filename;
+ relocated_version_used = true;
+ } else {
+ image_filename = &system_filename;
+ is_system = true;
+ }
+ } else if (has_system) {
+ image_filename = &system_filename;
+ is_system = true;
+ } else {
+ CHECK(has_cache);
+ image_filename = &cache_filename;
+ }
+ }
+ {
+ // Note that we must not use the file descriptor associated with
+ // ScopedFlock::GetFile to Init the image file. We want the file
+ // descriptor (and the associated exclusive lock) to be released when
+ // we leave Create.
+ ScopedFlock image_lock;
+ image_lock.Init(image_filename->c_str(), &error_msg);
+ LOG(INFO) << "Using image file " << image_filename->c_str() << " for image location "
+ << image_location;
+ // If we are in /system we can assume the image is good. We can also
+      // assume this if we are using a relocated image (i.e. the image checksum
+      // matches), since it differs only by the relocation offset. We need this to
+ // make sure that host tests continue to work.
+ space = ImageSpace::Init(image_filename->c_str(), image_location,
+ !(is_system || relocated_version_used), &error_msg);
+ }
if (space != nullptr) {
return space;
}
- // If the /system file exists, it should be up-to-date, don't try to generate it.
- // If it's not the /system file, log a warning and fall through to GenerateImage.
- if (is_system) {
- LOG(FATAL) << "Failed to load image '" << image_filename << "': " << error_msg;
+ // If the /system file exists, it should be up-to-date, don't try to generate it. Same if it is
+ // a relocated copy from something in /system (i.e. checksum's match).
+ // Otherwise, log a warning and fall through to GenerateImage.
+ if (relocated_version_used) {
+ LOG(FATAL) << "Attempted to use relocated version of " << image_location << " "
+ << "at " << cache_filename << " generated from " << system_filename << " "
+ << "but image failed to load: " << error_msg;
+ return nullptr;
+ } else if (is_system) {
+ LOG(FATAL) << "Failed to load /system image '" << *image_filename << "': " << error_msg;
return nullptr;
} else {
LOG(WARNING) << error_msg;
}
}
- CHECK(GenerateImage(image_filename, &error_msg))
- << "Failed to generate image '" << image_filename << "': " << error_msg;
- ImageSpace* space = ImageSpace::Init(image_filename.c_str(), image_location, true, &error_msg);
+ CHECK(dalvik_cache_exists) << "No place to put generated image.";
+ CHECK(GenerateImage(cache_filename, &error_msg))
+ << "Failed to generate image '" << cache_filename << "': " << error_msg;
+ {
+ // Note that we must not use the file descriptor associated with
+ // ScopedFlock::GetFile to Init the image file. We want the file
+ // descriptor (and the associated exclusive lock) to be released when
+ // we leave Create.
+ ScopedFlock image_lock;
+ image_lock.Init(cache_filename.c_str(), &error_msg);
+ space = ImageSpace::Init(cache_filename.c_str(), image_location, true, &error_msg);
+ }
if (space == nullptr) {
- LOG(FATAL) << "Failed to load image '" << image_filename << "': " << error_msg;
+ LOG(FATAL) << "Failed to load generated image '" << cache_filename << "': " << error_msg;
}
return space;
}
@@ -316,6 +537,15 @@ OatFile* ImageSpace::OpenOatFile(const char* image_path, std::string* error_msg)
" in image %s", oat_checksum, image_oat_checksum, GetName());
return nullptr;
}
+ int32_t image_patch_delta = image_header.GetPatchDelta();
+ int32_t oat_patch_delta = oat_file->GetOatHeader().GetImagePatchDelta();
+ if (oat_patch_delta != image_patch_delta) {
+ // We should have already relocated by this point. Bail out.
+ *error_msg = StringPrintf("Failed to match oat file patch delta %d to expected patch delta %d "
+ "in image %s", oat_patch_delta, image_patch_delta, GetName());
+ return nullptr;
+ }
+
return oat_file;
}
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index dd9b58084d..6be3b8f3df 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -98,6 +98,20 @@ class ImageSpace : public MemMapSpace {
return false;
}
+ // Returns the filename of the image corresponding to
+ // requested image_location, or the filename where a new image
+ // should be written if one doesn't exist. Looks for a generated
+ // image in the specified location and then in the dalvik-cache.
+ //
+ // Returns true if an image was found, false otherwise.
+ static bool FindImageFilename(const char* image_location,
+ InstructionSet image_isa,
+ std::string* system_location,
+ bool* has_system,
+ std::string* data_location,
+ bool* dalvik_cache_exists,
+ bool* has_data);
+
private:
// Tries to initialize an ImageSpace from the given image path,
// returning NULL on error.
@@ -110,17 +124,6 @@ class ImageSpace : public MemMapSpace {
bool validate_oat_file, std::string* error_msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Returns the filename of the image corresponding to
- // requested image_location, or the filename where a new image
- // should be written if one doesn't exist. Looks for a generated
- // image in the specified location and then in the dalvik-cache.
- //
- // Returns true if an image was found, false otherwise.
- static bool FindImageFilename(const char* image_location,
- InstructionSet image_isa,
- std::string* location,
- bool* is_system);
-
OatFile* OpenOatFile(const char* image, std::string* error_msg) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index abae8ff346..d5a03c66ec 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -431,6 +431,11 @@ collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
return scc.freed;
}
+void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
+ size_t /*failed_alloc_bytes*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 01982d06ab..b1c20ca9e6 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -89,6 +89,9 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
return end_;
}
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
protected:
explicit LargeObjectSpace(const std::string& name, byte* begin, byte* end);
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 27f92b571e..ba7e5c1eca 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -51,12 +51,12 @@ MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map,
live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
- DCHECK(live_bitmap_.get() != nullptr) << "could not create allocspace live bitmap #"
+ CHECK(live_bitmap_.get() != nullptr) << "could not create allocspace live bitmap #"
<< bitmap_index;
mark_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
Begin(), NonGrowthLimitCapacity()));
- DCHECK(live_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
+  CHECK(mark_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
<< bitmap_index;
}
for (auto& freed : recent_freed_objects_) {
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 6f49fbf203..a52b92b084 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -133,8 +133,6 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
return can_move_objects_;
}
- virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
-
protected:
MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 92c6f534cb..3f39c7707b 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -338,6 +338,12 @@ void RosAllocSpace::RevokeAllThreadLocalBuffers() {
rosalloc_->RevokeAllThreadLocalRuns();
}
+void RosAllocSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
+ if (kIsDebugBuild) {
+ rosalloc_->AssertThreadLocalRunsAreRevoked(thread);
+ }
+}
+
void RosAllocSpace::AssertAllThreadLocalBuffersAreRevoked() {
if (kIsDebugBuild) {
rosalloc_->AssertAllThreadLocalRunsAreRevoked();
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index f50530576b..f1ce115bde 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -101,6 +101,7 @@ class RosAllocSpace : public MallocSpace {
void RevokeThreadLocalBuffers(Thread* thread);
void RevokeAllThreadLocalBuffers();
+ void AssertThreadLocalBuffersAreRevoked(Thread* thread);
void AssertAllThreadLocalBuffersAreRevoked();
// Returns the class of a recently freed object.
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index fff4df1e0e..523d4fe8fd 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -223,6 +223,8 @@ class AllocSpace {
// threads, if the alloc space implementation uses any.
virtual void RevokeAllThreadLocalBuffers() = 0;
+ virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
+
protected:
struct SweepCallbackContext {
SweepCallbackContext(bool swap_bitmaps, space::Space* space);
@@ -407,11 +409,11 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
// Clear the space back to an empty space.
virtual void Clear() = 0;
- accounting::ContinuousSpaceBitmap* GetLiveBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetLiveBitmap() const OVERRIDE {
return live_bitmap_.get();
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() const {
+ accounting::ContinuousSpaceBitmap* GetMarkBitmap() const OVERRIDE {
return mark_bitmap_.get();
}
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index fb3a12efb5..51d84f5acb 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -77,25 +77,30 @@ void ZygoteSpace::Dump(std::ostream& os) const {
mirror::Object* ZygoteSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size) {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
return nullptr;
}
size_t ZygoteSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
return 0;
}
size_t ZygoteSpace::Free(Thread* self, mirror::Object* ptr) {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
return 0;
}
size_t ZygoteSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
- LOG(FATAL) << "Unimplemented";
+ UNIMPLEMENTED(FATAL);
return 0;
}
+void ZygoteSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
+ size_t /*failed_alloc_bytes*/) {
+ UNIMPLEMENTED(FATAL);
+}
+
void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
DCHECK(context->space->IsZygoteSpace());
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 5d5fe76b74..0cf4bb139c 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -74,6 +74,9 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
return false;
}
+ void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
protected:
virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
return &SweepCallback;
diff --git a/runtime/gc_root-inl.h b/runtime/gc_root-inl.h
new file mode 100644
index 0000000000..482f7bca0e
--- /dev/null
+++ b/runtime/gc_root-inl.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ROOT_INL_H_
+#define ART_RUNTIME_GC_ROOT_INL_H_
+
+#include "gc_root.h"
+
+#include "read_barrier-inl.h"
+
+namespace art {
+
+template<class MirrorType>
+template<ReadBarrierOption kReadBarrierOption>
+inline MirrorType* GcRoot<MirrorType>::Read() {
+ return ReadBarrier::BarrierForRoot<MirrorType, kReadBarrierOption>(&root_);
+}
+
+} // namespace art
+#endif // ART_RUNTIME_GC_ROOT_INL_H_
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
new file mode 100644
index 0000000000..86a8847431
--- /dev/null
+++ b/runtime/gc_root.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ROOT_H_
+#define ART_RUNTIME_GC_ROOT_H_
+
+#include "base/mutex.h" // For Locks::mutator_lock_.
+#include "object_callbacks.h"
+
+namespace art {
+
+template<class MirrorType>
+class GcRoot {
+ public:
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ ALWAYS_INLINE MirrorType* Read() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void VisitRoot(RootCallback* callback, void* arg, uint32_t thread_id, RootType root_type) {
+ callback(reinterpret_cast<mirror::Object**>(&root_), arg, thread_id, root_type);
+ }
+
+ // This is only used by IrtIterator.
+ ALWAYS_INLINE MirrorType** AddressWithoutBarrier() {
+ return &root_;
+ }
+
+ bool IsNull() const {
+ // It's safe to null-check it without a read barrier.
+ return root_ == nullptr;
+ }
+
+ ALWAYS_INLINE explicit GcRoot<MirrorType>() : root_(nullptr) {
+ }
+
+ ALWAYS_INLINE explicit GcRoot<MirrorType>(MirrorType* ref)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : root_(ref) {
+ }
+
+ private:
+ MirrorType* root_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_GC_ROOT_H_
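
[Illustrative aside, not part of the patch] A short sketch of how a GcRoot slot is used, following the IndirectReferenceTable changes later in this change:

    GcRoot<mirror::Object> slot;                    // default-constructed, i.e. null
    slot = GcRoot<mirror::Object>(obj);             // store a reference
    if (!slot.IsNull()) {
      mirror::Object* raw = slot.Read<kWithoutReadBarrier>();  // e.g. tombstone / sentinel checks
      mirror::Object* ref = slot.Read();                       // normal read, through the read barrier
    }
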
diff --git a/runtime/globals.h b/runtime/globals.h
index 1d9f22c35e..107e0646a8 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -118,6 +118,8 @@ static constexpr TraceClockSource kDefaultTraceClockSource = kTraceClockSourceDu
static constexpr TraceClockSource kDefaultTraceClockSource = kTraceClockSourceWall;
#endif
+static constexpr bool kDefaultMustRelocate = true;
+
} // namespace art
#endif // ART_RUNTIME_GLOBALS_H_
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 7e3b6bab26..fd67197986 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -151,7 +151,8 @@ enum HprofHeapTag {
enum HprofHeapId {
HPROF_HEAP_DEFAULT = 0,
HPROF_HEAP_ZYGOTE = 'Z',
- HPROF_HEAP_APP = 'A'
+ HPROF_HEAP_APP = 'A',
+ HPROF_HEAP_IMAGE = 'I',
};
enum HprofBasicType {
@@ -633,8 +634,12 @@ class Hprof {
// U1: NUL-terminated magic string.
fwrite(magic, 1, sizeof(magic), header_fp_);
- // U4: size of identifiers. We're using addresses as IDs, so make sure a pointer fits.
- U4_TO_BUF_BE(buf, 0, sizeof(void*));
+ // U4: size of identifiers. We're using addresses as IDs and our heap references are stored
+ // as uint32_t.
+ // Note of warning: hprof-conv hard-codes the size of identifiers to 4.
+ COMPILE_ASSERT(sizeof(mirror::HeapReference<mirror::Object>) == sizeof(uint32_t),
+ UnexpectedHeapReferenceSize);
+ U4_TO_BUF_BE(buf, 0, sizeof(uint32_t));
fwrite(buf, 1, sizeof(uint32_t), header_fp_);
// The current time, in milliseconds since 0:00 GMT, 1/1/70.
@@ -842,26 +847,37 @@ static int StackTraceSerialNumber(const mirror::Object* /*obj*/) {
int Hprof::DumpHeapObject(mirror::Object* obj) {
HprofRecord* rec = &current_record_;
- HprofHeapId desiredHeap = false ? HPROF_HEAP_ZYGOTE : HPROF_HEAP_APP; // TODO: zygote objects?
-
+ gc::space::ContinuousSpace* space =
+ Runtime::Current()->GetHeap()->FindContinuousSpaceFromObject(obj, true);
+ HprofHeapId heap_type = HPROF_HEAP_APP;
+ if (space != nullptr) {
+ if (space->IsZygoteSpace()) {
+ heap_type = HPROF_HEAP_ZYGOTE;
+ } else if (space->IsImageSpace()) {
+ heap_type = HPROF_HEAP_IMAGE;
+ }
+ }
if (objects_in_segment_ >= OBJECTS_PER_SEGMENT || rec->Size() >= BYTES_PER_SEGMENT) {
StartNewHeapDumpSegment();
}
- if (desiredHeap != current_heap_) {
+ if (heap_type != current_heap_) {
HprofStringId nameId;
// This object is in a different heap than the current one.
// Emit a HEAP_DUMP_INFO tag to change heaps.
rec->AddU1(HPROF_HEAP_DUMP_INFO);
- rec->AddU4((uint32_t)desiredHeap); // uint32_t: heap id
- switch (desiredHeap) {
+ rec->AddU4(static_cast<uint32_t>(heap_type)); // uint32_t: heap type
+ switch (heap_type) {
case HPROF_HEAP_APP:
nameId = LookupStringId("app");
break;
case HPROF_HEAP_ZYGOTE:
nameId = LookupStringId("zygote");
break;
+ case HPROF_HEAP_IMAGE:
+ nameId = LookupStringId("image");
+ break;
default:
// Internal error
LOG(ERROR) << "Unexpected desiredHeap";
@@ -869,7 +885,7 @@ int Hprof::DumpHeapObject(mirror::Object* obj) {
break;
}
rec->AddStringId(nameId);
- current_heap_ = desiredHeap;
+ current_heap_ = heap_type;
}
mirror::Class* c = obj->GetClass();
diff --git a/runtime/implicit_check_options.h b/runtime/implicit_check_options.h
deleted file mode 100644
index a6595b88e0..0000000000
--- a/runtime/implicit_check_options.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_
-#define ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_
-
-#include "gc/heap.h"
-#include "gc/space/image_space.h"
-#include "instruction_set.h"
-#include "runtime.h"
-
-#include <string>
-
-namespace art {
-
-class ImplicitCheckOptions {
- public:
- static constexpr const char* kImplicitChecksOatHeaderKey = "implicit-checks";
-
- static std::string Serialize(bool explicit_null_checks, bool explicit_stack_overflow_checks,
- bool explicit_suspend_checks) {
- char tmp[4];
- tmp[0] = explicit_null_checks ? 'N' : 'n';
- tmp[1] = explicit_stack_overflow_checks ? 'O' : 'o';
- tmp[2] = explicit_suspend_checks ? 'S' : 's';
- tmp[3] = 0;
- return std::string(tmp);
- }
-
- static bool Parse(const char* str, bool* explicit_null_checks,
- bool* explicit_stack_overflow_checks, bool* explicit_suspend_checks) {
- if (str != nullptr && str[0] != 0 && str[1] != 0 && str[2] != 0 &&
- (str[0] == 'n' || str[0] == 'N') &&
- (str[1] == 'o' || str[1] == 'O') &&
- (str[2] == 's' || str[2] == 'S')) {
- *explicit_null_checks = str[0] == 'N';
- *explicit_stack_overflow_checks = str[1] == 'O';
- *explicit_suspend_checks = str[2] == 'S';
- return true;
- } else {
- return false;
- }
- }
-
- // Check whether the given flags are correct with respect to the current runtime and the given
- // executable flag.
- static bool CheckRuntimeSupport(bool executable, bool explicit_null_checks,
- bool explicit_stack_overflow_checks,
- bool explicit_suspend_checks, std::string* error_msg) {
- if (!executable) {
- // Not meant to be run, i.e., either we are compiling or dumping. Just accept.
- return true;
- }
-
- Runtime* runtime = Runtime::Current();
- // We really should have a runtime.
- DCHECK_NE(static_cast<Runtime*>(nullptr), runtime);
-
- if (runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
- // We are an interpret-only environment. Ignore the check value.
- return true;
- }
-
- if (runtime->ExplicitNullChecks() != explicit_null_checks ||
- runtime->ExplicitStackOverflowChecks() != explicit_stack_overflow_checks ||
- runtime->ExplicitSuspendChecks() != explicit_suspend_checks) {
- if (error_msg != nullptr) {
- // Create an error message.
-
- std::ostringstream os;
- os << "Explicit check options do not match runtime: ";
- os << runtime->ExplicitNullChecks() << " vs " << explicit_null_checks << " | ";
- os << runtime->ExplicitStackOverflowChecks() << " vs " << explicit_stack_overflow_checks
- << " | ";
- os << runtime->ExplicitSuspendChecks() << " vs " << explicit_suspend_checks;
-
- *error_msg = os.str();
- }
-
- // Currently we do not create correct images when pre-opting, so the emulator will fail with
- // this change. Once the change is in the tree, REMOVE.
- if (true) {
- // At least try to log it, though.
- if (error_msg != nullptr) {
- LOG(WARNING) << *error_msg;
- }
- return true;
- } else {
- return false;
- }
- }
-
- // Accepted.
- return true;
- }
-
- // Check (and override) the flags depending on current support in the ISA.
- // Right now will reset all flags to explicit except on ARM.
- static void CheckISASupport(InstructionSet isa, bool* explicit_null_checks,
- bool* explicit_stack_overflow_checks, bool* explicit_suspend_checks) {
- switch (isa) {
- case kArm:
- case kThumb2:
- break; // All checks implemented, leave as is.
-
- default: // No checks implemented, reset all to explicit checks.
- *explicit_null_checks = true;
- *explicit_stack_overflow_checks = true;
- *explicit_suspend_checks = true;
- }
- }
-
- static bool CheckForCompiling(InstructionSet host, InstructionSet target,
- bool* explicit_null_checks, bool* explicit_stack_overflow_checks,
- bool* explicit_suspend_checks) {
- // Check the boot image settings.
- Runtime* runtime = Runtime::Current();
- if (runtime != nullptr) {
- gc::space::ImageSpace* ispace = runtime->GetHeap()->GetImageSpace();
- if (ispace != nullptr) {
- const OatFile* oat_file = ispace->GetOatFile();
- if (oat_file != nullptr) {
- const char* v = oat_file->GetOatHeader().GetStoreValueByKey(kImplicitChecksOatHeaderKey);
- if (!Parse(v, explicit_null_checks, explicit_stack_overflow_checks,
- explicit_suspend_checks)) {
- LOG(FATAL) << "Should have been able to parse boot image implicit check values";
- }
- return true;
- }
- }
- }
-
- // Check the current runtime.
- bool cross_compiling = true;
- switch (host) {
- case kArm:
- case kThumb2:
- cross_compiling = target != kArm && target != kThumb2;
- break;
- default:
- cross_compiling = host != target;
- break;
- }
- if (!cross_compiling) {
- Runtime* runtime = Runtime::Current();
- *explicit_null_checks = runtime->ExplicitNullChecks();
- *explicit_stack_overflow_checks = runtime->ExplicitStackOverflowChecks();
- *explicit_suspend_checks = runtime->ExplicitSuspendChecks();
- return true;
- }
-
- // Give up.
- return false;
- }
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_IMPLICIT_CHECK_OPTIONS_H_
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index f561643399..c826716787 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -46,7 +46,7 @@ inline bool IndirectReferenceTable::GetChecked(IndirectRef iref) const {
AbortIfNoCheckJNI();
return false;
}
- if (UNLIKELY(table_[idx] == nullptr)) {
+ if (UNLIKELY(table_[idx].IsNull())) {
LOG(ERROR) << "JNI ERROR (app bug): accessed deleted " << kind_ << " " << iref;
AbortIfNoCheckJNI();
return false;
@@ -75,11 +75,11 @@ inline mirror::Object* IndirectReferenceTable::Get(IndirectRef iref) const {
if (!GetChecked(iref)) {
return kInvalidIndirectRefObject;
}
- mirror::Object** root = &table_[ExtractIndex(iref)];
- mirror::Object* obj = *root;
+ uint32_t idx = ExtractIndex(iref);
+ mirror::Object* obj = table_[idx].Read<kWithoutReadBarrier>();
if (LIKELY(obj != kClearedJniWeakGlobal)) {
// The read barrier or VerifyObject won't handle kClearedJniWeakGlobal.
- obj = ReadBarrier::BarrierForRoot<mirror::Object, kReadBarrierOption>(root);
+ obj = table_[idx].Read();
VerifyObject(obj);
}
return obj;
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index ad798ed60f..1ba2291adf 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -56,7 +56,8 @@ std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs)
void IndirectReferenceTable::AbortIfNoCheckJNI() {
// If -Xcheck:jni is on, it'll give a more detailed error before aborting.
- if (!Runtime::Current()->GetJavaVM()->check_jni) {
+ JavaVMExt* vm = Runtime::Current()->GetJavaVM();
+ if (!vm->IsCheckJniEnabled()) {
// Otherwise, we want to abort rather than hand back a bad reference.
LOG(FATAL) << "JNI ERROR (app bug): see above.";
}
@@ -74,8 +75,9 @@ IndirectReferenceTable::IndirectReferenceTable(size_t initialCount,
table_mem_map_.reset(MemMap::MapAnonymous("indirect ref table", nullptr, table_bytes,
PROT_READ | PROT_WRITE, false, &error_str));
CHECK(table_mem_map_.get() != nullptr) << error_str;
+ CHECK_EQ(table_mem_map_->Size(), table_bytes);
- table_ = reinterpret_cast<mirror::Object**>(table_mem_map_->Begin());
+ table_ = reinterpret_cast<GcRoot<mirror::Object>*>(table_mem_map_->Begin());
CHECK(table_ != nullptr);
memset(table_, 0xd1, initial_bytes);
@@ -131,20 +133,22 @@ IndirectRef IndirectReferenceTable::Add(uint32_t cookie, mirror::Object* obj) {
if (numHoles > 0) {
DCHECK_GT(topIndex, 1U);
// Find the first hole; likely to be near the end of the list.
- mirror::Object** pScan = &table_[topIndex - 1];
- DCHECK(*pScan != NULL);
- while (*--pScan != NULL) {
+ GcRoot<mirror::Object>* pScan = &table_[topIndex - 1];
+ DCHECK(!pScan->IsNull());
+ --pScan;
+ while (!pScan->IsNull()) {
DCHECK_GE(pScan, table_ + prevState.parts.topIndex);
+ --pScan;
}
UpdateSlotAdd(obj, pScan - table_);
result = ToIndirectRef(pScan - table_);
- *pScan = obj;
+ *pScan = GcRoot<mirror::Object>(obj);
segment_state_.parts.numHoles--;
} else {
// Add to the end.
UpdateSlotAdd(obj, topIndex);
result = ToIndirectRef(topIndex);
- table_[topIndex++] = obj;
+ table_[topIndex++] = GcRoot<mirror::Object>(obj);
segment_state_.parts.topIndex = topIndex;
}
if (false) {
@@ -210,15 +214,16 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
return false;
}
- table_[idx] = NULL;
+ table_[idx] = GcRoot<mirror::Object>(nullptr);
int numHoles = segment_state_.parts.numHoles - prevState.parts.numHoles;
if (numHoles != 0) {
while (--topIndex > bottomIndex && numHoles != 0) {
if (false) {
LOG(INFO) << "+++ checking for hole at " << topIndex-1
- << " (cookie=" << cookie << ") val=" << table_[topIndex - 1];
+ << " (cookie=" << cookie << ") val="
+ << table_[topIndex - 1].Read<kWithoutReadBarrier>();
}
- if (table_[topIndex-1] != NULL) {
+ if (!table_[topIndex-1].IsNull()) {
break;
}
if (false) {
@@ -238,7 +243,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
// Not the top-most entry. This creates a hole. We NULL out the
// entry to prevent somebody from deleting it twice and screwing up
// the hole count.
- if (table_[idx] == NULL) {
+ if (table_[idx].IsNull()) {
LOG(INFO) << "--- WEIRD: removing null entry " << idx;
return false;
}
@@ -246,7 +251,7 @@ bool IndirectReferenceTable::Remove(uint32_t cookie, IndirectRef iref) {
return false;
}
- table_[idx] = NULL;
+ table_[idx] = GcRoot<mirror::Object>(nullptr);
segment_state_.parts.numHoles++;
if (false) {
LOG(INFO) << "+++ left hole at " << idx << ", holes=" << segment_state_.parts.numHoles;
@@ -268,17 +273,16 @@ void IndirectReferenceTable::Dump(std::ostream& os) const {
os << kind_ << " table dump:\n";
ReferenceTable::Table entries;
for (size_t i = 0; i < Capacity(); ++i) {
- mirror::Object** root = &table_[i];
- mirror::Object* obj = *root;
+ mirror::Object* obj = table_[i].Read<kWithoutReadBarrier>();
if (UNLIKELY(obj == nullptr)) {
// Remove NULLs.
} else if (UNLIKELY(obj == kClearedJniWeakGlobal)) {
// ReferenceTable::Dump() will handle kClearedJniWeakGlobal
// while the read barrier won't.
- entries.push_back(obj);
+ entries.push_back(GcRoot<mirror::Object>(obj));
} else {
- obj = ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
- entries.push_back(obj);
+ obj = table_[i].Read();
+ entries.push_back(GcRoot<mirror::Object>(obj));
}
}
ReferenceTable::Dump(os, entries);
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index b3a855dfb3..d25bc42dfc 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -24,16 +24,19 @@
#include "base/logging.h"
#include "base/mutex.h"
-#include "mem_map.h"
+#include "gc_root.h"
#include "object_callbacks.h"
#include "offsets.h"
-#include "read_barrier.h"
+#include "read_barrier_option.h"
namespace art {
+
namespace mirror {
class Object;
} // namespace mirror
+class MemMap;
+
/*
* Maintain a table of indirect references. Used for local/global JNI
* references.
@@ -204,12 +207,13 @@ union IRTSegmentState {
class IrtIterator {
public:
- explicit IrtIterator(mirror::Object** table, size_t i, size_t capacity)
+ explicit IrtIterator(GcRoot<mirror::Object>* table, size_t i, size_t capacity)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: table_(table), i_(i), capacity_(capacity) {
SkipNullsAndTombstones();
}
- IrtIterator& operator++() {
+ IrtIterator& operator++() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
++i_;
SkipNullsAndTombstones();
return *this;
@@ -217,7 +221,7 @@ class IrtIterator {
mirror::Object** operator*() {
// This does not have a read barrier as this is used to visit roots.
- return &table_[i_];
+ return table_[i_].AddressWithoutBarrier();
}
bool equals(const IrtIterator& rhs) const {
@@ -225,14 +229,16 @@ class IrtIterator {
}
private:
- void SkipNullsAndTombstones() {
+ void SkipNullsAndTombstones() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We skip NULLs and tombstones. Clients don't want to see implementation details.
- while (i_ < capacity_ && (table_[i_] == NULL || table_[i_] == kClearedJniWeakGlobal)) {
+ while (i_ < capacity_ &&
+ (table_[i_].IsNull() ||
+ table_[i_].Read<kWithoutReadBarrier>() == kClearedJniWeakGlobal)) {
++i_;
}
}
- mirror::Object** const table_;
+ GcRoot<mirror::Object>* const table_;
size_t i_;
size_t capacity_;
};
@@ -309,7 +315,8 @@ class IndirectReferenceTable {
return IrtIterator(table_, Capacity(), Capacity());
}
- void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type);
+ void VisitRoots(RootCallback* callback, void* arg, uint32_t tid, RootType root_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t GetSegmentState() const {
return segment_state_.all;
@@ -373,7 +380,7 @@ class IndirectReferenceTable {
std::unique_ptr<MemMap> slot_mem_map_;
// bottom of the stack. Do not directly access the object references
// in this as they are roots. Use Get() that has a read barrier.
- mirror::Object** table_;
+ GcRoot<mirror::Object>* table_;
/* bit mask, ORed into all irefs */
IndirectRefKind kind_;
/* extended debugging info */
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
index 5b6039647c..d7e358ce96 100644
--- a/runtime/instruction_set.cc
+++ b/runtime/instruction_set.cc
@@ -83,6 +83,44 @@ size_t GetInstructionSetAlignment(InstructionSet isa) {
}
}
+
+static constexpr size_t kDefaultStackOverflowReservedBytes = 16 * KB;
+static constexpr size_t kMipsStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
+
+// TODO: Lower once implicit stack-overflow checks can work with less than 16K.
+static constexpr size_t kArmStackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
+static constexpr size_t kArm64StackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
+static constexpr size_t kX86StackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
+static constexpr size_t kX86_64StackOverflowReservedBytes = (kIsDebugBuild ? 16 : 16) * KB;
+
+size_t GetStackOverflowReservedBytes(InstructionSet isa) {
+ switch (isa) {
+ case kArm: // Intentional fall-through.
+ case kThumb2:
+ return kArmStackOverflowReservedBytes;
+
+ case kArm64:
+ return kArm64StackOverflowReservedBytes;
+
+ case kMips:
+ return kMipsStackOverflowReservedBytes;
+
+ case kX86:
+ return kX86StackOverflowReservedBytes;
+
+ case kX86_64:
+ return kX86_64StackOverflowReservedBytes;
+
+ case kNone:
+ LOG(FATAL) << "kNone has no stack overflow size";
+ return 0;
+
+ default:
+ LOG(FATAL) << "Unknown instruction set" << isa;
+ return 0;
+ }
+}
+
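
[Illustrative aside, not part of the patch] With the constexpr helper gone, code that previously read the removed kRuntimeStackOverflowReservedBytes constant would presumably query the size at runtime instead, e.g.:

    const size_t reserved = GetStackOverflowReservedBytes(kRuntimeISA);
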
std::string InstructionSetFeatures::GetFeatureString() const {
std::string result;
if ((mask_ & kHwDiv) != 0) {
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
index dce1c150ba..f212811e32 100644
--- a/runtime/instruction_set.h
+++ b/runtime/instruction_set.h
@@ -169,33 +169,7 @@ static inline size_t GetBytesPerFprSpillLocation(InstructionSet isa) {
}
}
-static constexpr size_t kDefaultStackOverflowReservedBytes = 16 * KB;
-static constexpr size_t kArmStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
-static constexpr size_t kMipsStackOverflowReservedBytes = kDefaultStackOverflowReservedBytes;
-
-// TODO: shrink reserved space, in particular for 64bit.
-
-// Worst-case, we would need about 2.6x the amount of x86_64 for many more registers.
-// But this one works rather well.
-static constexpr size_t kArm64StackOverflowReservedBytes = 32 * KB;
-// TODO: Bumped to workaround regression (http://b/14982147) Specifically to fix:
-// test-art-host-run-test-interpreter-018-stack-overflow
-// test-art-host-run-test-interpreter-107-int-math2
-static constexpr size_t kX86StackOverflowReservedBytes = (kIsDebugBuild ? 32 : 24) * KB;
-static constexpr size_t kX86_64StackOverflowReservedBytes = 32 * KB;
-
-static constexpr size_t GetStackOverflowReservedBytes(InstructionSet isa) {
- return (isa == kArm || isa == kThumb2) ? kArmStackOverflowReservedBytes :
- isa == kArm64 ? kArm64StackOverflowReservedBytes :
- isa == kMips ? kMipsStackOverflowReservedBytes :
- isa == kX86 ? kX86StackOverflowReservedBytes :
- isa == kX86_64 ? kX86_64StackOverflowReservedBytes :
- isa == kNone ? (LOG(FATAL) << "kNone has no stack overflow size", 0) :
- (LOG(FATAL) << "Unknown instruction set" << isa, 0);
-}
-
-static constexpr size_t kRuntimeStackOverflowReservedBytes =
- GetStackOverflowReservedBytes(kRuntimeISA);
+size_t GetStackOverflowReservedBytes(InstructionSet isa);
enum InstructionFeatures {
kHwDiv = 0x1, // Supports hardware divide.
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index f4eaa61c1e..16be077de1 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -25,6 +25,7 @@
#include "debugger.h"
#include "dex_file-inl.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
+#include "gc_root-inl.h"
#include "interpreter/interpreter.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -519,7 +520,7 @@ void Instrumentation::ConfigureStubs(bool require_entry_exit_stubs, bool require
bool empty;
{
ReaderMutexLock mu(self, deoptimized_methods_lock_);
- empty = deoptimized_methods_.empty(); // Avoid lock violation.
+ empty = IsDeoptimizedMethodsEmpty(); // Avoid lock violation.
}
if (empty) {
instrumentation_stubs_installed_ = false;
@@ -580,7 +581,7 @@ void Instrumentation::ResetQuickAllocEntryPoints() {
}
void Instrumentation::UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code,
- const void* portable_code, bool have_portable_code) const {
+ const void* portable_code, bool have_portable_code) {
const void* new_portable_code;
const void* new_quick_code;
bool new_have_portable_code;
@@ -617,20 +618,74 @@ void Instrumentation::UpdateMethodsCode(mirror::ArtMethod* method, const void* q
UpdateEntrypoints(method, new_quick_code, new_portable_code, new_have_portable_code);
}
+bool Instrumentation::AddDeoptimizedMethod(mirror::ArtMethod* method) {
+ // Note that the insert() below isn't read barrier-aware. So, this
+ // FindDeoptimizedMethod() call is necessary or else we would end up
+ // storing the same method twice in the map (the from-space and the
+ // to-space ones).
+ if (FindDeoptimizedMethod(method)) {
+ // Already in the map. Return.
+ return false;
+ }
+ // Not found. Add it.
+ int32_t hash_code = method->IdentityHashCode();
+ deoptimized_methods_.insert(std::make_pair(hash_code, GcRoot<mirror::ArtMethod>(method)));
+ return true;
+}
+
+bool Instrumentation::FindDeoptimizedMethod(mirror::ArtMethod* method) {
+ int32_t hash_code = method->IdentityHashCode();
+ auto range = deoptimized_methods_.equal_range(hash_code);
+ for (auto it = range.first; it != range.second; ++it) {
+ mirror::ArtMethod* m = it->second.Read();
+ if (m == method) {
+ // Found.
+ return true;
+ }
+ }
+ // Not found.
+ return false;
+}
+
+mirror::ArtMethod* Instrumentation::BeginDeoptimizedMethod() {
+ auto it = deoptimized_methods_.begin();
+ if (it == deoptimized_methods_.end()) {
+ // Empty.
+ return nullptr;
+ }
+ return it->second.Read();
+}
+
+bool Instrumentation::RemoveDeoptimizedMethod(mirror::ArtMethod* method) {
+ int32_t hash_code = method->IdentityHashCode();
+ auto range = deoptimized_methods_.equal_range(hash_code);
+ for (auto it = range.first; it != range.second; ++it) {
+ mirror::ArtMethod* m = it->second.Read();
+ if (m == method) {
+ // Found. Erase and return.
+ deoptimized_methods_.erase(it);
+ return true;
+ }
+ }
+ // Not found.
+ return false;
+}
+
+bool Instrumentation::IsDeoptimizedMethodsEmpty() const {
+ return deoptimized_methods_.empty();
+}
+
void Instrumentation::Deoptimize(mirror::ArtMethod* method) {
CHECK(!method->IsNative());
CHECK(!method->IsProxyMethod());
CHECK(!method->IsAbstract());
Thread* self = Thread::Current();
- std::pair<std::set<mirror::ArtMethod*>::iterator, bool> pair;
{
WriterMutexLock mu(self, deoptimized_methods_lock_);
- pair = deoptimized_methods_.insert(method);
+ bool has_not_been_deoptimized = AddDeoptimizedMethod(method);
+ CHECK(has_not_been_deoptimized) << "Method " << PrettyMethod(method) << " is already deoptimized";
}
- bool already_deoptimized = !pair.second;
- CHECK(!already_deoptimized) << "Method " << PrettyMethod(method) << " is already deoptimized";
-
if (!interpreter_stubs_installed_) {
UpdateEntrypoints(method, GetQuickInstrumentationEntryPoint(), GetPortableToInterpreterBridge(),
false);
@@ -652,11 +707,10 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
bool empty;
{
WriterMutexLock mu(self, deoptimized_methods_lock_);
- auto it = deoptimized_methods_.find(method);
- CHECK(it != deoptimized_methods_.end()) << "Method " << PrettyMethod(method)
+ bool found_and_erased = RemoveDeoptimizedMethod(method);
+ CHECK(found_and_erased) << "Method " << PrettyMethod(method)
<< " is not deoptimized";
- deoptimized_methods_.erase(it);
- empty = deoptimized_methods_.empty();
+ empty = IsDeoptimizedMethodsEmpty();
}
// Restore code and possibly stack only if we did not deoptimize everything.
@@ -684,15 +738,15 @@ void Instrumentation::Undeoptimize(mirror::ArtMethod* method) {
}
}
-bool Instrumentation::IsDeoptimized(mirror::ArtMethod* method) const {
- ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+bool Instrumentation::IsDeoptimized(mirror::ArtMethod* method) {
DCHECK(method != nullptr);
- return deoptimized_methods_.find(method) != deoptimized_methods_.end();
+ ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
+ return FindDeoptimizedMethod(method);
}
void Instrumentation::EnableDeoptimization() {
ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
- CHECK(deoptimized_methods_.empty());
+ CHECK(IsDeoptimizedMethodsEmpty());
CHECK_EQ(deoptimization_enabled_, false);
deoptimization_enabled_ = true;
}
@@ -708,10 +762,11 @@ void Instrumentation::DisableDeoptimization() {
mirror::ArtMethod* method;
{
ReaderMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
- if (deoptimized_methods_.empty()) {
+ if (IsDeoptimizedMethodsEmpty()) {
break;
}
- method = *deoptimized_methods_.begin();
+ method = BeginDeoptimizedMethod();
+ CHECK(method != nullptr);
}
Undeoptimize(method);
}
@@ -963,16 +1018,12 @@ void Instrumentation::PopMethodForUnwind(Thread* self, bool is_deoptimization) c
void Instrumentation::VisitRoots(RootCallback* callback, void* arg) {
WriterMutexLock mu(Thread::Current(), deoptimized_methods_lock_);
- if (deoptimized_methods_.empty()) {
+ if (IsDeoptimizedMethodsEmpty()) {
return;
}
- std::set<mirror::ArtMethod*> new_deoptimized_methods;
- for (mirror::ArtMethod* method : deoptimized_methods_) {
- DCHECK(method != nullptr);
- callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootVMInternal);
- new_deoptimized_methods.insert(method);
+ for (auto pair : deoptimized_methods_) {
+ pair.second.VisitRoot(callback, arg, 0, kRootVMInternal);
}
- deoptimized_methods_ = new_deoptimized_methods;
}
std::string InstrumentationStackFrame::Dump() const {
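A note on the data-structure change above: with a moving collector the raw ArtMethod* keys of the old std::set can go stale, so the patch keys the new multimap on the method's identity hash code (stable across moves) and compares entries through read-barrier-aware GcRoot reads, exactly as the comment in AddDeoptimizedMethod explains. A minimal, ART-independent sketch of that insert-if-absent pattern, with a hypothetical Handle type standing in for GcRoot:

    #include <cstdint>
    #include <map>

    struct Method {
      int32_t IdentityHashCode() const { return hash_; }
      int32_t hash_;
    };

    // Hypothetical stand-in for GcRoot<T>; in ART, Read() applies a read barrier.
    template <typename T>
    struct Handle {
      explicit Handle(T* ref) : ref_(ref) {}
      T* Read() const { return ref_; }
      T* ref_;
    };

    using DeoptMap = std::multimap<int32_t, Handle<Method>>;

    // Returns true if 'm' was newly inserted, false if an equal entry already existed.
    bool AddIfAbsent(DeoptMap& map, Method* m) {
      const int32_t hash = m->IdentityHashCode();
      auto range = map.equal_range(hash);
      for (auto it = range.first; it != range.second; ++it) {
        if (it->second.Read() == m) {
          return false;  // Already present; avoids storing from-space and to-space copies.
        }
      }
      map.emplace(hash, Handle<Method>(m));
      return true;
    }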
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index d0cb4ded04..66c6b388d4 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -18,13 +18,14 @@
#define ART_RUNTIME_INSTRUMENTATION_H_
#include <stdint.h>
-#include <set>
#include <list>
+#include <map>
#include "atomic.h"
#include "instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "gc_root.h"
#include "object_callbacks.h"
namespace art {
@@ -162,7 +163,9 @@ class Instrumentation {
LOCKS_EXCLUDED(Locks::thread_list_lock_, deoptimized_methods_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsDeoptimized(mirror::ArtMethod* method) const LOCKS_EXCLUDED(deoptimized_methods_lock_);
+ bool IsDeoptimized(mirror::ArtMethod* method)
+ LOCKS_EXCLUDED(deoptimized_methods_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Enable method tracing by installing instrumentation entry/exit stubs.
void EnableMethodTracing()
@@ -186,7 +189,7 @@ class Instrumentation {
// Update the code of a method respecting any installed stubs.
void UpdateMethodsCode(mirror::ArtMethod* method, const void* quick_code,
- const void* portable_code, bool have_portable_code) const
+ const void* portable_code, bool have_portable_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the quick code for the given method. More efficient than asking the class linker as it
@@ -367,6 +370,23 @@ class Instrumentation {
mirror::ArtField* field, const JValue& field_value) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Read barrier-aware utility functions for accessing deoptimized_methods_
+ bool AddDeoptimizedMethod(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ bool FindDeoptimizedMethod(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ bool RemoveDeoptimizedMethod(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ mirror::ArtMethod* BeginDeoptimizedMethod()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_);
+ bool IsDeoptimizedMethodsEmpty() const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ SHARED_LOCKS_REQUIRED(deoptimized_methods_lock_);
+
// Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
bool instrumentation_stubs_installed_;
@@ -421,7 +441,8 @@ class Instrumentation {
// The set of methods being deoptimized (by the debugger) which must be executed with interpreter
// only.
mutable ReaderWriterMutex deoptimized_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::set<mirror::ArtMethod*> deoptimized_methods_ GUARDED_BY(deoptimized_methods_lock_);
+ std::multimap<int32_t, GcRoot<mirror::ArtMethod>> deoptimized_methods_
+ GUARDED_BY(deoptimized_methods_lock_);
bool deoptimization_enabled_;
// Current interpreter handler table. This is updated each time the thread state flags are
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 14305006e4..aadd85a0f4 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -58,27 +58,27 @@ void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags f
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
if ((flags & kVisitRootFlagAllRoots) != 0) {
for (auto& strong_intern : strong_interns_) {
- callback(reinterpret_cast<mirror::Object**>(&strong_intern.second), arg, 0,
- kRootInternedString);
- DCHECK(strong_intern.second != nullptr);
+ strong_intern.second.VisitRoot(callback, arg, 0, kRootInternedString);
+ DCHECK(!strong_intern.second.IsNull());
}
} else if ((flags & kVisitRootFlagNewRoots) != 0) {
for (auto& pair : new_strong_intern_roots_) {
- mirror::String* old_ref = pair.second;
- callback(reinterpret_cast<mirror::Object**>(&pair.second), arg, 0, kRootInternedString);
- if (UNLIKELY(pair.second != old_ref)) {
- // Uh ohes, GC moved a root in the log. Need to search the strong interns and update the
- // corresponding object. This is slow, but luckily for us, this may only happen with a
- // concurrent moving GC.
- for (auto it = strong_interns_.lower_bound(pair.first), end = strong_interns_.end();
+ mirror::String* old_ref = pair.second.Read<kWithoutReadBarrier>();
+ pair.second.VisitRoot(callback, arg, 0, kRootInternedString);
+ mirror::String* new_ref = pair.second.Read<kWithoutReadBarrier>();
+ if (UNLIKELY(new_ref != old_ref)) {
+ // Uh oh, GC moved a root in the log. Need to search the strong interns and update the
+ // corresponding object. This is slow, but luckily for us, this may only happen with a
+ // concurrent moving GC.
+ for (auto it = strong_interns_.lower_bound(pair.first), end = strong_interns_.end();
it != end && it->first == pair.first; ++it) {
- // If the class stored matches the old class, update it to the new value.
- if (old_ref == it->second) {
- it->second = pair.second;
- }
- }
- }
- }
+ // If the string stored matches the old string, update it to the new value.
+ if (old_ref == it->second.Read<kWithoutReadBarrier>()) {
+ it->second = GcRoot<mirror::String>(new_ref);
+ }
+ }
+ }
+ }
}
if ((flags & kVisitRootFlagClearRootLog) != 0) {
@@ -105,9 +105,7 @@ mirror::String* InternTable::Lookup(Table* table, mirror::String* s, int32_t has
Locks::intern_table_lock_->AssertHeld(Thread::Current());
for (auto it = table->lower_bound(hash_code), end = table->end();
it != end && it->first == hash_code; ++it) {
- mirror::String* existing_string;
- mirror::String** root = &it->second;
- existing_string = ReadBarrier::BarrierForRoot<mirror::String, kWithReadBarrier>(root);
+ mirror::String* existing_string = it->second.Read();
if (existing_string->Equals(s)) {
return existing_string;
}
@@ -121,9 +119,9 @@ mirror::String* InternTable::InsertStrong(mirror::String* s, int32_t hash_code)
runtime->RecordStrongStringInsertion(s, hash_code);
}
if (log_new_roots_) {
- new_strong_intern_roots_.push_back(std::make_pair(hash_code, s));
+ new_strong_intern_roots_.push_back(std::make_pair(hash_code, GcRoot<mirror::String>(s)));
}
- strong_interns_.insert(std::make_pair(hash_code, s));
+ strong_interns_.insert(std::make_pair(hash_code, GcRoot<mirror::String>(s)));
return s;
}
@@ -132,7 +130,7 @@ mirror::String* InternTable::InsertWeak(mirror::String* s, int32_t hash_code) {
if (runtime->IsActiveTransaction()) {
runtime->RecordWeakStringInsertion(s, hash_code);
}
- weak_interns_.insert(std::make_pair(hash_code, s));
+ weak_interns_.insert(std::make_pair(hash_code, GcRoot<mirror::String>(s)));
return s;
}
@@ -151,9 +149,7 @@ void InternTable::RemoveWeak(mirror::String* s, int32_t hash_code) {
void InternTable::Remove(Table* table, mirror::String* s, int32_t hash_code) {
for (auto it = table->lower_bound(hash_code), end = table->end();
it != end && it->first == hash_code; ++it) {
- mirror::String* existing_string;
- mirror::String** root = &it->second;
- existing_string = ReadBarrier::BarrierForRoot<mirror::String, kWithReadBarrier>(root);
+ mirror::String* existing_string = it->second.Read();
if (existing_string == s) {
table->erase(it);
return;
@@ -308,13 +304,13 @@ void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
for (auto it = weak_interns_.begin(), end = weak_interns_.end(); it != end;) {
// This does not need a read barrier because this is called by GC.
- mirror::Object* object = it->second;
+ mirror::Object* object = it->second.Read<kWithoutReadBarrier>();
mirror::Object* new_object = callback(object, arg);
if (new_object == nullptr) {
// TODO: use it = weak_interns_.erase(it) when we get a c++11 stl.
weak_interns_.erase(it++);
} else {
- it->second = down_cast<mirror::String*>(new_object);
+ it->second = GcRoot<mirror::String>(down_cast<mirror::String*>(new_object));
++it;
}
}
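The sweep loop in SweepInternTableWeaks follows the usual weak-table idiom: the GC-supplied IsMarkedCallback returns null when the referent is dead and the (possibly forwarded) new address otherwise, so the table either erases the entry or rewrites the root in place. A rough, ART-independent sketch of the idiom, with a hypothetical callback signature:

    #include <cstdint>
    #include <map>

    struct Object {};

    // Hypothetical: returns nullptr if 'obj' is dead, otherwise its (possibly moved) address.
    using IsMarkedFn = Object* (*)(Object* obj, void* arg);

    void SweepWeakTable(std::multimap<int32_t, Object*>& table, IsMarkedFn callback, void* arg) {
      for (auto it = table.begin(); it != table.end();) {
        Object* new_obj = callback(it->second, arg);
        if (new_obj == nullptr) {
          it = table.erase(it);   // Referent died; drop the weak entry.
        } else {
          it->second = new_obj;   // Keep the entry, updated to the forwarded address.
          ++it;
        }
      }
    }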
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 6dc7f7b606..435cc430b1 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -20,6 +20,7 @@
#include <map>
#include "base/mutex.h"
+#include "gc_root.h"
#include "object_callbacks.h"
namespace art {
@@ -59,7 +60,8 @@ class InternTable {
// Interns a potentially new string in the 'weak' table. (See above.)
mirror::String* InternWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepInternTableWeaks(IsMarkedCallback* callback, void* arg);
+ void SweepInternTableWeaks(IsMarkedCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool ContainsWeak(mirror::String* s) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -67,7 +69,8 @@ class InternTable {
size_t StrongSize() const;
size_t WeakSize() const;
- void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags);
+ void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os) const;
@@ -75,7 +78,7 @@ class InternTable {
void AllowNewInterns() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- typedef std::multimap<int32_t, mirror::String*> Table;
+ typedef std::multimap<int32_t, GcRoot<mirror::String>> Table;
mirror::String* Insert(mirror::String* s, bool is_strong)
LOCKS_EXCLUDED(Locks::intern_table_lock_)
@@ -122,7 +125,7 @@ class InternTable {
// directly access the strings in it. Use functions that contain
// read barriers.
Table strong_interns_ GUARDED_BY(Locks::intern_table_lock_);
- std::vector<std::pair<int32_t, mirror::String*>> new_strong_intern_roots_
+ std::vector<std::pair<int32_t, GcRoot<mirror::String>>> new_strong_intern_roots_
GUARDED_BY(Locks::intern_table_lock_);
// Since this contains (weak) roots, they need a read barrier. Do
// not directly access the strings in it. Use functions that contain
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index e3068b338d..47a7f0d62e 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -397,7 +397,8 @@ static inline JValue Execute(Thread* self, MethodHelper& mh, const DexFile::Code
void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method, Object* receiver,
uint32_t* args, JValue* result) {
DCHECK_EQ(self, Thread::Current());
- if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
+ bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
ThrowStackOverflowError(self);
return;
}
@@ -509,7 +510,8 @@ void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, JVa
JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame) {
DCHECK_EQ(self, Thread::Current());
- if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
+ bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
ThrowStackOverflowError(self);
return JValue();
}
@@ -520,7 +522,8 @@ JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, const DexFile::C
extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result) {
- if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
+ bool implicit_check = !Runtime::Current()->ExplicitStackOverflowChecks();
+ if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEndForInterpreter(implicit_check))) {
ThrowStackOverflowError(self);
return;
}
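All three interpreter entry points now ask the thread for a stack limit that depends on whether the runtime uses implicit (fault-based) or explicit stack-overflow checks, then compare the current frame address against it. A simplified illustration of that guard; the field names are placeholders, not ART's API:

    #include <cstdint>

    struct Thread {
      uint8_t* stack_end_;     // Limit used by compiled-code overflow checks.
      size_t reserved_bytes_;  // Headroom kept for building and reporting the overflow error.

      // With implicit checks, stack_end_ sits at the very bottom of the stack (a guard page
      // catches overflows in compiled code), so interpreter entries add the headroom back.
      uint8_t* StackEndForInterpreter(bool implicit_checks) const {
        return implicit_checks ? stack_end_ + reserved_bytes_ : stack_end_;
      }
    };

    bool WouldOverflow(const Thread& self, bool implicit_checks) {
      // __builtin_frame_address(0) is the current frame's address; once it sinks below the
      // computed limit, throwing StackOverflowError is the safe response.
      return static_cast<uint8_t*>(__builtin_frame_address(0)) <
             self.StackEndForInterpreter(implicit_checks);
    }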
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 630b324c30..b35da0cf1b 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -37,7 +37,6 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst
CHECK(self->IsExceptionPending());
return false;
}
- f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
Object* obj;
if (is_static) {
obj = f->GetDeclaringClass();
@@ -48,6 +47,7 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst
return false;
}
}
+ f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
// Report this field access to instrumentation if needed.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
@@ -213,7 +213,6 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction
CHECK(self->IsExceptionPending());
return false;
}
- f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
Object* obj;
if (is_static) {
obj = f->GetDeclaringClass();
@@ -225,6 +224,7 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction
return false;
}
}
+ f->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self);
uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
// Report this field access to instrumentation if needed. Since we only have the offset of
// the field from the base of the object, we need to look for it first.
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 1bcd27e2d4..5a1d01e3f7 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -140,7 +140,8 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
return false;
}
const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- ArtMethod* const method = receiver->GetClass()->GetVTable()->GetWithoutChecks(vtable_idx);
+ CHECK(receiver->GetClass()->ShouldHaveEmbeddedImtAndVTable());
+ ArtMethod* const method = receiver->GetClass()->GetEmbeddedVTableEntry(vtable_idx);
if (UNLIKELY(method == nullptr)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
new file mode 100644
index 0000000000..9eab3fde13
--- /dev/null
+++ b/runtime/java_vm_ext.cc
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_internal.h"
+
+#include <dlfcn.h>
+
+#include "base/mutex.h"
+#include "base/stl_util.h"
+#include "check_jni.h"
+#include "indirect_reference_table-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "native_bridge.h"
+#include "java_vm_ext.h"
+#include "parsed_options.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace art {
+
+static const size_t kPinTableInitial = 16; // Arbitrary.
+static const size_t kPinTableMax = 1024; // Arbitrary sanity check.
+
+static size_t gGlobalsInitial = 512; // Arbitrary.
+static size_t gGlobalsMax = 51200; // Arbitrary sanity check. (Must fit in 16 bits.)
+
+static const size_t kWeakGlobalsInitial = 16; // Arbitrary.
+static const size_t kWeakGlobalsMax = 51200; // Arbitrary sanity check. (Must fit in 16 bits.)
+
+static bool IsBadJniVersion(int version) {
+ // We don't support JNI_VERSION_1_1. These are the only other valid versions.
+ return version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 && version != JNI_VERSION_1_6;
+}
+
+class SharedLibrary {
+ public:
+ SharedLibrary(JNIEnv* env, Thread* self, const std::string& path, void* handle,
+ jobject class_loader)
+ : path_(path),
+ handle_(handle),
+ needs_native_bridge_(false),
+ class_loader_(env->NewGlobalRef(class_loader)),
+ jni_on_load_lock_("JNI_OnLoad lock"),
+ jni_on_load_cond_("JNI_OnLoad condition variable", jni_on_load_lock_),
+ jni_on_load_thread_id_(self->GetThreadId()),
+ jni_on_load_result_(kPending) {
+ }
+
+ ~SharedLibrary() {
+ Thread* self = Thread::Current();
+ if (self != nullptr) {
+ self->GetJniEnv()->DeleteGlobalRef(class_loader_);
+ }
+ }
+
+ jobject GetClassLoader() const {
+ return class_loader_;
+ }
+
+ const std::string& GetPath() const {
+ return path_;
+ }
+
+ /*
+ * Check the result of an earlier call to JNI_OnLoad on this library.
+ * If the call has not yet finished in another thread, wait for it.
+ */
+ bool CheckOnLoadResult()
+ LOCKS_EXCLUDED(jni_on_load_lock_) {
+ Thread* self = Thread::Current();
+ bool okay;
+ {
+ MutexLock mu(self, jni_on_load_lock_);
+
+ if (jni_on_load_thread_id_ == self->GetThreadId()) {
+ // Check this so we don't end up waiting for ourselves. We need to return "true" so the
+ // caller can continue.
+ LOG(INFO) << *self << " recursive attempt to load library " << "\"" << path_ << "\"";
+ okay = true;
+ } else {
+ while (jni_on_load_result_ == kPending) {
+ VLOG(jni) << "[" << *self << " waiting for \"" << path_ << "\" " << "JNI_OnLoad...]";
+ jni_on_load_cond_.Wait(self);
+ }
+
+ okay = (jni_on_load_result_ == kOkay);
+ VLOG(jni) << "[Earlier JNI_OnLoad for \"" << path_ << "\" "
+ << (okay ? "succeeded" : "failed") << "]";
+ }
+ }
+ return okay;
+ }
+
+ void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, jni_on_load_lock_);
+
+ jni_on_load_result_ = result ? kOkay : kFailed;
+ jni_on_load_thread_id_ = 0;
+
+ // Broadcast a wakeup to anybody sleeping on the condition variable.
+ jni_on_load_cond_.Broadcast(self);
+ }
+
+ void SetNeedsNativeBridge() {
+ needs_native_bridge_ = true;
+ }
+
+ bool NeedsNativeBridge() const {
+ return needs_native_bridge_;
+ }
+
+ void* FindSymbol(const std::string& symbol_name) {
+ return dlsym(handle_, symbol_name.c_str());
+ }
+
+ void* FindSymbolWithNativeBridge(const std::string& symbol_name, const char* shorty) {
+ CHECK(NeedsNativeBridge());
+
+ uint32_t len = 0;
+ return NativeBridgeGetTrampoline(handle_, symbol_name.c_str(), shorty, len);
+ }
+
+ private:
+ enum JNI_OnLoadState {
+ kPending,
+ kFailed,
+ kOkay,
+ };
+
+ // Path to library "/system/lib/libjni.so".
+ const std::string path_;
+
+ // The void* returned by dlopen(3).
+ void* const handle_;
+
+ // True if a native bridge is required.
+ bool needs_native_bridge_;
+
+ // The ClassLoader this library is associated with, a global JNI reference that is
+ // created/deleted with the scope of the library.
+ const jobject class_loader_;
+
+ // Guards remaining items.
+ Mutex jni_on_load_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // Wait for JNI_OnLoad in other thread.
+ ConditionVariable jni_on_load_cond_ GUARDED_BY(jni_on_load_lock_);
+ // Recursive invocation guard.
+ uint32_t jni_on_load_thread_id_ GUARDED_BY(jni_on_load_lock_);
+ // Result of earlier JNI_OnLoad call.
+ JNI_OnLoadState jni_on_load_result_ GUARDED_BY(jni_on_load_lock_);
+};
+
+// This exists mainly to keep implementation details out of the header file.
+class Libraries {
+ public:
+ Libraries() {
+ }
+
+ ~Libraries() {
+ STLDeleteValues(&libraries_);
+ }
+
+ void Dump(std::ostream& os) const {
+ bool first = true;
+ for (const auto& library : libraries_) {
+ if (!first) {
+ os << ' ';
+ }
+ first = false;
+ os << library.first;
+ }
+ }
+
+ size_t size() const {
+ return libraries_.size();
+ }
+
+ SharedLibrary* Get(const std::string& path) {
+ auto it = libraries_.find(path);
+ return (it == libraries_.end()) ? nullptr : it->second;
+ }
+
+ void Put(const std::string& path, SharedLibrary* library) {
+ libraries_.Put(path, library);
+ }
+
+ // See section 11.3 "Linking Native Methods" of the JNI spec.
+ void* FindNativeMethod(mirror::ArtMethod* m, std::string& detail)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::jni_libraries_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::string jni_short_name(JniShortName(m));
+ std::string jni_long_name(JniLongName(m));
+ const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ for (const auto& lib : libraries_) {
+ SharedLibrary* library = lib.second;
+ if (soa.Decode<mirror::ClassLoader*>(library->GetClassLoader()) != declaring_class_loader) {
+ // We only search libraries loaded by the appropriate ClassLoader.
+ continue;
+ }
+ // Try the short name then the long name...
+ void* fn;
+ if (library->NeedsNativeBridge()) {
+ const char* shorty = m->GetShorty();
+ fn = library->FindSymbolWithNativeBridge(jni_short_name, shorty);
+ if (fn == nullptr) {
+ fn = library->FindSymbolWithNativeBridge(jni_long_name, shorty);
+ }
+ } else {
+ fn = library->FindSymbol(jni_short_name);
+ if (fn == nullptr) {
+ fn = library->FindSymbol(jni_long_name);
+ }
+ }
+ if (fn != nullptr) {
+ VLOG(jni) << "[Found native code for " << PrettyMethod(m)
+ << " in \"" << library->GetPath() << "\"]";
+ return fn;
+ }
+ }
+ detail += "No implementation found for ";
+ detail += PrettyMethod(m);
+ detail += " (tried " + jni_short_name + " and " + jni_long_name + ")";
+ LOG(ERROR) << detail;
+ return nullptr;
+ }
+
+ private:
+ SafeMap<std::string, SharedLibrary*> libraries_;
+};
+
+
+class JII {
+ public:
+ static jint DestroyJavaVM(JavaVM* vm) {
+ if (vm == nullptr) {
+ return JNI_ERR;
+ }
+ JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
+ delete raw_vm->GetRuntime();
+ return JNI_OK;
+ }
+
+ static jint AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
+ return AttachCurrentThreadInternal(vm, p_env, thr_args, false);
+ }
+
+ static jint AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
+ return AttachCurrentThreadInternal(vm, p_env, thr_args, true);
+ }
+
+ static jint DetachCurrentThread(JavaVM* vm) {
+ if (vm == nullptr || Thread::Current() == nullptr) {
+ return JNI_ERR;
+ }
+ JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
+ Runtime* runtime = raw_vm->GetRuntime();
+ runtime->DetachCurrentThread();
+ return JNI_OK;
+ }
+
+ static jint GetEnv(JavaVM* vm, void** env, jint version) {
+ // GetEnv always returns a JNIEnv* for the most current supported JNI version,
+ // and unlike other calls that take a JNI version doesn't care if you supply
+ // JNI_VERSION_1_1, which we don't otherwise support.
+ if (IsBadJniVersion(version) && version != JNI_VERSION_1_1) {
+ LOG(ERROR) << "Bad JNI version passed to GetEnv: " << version;
+ return JNI_EVERSION;
+ }
+ if (vm == nullptr || env == nullptr) {
+ return JNI_ERR;
+ }
+ Thread* thread = Thread::Current();
+ if (thread == nullptr) {
+ *env = nullptr;
+ return JNI_EDETACHED;
+ }
+ *env = thread->GetJniEnv();
+ return JNI_OK;
+ }
+
+ private:
+ static jint AttachCurrentThreadInternal(JavaVM* vm, JNIEnv** p_env, void* raw_args, bool as_daemon) {
+ if (vm == nullptr || p_env == nullptr) {
+ return JNI_ERR;
+ }
+
+ // Return immediately if we're already attached.
+ Thread* self = Thread::Current();
+ if (self != nullptr) {
+ *p_env = self->GetJniEnv();
+ return JNI_OK;
+ }
+
+ Runtime* runtime = reinterpret_cast<JavaVMExt*>(vm)->GetRuntime();
+
+ // No threads allowed in zygote mode.
+ if (runtime->IsZygote()) {
+ LOG(ERROR) << "Attempt to attach a thread in the zygote";
+ return JNI_ERR;
+ }
+
+ JavaVMAttachArgs* args = static_cast<JavaVMAttachArgs*>(raw_args);
+ const char* thread_name = nullptr;
+ jobject thread_group = nullptr;
+ if (args != nullptr) {
+ if (IsBadJniVersion(args->version)) {
+ LOG(ERROR) << "Bad JNI version passed to "
+ << (as_daemon ? "AttachCurrentThreadAsDaemon" : "AttachCurrentThread") << ": "
+ << args->version;
+ return JNI_EVERSION;
+ }
+ thread_name = args->name;
+ thread_group = args->group;
+ }
+
+ if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group, !runtime->IsCompiler())) {
+ *p_env = nullptr;
+ return JNI_ERR;
+ } else {
+ *p_env = Thread::Current()->GetJniEnv();
+ return JNI_OK;
+ }
+ }
+};
+
+const JNIInvokeInterface gJniInvokeInterface = {
+ nullptr, // reserved0
+ nullptr, // reserved1
+ nullptr, // reserved2
+ JII::DestroyJavaVM,
+ JII::AttachCurrentThread,
+ JII::DetachCurrentThread,
+ JII::GetEnv,
+ JII::AttachCurrentThreadAsDaemon
+};
+
+JavaVMExt::JavaVMExt(Runtime* runtime, ParsedOptions* options)
+ : runtime_(runtime),
+ check_jni_abort_hook_(nullptr),
+ check_jni_abort_hook_data_(nullptr),
+ check_jni_(false), // Initialized properly in the constructor body below.
+ force_copy_(options->force_copy_),
+ tracing_enabled_(!options->jni_trace_.empty() || VLOG_IS_ON(third_party_jni)),
+ trace_(options->jni_trace_),
+ pins_lock_("JNI pin table lock", kPinTableLock),
+ pin_table_("pin table", kPinTableInitial, kPinTableMax),
+ globals_lock_("JNI global reference table lock"),
+ globals_(gGlobalsInitial, gGlobalsMax, kGlobal),
+ libraries_(new Libraries),
+ unchecked_functions_(&gJniInvokeInterface),
+ weak_globals_lock_("JNI weak global reference table lock"),
+ weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
+ allow_new_weak_globals_(true),
+ weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
+ functions = unchecked_functions_;
+ if (options->check_jni_) {
+ SetCheckJniEnabled(true);
+ }
+}
+
+JavaVMExt::~JavaVMExt() {
+}
+
+void JavaVMExt::JniAbort(const char* jni_function_name, const char* msg) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ mirror::ArtMethod* current_method = self->GetCurrentMethod(nullptr);
+
+ std::ostringstream os;
+ os << "JNI DETECTED ERROR IN APPLICATION: " << msg;
+
+ if (jni_function_name != nullptr) {
+ os << "\n in call to " << jni_function_name;
+ }
+ // TODO: is this useful given that we're about to dump the calling thread's stack?
+ if (current_method != nullptr) {
+ os << "\n from " << PrettyMethod(current_method);
+ }
+ os << "\n";
+ self->Dump(os);
+
+ if (check_jni_abort_hook_ != nullptr) {
+ check_jni_abort_hook_(check_jni_abort_hook_data_, os.str());
+ } else {
+ // Ensure that we get a native stack trace for this thread.
+ self->TransitionFromRunnableToSuspended(kNative);
+ LOG(FATAL) << os.str();
+ self->TransitionFromSuspendedToRunnable(); // Unreachable, keep annotalysis happy.
+ }
+}
+
+void JavaVMExt::JniAbortV(const char* jni_function_name, const char* fmt, va_list ap) {
+ std::string msg;
+ StringAppendV(&msg, fmt, ap);
+ JniAbort(jni_function_name, msg.c_str());
+}
+
+void JavaVMExt::JniAbortF(const char* jni_function_name, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ JniAbortV(jni_function_name, fmt, args);
+ va_end(args);
+}
+
+bool JavaVMExt::ShouldTrace(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Fast path when no tracing is enabled.
+ if (trace_.empty() && !VLOG_IS_ON(third_party_jni)) {
+ return false;
+ }
+ // Perform checks based on class name.
+ StringPiece class_name(method->GetDeclaringClassDescriptor());
+ if (!trace_.empty() && class_name.find(trace_) != std::string::npos) {
+ return true;
+ }
+ if (!VLOG_IS_ON(third_party_jni)) {
+ return false;
+ }
+ // Return true if we're trying to log all third-party JNI activity and 'method' doesn't look
+ // like part of Android.
+ static const char* gBuiltInPrefixes[] = {
+ "Landroid/",
+ "Lcom/android/",
+ "Lcom/google/android/",
+ "Ldalvik/",
+ "Ljava/",
+ "Ljavax/",
+ "Llibcore/",
+ "Lorg/apache/harmony/",
+ };
+ for (size_t i = 0; i < arraysize(gBuiltInPrefixes); ++i) {
+ if (class_name.starts_with(gBuiltInPrefixes[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+jobject JavaVMExt::AddGlobalRef(Thread* self, mirror::Object* obj) {
+ // Check for null after decoding the object to handle cleared weak globals.
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ WriterMutexLock mu(self, globals_lock_);
+ IndirectRef ref = globals_.Add(IRT_FIRST_SEGMENT, obj);
+ return reinterpret_cast<jobject>(ref);
+}
+
+jweak JavaVMExt::AddWeakGlobalRef(Thread* self, mirror::Object* obj) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ MutexLock mu(self, weak_globals_lock_);
+ while (UNLIKELY(!allow_new_weak_globals_)) {
+ weak_globals_add_condition_.WaitHoldingLocks(self);
+ }
+ IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
+ return reinterpret_cast<jweak>(ref);
+}
+
+void JavaVMExt::DeleteGlobalRef(Thread* self, jobject obj) {
+ if (obj == nullptr) {
+ return;
+ }
+ WriterMutexLock mu(self, globals_lock_);
+ if (!globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+ LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
+ << "failed to find entry";
+ }
+}
+
+void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
+ if (obj == nullptr) {
+ return;
+ }
+ MutexLock mu(self, weak_globals_lock_);
+ if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+ LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
+ << "failed to find entry";
+ }
+}
+
+static void ThreadEnableCheckJni(Thread* thread, void* arg) {
+ bool* check_jni = reinterpret_cast<bool*>(arg);
+ thread->GetJniEnv()->SetCheckJniEnabled(*check_jni);
+}
+
+bool JavaVMExt::SetCheckJniEnabled(bool enabled) {
+ bool old_check_jni = check_jni_;
+ check_jni_ = enabled;
+ functions = enabled ? GetCheckJniInvokeInterface() : unchecked_functions_;
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ runtime_->GetThreadList()->ForEach(ThreadEnableCheckJni, &check_jni_);
+ return old_check_jni;
+}
+
+void JavaVMExt::DumpForSigQuit(std::ostream& os) {
+ os << "JNI: CheckJNI is " << (check_jni_ ? "on" : "off");
+ if (force_copy_) {
+ os << " (with forcecopy)";
+ }
+ Thread* self = Thread::Current();
+ {
+ MutexLock mu(self, pins_lock_);
+ os << "; pins=" << pin_table_.Size();
+ }
+ {
+ ReaderMutexLock mu(self, globals_lock_);
+ os << "; globals=" << globals_.Capacity();
+ }
+ {
+ MutexLock mu(self, weak_globals_lock_);
+ if (weak_globals_.Capacity() > 0) {
+ os << " (plus " << weak_globals_.Capacity() << " weak)";
+ }
+ }
+ os << '\n';
+
+ {
+ MutexLock mu(self, *Locks::jni_libraries_lock_);
+ os << "Libraries: " << Dumpable<Libraries>(*libraries_) << " (" << libraries_->size() << ")\n";
+ }
+}
+
+void JavaVMExt::DisallowNewWeakGlobals() {
+ MutexLock mu(Thread::Current(), weak_globals_lock_);
+ allow_new_weak_globals_ = false;
+}
+
+void JavaVMExt::AllowNewWeakGlobals() {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, weak_globals_lock_);
+ allow_new_weak_globals_ = true;
+ weak_globals_add_condition_.Broadcast(self);
+}
+
+mirror::Object* JavaVMExt::DecodeGlobal(Thread* self, IndirectRef ref) {
+ return globals_.SynchronizedGet(self, &globals_lock_, ref);
+}
+
+mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
+ MutexLock mu(self, weak_globals_lock_);
+ while (UNLIKELY(!allow_new_weak_globals_)) {
+ weak_globals_add_condition_.WaitHoldingLocks(self);
+ }
+ return weak_globals_.Get(ref);
+}
+
+void JavaVMExt::PinPrimitiveArray(Thread* self, mirror::Array* array) {
+ MutexLock mu(self, pins_lock_);
+ pin_table_.Add(array);
+}
+
+void JavaVMExt::UnpinPrimitiveArray(Thread* self, mirror::Array* array) {
+ MutexLock mu(self, pins_lock_);
+ pin_table_.Remove(array);
+}
+
+void JavaVMExt::DumpReferenceTables(std::ostream& os) {
+ Thread* self = Thread::Current();
+ {
+ ReaderMutexLock mu(self, globals_lock_);
+ globals_.Dump(os);
+ }
+ {
+ MutexLock mu(self, weak_globals_lock_);
+ weak_globals_.Dump(os);
+ }
+ {
+ MutexLock mu(self, pins_lock_);
+ pin_table_.Dump(os);
+ }
+}
+
+bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader,
+ std::string* error_msg) {
+ error_msg->clear();
+
+ // See if we've already loaded this library. If we have, and the class loader
+ // matches, return successfully without doing anything.
+ // TODO: for better results we should canonicalize the pathname (or even compare
+ // inodes). This implementation is fine if everybody is using System.loadLibrary.
+ SharedLibrary* library;
+ Thread* self = Thread::Current();
+ {
+ // TODO: move the locking (and more of this logic) into Libraries.
+ MutexLock mu(self, *Locks::jni_libraries_lock_);
+ library = libraries_->Get(path);
+ }
+ if (library != nullptr) {
+ if (env->IsSameObject(library->GetClassLoader(), class_loader) == JNI_FALSE) {
+ // The library will be associated with class_loader. The JNI
+ // spec says we can't load the same library into more than one
+ // class loader.
+ StringAppendF(error_msg, "Shared library \"%s\" already opened by "
+ "ClassLoader %p; can't open in ClassLoader %p",
+ path.c_str(), library->GetClassLoader(), class_loader);
+ LOG(WARNING) << *error_msg;
+ return false;
+ }
+ VLOG(jni) << "[Shared library \"" << path << "\" already loaded in "
+ << " ClassLoader " << class_loader << "]";
+ if (!library->CheckOnLoadResult()) {
+ StringAppendF(error_msg, "JNI_OnLoad failed on a previous attempt "
+ "to load \"%s\"", path.c_str());
+ return false;
+ }
+ return true;
+ }
+
+ // Open the shared library. Because we're using a full path, the system
+ // doesn't have to search through LD_LIBRARY_PATH. (It may do so to
+ // resolve this library's dependencies though.)
+
+ // Failures here are expected when java.library.path has several entries
+ // and we have to hunt for the lib.
+
+ // Below we dlopen but there is no paired dlclose, this would be necessary if we supported
+ // class unloading. Libraries will only be unloaded when the reference count (incremented by
+ // dlopen) becomes zero from dlclose.
+
+ Locks::mutator_lock_->AssertNotHeld(self);
+ const char* path_str = path.empty() ? nullptr : path.c_str();
+ void* handle = dlopen(path_str, RTLD_LAZY);
+ bool needs_native_bridge = false;
+ if (handle == nullptr) {
+ if (NativeBridgeIsSupported(path_str)) {
+ handle = NativeBridgeLoadLibrary(path_str, RTLD_LAZY);
+ needs_native_bridge = true;
+ }
+ }
+
+ VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_LAZY) returned " << handle << "]";
+
+ if (handle == nullptr) {
+ *error_msg = dlerror();
+ LOG(ERROR) << "dlopen(\"" << path << "\", RTLD_LAZY) failed: " << *error_msg;
+ return false;
+ }
+
+ if (env->ExceptionCheck() == JNI_TRUE) {
+ LOG(ERROR) << "Unexpected exception:";
+ env->ExceptionDescribe();
+ env->ExceptionClear();
+ }
+ // Create a new entry.
+ // TODO: move the locking (and more of this logic) into Libraries.
+ bool created_library = false;
+ {
+ // Create SharedLibrary ahead of taking the libraries lock to maintain lock ordering.
+ std::unique_ptr<SharedLibrary> new_library(
+ new SharedLibrary(env, self, path, handle, class_loader));
+ MutexLock mu(self, *Locks::jni_libraries_lock_);
+ library = libraries_->Get(path);
+ if (library == nullptr) { // We won race to get libraries_lock.
+ library = new_library.release();
+ libraries_->Put(path, library);
+ created_library = true;
+ }
+ }
+ if (!created_library) {
+ LOG(INFO) << "WOW: we lost a race to add shared library: "
+ << "\"" << path << "\" ClassLoader=" << class_loader;
+ return library->CheckOnLoadResult();
+ }
+ VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader << "]";
+
+ bool was_successful = false;
+ void* sym;
+ if (needs_native_bridge) {
+ library->SetNeedsNativeBridge();
+ sym = library->FindSymbolWithNativeBridge("JNI_OnLoad", nullptr);
+ } else {
+ sym = dlsym(handle, "JNI_OnLoad");
+ }
+ if (sym == nullptr) {
+ VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
+ was_successful = true;
+ } else {
+ // Call JNI_OnLoad. We have to override the current class
+ // loader, which will always be "null" since the stuff at the
+ // top of the stack is around Runtime.loadLibrary(). (See
+ // the comments in the JNI FindClass function.)
+ ScopedLocalRef<jobject> old_class_loader(env, env->NewLocalRef(self->GetClassLoaderOverride()));
+ self->SetClassLoaderOverride(class_loader);
+
+ VLOG(jni) << "[Calling JNI_OnLoad in \"" << path << "\"]";
+ typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
+ JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
+ int version = (*jni_on_load)(this, nullptr);
+
+ self->SetClassLoaderOverride(old_class_loader.get());
+
+ if (version == JNI_ERR) {
+ StringAppendF(error_msg, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
+ } else if (IsBadJniVersion(version)) {
+ StringAppendF(error_msg, "Bad JNI version returned from JNI_OnLoad in \"%s\": %d",
+ path.c_str(), version);
+ // It's unwise to call dlclose() here, but we can mark it
+ // as bad and ensure that future load attempts will fail.
+ // We don't know how far JNI_OnLoad got, so there could
+ // be some partially-initialized stuff accessible through
+ // newly-registered native method calls. We could try to
+ // unregister them, but that doesn't seem worthwhile.
+ } else {
+ was_successful = true;
+ }
+ VLOG(jni) << "[Returned " << (was_successful ? "successfully" : "failure")
+ << " from JNI_OnLoad in \"" << path << "\"]";
+ }
+
+ library->SetResult(was_successful);
+ return was_successful;
+}
+
+void* JavaVMExt::FindCodeForNativeMethod(mirror::ArtMethod* m) {
+ CHECK(m->IsNative());
+ mirror::Class* c = m->GetDeclaringClass();
+ // If this is a static method, it could be called before the class has been initialized.
+ CHECK(c->IsInitializing()) << c->GetStatus() << " " << PrettyMethod(m);
+ std::string detail;
+ void* native_method;
+ Thread* self = Thread::Current();
+ {
+ MutexLock mu(self, *Locks::jni_libraries_lock_);
+ native_method = libraries_->FindNativeMethod(m, detail);
+ }
+ // Throwing can cause libraries_lock to be reacquired.
+ if (native_method == nullptr) {
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ self->ThrowNewException(throw_location, "Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
+ }
+ return native_method;
+}
+
+void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
+ MutexLock mu(Thread::Current(), weak_globals_lock_);
+ for (mirror::Object** entry : weak_globals_) {
+ // Since this is called by the GC, we don't need a read barrier.
+ mirror::Object* obj = *entry;
+ mirror::Object* new_obj = callback(obj, arg);
+ if (new_obj == nullptr) {
+ new_obj = kClearedJniWeakGlobal;
+ }
+ *entry = new_obj;
+ }
+}
+
+void JavaVMExt::VisitRoots(RootCallback* callback, void* arg) {
+ Thread* self = Thread::Current();
+ {
+ ReaderMutexLock mu(self, globals_lock_);
+ globals_.VisitRoots(callback, arg, 0, kRootJNIGlobal);
+ }
+ {
+ MutexLock mu(self, pins_lock_);
+ pin_table_.VisitRoots(callback, arg, 0, kRootVMInternal);
+ }
+ // The weak_globals table is visited by the GC itself (because it mutates the table).
+}
+
+// JNI Invocation interface.
+
+extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
+ const JavaVMInitArgs* args = static_cast<JavaVMInitArgs*>(vm_args);
+ if (IsBadJniVersion(args->version)) {
+ LOG(ERROR) << "Bad JNI version passed to CreateJavaVM: " << args->version;
+ return JNI_EVERSION;
+ }
+ RuntimeOptions options;
+ for (int i = 0; i < args->nOptions; ++i) {
+ JavaVMOption* option = &args->options[i];
+ options.push_back(std::make_pair(std::string(option->optionString), option->extraInfo));
+ }
+ bool ignore_unrecognized = args->ignoreUnrecognized;
+ if (!Runtime::Create(options, ignore_unrecognized)) {
+ return JNI_ERR;
+ }
+ Runtime* runtime = Runtime::Current();
+ bool started = runtime->Start();
+ if (!started) {
+ delete Thread::Current()->GetJniEnv();
+ delete runtime->GetJavaVM();
+ LOG(WARNING) << "CreateJavaVM failed";
+ return JNI_ERR;
+ }
+ *p_env = Thread::Current()->GetJniEnv();
+ *p_vm = runtime->GetJavaVM();
+ return JNI_OK;
+}
+
+extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize, jsize* vm_count) {
+ Runtime* runtime = Runtime::Current();
+ if (runtime == nullptr) {
+ *vm_count = 0;
+ } else {
+ *vm_count = 1;
+ vms[0] = runtime->GetJavaVM();
+ }
+ return JNI_OK;
+}
+
+// Historically unsupported.
+extern "C" jint JNI_GetDefaultJavaVMInitArgs(void* /*vm_args*/) {
+ return JNI_ERR;
+}
+
+} // namespace art
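A note on the JNI_OnLoad handling in LoadNativeLibrary: a library without JNI_OnLoad is treated as successfully loaded, while one that returns JNI_ERR or any version outside {1_2, 1_4, 1_6} is marked bad, so later loads of the same path fail fast in CheckOnLoadResult. For reference, a minimal library-side JNI_OnLoad that satisfies those rules could look like this (illustrative only, not part of the patch):

    #include <jni.h>

    // Called once per library by the loader above, with the class-loader override in place.
    extern "C" jint JNI_OnLoad(JavaVM* vm, void* /*reserved*/) {
      JNIEnv* env = nullptr;
      if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
        return JNI_ERR;  // Reported back as "JNI_ERR returned from JNI_OnLoad in ...".
      }
      // Register natives, cache classes/field IDs, etc. here.
      return JNI_VERSION_1_6;  // Anything outside JNI_VERSION_1_{2,4,6} is rejected.
    }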
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
new file mode 100644
index 0000000000..da0b8e3257
--- /dev/null
+++ b/runtime/java_vm_ext.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JAVA_VM_EXT_H_
+#define ART_RUNTIME_JAVA_VM_EXT_H_
+
+#include "jni.h"
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "indirect_reference_table.h"
+#include "reference_table.h"
+
+namespace art {
+
+namespace mirror {
+ class ArtMethod;
+ class Array;
+} // namespace mirror
+
+class Libraries;
+class ParsedOptions;
+class Runtime;
+
+class JavaVMExt : public JavaVM {
+ public:
+ JavaVMExt(Runtime* runtime, ParsedOptions* options);
+ ~JavaVMExt();
+
+ bool ForceCopy() const {
+ return force_copy_;
+ }
+
+ bool IsCheckJniEnabled() const {
+ return check_jni_;
+ }
+
+ bool IsTracingEnabled() const {
+ return tracing_enabled_;
+ }
+
+ Runtime* GetRuntime() const {
+ return runtime_;
+ }
+
+ void SetCheckJniAbortHook(void (*hook)(void*, const std::string&), void* data) {
+ check_jni_abort_hook_ = hook;
+ check_jni_abort_hook_data_ = data;
+ }
+
+ // Aborts execution unless there is an abort handler installed, in which case it will return.
+ // It's therefore important that callers return after aborting, as otherwise code following the
+ // abort will be executed in the abort-handler case.
+ void JniAbort(const char* jni_function_name, const char* msg);
+
+ void JniAbortV(const char* jni_function_name, const char* fmt, va_list ap);
+
+ void JniAbortF(const char* jni_function_name, const char* fmt, ...)
+ __attribute__((__format__(__printf__, 3, 4)));
+
+ // If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages
+ // when a native method that matches the -Xjnitrace argument calls a JNI function
+ // such as NewByteArray.
+ // If -verbose:third-party-jni is on, we want to log any JNI function calls
+ // made by a third-party native method.
+ bool ShouldTrace(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ /**
+ * Loads the given shared library. 'path' is an absolute pathname.
+ *
+ * Returns 'true' on success. On failure, sets '*error_msg' to a
+ * human-readable description of the error.
+ */
+ bool LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject javaLoader,
+ std::string* error_msg);
+
+ /**
+ * Returns a pointer to the code for the native method 'm', found
+ * using dlsym(3) on every native library that's been loaded so far.
+ */
+ void* FindCodeForNativeMethod(mirror::ArtMethod* m)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void DumpForSigQuit(std::ostream& os)
+ LOCKS_EXCLUDED(Locks::jni_libraries_lock_, globals_lock_, weak_globals_lock_, pins_lock_);
+
+ void DumpReferenceTables(std::ostream& os)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ bool SetCheckJniEnabled(bool enabled);
+
+ void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void DisallowNewWeakGlobals() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ jobject AddGlobalRef(Thread* self, mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ jweak AddWeakGlobalRef(Thread* self, mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void DeleteGlobalRef(Thread* self, jobject obj);
+
+ void DeleteWeakGlobalRef(Thread* self, jweak obj);
+
+ void SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::Object* DecodeGlobal(Thread* self, IndirectRef ref)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void PinPrimitiveArray(Thread* self, mirror::Array* array)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(pins_lock_);
+
+ void UnpinPrimitiveArray(Thread* self, mirror::Array* array)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ LOCKS_EXCLUDED(pins_lock_);
+
+ const JNIInvokeInterface* GetUncheckedFunctions() const {
+ return unchecked_functions_;
+ }
+
+ private:
+ Runtime* const runtime_;
+
+ // Used for testing. By default, we'll LOG(FATAL) the reason.
+ void (*check_jni_abort_hook_)(void* data, const std::string& reason);
+ void* check_jni_abort_hook_data_;
+
+ // Extra checking.
+ bool check_jni_;
+ bool force_copy_;
+ const bool tracing_enabled_;
+
+ // Extra diagnostics.
+ const std::string trace_;
+
+ // Used to hold references to pinned primitive arrays.
+ Mutex pins_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ ReferenceTable pin_table_ GUARDED_BY(pins_lock_);
+
+ // JNI global references.
+ ReaderWriterMutex globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // Not guarded by globals_lock since we sometimes use SynchronizedGet in Thread::DecodeJObject.
+ IndirectReferenceTable globals_;
+
+ std::unique_ptr<Libraries> libraries_ GUARDED_BY(Locks::jni_libraries_lock_);
+
+ // Used by -Xcheck:jni.
+ const JNIInvokeInterface* const unchecked_functions_;
+
+ // JNI weak global references.
+ Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ // Since weak_globals_ contain weak roots, be careful not to
+ // directly access the object references in it. Use Get() with the
+ // read barrier enabled.
+ IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
+ bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
+ ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(JavaVMExt);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_JAVA_VM_EXT_H_
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index 325b089a48..8fd07cc11b 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -339,7 +339,7 @@ struct JdwpState {
ConditionVariable attach_cond_ GUARDED_BY(attach_lock_);
// Time of last debugger activity, in milliseconds.
- int64_t last_activity_time_ms_;
+ Atomic<int64_t> last_activity_time_ms_;
// Global counters and a mutex to protect them.
AtomicInteger request_serial_;
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 05bfe0ddf9..b9379f5e7f 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1671,7 +1671,7 @@ size_t JdwpState::ProcessRequest(Request& request, ExpandBuf* pReply) {
* so waitForDebugger() doesn't return if we stall for a bit here.
*/
Dbg::GoActive();
- QuasiAtomic::Write64(&last_activity_time_ms_, 0);
+ last_activity_time_ms_.StoreSequentiallyConsistent(0);
}
/*
@@ -1751,7 +1751,7 @@ size_t JdwpState::ProcessRequest(Request& request, ExpandBuf* pReply) {
* the initial setup. Only update if this is a non-DDMS packet.
*/
if (request.GetCommandSet() != kJDWPDdmCmdSet) {
- QuasiAtomic::Write64(&last_activity_time_ms_, MilliTime());
+ last_activity_time_ms_.StoreSequentiallyConsistent(MilliTime());
}
/* tell the VM that GC is okay again */
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 64e9f370aa..7795b7c5df 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -577,7 +577,7 @@ int64_t JdwpState::LastDebuggerActivity() {
return -1;
}
- int64_t last = QuasiAtomic::Read64(&last_activity_time_ms_);
+ int64_t last = last_activity_time_ms_.LoadSequentiallyConsistent();
/* initializing or in the middle of something? */
if (last == 0) {
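Switching last_activity_time_ms_ from a raw int64_t written via QuasiAtomic::Write64/Read64 to Atomic<int64_t> makes the 64-bit timestamp an ordinary sequentially consistent load/store, the same guarantee std::atomic gives. A small illustration of the pattern outside ART:

    #include <atomic>
    #include <cstdint>

    // Illustrative stand-in for the JDWP activity timestamp.
    std::atomic<int64_t> last_activity_time_ms{0};

    void NoteActivity(int64_t now_ms) {
      last_activity_time_ms.store(now_ms, std::memory_order_seq_cst);
    }

    int64_t MillisSinceLastActivity(int64_t now_ms) {
      int64_t last = last_activity_time_ms.load(std::memory_order_seq_cst);
      return last == 0 ? -1 : now_ms - last;  // 0 means initializing or mid-request.
    }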
diff --git a/runtime/jni_internal-inl.h b/runtime/jni_env_ext-inl.h
index 6cf9a61896..dc6a3e8f62 100644
--- a/runtime/jni_internal-inl.h
+++ b/runtime/jni_env_ext-inl.h
@@ -14,10 +14,10 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_JNI_INTERNAL_INL_H_
-#define ART_RUNTIME_JNI_INTERNAL_INL_H_
+#ifndef ART_RUNTIME_JNI_ENV_EXT_INL_H_
+#define ART_RUNTIME_JNI_ENV_EXT_INL_H_
-#include "jni_internal.h"
+#include "jni_env_ext.h"
#include "utils.h"
@@ -44,4 +44,4 @@ inline T JNIEnvExt::AddLocalReference(mirror::Object* obj) {
} // namespace art
-#endif // ART_RUNTIME_JNI_INTERNAL_INL_H_
+#endif // ART_RUNTIME_JNI_ENV_EXT_INL_H_
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
new file mode 100644
index 0000000000..180e3d7865
--- /dev/null
+++ b/runtime/jni_env_ext.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_env_ext.h"
+
+#include "check_jni.h"
+#include "indirect_reference_table.h"
+#include "java_vm_ext.h"
+#include "jni_internal.h"
+
+namespace art {
+
+static constexpr size_t kMonitorsInitial = 32; // Arbitrary.
+static constexpr size_t kMonitorsMax = 4096; // Arbitrary sanity check.
+
+static constexpr size_t kLocalsInitial = 64; // Arbitrary.
+
+JNIEnvExt::JNIEnvExt(Thread* self, JavaVMExt* vm)
+ : self(self),
+ vm(vm),
+ local_ref_cookie(IRT_FIRST_SEGMENT),
+ locals(kLocalsInitial, kLocalsMax, kLocal),
+ check_jni(false),
+ critical(0),
+ monitors("monitors", kMonitorsInitial, kMonitorsMax) {
+ functions = unchecked_functions = GetJniNativeInterface();
+ if (vm->IsCheckJniEnabled()) {
+ SetCheckJniEnabled(true);
+ }
+}
+
+JNIEnvExt::~JNIEnvExt() {
+}
+
+jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
+}
+
+void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (obj != nullptr) {
+ locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
+ }
+}
+
+void JNIEnvExt::SetCheckJniEnabled(bool enabled) {
+ check_jni = enabled;
+ functions = enabled ? GetCheckJniNativeInterface() : GetJniNativeInterface();
+}
+
+void JNIEnvExt::DumpReferenceTables(std::ostream& os) {
+ locals.Dump(os);
+ monitors.Dump(os);
+}
+
+void JNIEnvExt::PushFrame(int capacity) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(capacity); // cpplint gets confused with (int) and thinks it's a cast.
+ // TODO: take 'capacity' into account.
+ stacked_local_ref_cookies.push_back(local_ref_cookie);
+ local_ref_cookie = locals.GetSegmentState();
+}
+
+void JNIEnvExt::PopFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ locals.SetSegmentState(local_ref_cookie);
+ local_ref_cookie = stacked_local_ref_cookies.back();
+ stacked_local_ref_cookies.pop_back();
+}
+
+Offset JNIEnvExt::SegmentStateOffset() {
+ return Offset(OFFSETOF_MEMBER(JNIEnvExt, locals) +
+ IndirectReferenceTable::SegmentStateOffset().Int32Value());
+}
+
+} // namespace art
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
new file mode 100644
index 0000000000..af87cb4226
--- /dev/null
+++ b/runtime/jni_env_ext.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JNI_ENV_EXT_H_
+#define ART_RUNTIME_JNI_ENV_EXT_H_
+
+#include <jni.h>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "indirect_reference_table.h"
+#include "object_callbacks.h"
+#include "reference_table.h"
+
+namespace art {
+
+class JavaVMExt;
+
+// Maximum number of local references in the indirect reference table. The value is arbitrary but
+// low enough that it forces sanity checks.
+static constexpr size_t kLocalsMax = 512;
+
+struct JNIEnvExt : public JNIEnv {
+ JNIEnvExt(Thread* self, JavaVMExt* vm);
+ ~JNIEnvExt();
+
+ void DumpReferenceTables(std::ostream& os)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetCheckJniEnabled(bool enabled);
+
+ void PushFrame(int capacity);
+ void PopFrame();
+
+ template<typename T>
+ T AddLocalReference(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ static Offset SegmentStateOffset();
+
+ static Offset LocalRefCookieOffset() {
+ return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie));
+ }
+
+ static Offset SelfOffset() {
+ return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
+ }
+
+ jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ Thread* const self;
+ JavaVMExt* const vm;
+
+ // Cookie used when using the local indirect reference table.
+ uint32_t local_ref_cookie;
+
+ // JNI local references.
+ IndirectReferenceTable locals GUARDED_BY(Locks::mutator_lock_);
+
+ // Stack of cookies corresponding to PushLocalFrame/PopLocalFrame calls.
+ // TODO: to avoid leaks (and bugs), we need to clear this vector on entry (or return)
+ // to a native method.
+ std::vector<uint32_t> stacked_local_ref_cookies;
+
+ // Frequently-accessed fields cached from JavaVM.
+ bool check_jni;
+
+ // How many nested "critical" JNI calls are we in?
+ int critical;
+
+ // Entered JNI monitors, for bulk exit on thread detach.
+ ReferenceTable monitors;
+
+ // Used by -Xcheck:jni.
+ const JNINativeInterface* unchecked_functions;
+};
+
+// Used to save and restore the JNIEnvExt state when not going through code created by the JNI
+// compiler.
+class ScopedJniEnvLocalRefState {
+ public:
+ explicit ScopedJniEnvLocalRefState(JNIEnvExt* env) : env_(env) {
+ saved_local_ref_cookie_ = env->local_ref_cookie;
+ env->local_ref_cookie = env->locals.GetSegmentState();
+ }
+
+ ~ScopedJniEnvLocalRefState() {
+ env_->locals.SetSegmentState(env_->local_ref_cookie);
+ env_->local_ref_cookie = saved_local_ref_cookie_;
+ }
+
+ private:
+ JNIEnvExt* const env_;
+ uint32_t saved_local_ref_cookie_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_JNI_ENV_EXT_H_
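
Editor's note: ScopedJniEnvLocalRefState is the RAII form of the same save/restore — the constructor snapshots local_ref_cookie and the current segment state, and the destructor rolls both back. A hedged usage sketch follows; the surrounding helper is hypothetical (not an ART function) and assumes this header is included and 'env' is the current thread's JNIEnvExt*.

// Hypothetical runtime-internal helper; not part of ART.
void CallNativeWithoutJniStub(JNIEnvExt* env) {
  ScopedJniEnvLocalRefState save(env);  // snapshot local_ref_cookie + segment state
  // ... create local references, call into native code ...
  // Every local created here is released when 'save' goes out of scope,
  // exactly as if the JNI compiler had emitted the frame handling.
}
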
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index f9c7ec692c..d5e92a40cb 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -29,10 +29,12 @@
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
+#include "gc_root.h"
#include "gc/accounting/card_table-inl.h"
#include "indirect_reference_table-inl.h"
#include "interpreter/interpreter.h"
-#include "jni.h"
+#include "jni_env_ext.h"
+#include "java_vm_ext.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -41,6 +43,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
+#include "native_bridge.h"
#include "parsed_options.h"
#include "reflection.h"
#include "runtime.h"
@@ -53,31 +56,6 @@
namespace art {
-static const size_t kMonitorsInitial = 32; // Arbitrary.
-static const size_t kMonitorsMax = 4096; // Arbitrary sanity check.
-
-static const size_t kLocalsInitial = 64; // Arbitrary.
-static const size_t kLocalsMax = 512; // Arbitrary sanity check.
-
-static const size_t kPinTableInitial = 16; // Arbitrary.
-static const size_t kPinTableMax = 1024; // Arbitrary sanity check.
-
-static size_t gGlobalsInitial = 512; // Arbitrary.
-static size_t gGlobalsMax = 51200; // Arbitrary sanity check. (Must fit in 16 bits.)
-
-static const size_t kWeakGlobalsInitial = 16; // Arbitrary.
-static const size_t kWeakGlobalsMax = 51200; // Arbitrary sanity check. (Must fit in 16 bits.)
-
-static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return soa.Vm()->AddWeakGlobalReference(soa.Self(), obj);
-}
-
-static bool IsBadJniVersion(int version) {
- // We don't support JNI_VERSION_1_1. These are the only other valid versions.
- return version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 && version != JNI_VERSION_1_6;
-}
-
// Section 12.3.2 of the JNI spec describes JNI class descriptors. They're
// separated with slashes but aren't wrapped with "L;" like regular descriptors
// (i.e. "a/b/C" rather than "La/b/C;"). Arrays of reference types are an
@@ -167,7 +145,7 @@ static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa)
mirror::ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
// If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
- return soa.Self()->GetClassLoaderOverride();
+ return soa.Decode<mirror::ClassLoader*>(soa.Self()->GetClassLoaderOverride());
}
// If we have a method, use its ClassLoader for context.
if (method != nullptr) {
@@ -180,7 +158,7 @@ static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa)
return class_loader;
}
// See if the override ClassLoader is set for gtests.
- class_loader = soa.Self()->GetClassLoaderOverride();
+ class_loader = soa.Decode<mirror::ClassLoader*>(soa.Self()->GetClassLoaderOverride());
if (class_loader != nullptr) {
// If so, CommonCompilerTest should have set UseCompileTimeClassPath.
CHECK(Runtime::Current()->UseCompileTimeClassPath());
@@ -238,20 +216,6 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
return soa.EncodeField(field);
}
-static void PinPrimitiveArray(const ScopedObjectAccess& soa, mirror::Array* array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- JavaVMExt* vm = soa.Vm();
- MutexLock mu(soa.Self(), vm->pins_lock);
- vm->pin_table.Add(array);
-}
-
-static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, mirror::Array* array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- JavaVMExt* vm = soa.Vm();
- MutexLock mu(soa.Self(), vm->pins_lock);
- vm->pin_table.Remove(array);
-}
-
static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start,
jsize length, const char* identifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -314,224 +278,10 @@ int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobj
return JNI_OK;
}
-static jint JII_AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* raw_args, bool as_daemon) {
- if (vm == nullptr || p_env == nullptr) {
- return JNI_ERR;
- }
-
- // Return immediately if we're already attached.
- Thread* self = Thread::Current();
- if (self != nullptr) {
- *p_env = self->GetJniEnv();
- return JNI_OK;
- }
-
- Runtime* runtime = reinterpret_cast<JavaVMExt*>(vm)->runtime;
-
- // No threads allowed in zygote mode.
- if (runtime->IsZygote()) {
- LOG(ERROR) << "Attempt to attach a thread in the zygote";
- return JNI_ERR;
- }
-
- JavaVMAttachArgs* args = static_cast<JavaVMAttachArgs*>(raw_args);
- const char* thread_name = nullptr;
- jobject thread_group = nullptr;
- if (args != nullptr) {
- if (IsBadJniVersion(args->version)) {
- LOG(ERROR) << "Bad JNI version passed to "
- << (as_daemon ? "AttachCurrentThreadAsDaemon" : "AttachCurrentThread") << ": "
- << args->version;
- return JNI_EVERSION;
- }
- thread_name = args->name;
- thread_group = args->group;
- }
-
- if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group, !runtime->IsCompiler())) {
- *p_env = nullptr;
- return JNI_ERR;
- } else {
- *p_env = Thread::Current()->GetJniEnv();
- return JNI_OK;
- }
+static JavaVMExt* JavaVmExtFromEnv(JNIEnv* env) {
+ return reinterpret_cast<JNIEnvExt*>(env)->vm;
}
-class SharedLibrary {
- public:
- SharedLibrary(const std::string& path, void* handle, mirror::Object* class_loader)
- : path_(path),
- handle_(handle),
- class_loader_(class_loader),
- jni_on_load_lock_("JNI_OnLoad lock"),
- jni_on_load_cond_("JNI_OnLoad condition variable", jni_on_load_lock_),
- jni_on_load_thread_id_(Thread::Current()->GetThreadId()),
- jni_on_load_result_(kPending) {
- }
-
- mirror::Object* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Object** root = &class_loader_;
- return ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(root);
- }
-
- std::string GetPath() {
- return path_;
- }
-
- /*
- * Check the result of an earlier call to JNI_OnLoad on this library.
- * If the call has not yet finished in another thread, wait for it.
- */
- bool CheckOnLoadResult()
- LOCKS_EXCLUDED(jni_on_load_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Thread* self = Thread::Current();
- self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad);
- bool okay;
- {
- MutexLock mu(self, jni_on_load_lock_);
-
- if (jni_on_load_thread_id_ == self->GetThreadId()) {
- // Check this so we don't end up waiting for ourselves. We need to return "true" so the
- // caller can continue.
- LOG(INFO) << *self << " recursive attempt to load library " << "\"" << path_ << "\"";
- okay = true;
- } else {
- while (jni_on_load_result_ == kPending) {
- VLOG(jni) << "[" << *self << " waiting for \"" << path_ << "\" " << "JNI_OnLoad...]";
- jni_on_load_cond_.Wait(self);
- }
-
- okay = (jni_on_load_result_ == kOkay);
- VLOG(jni) << "[Earlier JNI_OnLoad for \"" << path_ << "\" "
- << (okay ? "succeeded" : "failed") << "]";
- }
- }
- self->TransitionFromSuspendedToRunnable();
- return okay;
- }
-
- void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) {
- Thread* self = Thread::Current();
- MutexLock mu(self, jni_on_load_lock_);
-
- jni_on_load_result_ = result ? kOkay : kFailed;
- jni_on_load_thread_id_ = 0;
-
- // Broadcast a wakeup to anybody sleeping on the condition variable.
- jni_on_load_cond_.Broadcast(self);
- }
-
- void* FindSymbol(const std::string& symbol_name) {
- return dlsym(handle_, symbol_name.c_str());
- }
-
- void VisitRoots(RootCallback* visitor, void* arg) {
- if (class_loader_ != nullptr) {
- visitor(&class_loader_, arg, 0, kRootVMInternal);
- }
- }
-
- private:
- enum JNI_OnLoadState {
- kPending,
- kFailed,
- kOkay,
- };
-
- // Path to library "/system/lib/libjni.so".
- std::string path_;
-
- // The void* returned by dlopen(3).
- void* handle_;
-
- // The ClassLoader this library is associated with.
- mirror::Object* class_loader_;
-
- // Guards remaining items.
- Mutex jni_on_load_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- // Wait for JNI_OnLoad in other thread.
- ConditionVariable jni_on_load_cond_ GUARDED_BY(jni_on_load_lock_);
- // Recursive invocation guard.
- uint32_t jni_on_load_thread_id_ GUARDED_BY(jni_on_load_lock_);
- // Result of earlier JNI_OnLoad call.
- JNI_OnLoadState jni_on_load_result_ GUARDED_BY(jni_on_load_lock_);
-};
-
-// This exists mainly to keep implementation details out of the header file.
-class Libraries {
- public:
- Libraries() {
- }
-
- ~Libraries() {
- STLDeleteValues(&libraries_);
- }
-
- void Dump(std::ostream& os) const {
- bool first = true;
- for (const auto& library : libraries_) {
- if (!first) {
- os << ' ';
- }
- first = false;
- os << library.first;
- }
- }
-
- size_t size() const {
- return libraries_.size();
- }
-
- SharedLibrary* Get(const std::string& path) {
- auto it = libraries_.find(path);
- return (it == libraries_.end()) ? nullptr : it->second;
- }
-
- void Put(const std::string& path, SharedLibrary* library) {
- libraries_.Put(path, library);
- }
-
- // See section 11.3 "Linking Native Methods" of the JNI spec.
- void* FindNativeMethod(mirror::ArtMethod* m, std::string& detail)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string jni_short_name(JniShortName(m));
- std::string jni_long_name(JniLongName(m));
- const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
- for (const auto& lib : libraries_) {
- SharedLibrary* library = lib.second;
- if (library->GetClassLoader() != declaring_class_loader) {
- // We only search libraries loaded by the appropriate ClassLoader.
- continue;
- }
- // Try the short name then the long name...
- void* fn = library->FindSymbol(jni_short_name);
- if (fn == nullptr) {
- fn = library->FindSymbol(jni_long_name);
- }
- if (fn != nullptr) {
- VLOG(jni) << "[Found native code for " << PrettyMethod(m)
- << " in \"" << library->GetPath() << "\"]";
- return fn;
- }
- }
- detail += "No implementation found for ";
- detail += PrettyMethod(m);
- detail += " (tried " + jni_short_name + " and " + jni_long_name + ")";
- LOG(ERROR) << detail;
- return nullptr;
- }
-
- void VisitRoots(RootCallback* callback, void* arg) {
- for (auto& lib_pair : libraries_) {
- lib_pair.second->VisitRoots(callback, arg);
- }
- }
-
- private:
- SafeMap<std::string, SharedLibrary*> libraries_;
-};
-
#define CHECK_NON_NULL_ARGUMENT(value) \
CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value, nullptr)
@@ -546,13 +296,13 @@ class Libraries {
#define CHECK_NON_NULL_ARGUMENT_FN_NAME(name, value, return_val) \
if (UNLIKELY(value == nullptr)) { \
- JniAbortF(name, #value " == null"); \
+ JavaVmExtFromEnv(env)->JniAbortF(name, #value " == null"); \
return return_val; \
}
#define CHECK_NON_NULL_MEMCPY_ARGUMENT(length, value) \
if (UNLIKELY(length != 0 && value == nullptr)) { \
- JniAbortF(__FUNCTION__, #value " == null"); \
+ JavaVmExtFromEnv(env)->JniAbortF(__FUNCTION__, #value " == null"); \
return; \
}
@@ -644,13 +394,15 @@ class JNI {
return soa.AddLocalReference<jclass>(c->GetSuperClass());
}
+ // Note: java_class1 should be safely castable to java_class2, and
+ // not the other way around.
static jboolean IsAssignableFrom(JNIEnv* env, jclass java_class1, jclass java_class2) {
CHECK_NON_NULL_ARGUMENT_RETURN(java_class1, JNI_FALSE);
CHECK_NON_NULL_ARGUMENT_RETURN(java_class2, JNI_FALSE);
ScopedObjectAccess soa(env);
mirror::Class* c1 = soa.Decode<mirror::Class*>(java_class1);
mirror::Class* c2 = soa.Decode<mirror::Class*>(java_class2);
- return c1->IsAssignableFrom(c2) ? JNI_TRUE : JNI_FALSE;
+ return c2->IsAssignableFrom(c1) ? JNI_TRUE : JNI_FALSE;
}
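
Editor's note: the one-line change above fixes the argument order. Per the JNI spec, IsAssignableFrom(env, clazz1, clazz2) asks whether an object of clazz1 can be safely cast to clazz2, which in Java terms is clazz2.isAssignableFrom(clazz1). A small caller-side illustration using only standard JNI calls (the class names are examples and 'env' is assumed to be a valid JNIEnv*):

jclass integer_class = env->FindClass("java/lang/Integer");
jclass number_class  = env->FindClass("java/lang/Number");

// An Integer can be used where a Number is expected...
jboolean up   = env->IsAssignableFrom(integer_class, number_class);  // JNI_TRUE
// ...but not the other way around.
jboolean down = env->IsAssignableFrom(number_class, integer_class);  // JNI_FALSE
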
static jboolean IsInstanceOf(JNIEnv* env, jobject jobj, jclass java_class) {
@@ -751,10 +503,10 @@ class JNI {
static jint PushLocalFrame(JNIEnv* env, jint capacity) {
// TODO: SOA may not be necessary but I do it to please lock annotations.
ScopedObjectAccess soa(env);
- if (EnsureLocalCapacity(soa, capacity, "PushLocalFrame") != JNI_OK) {
+ if (EnsureLocalCapacityInternal(soa, capacity, "PushLocalFrame") != JNI_OK) {
return JNI_ERR;
}
- static_cast<JNIEnvExt*>(env)->PushFrame(capacity);
+ down_cast<JNIEnvExt*>(env)->PushFrame(capacity);
return JNI_OK;
}
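
Editor's note: from application native code, the PushLocalFrame/PopLocalFrame pair that ends up in JNIEnvExt::PushFrame/PopFrame is used as shown below. This is standard JNI usage, independent of the ART internals in this patch; 'count', 'list_of_items', and GetItem are placeholder names.

// Process many objects without letting local references pile up.
for (jsize i = 0; i < count; ++i) {
  if (env->PushLocalFrame(16) != JNI_OK) {
    return;  // allocation failed; a pending OutOfMemoryError has been thrown
  }
  jobject item = GetItem(env, list_of_items, i);  // creates local refs
  // ... work with 'item' ...
  env->PopLocalFrame(nullptr);  // frees every local created since PushLocalFrame
}
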
@@ -768,48 +520,31 @@ class JNI {
static jint EnsureLocalCapacity(JNIEnv* env, jint desired_capacity) {
// TODO: SOA may not be necessary but I do it to please lock annotations.
ScopedObjectAccess soa(env);
- return EnsureLocalCapacity(soa, desired_capacity, "EnsureLocalCapacity");
+ return EnsureLocalCapacityInternal(soa, desired_capacity, "EnsureLocalCapacity");
}
static jobject NewGlobalRef(JNIEnv* env, jobject obj) {
ScopedObjectAccess soa(env);
mirror::Object* decoded_obj = soa.Decode<mirror::Object*>(obj);
- // Check for null after decoding the object to handle cleared weak globals.
- if (decoded_obj == nullptr) {
- return nullptr;
- }
- JavaVMExt* vm = soa.Vm();
- IndirectReferenceTable& globals = vm->globals;
- WriterMutexLock mu(soa.Self(), vm->globals_lock);
- IndirectRef ref = globals.Add(IRT_FIRST_SEGMENT, decoded_obj);
- return reinterpret_cast<jobject>(ref);
+ return soa.Vm()->AddGlobalRef(soa.Self(), decoded_obj);
}
static void DeleteGlobalRef(JNIEnv* env, jobject obj) {
- if (obj == nullptr) {
- return;
- }
- JavaVMExt* vm = reinterpret_cast<JNIEnvExt*>(env)->vm;
- IndirectReferenceTable& globals = vm->globals;
- Thread* self = reinterpret_cast<JNIEnvExt*>(env)->self;
- WriterMutexLock mu(self, vm->globals_lock);
-
- if (!globals.Remove(IRT_FIRST_SEGMENT, obj)) {
- LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
- << "failed to find entry";
- }
+ JavaVMExt* vm = down_cast<JNIEnvExt*>(env)->vm;
+ Thread* self = down_cast<JNIEnvExt*>(env)->self;
+ vm->DeleteGlobalRef(self, obj);
}
static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj) {
ScopedObjectAccess soa(env);
- return AddWeakGlobalReference(soa, soa.Decode<mirror::Object*>(obj));
+ mirror::Object* decoded_obj = soa.Decode<mirror::Object*>(obj);
+ return soa.Vm()->AddWeakGlobalRef(soa.Self(), decoded_obj);
}
static void DeleteWeakGlobalRef(JNIEnv* env, jweak obj) {
- if (obj != nullptr) {
- ScopedObjectAccess soa(env);
- soa.Vm()->DeleteWeakGlobalRef(soa.Self(), obj);
- }
+ JavaVMExt* vm = down_cast<JNIEnvExt*>(env)->vm;
+ Thread* self = down_cast<JNIEnvExt*>(env)->self;
+ vm->DeleteWeakGlobalRef(self, obj);
}
static jobject NewLocalRef(JNIEnv* env, jobject obj) {
@@ -826,7 +561,6 @@ class JNI {
if (obj == nullptr) {
return;
}
- ScopedObjectAccess soa(env);
IndirectReferenceTable& locals = reinterpret_cast<JNIEnvExt*>(env)->locals;
uint32_t cookie = reinterpret_cast<JNIEnvExt*>(env)->local_ref_cookie;
@@ -1892,11 +1626,11 @@ class JNI {
static jstring NewString(JNIEnv* env, const jchar* chars, jsize char_count) {
if (UNLIKELY(char_count < 0)) {
- JniAbortF("NewString", "char_count < 0: %d", char_count);
+ JavaVmExtFromEnv(env)->JniAbortF("NewString", "char_count < 0: %d", char_count);
return nullptr;
}
if (UNLIKELY(chars == nullptr && char_count > 0)) {
- JniAbortF("NewString", "chars == null && char_count > 0");
+ JavaVmExtFromEnv(env)->JniAbortF("NewString", "chars == null && char_count > 0");
return nullptr;
}
ScopedObjectAccess soa(env);
@@ -1958,7 +1692,7 @@ class JNI {
ScopedObjectAccess soa(env);
mirror::String* s = soa.Decode<mirror::String*>(java_string);
mirror::CharArray* chars = s->GetCharArray();
- PinPrimitiveArray(soa, chars);
+ soa.Vm()->PinPrimitiveArray(soa.Self(), chars);
gc::Heap* heap = Runtime::Current()->GetHeap();
if (heap->IsMovableObject(chars)) {
if (is_copy != nullptr) {
@@ -1987,7 +1721,7 @@ class JNI {
if (chars != (s_chars->GetData() + s->GetOffset())) {
delete[] chars;
}
- UnpinPrimitiveArray(soa, s->GetCharArray());
+ soa.Vm()->UnpinPrimitiveArray(soa.Self(), s->GetCharArray());
}
static const jchar* GetStringCritical(JNIEnv* env, jstring java_string, jboolean* is_copy) {
@@ -1996,7 +1730,7 @@ class JNI {
mirror::String* s = soa.Decode<mirror::String*>(java_string);
mirror::CharArray* chars = s->GetCharArray();
int32_t offset = s->GetOffset();
- PinPrimitiveArray(soa, chars);
+ soa.Vm()->PinPrimitiveArray(soa.Self(), chars);
gc::Heap* heap = Runtime::Current()->GetHeap();
if (heap->IsMovableObject(chars)) {
StackHandleScope<1> hs(soa.Self());
@@ -2012,7 +1746,8 @@ class JNI {
static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
- UnpinPrimitiveArray(soa, soa.Decode<mirror::String*>(java_string)->GetCharArray());
+ soa.Vm()->UnpinPrimitiveArray(soa.Self(),
+ soa.Decode<mirror::String*>(java_string)->GetCharArray());
gc::Heap* heap = Runtime::Current()->GetHeap();
mirror::String* s = soa.Decode<mirror::String*>(java_string);
mirror::CharArray* s_chars = s->GetCharArray();
@@ -2048,7 +1783,8 @@ class JNI {
ScopedObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(java_array);
if (UNLIKELY(!obj->IsArrayInstance())) {
- JniAbortF("GetArrayLength", "not an array: %s", PrettyTypeOf(obj).c_str());
+ soa.Vm()->JniAbortF("GetArrayLength", "not an array: %s", PrettyTypeOf(obj).c_str());
+ return 0;
}
mirror::Array* array = obj->AsArray();
return array->GetLength();
@@ -2103,7 +1839,7 @@ class JNI {
static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass element_jclass,
jobject initial_element) {
if (UNLIKELY(length < 0)) {
- JniAbortF("NewObjectArray", "negative array length: %d", length);
+ JavaVmExtFromEnv(env)->JniAbortF("NewObjectArray", "negative array length: %d", length);
return nullptr;
}
CHECK_NON_NULL_ARGUMENT(element_jclass);
@@ -2114,8 +1850,8 @@ class JNI {
{
mirror::Class* element_class = soa.Decode<mirror::Class*>(element_jclass);
if (UNLIKELY(element_class->IsPrimitive())) {
- JniAbortF("NewObjectArray", "not an object type: %s",
- PrettyDescriptor(element_class).c_str());
+ soa.Vm()->JniAbortF("NewObjectArray", "not an object type: %s",
+ PrettyDescriptor(element_class).c_str());
return nullptr;
}
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -2133,10 +1869,11 @@ class JNI {
if (initial_object != nullptr) {
mirror::Class* element_class = result->GetClass()->GetComponentType();
if (UNLIKELY(!element_class->IsAssignableFrom(initial_object->GetClass()))) {
- JniAbortF("NewObjectArray", "cannot assign object of type '%s' to array with element "
- "type of '%s'", PrettyDescriptor(initial_object->GetClass()).c_str(),
- PrettyDescriptor(element_class).c_str());
-
+ soa.Vm()->JniAbortF("NewObjectArray", "cannot assign object of type '%s' to array with "
+ "element type of '%s'",
+ PrettyDescriptor(initial_object->GetClass()).c_str(),
+ PrettyDescriptor(element_class).c_str());
+ return nullptr;
} else {
for (jsize i = 0; i < length; ++i) {
result->SetWithoutChecks<false>(i, initial_object);
@@ -2156,8 +1893,8 @@ class JNI {
ScopedObjectAccess soa(env);
mirror::Array* array = soa.Decode<mirror::Array*>(java_array);
if (UNLIKELY(!array->GetClass()->IsPrimitiveArray())) {
- JniAbortF("GetPrimitiveArrayCritical", "expected primitive array, given %s",
- PrettyDescriptor(array->GetClass()).c_str());
+ soa.Vm()->JniAbortF("GetPrimitiveArrayCritical", "expected primitive array, given %s",
+ PrettyDescriptor(array->GetClass()).c_str());
return nullptr;
}
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -2166,7 +1903,7 @@ class JNI {
// Re-decode in case the object moved since IncrementDisableGC waits for GC to complete.
array = soa.Decode<mirror::Array*>(java_array);
}
- PinPrimitiveArray(soa, array);
+ soa.Vm()->PinPrimitiveArray(soa.Self(), array);
if (is_copy != nullptr) {
*is_copy = JNI_FALSE;
}
@@ -2179,8 +1916,8 @@ class JNI {
ScopedObjectAccess soa(env);
mirror::Array* array = soa.Decode<mirror::Array*>(java_array);
if (UNLIKELY(!array->GetClass()->IsPrimitiveArray())) {
- JniAbortF("ReleasePrimitiveArrayCritical", "expected primitive array, given %s",
- PrettyDescriptor(array->GetClass()).c_str());
+ soa.Vm()->JniAbortF("ReleasePrimitiveArrayCritical", "expected primitive array, given %s",
+ PrettyDescriptor(array->GetClass()).c_str());
return;
}
const size_t component_size = array->GetClass()->GetComponentSize();
@@ -2352,8 +2089,9 @@ class JNI {
static jint RegisterNativeMethods(JNIEnv* env, jclass java_class, const JNINativeMethod* methods,
jint method_count, bool return_errors) {
if (UNLIKELY(method_count < 0)) {
- JniAbortF("RegisterNatives", "negative method count: %d", method_count);
- return JNI_ERR; // Not reached.
+ JavaVmExtFromEnv(env)->JniAbortF("RegisterNatives", "negative method count: %d",
+ method_count);
+ return JNI_ERR; // Not reached except in unit tests.
}
CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", java_class, JNI_ERR);
ScopedObjectAccess soa(env);
@@ -2477,17 +2215,21 @@ class JNI {
static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) {
if (capacity < 0) {
- JniAbortF("NewDirectByteBuffer", "negative buffer capacity: %" PRId64, capacity);
+ JavaVmExtFromEnv(env)->JniAbortF("NewDirectByteBuffer", "negative buffer capacity: %" PRId64,
+ capacity);
return nullptr;
}
if (address == nullptr && capacity != 0) {
- JniAbortF("NewDirectByteBuffer", "non-zero capacity for nullptr pointer: %" PRId64, capacity);
+ JavaVmExtFromEnv(env)->JniAbortF("NewDirectByteBuffer",
+ "non-zero capacity for nullptr pointer: %" PRId64, capacity);
return nullptr;
}
// At the moment, the capacity of DirectByteBuffer is limited to a signed int.
if (capacity > INT_MAX) {
- JniAbortF("NewDirectByteBuffer", "buffer capacity greater than maximum jint: %" PRId64, capacity);
+ JavaVmExtFromEnv(env)->JniAbortF("NewDirectByteBuffer",
+ "buffer capacity greater than maximum jint: %" PRId64,
+ capacity);
return nullptr;
}
jlong address_arg = reinterpret_cast<jlong>(address);
@@ -2541,8 +2283,9 @@ class JNI {
}
private:
- static jint EnsureLocalCapacity(ScopedObjectAccess& soa, jint desired_capacity,
- const char* caller) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ static jint EnsureLocalCapacityInternal(ScopedObjectAccess& soa, jint desired_capacity,
+ const char* caller)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// TODO: we should try to expand the table if necessary.
if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsMax)) {
LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity;
@@ -2559,11 +2302,11 @@ class JNI {
template<typename JniT, typename ArtT>
static JniT NewPrimitiveArray(JNIEnv* env, jsize length) {
+ ScopedObjectAccess soa(env);
if (UNLIKELY(length < 0)) {
- JniAbortF("NewPrimitiveArray", "negative array length: %d", length);
+ soa.Vm()->JniAbortF("NewPrimitiveArray", "negative array length: %d", length);
return nullptr;
}
- ScopedObjectAccess soa(env);
ArtT* result = ArtT::Alloc(soa.Self(), length);
return soa.AddLocalReference<JniT>(result);
}
@@ -2574,9 +2317,11 @@ class JNI {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array);
if (UNLIKELY(ArtArrayT::GetArrayClass() != array->GetClass())) {
- JniAbortF(fn_name, "attempt to %s %s primitive array elements with an object of type %s",
- operation, PrettyDescriptor(ArtArrayT::GetArrayClass()->GetComponentType()).c_str(),
- PrettyDescriptor(array->GetClass()).c_str());
+ soa.Vm()->JniAbortF(fn_name,
+ "attempt to %s %s primitive array elements with an object of type %s",
+ operation,
+ PrettyDescriptor(ArtArrayT::GetArrayClass()->GetComponentType()).c_str(),
+ PrettyDescriptor(array->GetClass()).c_str());
return nullptr;
}
DCHECK_EQ(sizeof(ElementT), array->GetClass()->GetComponentSize());
@@ -2593,7 +2338,7 @@ class JNI {
if (UNLIKELY(array == nullptr)) {
return nullptr;
}
- PinPrimitiveArray(soa, array);
+ soa.Vm()->PinPrimitiveArray(soa.Self(), array);
// Only make a copy if necessary.
if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
if (is_copy != nullptr) {
@@ -2639,8 +2384,9 @@ class JNI {
// heap address. TODO: This might be slow to check, may be worth keeping track of which
// copies we make?
if (heap->IsNonDiscontinuousSpaceHeapAddress(reinterpret_cast<mirror::Object*>(elements))) {
- JniAbortF("ReleaseArrayElements", "invalid element pointer %p, array elements are %p",
- reinterpret_cast<void*>(elements), array_data);
+ soa.Vm()->JniAbortF("ReleaseArrayElements",
+ "invalid element pointer %p, array elements are %p",
+ reinterpret_cast<void*>(elements), array_data);
return;
}
}
@@ -2655,7 +2401,7 @@ class JNI {
 // A non-copy of a movable object must mean that we had disabled the moving GC.
heap->DecrementDisableMovingGC(soa.Self());
}
- UnpinPrimitiveArray(soa, array);
+ soa.Vm()->UnpinPrimitiveArray(soa.Self(), array);
}
}
@@ -2936,472 +2682,8 @@ const JNINativeInterface gJniNativeInterface = {
JNI::GetObjectRefType,
};
-JNIEnvExt::JNIEnvExt(Thread* self, JavaVMExt* vm)
- : self(self),
- vm(vm),
- local_ref_cookie(IRT_FIRST_SEGMENT),
- locals(kLocalsInitial, kLocalsMax, kLocal),
- check_jni(false),
- critical(0),
- monitors("monitors", kMonitorsInitial, kMonitorsMax) {
- functions = unchecked_functions = &gJniNativeInterface;
- if (vm->check_jni) {
- SetCheckJniEnabled(true);
- }
-}
-
-JNIEnvExt::~JNIEnvExt() {
-}
-
-jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (obj == nullptr) {
- return nullptr;
- }
- return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
-}
-
-void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (obj != nullptr) {
- locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
- }
-}
-void JNIEnvExt::SetCheckJniEnabled(bool enabled) {
- check_jni = enabled;
- functions = enabled ? GetCheckJniNativeInterface() : &gJniNativeInterface;
-}
-
-void JNIEnvExt::DumpReferenceTables(std::ostream& os) {
- locals.Dump(os);
- monitors.Dump(os);
-}
-
-void JNIEnvExt::PushFrame(int capacity) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- UNUSED(capacity); // cpplint gets confused with (int) and thinks its a cast.
- // TODO: take 'capacity' into account.
- stacked_local_ref_cookies.push_back(local_ref_cookie);
- local_ref_cookie = locals.GetSegmentState();
-}
-
-void JNIEnvExt::PopFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- locals.SetSegmentState(local_ref_cookie);
- local_ref_cookie = stacked_local_ref_cookies.back();
- stacked_local_ref_cookies.pop_back();
-}
-
-Offset JNIEnvExt::SegmentStateOffset() {
- return Offset(OFFSETOF_MEMBER(JNIEnvExt, locals) +
- IndirectReferenceTable::SegmentStateOffset().Int32Value());
-}
-
-// JNI Invocation interface.
-
-extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
- const JavaVMInitArgs* args = static_cast<JavaVMInitArgs*>(vm_args);
- if (IsBadJniVersion(args->version)) {
- LOG(ERROR) << "Bad JNI version passed to CreateJavaVM: " << args->version;
- return JNI_EVERSION;
- }
- RuntimeOptions options;
- for (int i = 0; i < args->nOptions; ++i) {
- JavaVMOption* option = &args->options[i];
- options.push_back(std::make_pair(std::string(option->optionString), option->extraInfo));
- }
- bool ignore_unrecognized = args->ignoreUnrecognized;
- if (!Runtime::Create(options, ignore_unrecognized)) {
- return JNI_ERR;
- }
- Runtime* runtime = Runtime::Current();
- bool started = runtime->Start();
- if (!started) {
- delete Thread::Current()->GetJniEnv();
- delete runtime->GetJavaVM();
- LOG(WARNING) << "CreateJavaVM failed";
- return JNI_ERR;
- }
- *p_env = Thread::Current()->GetJniEnv();
- *p_vm = runtime->GetJavaVM();
- return JNI_OK;
-}
-
-extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize, jsize* vm_count) {
- Runtime* runtime = Runtime::Current();
- if (runtime == nullptr) {
- *vm_count = 0;
- } else {
- *vm_count = 1;
- vms[0] = runtime->GetJavaVM();
- }
- return JNI_OK;
-}
-
-// Historically unsupported.
-extern "C" jint JNI_GetDefaultJavaVMInitArgs(void* /*vm_args*/) {
- return JNI_ERR;
-}
-
-class JII {
- public:
- static jint DestroyJavaVM(JavaVM* vm) {
- if (vm == nullptr) {
- return JNI_ERR;
- }
- JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
- delete raw_vm->runtime;
- return JNI_OK;
- }
-
- static jint AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
- return JII_AttachCurrentThread(vm, p_env, thr_args, false);
- }
-
- static jint AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
- return JII_AttachCurrentThread(vm, p_env, thr_args, true);
- }
-
- static jint DetachCurrentThread(JavaVM* vm) {
- if (vm == nullptr || Thread::Current() == nullptr) {
- return JNI_ERR;
- }
- JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
- Runtime* runtime = raw_vm->runtime;
- runtime->DetachCurrentThread();
- return JNI_OK;
- }
-
- static jint GetEnv(JavaVM* vm, void** env, jint version) {
- // GetEnv always returns a JNIEnv* for the most current supported JNI version,
- // and unlike other calls that take a JNI version doesn't care if you supply
- // JNI_VERSION_1_1, which we don't otherwise support.
- if (IsBadJniVersion(version) && version != JNI_VERSION_1_1) {
- LOG(ERROR) << "Bad JNI version passed to GetEnv: " << version;
- return JNI_EVERSION;
- }
- if (vm == nullptr || env == nullptr) {
- return JNI_ERR;
- }
- Thread* thread = Thread::Current();
- if (thread == nullptr) {
- *env = nullptr;
- return JNI_EDETACHED;
- }
- *env = thread->GetJniEnv();
- return JNI_OK;
- }
-};
-
-const JNIInvokeInterface gJniInvokeInterface = {
- nullptr, // reserved0
- nullptr, // reserved1
- nullptr, // reserved2
- JII::DestroyJavaVM,
- JII::AttachCurrentThread,
- JII::DetachCurrentThread,
- JII::GetEnv,
- JII::AttachCurrentThreadAsDaemon
-};
-
-JavaVMExt::JavaVMExt(Runtime* runtime, ParsedOptions* options)
- : runtime(runtime),
- check_jni_abort_hook(nullptr),
- check_jni_abort_hook_data(nullptr),
- check_jni(false),
- force_copy(false), // TODO: add a way to enable this
- trace(options->jni_trace_),
- pins_lock("JNI pin table lock", kPinTableLock),
- pin_table("pin table", kPinTableInitial, kPinTableMax),
- globals_lock("JNI global reference table lock"),
- globals(gGlobalsInitial, gGlobalsMax, kGlobal),
- libraries_lock("JNI shared libraries map lock", kLoadLibraryLock),
- libraries(new Libraries),
- weak_globals_lock_("JNI weak global reference table lock"),
- weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
- allow_new_weak_globals_(true),
- weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
- functions = unchecked_functions = &gJniInvokeInterface;
- if (options->check_jni_) {
- SetCheckJniEnabled(true);
- }
-}
-
-JavaVMExt::~JavaVMExt() {
- delete libraries;
-}
-
-jweak JavaVMExt::AddWeakGlobalReference(Thread* self, mirror::Object* obj) {
- if (obj == nullptr) {
- return nullptr;
- }
- MutexLock mu(self, weak_globals_lock_);
- while (UNLIKELY(!allow_new_weak_globals_)) {
- weak_globals_add_condition_.WaitHoldingLocks(self);
- }
- IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
- return reinterpret_cast<jweak>(ref);
-}
-
-void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
- MutexLock mu(self, weak_globals_lock_);
- if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
- LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
- << "failed to find entry";
- }
-}
-
-void JavaVMExt::SetCheckJniEnabled(bool enabled) {
- check_jni = enabled;
- functions = enabled ? GetCheckJniInvokeInterface() : &gJniInvokeInterface;
-}
-
-void JavaVMExt::DumpForSigQuit(std::ostream& os) {
- os << "JNI: CheckJNI is " << (check_jni ? "on" : "off");
- if (force_copy) {
- os << " (with forcecopy)";
- }
- Thread* self = Thread::Current();
- {
- MutexLock mu(self, pins_lock);
- os << "; pins=" << pin_table.Size();
- }
- {
- ReaderMutexLock mu(self, globals_lock);
- os << "; globals=" << globals.Capacity();
- }
- {
- MutexLock mu(self, weak_globals_lock_);
- if (weak_globals_.Capacity() > 0) {
- os << " (plus " << weak_globals_.Capacity() << " weak)";
- }
- }
- os << '\n';
-
- {
- MutexLock mu(self, libraries_lock);
- os << "Libraries: " << Dumpable<Libraries>(*libraries) << " (" << libraries->size() << ")\n";
- }
-}
-
-void JavaVMExt::DisallowNewWeakGlobals() {
- MutexLock mu(Thread::Current(), weak_globals_lock_);
- allow_new_weak_globals_ = false;
-}
-
-void JavaVMExt::AllowNewWeakGlobals() {
- Thread* self = Thread::Current();
- MutexLock mu(self, weak_globals_lock_);
- allow_new_weak_globals_ = true;
- weak_globals_add_condition_.Broadcast(self);
-}
-
-mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
- MutexLock mu(self, weak_globals_lock_);
- while (UNLIKELY(!allow_new_weak_globals_)) {
- weak_globals_add_condition_.WaitHoldingLocks(self);
- }
- return weak_globals_.Get(ref);
-}
-
-void JavaVMExt::DumpReferenceTables(std::ostream& os) {
- Thread* self = Thread::Current();
- {
- ReaderMutexLock mu(self, globals_lock);
- globals.Dump(os);
- }
- {
- MutexLock mu(self, weak_globals_lock_);
- weak_globals_.Dump(os);
- }
- {
- MutexLock mu(self, pins_lock);
- pin_table.Dump(os);
- }
-}
-
-bool JavaVMExt::LoadNativeLibrary(const std::string& path,
- Handle<mirror::ClassLoader> class_loader,
- std::string* detail) {
- detail->clear();
-
- // See if we've already loaded this library. If we have, and the class loader
- // matches, return successfully without doing anything.
- // TODO: for better results we should canonicalize the pathname (or even compare
- // inodes). This implementation is fine if everybody is using System.loadLibrary.
- SharedLibrary* library;
- Thread* self = Thread::Current();
- {
- // TODO: move the locking (and more of this logic) into Libraries.
- MutexLock mu(self, libraries_lock);
- library = libraries->Get(path);
- }
- if (library != nullptr) {
- if (library->GetClassLoader() != class_loader.Get()) {
- // The library will be associated with class_loader. The JNI
- // spec says we can't load the same library into more than one
- // class loader.
- StringAppendF(detail, "Shared library \"%s\" already opened by "
- "ClassLoader %p; can't open in ClassLoader %p",
- path.c_str(), library->GetClassLoader(), class_loader.Get());
- LOG(WARNING) << detail;
- return false;
- }
- VLOG(jni) << "[Shared library \"" << path << "\" already loaded in "
- << "ClassLoader " << class_loader.Get() << "]";
- if (!library->CheckOnLoadResult()) {
- StringAppendF(detail, "JNI_OnLoad failed on a previous attempt "
- "to load \"%s\"", path.c_str());
- return false;
- }
- return true;
- }
-
- // Open the shared library. Because we're using a full path, the system
- // doesn't have to search through LD_LIBRARY_PATH. (It may do so to
- // resolve this library's dependencies though.)
-
- // Failures here are expected when java.library.path has several entries
- // and we have to hunt for the lib.
-
- // Below we dlopen but there is no paired dlclose, this would be necessary if we supported
- // class unloading. Libraries will only be unloaded when the reference count (incremented by
- // dlopen) becomes zero from dlclose.
-
- // This can execute slowly for a large library on a busy system, so we
- // want to switch from kRunnable while it executes. This allows the GC to ignore us.
- self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad);
- void* handle = dlopen(path.empty() ? nullptr : path.c_str(), RTLD_LAZY);
- self->TransitionFromSuspendedToRunnable();
-
- VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_LAZY) returned " << handle << "]";
-
- if (handle == nullptr) {
- *detail = dlerror();
- LOG(ERROR) << "dlopen(\"" << path << "\", RTLD_LAZY) failed: " << *detail;
- return false;
- }
-
- // Create a new entry.
- // TODO: move the locking (and more of this logic) into Libraries.
- bool created_library = false;
- {
- MutexLock mu(self, libraries_lock);
- library = libraries->Get(path);
- if (library == nullptr) { // We won race to get libraries_lock
- library = new SharedLibrary(path, handle, class_loader.Get());
- libraries->Put(path, library);
- created_library = true;
- }
- }
- if (!created_library) {
- LOG(INFO) << "WOW: we lost a race to add shared library: "
- << "\"" << path << "\" ClassLoader=" << class_loader.Get();
- return library->CheckOnLoadResult();
- }
-
- VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader.Get()
- << "]";
-
- bool was_successful = false;
- void* sym = dlsym(handle, "JNI_OnLoad");
- if (sym == nullptr) {
- VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
- was_successful = true;
- } else {
- // Call JNI_OnLoad. We have to override the current class
- // loader, which will always be "null" since the stuff at the
- // top of the stack is around Runtime.loadLibrary(). (See
- // the comments in the JNI FindClass function.)
- typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
- JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
- StackHandleScope<1> hs(self);
- Handle<mirror::ClassLoader> old_class_loader(hs.NewHandle(self->GetClassLoaderOverride()));
- self->SetClassLoaderOverride(class_loader.Get());
-
- int version = 0;
- {
- ScopedThreadStateChange tsc(self, kNative);
- VLOG(jni) << "[Calling JNI_OnLoad in \"" << path << "\"]";
- version = (*jni_on_load)(this, nullptr);
- }
-
- self->SetClassLoaderOverride(old_class_loader.Get());
-
- if (version == JNI_ERR) {
- StringAppendF(detail, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
- } else if (IsBadJniVersion(version)) {
- StringAppendF(detail, "Bad JNI version returned from JNI_OnLoad in \"%s\": %d",
- path.c_str(), version);
- // It's unwise to call dlclose() here, but we can mark it
- // as bad and ensure that future load attempts will fail.
- // We don't know how far JNI_OnLoad got, so there could
- // be some partially-initialized stuff accessible through
- // newly-registered native method calls. We could try to
- // unregister them, but that doesn't seem worthwhile.
- } else {
- was_successful = true;
- }
- VLOG(jni) << "[Returned " << (was_successful ? "successfully" : "failure")
- << " from JNI_OnLoad in \"" << path << "\"]";
- }
-
- library->SetResult(was_successful);
- return was_successful;
-}
-
-void* JavaVMExt::FindCodeForNativeMethod(mirror::ArtMethod* m) {
- CHECK(m->IsNative());
- mirror::Class* c = m->GetDeclaringClass();
- // If this is a static method, it could be called before the class has been initialized.
- if (m->IsStatic()) {
- c = EnsureInitialized(Thread::Current(), c);
- if (c == nullptr) {
- return nullptr;
- }
- } else {
- CHECK(c->IsInitializing()) << c->GetStatus() << " " << PrettyMethod(m);
- }
- std::string detail;
- void* native_method;
- Thread* self = Thread::Current();
- {
- MutexLock mu(self, libraries_lock);
- native_method = libraries->FindNativeMethod(m, detail);
- }
- // Throwing can cause libraries_lock to be reacquired.
- if (native_method == nullptr) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewException(throw_location, "Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
- }
- return native_method;
-}
-
-void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
- MutexLock mu(Thread::Current(), weak_globals_lock_);
- for (mirror::Object** entry : weak_globals_) {
- // Since this is called by the GC, we don't need a read barrier.
- mirror::Object* obj = *entry;
- mirror::Object* new_obj = callback(obj, arg);
- if (new_obj == nullptr) {
- new_obj = kClearedJniWeakGlobal;
- }
- *entry = new_obj;
- }
-}
-
-void JavaVMExt::VisitRoots(RootCallback* callback, void* arg) {
- Thread* self = Thread::Current();
- {
- ReaderMutexLock mu(self, globals_lock);
- globals.VisitRoots(callback, arg, 0, kRootJNIGlobal);
- }
- {
- MutexLock mu(self, pins_lock);
- pin_table.VisitRoots(callback, arg, 0, kRootVMInternal);
- }
- {
- MutexLock mu(self, libraries_lock);
- // Libraries contains shared libraries which hold a pointer to a class loader.
- libraries->VisitRoots(callback, arg);
- }
- // The weak_globals table is visited by the GC itself (because it mutates the table).
+const JNINativeInterface* GetJniNativeInterface() {
+ return &gJniNativeInterface;
}
void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
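
Editor's note: GetJniNativeInterface gives the rest of the runtime (notably JNIEnvExt::SetCheckJniEnabled earlier in this patch) a single accessor for the unchecked function table, so toggling -Xcheck:jni is just a pointer swap on the JNIEnv's functions field. The toy model below illustrates that switch; the types are simplified stand-ins, not the real JNI interfaces.

#include <cstdio>

// Toy stand-ins for the two JNI function tables.
struct NativeInterface { const char* name; };
static const NativeInterface gUnchecked = { "unchecked" };
static const NativeInterface gChecked   = { "checked" };

struct ToyEnv {
  const NativeInterface* functions = &gUnchecked;            // what callers dispatch through
  const NativeInterface* unchecked_functions = &gUnchecked;  // kept for CheckJNI's own use

  void SetCheckJniEnabled(bool enabled) {
    functions = enabled ? &gChecked : unchecked_functions;   // pure pointer swap
  }
};

int main() {
  ToyEnv env;
  env.SetCheckJniEnabled(true);
  std::printf("%s\n", env.functions->name);  // prints "checked"
  return 0;
}
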
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index abb71b7fbf..48b10f5825 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -17,16 +17,8 @@
#ifndef ART_RUNTIME_JNI_INTERNAL_H_
#define ART_RUNTIME_JNI_INTERNAL_H_
-#include "jni.h"
-
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "indirect_reference_table.h"
-#include "object_callbacks.h"
-#include "reference_table.h"
-
+#include <jni.h>
#include <iosfwd>
-#include <string>
#ifndef NATIVE_METHOD
#define NATIVE_METHOD(className, functionName, signature) \
@@ -36,187 +28,18 @@
RegisterNativeMethods(env, jni_class_name, gMethods, arraysize(gMethods))
namespace art {
-namespace mirror {
- class ArtField;
- class ArtMethod;
- class ClassLoader;
-} // namespace mirror
-union JValue;
-class Libraries;
-class ParsedOptions;
-class Runtime;
-class ScopedObjectAccess;
-template<class T> class Handle;
-class Thread;
-void JniAbortF(const char* jni_function_name, const char* fmt, ...)
- __attribute__((__format__(__printf__, 2, 3)));
+const JNINativeInterface* GetJniNativeInterface();
+
+// Similar to RegisterNatives except it's passed a descriptor for a class name and failures are
+// fatal.
void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
jint method_count);
int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause);
-class JavaVMExt : public JavaVM {
- public:
- JavaVMExt(Runtime* runtime, ParsedOptions* options);
- ~JavaVMExt();
-
- /**
- * Loads the given shared library. 'path' is an absolute pathname.
- *
- * Returns 'true' on success. On failure, sets 'detail' to a
- * human-readable description of the error.
- */
- bool LoadNativeLibrary(const std::string& path, Handle<mirror::ClassLoader> class_loader,
- std::string* detail)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- /**
- * Returns a pointer to the code for the native method 'm', found
- * using dlsym(3) on every native library that's been loaded so far.
- */
- void* FindCodeForNativeMethod(mirror::ArtMethod* m)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void DumpForSigQuit(std::ostream& os);
-
- void DumpReferenceTables(std::ostream& os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void SetCheckJniEnabled(bool enabled);
-
- void VisitRoots(RootCallback* callback, void* arg);
-
- void DisallowNewWeakGlobals() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- jweak AddWeakGlobalReference(Thread* self, mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DeleteWeakGlobalRef(Thread* self, jweak obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg);
- mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- Runtime* runtime;
-
- // Used for testing. By default, we'll LOG(FATAL) the reason.
- void (*check_jni_abort_hook)(void* data, const std::string& reason);
- void* check_jni_abort_hook_data;
-
- // Extra checking.
- bool check_jni;
- bool force_copy;
-
- // Extra diagnostics.
- std::string trace;
-
- // Used to hold references to pinned primitive arrays.
- Mutex pins_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
- ReferenceTable pin_table GUARDED_BY(pins_lock);
-
- // JNI global references.
- ReaderWriterMutex globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
- // Not guarded by globals_lock since we sometimes use SynchronizedGet in Thread::DecodeJObject.
- IndirectReferenceTable globals;
-
- Mutex libraries_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
- Libraries* libraries GUARDED_BY(libraries_lock);
-
- // Used by -Xcheck:jni.
- const JNIInvokeInterface* unchecked_functions;
-
- private:
- // TODO: Make the other members of this class also private.
- // JNI weak global references.
- Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- // Since weak_globals_ contain weak roots, be careful not to
- // directly access the object references in it. Use Get() with the
- // read barrier enabled.
- IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
- bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
- ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
-};
-
-struct JNIEnvExt : public JNIEnv {
- JNIEnvExt(Thread* self, JavaVMExt* vm);
- ~JNIEnvExt();
-
- void DumpReferenceTables(std::ostream& os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void SetCheckJniEnabled(bool enabled);
-
- void PushFrame(int capacity);
- void PopFrame();
-
- template<typename T>
- T AddLocalReference(mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- static Offset SegmentStateOffset();
-
- static Offset LocalRefCookieOffset() {
- return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie));
- }
-
- static Offset SelfOffset() {
- return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
- }
-
- jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- Thread* const self;
- JavaVMExt* vm;
-
- // Cookie used when using the local indirect reference table.
- uint32_t local_ref_cookie;
-
- // JNI local references.
- IndirectReferenceTable locals GUARDED_BY(Locks::mutator_lock_);
-
- // Stack of cookies corresponding to PushLocalFrame/PopLocalFrame calls.
- // TODO: to avoid leaks (and bugs), we need to clear this vector on entry (or return)
- // to a native method.
- std::vector<uint32_t> stacked_local_ref_cookies;
-
- // Frequently-accessed fields cached from JavaVM.
- bool check_jni;
-
- // How many nested "critical" JNI calls are we in?
- int critical;
-
- // Entered JNI monitors, for bulk exit on thread detach.
- ReferenceTable monitors;
-
- // Used by -Xcheck:jni.
- const JNINativeInterface* unchecked_functions;
-};
-
-const JNINativeInterface* GetCheckJniNativeInterface();
-const JNIInvokeInterface* GetCheckJniInvokeInterface();
-
-// Used to save and restore the JNIEnvExt state when not going through code created by the JNI
-// compiler
-class ScopedJniEnvLocalRefState {
- public:
- explicit ScopedJniEnvLocalRefState(JNIEnvExt* env) : env_(env) {
- saved_local_ref_cookie_ = env->local_ref_cookie;
- env->local_ref_cookie = env->locals.GetSegmentState();
- }
-
- ~ScopedJniEnvLocalRefState() {
- env_->locals.SetSegmentState(env_->local_ref_cookie);
- env_->local_ref_cookie = saved_local_ref_cookie_;
- }
-
- private:
- JNIEnvExt* env_;
- uint32_t saved_local_ref_cookie_;
- DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
-};
-
} // namespace art
std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs);
+
#endif // ART_RUNTIME_JNI_INTERNAL_H_
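
Editor's note: the RegisterNativeMethods helper declared above is a failure-is-fatal convenience over the standard RegisterNatives flow. For reference, the plain JNI pattern it wraps looks like the sketch below (the class and method names are examples only; assumes <jni.h>):

static jint JNICALL NativeAdd(JNIEnv*, jclass, jint a, jint b) {
  return a + b;
}

static const JNINativeMethod gExampleMethods[] = {
  // name, signature, function pointer
  { "nativeAdd", "(II)I", reinterpret_cast<void*>(NativeAdd) },
};

bool RegisterExampleNatives(JNIEnv* env) {
  jclass klass = env->FindClass("com/example/Example");
  if (klass == nullptr) {
    return false;  // ClassNotFoundException is pending
  }
  jint count = sizeof(gExampleMethods) / sizeof(gExampleMethods[0]);
  return env->RegisterNatives(klass, gExampleMethods, count) == JNI_OK;
}
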
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 7c7e60c5ee..844d14a063 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -17,6 +17,7 @@
#include "jni_internal.h"
#include "common_compiler_test.h"
+#include "java_vm_ext.h"
#include "mirror/art_method-inl.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change.h"
@@ -53,24 +54,15 @@ class JniInternalTest : public CommonCompilerTest {
}
void ExpectException(jclass exception_class) {
- EXPECT_TRUE(env_->ExceptionCheck());
+ ScopedObjectAccess soa(env_);
+ EXPECT_TRUE(env_->ExceptionCheck())
+ << PrettyDescriptor(soa.Decode<mirror::Class*>(exception_class));
jthrowable exception = env_->ExceptionOccurred();
EXPECT_NE(nullptr, exception);
env_->ExceptionClear();
EXPECT_TRUE(env_->IsInstanceOf(exception, exception_class));
}
- void ExpectClassFound(const char* name) {
- EXPECT_NE(env_->FindClass(name), nullptr) << name;
- EXPECT_FALSE(env_->ExceptionCheck()) << name;
- }
-
- void ExpectClassNotFound(const char* name) {
- EXPECT_EQ(env_->FindClass(name), nullptr) << name;
- EXPECT_TRUE(env_->ExceptionCheck()) << name;
- env_->ExceptionClear();
- }
-
void CleanUpJniEnv() {
if (aioobe_ != nullptr) {
env_->DeleteGlobalRef(aioobe_);
@@ -98,6 +90,510 @@ class JniInternalTest : public CommonCompilerTest {
return soa.AddLocalReference<jclass>(c);
}
+ void ExpectClassFound(const char* name) {
+ EXPECT_NE(env_->FindClass(name), nullptr) << name;
+ EXPECT_FALSE(env_->ExceptionCheck()) << name;
+ }
+
+ void ExpectClassNotFound(const char* name, bool check_jni, const char* check_jni_msg,
+ CheckJniAbortCatcher* abort_catcher) {
+ EXPECT_EQ(env_->FindClass(name), nullptr) << name;
+ if (!check_jni || check_jni_msg == nullptr) {
+ EXPECT_TRUE(env_->ExceptionCheck()) << name;
+ env_->ExceptionClear();
+ } else {
+ abort_catcher->Check(check_jni_msg);
+ }
+ }
+
+ void FindClassTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ // Null argument is always an abort.
+ env_->FindClass(nullptr);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "name == null");
+
+ // Reference types...
+ ExpectClassFound("java/lang/String");
+ // ...for arrays too, where you must include "L;".
+ ExpectClassFound("[Ljava/lang/String;");
+ // Primitive arrays are okay too, if the primitive type is valid.
+ ExpectClassFound("[C");
+
+ // But primitive types aren't allowed...
+ ExpectClassNotFound("C", check_jni, nullptr, &check_jni_abort_catcher);
+ ExpectClassNotFound("V", check_jni, nullptr, &check_jni_abort_catcher);
+ ExpectClassNotFound("K", check_jni, nullptr, &check_jni_abort_catcher);
+
+ if (check_jni) {
+ // Check JNI will reject invalid class names as aborts but without pending exceptions.
+ EXPECT_EQ(env_->FindClass("java.lang.String"), nullptr);
+ EXPECT_FALSE(env_->ExceptionCheck());
+ check_jni_abort_catcher.Check("illegal class name 'java.lang.String'");
+
+ EXPECT_EQ(env_->FindClass("[Ljava.lang.String;"), nullptr);
+ EXPECT_FALSE(env_->ExceptionCheck());
+ check_jni_abort_catcher.Check("illegal class name '[Ljava.lang.String;'");
+ } else {
+ // Without check JNI we're tolerant and replace '.' with '/'.
+ ExpectClassFound("java.lang.String");
+ ExpectClassFound("[Ljava.lang.String;");
+ }
+
+ ExpectClassNotFound("Ljava.lang.String;", check_jni, "illegal class name 'Ljava.lang.String;'",
+ &check_jni_abort_catcher);
+ ExpectClassNotFound("[java.lang.String", check_jni, "illegal class name '[java.lang.String'",
+ &check_jni_abort_catcher);
+
+ // You can't include the "L;" in a JNI class descriptor.
+ ExpectClassNotFound("Ljava/lang/String;", check_jni, "illegal class name 'Ljava/lang/String;'",
+ &check_jni_abort_catcher);
+
+ // But you must include it for an array of any reference type.
+ ExpectClassNotFound("[java/lang/String", check_jni, "illegal class name '[java/lang/String'",
+ &check_jni_abort_catcher);
+
+ ExpectClassNotFound("[K", check_jni, "illegal class name '[K'", &check_jni_abort_catcher);
+
+ // Void arrays aren't allowed.
+ ExpectClassNotFound("[V", check_jni, "illegal class name '[V'", &check_jni_abort_catcher);
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetFieldIdBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jclass c = env_->FindClass("java/lang/String");
+ ASSERT_NE(c, nullptr);
+
+ jfieldID fid = env_->GetFieldID(nullptr, "count", "I");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "GetFieldID received NULL jclass"
+ : "java_class == null");
+ fid = env_->GetFieldID(c, nullptr, "I");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "name == null");
+ fid = env_->GetFieldID(c, "count", nullptr);
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "sig == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetStaticFieldIdBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jclass c = env_->FindClass("java/lang/String");
+ ASSERT_NE(c, nullptr);
+
+ jfieldID fid = env_->GetStaticFieldID(nullptr, "CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "GetStaticFieldID received NULL jclass"
+ : "java_class == null");
+ fid = env_->GetStaticFieldID(c, nullptr, "Ljava/util/Comparator;");
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "name == null");
+ fid = env_->GetStaticFieldID(c, "CASE_INSENSITIVE_ORDER", nullptr);
+ EXPECT_EQ(nullptr, fid);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "sig == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetMethodIdBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jmethodID method = env_->GetMethodID(nullptr, "<init>", "(Ljava/lang/String;)V");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "GetMethodID received NULL jclass"
+ : "java_class == null");
+ jclass jlnsme = env_->FindClass("java/lang/NoSuchMethodError");
+ ASSERT_TRUE(jlnsme != nullptr);
+ method = env_->GetMethodID(jlnsme, nullptr, "(Ljava/lang/String;)V");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "name == null");
+ method = env_->GetMethodID(jlnsme, "<init>", nullptr);
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "sig == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetStaticMethodIdBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jmethodID method = env_->GetStaticMethodID(nullptr, "valueOf", "(I)Ljava/lang/String;");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "GetStaticMethodID received NULL jclass"
+ : "java_class == null");
+ jclass jlstring = env_->FindClass("java/lang/String");
+ method = env_->GetStaticMethodID(jlstring, nullptr, "(I)Ljava/lang/String;");
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "name == null");
+ method = env_->GetStaticMethodID(jlstring, "valueOf", nullptr);
+ EXPECT_EQ(nullptr, method);
+ check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+ : "sig == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetFromReflectedField_ToReflectedFieldBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jclass c = env_->FindClass("java/lang/String");
+ ASSERT_NE(c, nullptr);
+ jfieldID fid = env_->GetFieldID(c, "count", "I");
+ ASSERT_NE(fid, nullptr);
+
+ // Check passing a null class argument; it is only rejected when CheckJNI is enabled.
+ jobject field = env_->ToReflectedField(nullptr, fid, JNI_FALSE);
+ if (check_jni) {
+ EXPECT_EQ(field, nullptr);
+ check_jni_abort_catcher.Check("ToReflectedField received NULL jclass");
+ } else {
+ EXPECT_NE(field, nullptr);
+ }
+
+ field = env_->ToReflectedField(c, nullptr, JNI_FALSE);
+ EXPECT_EQ(field, nullptr);
+ check_jni_abort_catcher.Check(check_jni ? "jfieldID was NULL"
+ : "fid == null");
+
+ fid = env_->FromReflectedField(nullptr);
+ ASSERT_EQ(fid, nullptr);
+ check_jni_abort_catcher.Check(check_jni ? "expected non-null java.lang.reflect.Field"
+ : "jlr_field == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetFromReflectedMethod_ToReflectedMethodBadArgumentTest(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+
+ jclass c = env_->FindClass("java/lang/String");
+ ASSERT_NE(c, nullptr);
+ jmethodID mid = env_->GetMethodID(c, "<init>", "()V");
+ ASSERT_NE(mid, nullptr);
+
+ // Check passing a null class argument; it is only rejected when CheckJNI is enabled.
+ jobject method = env_->ToReflectedMethod(nullptr, mid, JNI_FALSE);
+ if (check_jni) {
+ EXPECT_EQ(method, nullptr);
+ check_jni_abort_catcher.Check("ToReflectedMethod received NULL jclass");
+ } else {
+ EXPECT_NE(method, nullptr);
+ }
+
+ method = env_->ToReflectedMethod(c, nullptr, JNI_FALSE);
+ EXPECT_EQ(method, nullptr);
+ check_jni_abort_catcher.Check(check_jni ? "jmethodID was NULL"
+ : "mid == null");
+ mid = env_->FromReflectedMethod(method);
+ ASSERT_EQ(mid, nullptr);
+ check_jni_abort_catcher.Check(check_jni ? "expected non-null method" : "jlr_method == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void RegisterAndUnregisterNativesBadArguments(bool check_jni,
+ CheckJniAbortCatcher* check_jni_abort_catcher) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ // Passing a null class is a failure.
+ {
+ JNINativeMethod methods[] = { };
+ EXPECT_EQ(env_->RegisterNatives(nullptr, methods, 0), JNI_ERR);
+ check_jni_abort_catcher->Check(check_jni ? "RegisterNatives received NULL jclass"
+ : "java_class == null");
+ }
+
+ // Passing null methods is a failure.
+ jclass jlobject = env_->FindClass("java/lang/Object");
+ EXPECT_EQ(env_->RegisterNatives(jlobject, nullptr, 1), JNI_ERR);
+ check_jni_abort_catcher->Check("methods == null");
+
+ // Unregistering null is a failure.
+ EXPECT_EQ(env_->UnregisterNatives(nullptr), JNI_ERR);
+ check_jni_abort_catcher->Check(check_jni ? "UnregisterNatives received NULL jclass"
+ : "java_class == null");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+
+ void GetPrimitiveArrayElementsOfWrongType(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+
+ jbooleanArray array = env_->NewBooleanArray(10);
+ jboolean is_copy;
+ EXPECT_EQ(env_->GetByteArrayElements(reinterpret_cast<jbyteArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected byte[]"
+ : "attempt to get byte primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetShortArrayElements(reinterpret_cast<jshortArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected short[]"
+ : "attempt to get short primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetCharArrayElements(reinterpret_cast<jcharArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected char[]"
+ : "attempt to get char primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetIntArrayElements(reinterpret_cast<jintArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected int[]"
+ : "attempt to get int primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetLongArrayElements(reinterpret_cast<jlongArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected long[]"
+ : "attempt to get long primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetFloatArrayElements(reinterpret_cast<jfloatArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected float[]"
+ : "attempt to get float primitive array elements with an object of type boolean[]");
+ EXPECT_EQ(env_->GetDoubleArrayElements(reinterpret_cast<jdoubleArray>(array), &is_copy), nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected double[]"
+ : "attempt to get double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), &is_copy),
+ nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type byte[] expected boolean[]"
+ : "attempt to get boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), &is_copy),
+ nullptr);
+ jni_abort_catcher.Check(
+ check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "attempt to get boolean primitive array elements with an object of type java.lang.String");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void ReleasePrimitiveArrayElementsOfWrongType(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+
+ jbooleanArray array = env_->NewBooleanArray(10);
+ ASSERT_TRUE(array != nullptr);
+ jboolean is_copy;
+ jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
+ ASSERT_TRUE(elements != nullptr);
+ env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
+ reinterpret_cast<jbyte*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected byte[]"
+ : "attempt to release byte primitive array elements with an object of type boolean[]");
+ env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
+ reinterpret_cast<jshort*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected short[]"
+ : "attempt to release short primitive array elements with an object of type boolean[]");
+ env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
+ reinterpret_cast<jchar*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected char[]"
+ : "attempt to release char primitive array elements with an object of type boolean[]");
+ env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
+ reinterpret_cast<jint*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected int[]"
+ : "attempt to release int primitive array elements with an object of type boolean[]");
+ env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
+ reinterpret_cast<jlong*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected long[]"
+ : "attempt to release long primitive array elements with an object of type boolean[]");
+ env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
+ reinterpret_cast<jfloat*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected float[]"
+ : "attempt to release float primitive array elements with an object of type boolean[]");
+ env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
+ reinterpret_cast<jdouble*>(elements), 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected double[]"
+ : "attempt to release double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), elements, 0);
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type byte[] expected boolean[]"
+ : "attempt to release boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), elements, 0);
+ jni_abort_catcher.Check(
+ check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "attempt to release boolean primitive array elements with an object of type "
+ "java.lang.String");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetReleasePrimitiveArrayCriticalOfWrongType(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+
+ jobject object = env_->NewStringUTF("Test String");
+ jboolean is_copy;
+ void* elements = env_->GetPrimitiveArrayCritical(reinterpret_cast<jarray>(object), &is_copy);
+ jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "expected primitive array, given java.lang.String");
+ env_->ReleasePrimitiveArrayCritical(reinterpret_cast<jarray>(object), elements, 0);
+ jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "expected primitive array, given java.lang.String");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void GetPrimitiveArrayRegionElementsOfWrongType(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+ constexpr size_t kLength = 10;
+ jbooleanArray array = env_->NewBooleanArray(kLength);
+ ASSERT_TRUE(array != nullptr);
+ jboolean elements[kLength];
+ env_->GetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
+ reinterpret_cast<jbyte*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected byte[]"
+ : "attempt to get region of byte primitive array elements with an object of type boolean[]");
+ env_->GetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
+ reinterpret_cast<jshort*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected short[]"
+ : "attempt to get region of short primitive array elements with an object of type boolean[]");
+ env_->GetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
+ reinterpret_cast<jchar*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected char[]"
+ : "attempt to get region of char primitive array elements with an object of type boolean[]");
+ env_->GetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
+ reinterpret_cast<jint*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected int[]"
+ : "attempt to get region of int primitive array elements with an object of type boolean[]");
+ env_->GetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
+ reinterpret_cast<jlong*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected long[]"
+ : "attempt to get region of long primitive array elements with an object of type boolean[]");
+ env_->GetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
+ reinterpret_cast<jfloat*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected float[]"
+ : "attempt to get region of float primitive array elements with an object of type boolean[]");
+ env_->GetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
+ reinterpret_cast<jdouble*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected double[]"
+ : "attempt to get region of double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type byte[] expected boolean[]"
+ : "attempt to get region of boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "attempt to get region of boolean primitive array elements with an object of type "
+ "java.lang.String");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void SetPrimitiveArrayRegionElementsOfWrongType(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+ constexpr size_t kLength = 10;
+ jbooleanArray array = env_->NewBooleanArray(kLength);
+ ASSERT_TRUE(array != nullptr);
+ jboolean elements[kLength];
+ env_->SetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
+ reinterpret_cast<jbyte*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected byte[]"
+ : "attempt to set region of byte primitive array elements with an object of type boolean[]");
+ env_->SetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
+ reinterpret_cast<jshort*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected short[]"
+ : "attempt to set region of short primitive array elements with an object of type boolean[]");
+ env_->SetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
+ reinterpret_cast<jchar*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected char[]"
+ : "attempt to set region of char primitive array elements with an object of type boolean[]");
+ env_->SetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
+ reinterpret_cast<jint*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected int[]"
+ : "attempt to set region of int primitive array elements with an object of type boolean[]");
+ env_->SetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
+ reinterpret_cast<jlong*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected long[]"
+ : "attempt to set region of long primitive array elements with an object of type boolean[]");
+ env_->SetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
+ reinterpret_cast<jfloat*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected float[]"
+ : "attempt to set region of float primitive array elements with an object of type boolean[]");
+ env_->SetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
+ reinterpret_cast<jdouble*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type boolean[] expected double[]"
+ : "attempt to set region of double primitive array elements with an object of type boolean[]");
+ jbyteArray array2 = env_->NewByteArray(10);
+ env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(
+ check_jni ? "incompatible array type byte[] expected boolean[]"
+ : "attempt to set region of boolean primitive array elements with an object of type byte[]");
+ jobject object = env_->NewStringUTF("Test String");
+ env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
+ reinterpret_cast<jboolean*>(elements));
+ jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+ : "attempt to set region of boolean primitive array elements with an object of type "
+ "java.lang.String");
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
+ void NewObjectArrayBadArguments(bool check_jni) {
+ bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+ CheckJniAbortCatcher jni_abort_catcher;
+
+ jclass element_class = env_->FindClass("java/lang/String");
+ ASSERT_NE(element_class, nullptr);
+
+ env_->NewObjectArray(-1, element_class, nullptr);
+ jni_abort_catcher.Check(check_jni ? "negative jsize: -1" : "negative array length: -1");
+
+ env_->NewObjectArray(std::numeric_limits<jint>::min(), element_class, nullptr);
+ jni_abort_catcher.Check(check_jni ? "negative jsize: -2147483648"
+ : "negative array length: -2147483648");
+
+ EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+ }
+
JavaVMExt* vm_;
JNIEnv* env_;
jclass aioobe_;
@@ -125,48 +621,8 @@ TEST_F(JniInternalTest, GetVersion) {
}
TEST_F(JniInternalTest, FindClass) {
- // Reference types...
- ExpectClassFound("java/lang/String");
- // ...for arrays too, where you must include "L;".
- ExpectClassFound("[Ljava/lang/String;");
- // Primitive arrays are okay too, if the primitive type is valid.
- ExpectClassFound("[C");
-
- {
- CheckJniAbortCatcher check_jni_abort_catcher;
- env_->FindClass(nullptr);
- check_jni_abort_catcher.Check("name == null");
-
- // We support . as well as / for compatibility, if -Xcheck:jni is off.
- ExpectClassFound("java.lang.String");
- check_jni_abort_catcher.Check("illegal class name 'java.lang.String'");
- ExpectClassNotFound("Ljava.lang.String;");
- check_jni_abort_catcher.Check("illegal class name 'Ljava.lang.String;'");
- ExpectClassFound("[Ljava.lang.String;");
- check_jni_abort_catcher.Check("illegal class name '[Ljava.lang.String;'");
- ExpectClassNotFound("[java.lang.String");
- check_jni_abort_catcher.Check("illegal class name '[java.lang.String'");
-
- // You can't include the "L;" in a JNI class descriptor.
- ExpectClassNotFound("Ljava/lang/String;");
- check_jni_abort_catcher.Check("illegal class name 'Ljava/lang/String;'");
-
- // But you must include it for an array of any reference type.
- ExpectClassNotFound("[java/lang/String");
- check_jni_abort_catcher.Check("illegal class name '[java/lang/String'");
-
- ExpectClassNotFound("[K");
- check_jni_abort_catcher.Check("illegal class name '[K'");
-
- // Void arrays aren't allowed.
- ExpectClassNotFound("[V");
- check_jni_abort_catcher.Check("illegal class name '[V'");
- }
-
- // But primitive types aren't allowed...
- ExpectClassNotFound("C");
- ExpectClassNotFound("V");
- ExpectClassNotFound("K");
+ FindClassTest(false);
+ FindClassTest(true);
}
TEST_F(JniInternalTest, GetFieldID) {
@@ -208,16 +664,8 @@ TEST_F(JniInternalTest, GetFieldID) {
ExpectException(jlnsfe);
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- fid = env_->GetFieldID(nullptr, "count", "I");
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("java_class == null");
- fid = env_->GetFieldID(c, nullptr, "I");
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("name == null");
- fid = env_->GetFieldID(c, "count", nullptr);
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("sig == null");
+ GetFieldIdBadArgumentTest(false);
+ GetFieldIdBadArgumentTest(true);
}
TEST_F(JniInternalTest, GetStaticFieldID) {
@@ -253,16 +701,8 @@ TEST_F(JniInternalTest, GetStaticFieldID) {
ExpectException(jlnsfe);
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- fid = env_->GetStaticFieldID(nullptr, "CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("java_class == null");
- fid = env_->GetStaticFieldID(c, nullptr, "Ljava/util/Comparator;");
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("name == null");
- fid = env_->GetStaticFieldID(c, "CASE_INSENSITIVE_ORDER", nullptr);
- EXPECT_EQ(nullptr, fid);
- check_jni_abort_catcher.Check("sig == null");
+ GetStaticFieldIdBadArgumentTest(false);
+ GetStaticFieldIdBadArgumentTest(true);
}
TEST_F(JniInternalTest, GetMethodID) {
@@ -302,16 +742,8 @@ TEST_F(JniInternalTest, GetMethodID) {
EXPECT_FALSE(env_->ExceptionCheck());
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- method = env_->GetMethodID(nullptr, "<init>", "(Ljava/lang/String;)V");
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("java_class == null");
- method = env_->GetMethodID(jlnsme, nullptr, "(Ljava/lang/String;)V");
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("name == null");
- method = env_->GetMethodID(jlnsme, "<init>", nullptr);
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("sig == null");
+ GetMethodIdBadArgumentTest(false);
+ GetMethodIdBadArgumentTest(true);
}
TEST_F(JniInternalTest, GetStaticMethodID) {
@@ -340,16 +772,8 @@ TEST_F(JniInternalTest, GetStaticMethodID) {
EXPECT_FALSE(env_->ExceptionCheck());
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- method = env_->GetStaticMethodID(nullptr, "valueOf", "(I)Ljava/lang/String;");
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("java_class == null");
- method = env_->GetStaticMethodID(jlstring, nullptr, "(I)Ljava/lang/String;");
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("name == null");
- method = env_->GetStaticMethodID(jlstring, "valueOf", nullptr);
- EXPECT_EQ(nullptr, method);
- check_jni_abort_catcher.Check("sig == null");
+ GetStaticMethodIdBadArgumentTest(false);
+ GetStaticMethodIdBadArgumentTest(true);
}
TEST_F(JniInternalTest, FromReflectedField_ToReflectedField) {
@@ -370,13 +794,8 @@ TEST_F(JniInternalTest, FromReflectedField_ToReflectedField) {
ASSERT_EQ(4, env_->GetIntField(s, fid2));
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- field = env_->ToReflectedField(c, nullptr, JNI_FALSE);
- EXPECT_EQ(field, nullptr);
- check_jni_abort_catcher.Check("fid == null");
- fid2 = env_->FromReflectedField(nullptr);
- ASSERT_EQ(fid2, nullptr);
- check_jni_abort_catcher.Check("jlr_field == null");
+ GetFromReflectedField_ToReflectedFieldBadArgumentTest(false);
+ GetFromReflectedField_ToReflectedFieldBadArgumentTest(true);
}
TEST_F(JniInternalTest, FromReflectedMethod_ToReflectedMethod) {
@@ -417,13 +836,8 @@ TEST_F(JniInternalTest, FromReflectedMethod_ToReflectedMethod) {
ASSERT_EQ(4, env_->CallIntMethod(s, mid2));
// Bad arguments.
- CheckJniAbortCatcher check_jni_abort_catcher;
- method = env_->ToReflectedMethod(c, nullptr, JNI_FALSE);
- EXPECT_EQ(method, nullptr);
- check_jni_abort_catcher.Check("mid == null");
- mid2 = env_->FromReflectedMethod(method);
- ASSERT_EQ(mid2, nullptr);
- check_jni_abort_catcher.Check("jlr_method == null");
+ GetFromReflectedMethod_ToReflectedMethodBadArgumentTest(false);
+ GetFromReflectedMethod_ToReflectedMethodBadArgumentTest(true);
}
static void BogusMethod() {
@@ -498,23 +912,11 @@ TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
}
EXPECT_FALSE(env_->ExceptionCheck());
- // Passing a class of null is a failure.
- {
- JNINativeMethod methods[] = { };
- EXPECT_EQ(env_->RegisterNatives(nullptr, methods, 0), JNI_ERR);
- check_jni_abort_catcher.Check("java_class == null");
- }
-
- // Passing methods as null is a failure.
- EXPECT_EQ(env_->RegisterNatives(jlobject, nullptr, 1), JNI_ERR);
- check_jni_abort_catcher.Check("methods == null");
-
- // Unregisters null is a failure.
- EXPECT_EQ(env_->UnregisterNatives(nullptr), JNI_ERR);
- check_jni_abort_catcher.Check("java_class == null");
-
// Unregistering a class with no natives is a warning.
EXPECT_EQ(env_->UnregisterNatives(jlnsme), JNI_OK);
+
+ RegisterAndUnregisterNativesBadArguments(false, &check_jni_abort_catcher);
+ RegisterAndUnregisterNativesBadArguments(true, &check_jni_abort_catcher);
}
#define EXPECT_PRIMITIVE_ARRAY(new_fn, \
@@ -528,6 +930,7 @@ TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
\
{ \
CheckJniAbortCatcher jni_abort_catcher; \
+ down_cast<JNIEnvExt*>(env_)->SetCheckJniEnabled(false); \
/* Allocate a negative-sized array and check it has the right failure type. */ \
EXPECT_EQ(env_->new_fn(-1), nullptr); \
jni_abort_catcher.Check("negative array length: -1"); \
@@ -550,6 +953,7 @@ TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
jni_abort_catcher.Check("buf == null"); \
env_->set_region_fn(a, 0, size, nullptr); \
jni_abort_catcher.Check("buf == null"); \
+ down_cast<JNIEnvExt*>(env_)->SetCheckJniEnabled(true); \
} \
/* Allocate an array and check it has the right type and length. */ \
scalar_type ## Array a = env_->new_fn(size); \
@@ -654,189 +1058,28 @@ TEST_F(JniInternalTest, ShortArrays) {
}
TEST_F(JniInternalTest, GetPrimitiveArrayElementsOfWrongType) {
- CheckJniAbortCatcher jni_abort_catcher;
- jbooleanArray array = env_->NewBooleanArray(10);
- jboolean is_copy;
- EXPECT_EQ(env_->GetByteArrayElements(reinterpret_cast<jbyteArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get byte primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetShortArrayElements(reinterpret_cast<jshortArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get short primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetCharArrayElements(reinterpret_cast<jcharArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get char primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetIntArrayElements(reinterpret_cast<jintArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get int primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetLongArrayElements(reinterpret_cast<jlongArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get long primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetFloatArrayElements(reinterpret_cast<jfloatArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get float primitive array elements with an object of type boolean[]");
- EXPECT_EQ(env_->GetDoubleArrayElements(reinterpret_cast<jdoubleArray>(array), &is_copy), nullptr);
- jni_abort_catcher.Check(
- "attempt to get double primitive array elements with an object of type boolean[]");
- jbyteArray array2 = env_->NewByteArray(10);
- EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), &is_copy),
- nullptr);
- jni_abort_catcher.Check(
- "attempt to get boolean primitive array elements with an object of type byte[]");
- jobject object = env_->NewStringUTF("Test String");
- EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), &is_copy),
- nullptr);
- jni_abort_catcher.Check(
- "attempt to get boolean primitive array elements with an object of type java.lang.String");
+ GetPrimitiveArrayElementsOfWrongType(false);
+ GetPrimitiveArrayElementsOfWrongType(true);
}
TEST_F(JniInternalTest, ReleasePrimitiveArrayElementsOfWrongType) {
- CheckJniAbortCatcher jni_abort_catcher;
- jbooleanArray array = env_->NewBooleanArray(10);
- ASSERT_TRUE(array != nullptr);
- jboolean is_copy;
- jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
- ASSERT_TRUE(elements != nullptr);
- env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
- reinterpret_cast<jbyte*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release byte primitive array elements with an object of type boolean[]");
- env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
- reinterpret_cast<jshort*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release short primitive array elements with an object of type boolean[]");
- env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
- reinterpret_cast<jchar*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release char primitive array elements with an object of type boolean[]");
- env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
- reinterpret_cast<jint*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release int primitive array elements with an object of type boolean[]");
- env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
- reinterpret_cast<jlong*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release long primitive array elements with an object of type boolean[]");
- env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
- reinterpret_cast<jfloat*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release float primitive array elements with an object of type boolean[]");
- env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
- reinterpret_cast<jdouble*>(elements), 0);
- jni_abort_catcher.Check(
- "attempt to release double primitive array elements with an object of type boolean[]");
- jbyteArray array2 = env_->NewByteArray(10);
- env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), elements, 0);
- jni_abort_catcher.Check(
- "attempt to release boolean primitive array elements with an object of type byte[]");
- jobject object = env_->NewStringUTF("Test String");
- env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), elements, 0);
- jni_abort_catcher.Check(
- "attempt to release boolean primitive array elements with an object of type "
- "java.lang.String");
+ ReleasePrimitiveArrayElementsOfWrongType(false);
+ ReleasePrimitiveArrayElementsOfWrongType(true);
}
+
TEST_F(JniInternalTest, GetReleasePrimitiveArrayCriticalOfWrongType) {
- CheckJniAbortCatcher jni_abort_catcher;
- jobject object = env_->NewStringUTF("Test String");
- jboolean is_copy;
- void* elements = env_->GetPrimitiveArrayCritical(reinterpret_cast<jarray>(object), &is_copy);
- jni_abort_catcher.Check("expected primitive array, given java.lang.String");
- env_->ReleasePrimitiveArrayCritical(reinterpret_cast<jarray>(object), elements, 0);
- jni_abort_catcher.Check("expected primitive array, given java.lang.String");
+ GetReleasePrimitiveArrayCriticalOfWrongType(false);
+ GetReleasePrimitiveArrayCriticalOfWrongType(true);
}
TEST_F(JniInternalTest, GetPrimitiveArrayRegionElementsOfWrongType) {
- CheckJniAbortCatcher jni_abort_catcher;
- constexpr size_t kLength = 10;
- jbooleanArray array = env_->NewBooleanArray(kLength);
- ASSERT_TRUE(array != nullptr);
- jboolean elements[kLength];
- env_->GetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
- reinterpret_cast<jbyte*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of byte primitive array elements with an object of type boolean[]");
- env_->GetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
- reinterpret_cast<jshort*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of short primitive array elements with an object of type boolean[]");
- env_->GetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
- reinterpret_cast<jchar*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of char primitive array elements with an object of type boolean[]");
- env_->GetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
- reinterpret_cast<jint*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of int primitive array elements with an object of type boolean[]");
- env_->GetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
- reinterpret_cast<jlong*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of long primitive array elements with an object of type boolean[]");
- env_->GetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
- reinterpret_cast<jfloat*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of float primitive array elements with an object of type boolean[]");
- env_->GetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
- reinterpret_cast<jdouble*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of double primitive array elements with an object of type boolean[]");
- jbyteArray array2 = env_->NewByteArray(10);
- env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
- reinterpret_cast<jboolean*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of boolean primitive array elements with an object of type byte[]");
- jobject object = env_->NewStringUTF("Test String");
- env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
- reinterpret_cast<jboolean*>(elements));
- jni_abort_catcher.Check(
- "attempt to get region of boolean primitive array elements with an object of type "
- "java.lang.String");
+ GetPrimitiveArrayRegionElementsOfWrongType(false);
+ GetPrimitiveArrayRegionElementsOfWrongType(true);
}
TEST_F(JniInternalTest, SetPrimitiveArrayRegionElementsOfWrongType) {
- CheckJniAbortCatcher jni_abort_catcher;
- constexpr size_t kLength = 10;
- jbooleanArray array = env_->NewBooleanArray(kLength);
- ASSERT_TRUE(array != nullptr);
- jboolean elements[kLength];
- env_->SetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
- reinterpret_cast<jbyte*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of byte primitive array elements with an object of type boolean[]");
- env_->SetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
- reinterpret_cast<jshort*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of short primitive array elements with an object of type boolean[]");
- env_->SetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
- reinterpret_cast<jchar*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of char primitive array elements with an object of type boolean[]");
- env_->SetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
- reinterpret_cast<jint*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of int primitive array elements with an object of type boolean[]");
- env_->SetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
- reinterpret_cast<jlong*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of long primitive array elements with an object of type boolean[]");
- env_->SetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
- reinterpret_cast<jfloat*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of float primitive array elements with an object of type boolean[]");
- env_->SetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
- reinterpret_cast<jdouble*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of double primitive array elements with an object of type boolean[]");
- jbyteArray array2 = env_->NewByteArray(10);
- env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
- reinterpret_cast<jboolean*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of boolean primitive array elements with an object of type byte[]");
- jobject object = env_->NewStringUTF("Test String");
- env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
- reinterpret_cast<jboolean*>(elements));
- jni_abort_catcher.Check(
- "attempt to set region of boolean primitive array elements with an object of type "
- "java.lang.String");
+ SetPrimitiveArrayRegionElementsOfWrongType(false);
+ SetPrimitiveArrayRegionElementsOfWrongType(true);
}
TEST_F(JniInternalTest, NewObjectArray) {
@@ -857,12 +1100,8 @@ TEST_F(JniInternalTest, NewObjectArray) {
EXPECT_TRUE(env_->IsSameObject(env_->GetObjectArrayElement(a, 0), nullptr));
// Negative array length checks.
- CheckJniAbortCatcher jni_abort_catcher;
- env_->NewObjectArray(-1, element_class, nullptr);
- jni_abort_catcher.Check("negative array length: -1");
-
- env_->NewObjectArray(std::numeric_limits<jint>::min(), element_class, nullptr);
- jni_abort_catcher.Check("negative array length: -2147483648");
+ NewObjectArrayBadArguments(false);
+ NewObjectArrayBadArguments(true);
}
TEST_F(JniInternalTest, NewObjectArrayWithPrimitiveClasses) {
@@ -872,6 +1111,7 @@ TEST_F(JniInternalTest, NewObjectArrayWithPrimitiveClasses) {
};
ASSERT_EQ(strlen(primitive_descriptors), arraysize(primitive_names));
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
CheckJniAbortCatcher jni_abort_catcher;
for (size_t i = 0; i < strlen(primitive_descriptors); ++i) {
env_->NewObjectArray(0, nullptr, nullptr);
@@ -881,6 +1121,16 @@ TEST_F(JniInternalTest, NewObjectArrayWithPrimitiveClasses) {
std::string error_msg(StringPrintf("not an object type: %s", primitive_names[i]));
jni_abort_catcher.Check(error_msg.c_str());
}
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ for (size_t i = 0; i < strlen(primitive_descriptors); ++i) {
+ env_->NewObjectArray(0, nullptr, nullptr);
+ jni_abort_catcher.Check("NewObjectArray received NULL jclass");
+ jclass primitive_class = GetPrimitiveClass(primitive_descriptors[i]);
+ env_->NewObjectArray(1, primitive_class, nullptr);
+ std::string error_msg(StringPrintf("not an object type: %s", primitive_names[i]));
+ jni_abort_catcher.Check(error_msg.c_str());
+ }
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, NewObjectArrayWithInitialValue) {
@@ -940,8 +1190,13 @@ TEST_F(JniInternalTest, GetSuperclass) {
// Null as class should fail.
CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
EXPECT_EQ(env_->GetSuperclass(nullptr), nullptr);
jni_abort_catcher.Check("java_class == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(env_->GetSuperclass(nullptr), nullptr);
+ jni_abort_catcher.Check("GetSuperclass received NULL jclass");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, IsAssignableFrom) {
@@ -950,15 +1205,42 @@ TEST_F(JniInternalTest, IsAssignableFrom) {
jclass string_class = env_->FindClass("java/lang/String");
ASSERT_NE(string_class, nullptr);
- ASSERT_TRUE(env_->IsAssignableFrom(object_class, string_class));
- ASSERT_FALSE(env_->IsAssignableFrom(string_class, object_class));
+ // A superclass is assignable from an instance of its
+ // subclass but not vice versa.
+ ASSERT_TRUE(env_->IsAssignableFrom(string_class, object_class));
+ ASSERT_FALSE(env_->IsAssignableFrom(object_class, string_class));
+
+ jclass charsequence_interface = env_->FindClass("java/lang/CharSequence");
+ ASSERT_NE(charsequence_interface, nullptr);
+
+ // An interface is assignable from an instance of an implementing
+ // class but not vice versa.
+ ASSERT_TRUE(env_->IsAssignableFrom(string_class, charsequence_interface));
+ ASSERT_FALSE(env_->IsAssignableFrom(charsequence_interface, string_class));
+
+ // Check that arrays are covariant.
+ jclass string_array_class = env_->FindClass("[Ljava/lang/String;");
+ ASSERT_NE(string_array_class, nullptr);
+ jclass object_array_class = env_->FindClass("[Ljava/lang/Object;");
+ ASSERT_NE(object_array_class, nullptr);
+ ASSERT_TRUE(env_->IsAssignableFrom(string_array_class, object_array_class));
+ ASSERT_FALSE(env_->IsAssignableFrom(object_array_class, string_array_class));
+
+ // Primitive types are tested in 004-JniTest.
// Null as either class should fail.
CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
EXPECT_EQ(env_->IsAssignableFrom(nullptr, string_class), JNI_FALSE);
jni_abort_catcher.Check("java_class1 == null");
EXPECT_EQ(env_->IsAssignableFrom(object_class, nullptr), JNI_FALSE);
jni_abort_catcher.Check("java_class2 == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(env_->IsAssignableFrom(nullptr, string_class), JNI_FALSE);
+ jni_abort_catcher.Check("IsAssignableFrom received NULL jclass");
+ EXPECT_EQ(env_->IsAssignableFrom(object_class, nullptr), JNI_FALSE);
+ jni_abort_catcher.Check("IsAssignableFrom received NULL jclass");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, GetObjectRefType) {
@@ -1043,10 +1325,17 @@ TEST_F(JniInternalTest, NewStringNullCharsNonzeroLength) {
TEST_F(JniInternalTest, NewStringNegativeLength) {
CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
env_->NewString(nullptr, -1);
jni_abort_catcher.Check("char_count < 0: -1");
env_->NewString(nullptr, std::numeric_limits<jint>::min());
jni_abort_catcher.Check("char_count < 0: -2147483648");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ env_->NewString(nullptr, -1);
+ jni_abort_catcher.Check("negative jsize: -1");
+ env_->NewString(nullptr, std::numeric_limits<jint>::min());
+ jni_abort_catcher.Check("negative jsize: -2147483648");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, GetStringLength_GetStringUTFLength) {
@@ -1104,10 +1393,17 @@ TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
TEST_F(JniInternalTest, GetStringUTFChars_ReleaseStringUTFChars) {
// Passing in a nullptr jstring is ignored normally, but caught by -Xcheck:jni.
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ {
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_EQ(env_->GetStringUTFChars(nullptr, nullptr), nullptr);
+ }
{
CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
EXPECT_EQ(env_->GetStringUTFChars(nullptr, nullptr), nullptr);
- check_jni_abort_catcher.Check("GetStringUTFChars received null jstring");
+ check_jni_abort_catcher.Check("GetStringUTFChars received NULL jstring");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
jstring s = env_->NewStringUTF("hello");
@@ -1202,10 +1498,17 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
// Null as array should fail.
CheckJniAbortCatcher jni_abort_catcher;
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
EXPECT_EQ(nullptr, env_->GetObjectArrayElement(nullptr, 0));
jni_abort_catcher.Check("java_array == null");
env_->SetObjectArrayElement(nullptr, 0, nullptr);
jni_abort_catcher.Check("java_array == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(nullptr, env_->GetObjectArrayElement(nullptr, 0));
+ jni_abort_catcher.Check("jarray was NULL");
+ env_->SetObjectArrayElement(nullptr, 0, nullptr);
+ jni_abort_catcher.Check("jarray was NULL");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
#define EXPECT_STATIC_PRIMITIVE_FIELD(type, field_name, sig, value1, value2) \
@@ -1217,15 +1520,28 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
env_->SetStatic ## type ## Field(c, fid, value2); \
EXPECT_EQ(value2, env_->GetStatic ## type ## Field(c, fid)); \
\
+ bool old_check_jni = vm_->SetCheckJniEnabled(false); \
+ { \
+ CheckJniAbortCatcher jni_abort_catcher; \
+ env_->GetStatic ## type ## Field(nullptr, fid); \
+ env_->SetStatic ## type ## Field(nullptr, fid, value1); \
+ } \
CheckJniAbortCatcher jni_abort_catcher; \
- env_->GetStatic ## type ## Field(nullptr, fid); \
- jni_abort_catcher.Check("received null jclass"); \
- env_->SetStatic ## type ## Field(nullptr, fid, value1); \
- jni_abort_catcher.Check("received null jclass"); \
env_->GetStatic ## type ## Field(c, nullptr); \
jni_abort_catcher.Check("fid == null"); \
env_->SetStatic ## type ## Field(c, nullptr, value1); \
jni_abort_catcher.Check("fid == null"); \
+ \
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true)); \
+ env_->GetStatic ## type ## Field(nullptr, fid); \
+ jni_abort_catcher.Check("received NULL jclass"); \
+ env_->SetStatic ## type ## Field(nullptr, fid, value1); \
+ jni_abort_catcher.Check("received NULL jclass"); \
+ env_->GetStatic ## type ## Field(c, nullptr); \
+ jni_abort_catcher.Check("jfieldID was NULL"); \
+ env_->SetStatic ## type ## Field(c, nullptr, value1); \
+ jni_abort_catcher.Check("jfieldID was NULL"); \
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni)); \
} while (false)
#define EXPECT_PRIMITIVE_FIELD(instance, type, field_name, sig, value1, value2) \
@@ -1237,6 +1553,7 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
env_->Set ## type ## Field(instance, fid, value2); \
EXPECT_EQ(value2, env_->Get ## type ## Field(instance, fid)); \
\
+ bool old_check_jni = vm_->SetCheckJniEnabled(false); \
CheckJniAbortCatcher jni_abort_catcher; \
env_->Get ## type ## Field(nullptr, fid); \
jni_abort_catcher.Check("obj == null"); \
@@ -1246,6 +1563,16 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
jni_abort_catcher.Check("fid == null"); \
env_->Set ## type ## Field(instance, nullptr, value1); \
jni_abort_catcher.Check("fid == null"); \
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true)); \
+ env_->Get ## type ## Field(nullptr, fid); \
+ jni_abort_catcher.Check("field operation on NULL object:"); \
+ env_->Set ## type ## Field(nullptr, fid, value1); \
+ jni_abort_catcher.Check("field operation on NULL object:"); \
+ env_->Get ## type ## Field(instance, nullptr); \
+ jni_abort_catcher.Check("jfieldID was NULL"); \
+ env_->Set ## type ## Field(instance, nullptr, value1); \
+ jni_abort_catcher.Check("jfieldID was NULL"); \
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni)); \
} while (false)
@@ -1337,12 +1664,17 @@ TEST_F(JniInternalTest, DeleteLocalRef) {
// Currently, deleting an already-deleted reference is just a CheckJNI warning.
{
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ {
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ env_->DeleteLocalRef(s);
+ }
CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
env_->DeleteLocalRef(s);
-
- std::string expected(StringPrintf("native code passing in reference to "
- "invalid local reference: %p", s));
+ std::string expected(StringPrintf("jobject is an invalid local reference: %p", s));
check_jni_abort_catcher.Check(expected.c_str());
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
s = env_->NewStringUTF("");
@@ -1433,12 +1765,17 @@ TEST_F(JniInternalTest, DeleteGlobalRef) {
// Currently, deleting an already-deleted reference is just a CheckJNI warning.
{
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ {
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ env_->DeleteGlobalRef(o);
+ }
CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
env_->DeleteGlobalRef(o);
-
- std::string expected(StringPrintf("native code passing in reference to "
- "invalid global reference: %p", o));
+ std::string expected(StringPrintf("jobject is an invalid global reference: %p", o));
check_jni_abort_catcher.Check(expected.c_str());
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
jobject o1 = env_->NewGlobalRef(s);
@@ -1478,12 +1815,17 @@ TEST_F(JniInternalTest, DeleteWeakGlobalRef) {
// Currently, deleting an already-deleted reference is just a CheckJNI warning.
{
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ {
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ env_->DeleteWeakGlobalRef(o);
+ }
CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
env_->DeleteWeakGlobalRef(o);
-
- std::string expected(StringPrintf("native code passing in reference to "
- "invalid weak global reference: %p", o));
+ std::string expected(StringPrintf("jobject is an invalid weak global reference: %p", o));
check_jni_abort_catcher.Check(expected.c_str());
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
jobject o1 = env_->NewWeakGlobalRef(s);
@@ -1502,8 +1844,6 @@ TEST_F(JniInternalTest, ExceptionDescribe) {
}
TEST_F(JniInternalTest, Throw) {
- EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
-
jclass exception_class = env_->FindClass("java/lang/RuntimeException");
ASSERT_TRUE(exception_class != nullptr);
jthrowable exception = reinterpret_cast<jthrowable>(env_->AllocObject(exception_class));
@@ -1514,11 +1854,18 @@ TEST_F(JniInternalTest, Throw) {
jthrowable thrown_exception = env_->ExceptionOccurred();
env_->ExceptionClear();
EXPECT_TRUE(env_->IsSameObject(exception, thrown_exception));
-}
-TEST_F(JniInternalTest, ThrowNew) {
+ // Bad argument.
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
+ check_jni_abort_catcher.Check("Throw received NULL jthrowable");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
+}
+TEST_F(JniInternalTest, ThrowNew) {
jclass exception_class = env_->FindClass("java/lang/RuntimeException");
ASSERT_TRUE(exception_class != nullptr);
@@ -1535,6 +1882,16 @@ TEST_F(JniInternalTest, ThrowNew) {
thrown_exception = env_->ExceptionOccurred();
env_->ExceptionClear();
EXPECT_TRUE(env_->IsInstanceOf(thrown_exception, exception_class));
+
+ // Bad argument.
+ bool old_check_jni = vm_->SetCheckJniEnabled(false);
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ EXPECT_EQ(JNI_ERR, env_->ThrowNew(nullptr, nullptr));
+ check_jni_abort_catcher.Check("c == null");
+ EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+ EXPECT_EQ(JNI_ERR, env_->ThrowNew(nullptr, nullptr));
+ check_jni_abort_catcher.Check("ThrowNew received NULL jclass");
+ EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
}
TEST_F(JniInternalTest, NewDirectBuffer_GetDirectBufferAddress_GetDirectBufferCapacity) {
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index ab86eaac7d..e585412d03 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -65,7 +65,7 @@ class LockWord {
kThinLockOwnerMask = (1 << kThinLockOwnerSize) - 1,
// Count in higher bits.
kThinLockCountShift = kThinLockOwnerSize + kThinLockOwnerShift,
- kThinLockCountMask = (1 << kThinLockCountShift) - 1,
+ kThinLockCountMask = (1 << kThinLockCountSize) - 1,
kThinLockMaxCount = kThinLockCountMask,
// State in the highest bits.
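The one-line fix above matters because a bit-field mask must be derived from the field's width (kThinLockCountSize), not from its shift, which also spans the owner bits beneath it. A self-contained illustration with made-up bit widths (the actual ART constants may differ):

#include <cstdint>

// Hypothetical layout: a 16-bit owner at shift 0 with a 12-bit count above it.
constexpr uint32_t kOwnerSize  = 16;
constexpr uint32_t kOwnerShift = 0;
constexpr uint32_t kCountSize  = 12;
constexpr uint32_t kCountShift = kOwnerShift + kOwnerSize;   // 16

constexpr uint32_t kBuggyMask   = (1u << kCountShift) - 1;   // 0x0000ffff: covers the owner field
constexpr uint32_t kCorrectMask = (1u << kCountSize) - 1;    // 0x00000fff: covers only the count
static_assert(kBuggyMask != kCorrectMask, "mask must come from the field size, not the shift");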
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 1074253fea..c281b2200f 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -130,8 +130,67 @@ static uintptr_t GenerateNextMemPos() {
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif
+// Return true if the address range is contained in a single /proc/self/maps entry.
+static bool CheckOverlapping(uintptr_t begin,
+ uintptr_t end,
+ std::string* error_msg) {
+ std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+ if (map.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to build process map");
+ return false;
+ }
+ for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+ if ((begin >= it->start && begin < it->end) // start of new within old
+ && (end > it->start && end <= it->end)) { // end of new within old
+ return true;
+ }
+ }
+ std::string maps;
+ ReadFileToString("/proc/self/maps", &maps);
+ *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
+ "any existing map:\n%s\n",
+ begin, end, maps.c_str());
+ return false;
+}
+
+// Return true if the address range does not conflict with any /proc/self/maps entry.
+static bool CheckNonOverlapping(uintptr_t begin,
+ uintptr_t end,
+ std::string* error_msg) {
+ std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+ if (map.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to build process map");
+ return false;
+ }
+ for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
+ if ((begin >= it->start && begin < it->end) // start of new within old
+ || (end > it->start && end < it->end) // end of new within old
+ || (begin <= it->start && end > it->end)) { // start/end of new includes all of old
+ std::ostringstream map_info;
+ map_info << std::make_pair(it, map->end());
+ *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
+ "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
+ begin, end,
+ static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
+ it->name.c_str(),
+ map_info.str().c_str());
+ return false;
+ }
+ }
+ return true;
+}
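CheckNonOverlapping reports a conflict when either endpoint of the requested range falls inside an existing map or when the request fully encloses one. A hedged standalone sketch of the same predicate, using plain integers in place of BacktraceMap entries:

#include <cstdint>

// Sketch of the conflict test above; start/map_end stand in for one
// /proc/self/maps entry, with both ranges treated as half-open [begin, end).
static bool RangesConflict(uintptr_t begin, uintptr_t end,
                           uintptr_t start, uintptr_t map_end) {
  return (begin >= start && begin < map_end)   // start of new within old
      || (end > start && end < map_end)        // end of new within old
      || (begin <= start && end > map_end);    // new range encloses old
}

// Example: a request for [0x3000, 0x5000) conflicts with an existing map
// [0x4000, 0x6000) because its end, 0x5000, lands inside that map.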
+
+// CheckMapRequest validates a non-MAP_FAILED mmap result against the
+// expected value, calling munmap if validation fails and giving the
+// reason in error_msg.
+//
+// If expected_ptr is nullptr, nothing is checked beyond the fact that
+// actual_ptr is not MAP_FAILED. However, if expected_ptr is non-null,
+// we check that actual_ptr == expected_ptr and, if not, report the
+// conflicting mapping in error_msg when one is found, or a generic
+// error otherwise.
static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
- std::ostringstream* error_msg) {
+ std::string* error_msg) {
// Handled first by caller for more specific error messages.
CHECK(actual_ptr != MAP_FAILED);
@@ -139,6 +198,10 @@ static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_co
return true;
}
+ uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
+ uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+ uintptr_t limit = expected + byte_count;
+
if (expected_ptr == actual_ptr) {
return true;
}
@@ -149,40 +212,19 @@ static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_co
PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
}
- uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
- uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
- uintptr_t limit = expected + byte_count;
-
- std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
- if (!map->Build()) {
- *error_msg << StringPrintf("Failed to build process map to determine why mmap returned "
- "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
-
+ if (!CheckNonOverlapping(expected, limit, error_msg)) {
return false;
}
- for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
- if ((expected >= it->start && expected < it->end) // start of new within old
- || (limit > it->start && limit < it->end) // end of new within old
- || (expected <= it->start && limit > it->end)) { // start/end of new includes all of old
- *error_msg
- << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
- "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
- expected, limit,
- static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
- it->name.c_str())
- << std::make_pair(it, map->end());
- return false;
- }
- }
- *error_msg << StringPrintf("Failed to mmap at expected address, mapped at "
- "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
+
+ *error_msg = StringPrintf("Failed to mmap at expected address, mapped at "
+ "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
return false;
}
-MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count, int prot,
+MemMap* MemMap::MapAnonymous(const char* name, byte* expected_ptr, size_t byte_count, int prot,
bool low_4gb, std::string* error_msg) {
if (byte_count == 0) {
- return new MemMap(name, nullptr, 0, nullptr, 0, prot);
+ return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
}
size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
@@ -222,11 +264,11 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
// 4GB.
if (low_4gb && (
// Start out of bounds.
- (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 ||
+ (reinterpret_cast<uintptr_t>(expected_ptr) >> 32) != 0 ||
// End out of bounds. For simplicity, this will fail for the last page of memory.
- (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) {
+ (reinterpret_cast<uintptr_t>(expected_ptr + page_aligned_byte_count) >> 32) != 0)) {
*error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
- expected, expected + page_aligned_byte_count);
+ expected_ptr, expected_ptr + page_aligned_byte_count);
return nullptr;
}
#endif
@@ -238,7 +280,7 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
#if USE_ART_LOW_4G_ALLOCATOR
// MAP_32BIT only available on x86_64.
void* actual = MAP_FAILED;
- if (low_4gb && expected == nullptr) {
+ if (low_4gb && expected_ptr == nullptr) {
bool first_run = true;
for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
@@ -294,18 +336,18 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
saved_errno = ENOMEM;
}
} else {
- actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+ actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
saved_errno = errno;
}
#else
#if defined(__LP64__)
- if (low_4gb && expected == nullptr) {
+ if (low_4gb && expected_ptr == nullptr) {
flags |= MAP_32BIT;
}
#endif
- void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
+ void* actual = mmap(expected_ptr, page_aligned_byte_count, prot, flags, fd.get(), 0);
saved_errno = errno;
#endif
@@ -314,44 +356,51 @@ MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count
ReadFileToString("/proc/self/maps", &maps);
*error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
- expected, page_aligned_byte_count, prot, flags, fd.get(),
+ expected_ptr, page_aligned_byte_count, prot, flags, fd.get(),
strerror(saved_errno), maps.c_str());
return nullptr;
}
std::ostringstream check_map_request_error_msg;
- if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
- *error_msg = check_map_request_error_msg.str();
+ if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
return nullptr;
}
return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
- page_aligned_byte_count, prot);
+ page_aligned_byte_count, prot, false);
}
-MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, int flags, int fd,
+MemMap* MemMap::MapFileAtAddress(byte* expected_ptr, size_t byte_count, int prot, int flags, int fd,
off_t start, bool reuse, const char* filename,
std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
+ uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
+ uintptr_t limit = expected + byte_count;
if (reuse) {
// reuse means it is okay that it overlaps an existing page mapping.
// Only use this if you actually made the page reservation yourself.
- CHECK(expected != nullptr);
+ CHECK(expected_ptr != nullptr);
+ if (!CheckOverlapping(expected, limit, error_msg)) {
+ return nullptr;
+ }
flags |= MAP_FIXED;
} else {
CHECK_EQ(0, flags & MAP_FIXED);
+ if (expected_ptr != nullptr && !CheckNonOverlapping(expected, limit, error_msg)) {
+ return nullptr;
+ }
}
if (byte_count == 0) {
- return new MemMap(filename, nullptr, 0, nullptr, 0, prot);
+ return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
}
// Adjust 'offset' to be page-aligned as required by mmap.
int page_offset = start % kPageSize;
off_t page_aligned_offset = start - page_offset;
// Adjust 'byte_count' to be page-aligned as we will map this anyway.
size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
- // The 'expected' is modified (if specified, ie non-null) to be page aligned to the file but not
- // necessarily to virtual memory. mmap will page align 'expected' for us.
- byte* page_aligned_expected = (expected == nullptr) ? nullptr : (expected - page_offset);
+ // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page aligned to the file but
+ // not necessarily to virtual memory. mmap will page align 'expected_ptr' for us.
+ byte* page_aligned_expected = (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);
byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
page_aligned_byte_count,
@@ -373,21 +422,22 @@ MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, in
return nullptr;
}
std::ostringstream check_map_request_error_msg;
- if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
- *error_msg = check_map_request_error_msg.str();
+ if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
return nullptr;
}
return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
- prot);
+ prot, reuse);
}
MemMap::~MemMap() {
if (base_begin_ == nullptr && base_size_ == 0) {
return;
}
- int result = munmap(base_begin_, base_size_);
- if (result == -1) {
- PLOG(FATAL) << "munmap failed";
+ if (!reuse_) {
+ int result = munmap(base_begin_, base_size_);
+ if (result == -1) {
+ PLOG(FATAL) << "munmap failed";
+ }
}
// Remove it from maps_.
@@ -405,9 +455,9 @@ MemMap::~MemMap() {
}
MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
- size_t base_size, int prot)
+ size_t base_size, int prot, bool reuse)
: name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
- prot_(prot) {
+ prot_(prot), reuse_(reuse) {
if (size_ == 0) {
CHECK(begin_ == nullptr);
CHECK(base_begin_ == nullptr);
@@ -437,7 +487,7 @@ MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
byte* new_base_end = new_end;
DCHECK_LE(new_base_end, old_base_end);
if (new_base_end == old_base_end) {
- return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot);
+ return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
}
size_ = new_end - reinterpret_cast<byte*>(begin_);
base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
@@ -489,7 +539,7 @@ MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
maps.c_str());
return nullptr;
}
- return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
+ return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}
void MemMap::MadviseDontNeedAndZero() {
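The bodies of the new CheckNonOverlapping/CheckOverlapping helpers are outside this hunk. A minimal sketch of the interval test they presumably centralize, reconstructed from the three conditions in the loop removed above (the helper name here is illustrative):

    // Sketch only: true when [expected, limit) does not intersect [map_start, map_end),
    // i.e. none of the three overlap cases from the removed BacktraceMap loop hold.
    static bool RegionsDisjoint(uintptr_t expected, uintptr_t limit,
                                uintptr_t map_start, uintptr_t map_end) {
      bool new_start_in_old = expected >= map_start && expected < map_end;
      bool new_end_in_old   = limit > map_start && limit < map_end;
      bool new_covers_old   = expected <= map_start && limit > map_end;
      return !(new_start_in_old || new_end_in_old || new_covers_old);
    }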
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index defa6a52fd..872c63b193 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -73,7 +73,9 @@ class MemMap {
// Map part of a file, taking care of non-page aligned offsets. The
// "start" offset is absolute, not relative. This version allows
- // requesting a specific address for the base of the mapping.
+ // requesting a specific address for the base of the
+ // mapping. "reuse" allows us to create a view into an existing
+ // mapping where we do not take ownership of the memory.
//
// On success, returns a MemMap instance. On failure, returns NULL.
static MemMap* MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd,
@@ -134,7 +136,7 @@ class MemMap {
private:
MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
- int prot) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
+ int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);
static void DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps)
LOCKS_EXCLUDED(Locks::mem_maps_lock_);
@@ -145,7 +147,7 @@ class MemMap {
static MemMap* GetLargestMemMapAt(void* address)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
- std::string name_;
+ const std::string name_;
byte* const begin_; // Start of data.
size_t size_; // Length of data.
@@ -153,6 +155,11 @@ class MemMap {
size_t base_size_; // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
int prot_; // Protection of the map.
+ // When reuse_ is true, this is just a view of an existing mapping
+ // and we do not take ownership and are not responsible for
+ // unmapping.
+ const bool reuse_;
+
#if USE_ART_LOW_4G_ALLOCATOR
static uintptr_t next_mem_pos_; // Next memory location to check for low_4g extent.
#endif
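A usage sketch of what the new reuse flag makes possible: mapping a file view on top of an address range that another MemMap already owns. Begin(), kReservationSize, file_size and fd below are placeholders, not names from this patch:

    // Sketch, assuming a reservation the caller made earlier.
    std::string error_msg;
    std::unique_ptr<MemMap> reservation(MemMap::MapAnonymous(
        "reservation", nullptr, kReservationSize, PROT_NONE, /*low_4gb=*/false, &error_msg));
    // reuse=true turns the result into a view: MAP_FIXED is applied over the reservation
    // and ~MemMap() skips munmap, so ownership of the pages stays with the reservation.
    std::unique_ptr<MemMap> view(MemMap::MapFileAtAddress(
        reservation->Begin(), file_size, PROT_READ, MAP_PRIVATE, fd, /*start=*/0,
        /*reuse=*/true, "file view", &error_msg));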
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index 849ab1c420..bab2e862b9 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -56,14 +56,31 @@ class MemoryRegion {
return ComputeInternalPointer<T>(offset);
}
+ // Load a single bit in the region. The bit at offset 0 is the least
+ // significant bit in the first byte.
+ bool LoadBit(uintptr_t bit_offset) const {
+ uint8_t bit_mask;
+ uint8_t byte = *ComputeBitPointer(bit_offset, &bit_mask);
+ return byte & bit_mask;
+ }
+
+ void StoreBit(uintptr_t bit_offset, bool value) const {
+ uint8_t bit_mask;
+ uint8_t* byte = ComputeBitPointer(bit_offset, &bit_mask);
+ if (value) {
+ *byte |= bit_mask;
+ } else {
+ *byte &= ~bit_mask;
+ }
+ }
+
void CopyFrom(size_t offset, const MemoryRegion& from) const;
// Compute a sub memory region based on an existing one.
- void Subregion(const MemoryRegion& from, uintptr_t offset, uintptr_t size) {
- CHECK_GE(from.size(), size);
- CHECK_LE(offset, from.size() - size);
- pointer_ = reinterpret_cast<void*>(from.start() + offset);
- size_ = size;
+ MemoryRegion Subregion(uintptr_t offset, uintptr_t size) const {
+ CHECK_GE(this->size(), size);
+ CHECK_LE(offset, this->size() - size);
+ return MemoryRegion(reinterpret_cast<void*>(start() + offset), size);
}
// Compute an extended memory region based on an existing one.
@@ -90,8 +107,6 @@ class MemoryRegion {
void* pointer_;
size_t size_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryRegion);
};
} // namespace art
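A short usage sketch of the additions: LoadBit/StoreBit index bits from the least significant bit of the first byte, and Subregion now returns a new value (which is why DISALLOW_COPY_AND_ASSIGN is dropped) instead of mutating the receiver. The pointer/size constructor is the one Subregion itself uses above:

    uint8_t buffer[4] = {0};
    MemoryRegion region(buffer, sizeof(buffer));
    region.StoreBit(0, true);                    // buffer[0] becomes 0x01
    region.StoreBit(9, true);                    // bit 1 of buffer[1], so buffer[1] becomes 0x02
    CHECK(region.LoadBit(9));
    MemoryRegion tail = region.Subregion(2, 2);  // a copyable view of bytes 2..3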
diff --git a/runtime/method_helper-inl.h b/runtime/method_helper-inl.h
index 42a60896a0..3a5056adce 100644
--- a/runtime/method_helper-inl.h
+++ b/runtime/method_helper-inl.h
@@ -19,13 +19,16 @@
#include "method_helper.h"
+#include "class_linker.h"
+#include "mirror/object_array.h"
#include "runtime.h"
+#include "thread-inl.h"
namespace art {
inline mirror::Class* MethodHelper::GetClassFromTypeIdx(uint16_t type_idx, bool resolve) {
mirror::ArtMethod* method = GetMethod();
- mirror::Class* type = method->GetDexCacheResolvedTypes()->Get(type_idx);
+ mirror::Class* type = method->GetDexCacheResolvedType(type_idx);
if (type == nullptr && resolve) {
type = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
@@ -33,6 +36,15 @@ inline mirror::Class* MethodHelper::GetClassFromTypeIdx(uint16_t type_idx, bool
return type;
}
+inline mirror::Class* MethodHelper::GetReturnType(bool resolve) {
+ mirror::ArtMethod* method = GetMethod();
+ const DexFile* dex_file = method->GetDexFile();
+ const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
+ const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
+ uint16_t return_type_idx = proto_id.return_type_idx_;
+ return GetClassFromTypeIdx(return_type_idx, resolve);
+}
+
inline mirror::String* MethodHelper::ResolveString(uint32_t string_idx) {
mirror::ArtMethod* method = GetMethod();
mirror::String* s = method->GetDexCacheStrings()->Get(string_idx);
diff --git a/runtime/method_helper.cc b/runtime/method_helper.cc
index 4b1b1daa9e..1bd2f9020c 100644
--- a/runtime/method_helper.cc
+++ b/runtime/method_helper.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "method_helper.h"
+#include "method_helper-inl.h"
#include "class_linker.h"
#include "dex_file-inl.h"
@@ -53,6 +53,32 @@ bool MethodHelper::HasSameNameAndSignature(MethodHelper* other) {
return dex_file->GetMethodSignature(mid) == other_dex_file->GetMethodSignature(other_mid);
}
+bool MethodHelper::HasSameSignatureWithDifferentClassLoaders(MethodHelper* other) {
+ if (UNLIKELY(GetReturnType() != other->GetReturnType())) {
+ return false;
+ }
+ const DexFile::TypeList* types = method_->GetParameterTypeList();
+ const DexFile::TypeList* other_types = other->method_->GetParameterTypeList();
+ if (types == nullptr) {
+ return (other_types == nullptr) || (other_types->Size() == 0);
+ } else if (UNLIKELY(other_types == nullptr)) {
+ return types->Size() == 0;
+ }
+ uint32_t num_types = types->Size();
+ if (UNLIKELY(num_types != other_types->Size())) {
+ return false;
+ }
+ for (uint32_t i = 0; i < num_types; ++i) {
+ mirror::Class* param_type = GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
+ mirror::Class* other_param_type =
+ other->GetClassFromTypeIdx(other_types->GetTypeItem(i).type_idx_);
+ if (UNLIKELY(param_type != other_param_type)) {
+ return false;
+ }
+ }
+ return true;
+}
+
uint32_t MethodHelper::FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method = GetMethod();
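A brief caller-side sketch of the out-of-lined check, following the StackHandleScope pattern used elsewhere in this patch; method_a and method_b are placeholder mirror::ArtMethod* values:

    StackHandleScope<2> hs(Thread::Current());
    MethodHelper mh(hs.NewHandle(method_a));
    MethodHelper other_mh(hs.NewHandle(method_b));
    // Each parameter and the return type are resolved through their own method's dex
    // cache, so equal Class pointers mean the signatures match across class loaders.
    bool same = mh.HasSameSignatureWithDifferentClassLoaders(&other_mh);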
diff --git a/runtime/method_helper.h b/runtime/method_helper.h
index 012695e4d1..62465be513 100644
--- a/runtime/method_helper.h
+++ b/runtime/method_helper.h
@@ -75,14 +75,7 @@ class MethodHelper {
// May cause thread suspension due to GetClassFromTypeIdx calling ResolveType; this caused a large
// number of bugs at call sites.
- mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = GetMethod();
- const DexFile* dex_file = method->GetDexFile();
- const DexFile::MethodId& method_id = dex_file->GetMethodId(method->GetDexMethodIndex());
- const DexFile::ProtoId& proto_id = dex_file->GetMethodPrototype(method_id);
- uint16_t return_type_idx = proto_id.return_type_idx_;
- return GetClassFromTypeIdx(return_type_idx, resolve);
- }
+ mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t NumArgs() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// "1 +" because the first in Args is the receiver.
@@ -115,31 +108,7 @@ class MethodHelper {
bool HasSameNameAndSignature(MethodHelper* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasSameSignatureWithDifferentClassLoaders(MethodHelper* other)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(GetReturnType() != other->GetReturnType())) {
- return false;
- }
- const DexFile::TypeList* types = method_->GetParameterTypeList();
- const DexFile::TypeList* other_types = other->method_->GetParameterTypeList();
- if (types == nullptr) {
- return (other_types == nullptr) || (other_types->Size() == 0);
- } else if (UNLIKELY(other_types == nullptr)) {
- return types->Size() == 0;
- }
- uint32_t num_types = types->Size();
- if (UNLIKELY(num_types != other_types->Size())) {
- return false;
- }
- for (uint32_t i = 0; i < num_types; ++i) {
- mirror::Class* param_type = GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
- mirror::Class* other_param_type =
- other->GetClassFromTypeIdx(other_types->GetTypeItem(i).type_idx_);
- if (UNLIKELY(param_type != other_param_type)) {
- return false;
- }
- }
- return true;
- }
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Class* GetClassFromTypeIdx(uint16_t type_idx, bool resolve = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index f3c8250db3..2c0ea367cc 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -166,8 +166,8 @@ inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_c
template<class T>
inline void PrimitiveArray<T>::VisitRoots(RootCallback* callback, void* arg) {
- if (array_class_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&array_class_), arg, 0, kRootStickyClass);
+ if (!array_class_.IsNull()) {
+ array_class_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 63f9860278..f54af855b4 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -124,7 +124,7 @@ void Array::ThrowArrayStoreException(Object* object) {
art::ThrowArrayStoreException(object->GetClass(), this->GetClass());
}
-template <typename T> Class* PrimitiveArray<T>::array_class_ = NULL;
+template <typename T> GcRoot<Class> PrimitiveArray<T>::array_class_;
// Explicitly instantiate all the primitive array types.
template class PrimitiveArray<uint8_t>; // BooleanArray
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 6588b57b8a..7af88d6d86 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -17,10 +17,10 @@
#ifndef ART_RUNTIME_MIRROR_ARRAY_H_
#define ART_RUNTIME_MIRROR_ARRAY_H_
+#include "gc_root.h"
#include "gc/allocator_type.h"
#include "object.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
namespace art {
@@ -159,27 +159,26 @@ class MANAGED PrimitiveArray : public Array {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void SetArrayClass(Class* array_class) {
- CHECK(array_class_ == nullptr);
+ CHECK(array_class_.IsNull());
CHECK(array_class != nullptr);
- array_class_ = array_class;
+ array_class_ = GcRoot<Class>(array_class);
}
static Class* GetArrayClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(array_class_ != nullptr);
- return ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &array_class_);
+ DCHECK(!array_class_.IsNull());
+ return array_class_.Read();
}
static void ResetArrayClass() {
- CHECK(array_class_ != nullptr);
- array_class_ = nullptr;
+ CHECK(!array_class_.IsNull());
+ array_class_ = GcRoot<Class>(nullptr);
}
static void VisitRoots(RootCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- static Class* array_class_;
+ static GcRoot<Class> array_class_;
DISALLOW_IMPLICIT_CONSTRUCTORS(PrimitiveArray);
};
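The same raw-Class*-to-GcRoot<Class> conversion repeats for ArtField, ArtMethod, Class, Reference, StackTraceElement, String and Throwable below. gc_root.h itself is not part of this hunk; a rough sketch of the wrapper shape implied by the calls in this patch, keeping the old callback-based visit as the body of VisitRoot:

    // Sketch only; the real type is GcRoot<> in gc_root.h. RootCallback/RootType are the
    // existing types from object_callbacks.h used by the removed callback(...) lines.
    template <typename MirrorType>
    class GcRootSketch {
     public:
      GcRootSketch() : root_(nullptr) {}
      explicit GcRootSketch(MirrorType* ref) : root_(ref) {}
      bool IsNull() const { return root_ == nullptr; }
      MirrorType* Read() const { return root_; }  // the real Read() applies a read barrier
      void VisitRoot(RootCallback* callback, void* arg, uint32_t tid, RootType root_type) {
        callback(reinterpret_cast<mirror::Object**>(&root_), arg, tid, root_type);
      }
     private:
      MirrorType* root_;
    };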
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index da21dfef06..3c7c6ce39a 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -29,7 +29,7 @@ namespace art {
namespace mirror {
// TODO: Get global references for these
-Class* ArtField::java_lang_reflect_ArtField_ = NULL;
+GcRoot<Class> ArtField::java_lang_reflect_ArtField_;
ArtField* ArtField::FromReflectedField(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_field) {
@@ -40,14 +40,14 @@ ArtField* ArtField::FromReflectedField(const ScopedObjectAccessAlreadyRunnable&
}
void ArtField::SetClass(Class* java_lang_reflect_ArtField) {
- CHECK(java_lang_reflect_ArtField_ == NULL);
+ CHECK(java_lang_reflect_ArtField_.IsNull());
CHECK(java_lang_reflect_ArtField != NULL);
- java_lang_reflect_ArtField_ = java_lang_reflect_ArtField;
+ java_lang_reflect_ArtField_ = GcRoot<Class>(java_lang_reflect_ArtField);
}
void ArtField::ResetClass() {
- CHECK(java_lang_reflect_ArtField_ != NULL);
- java_lang_reflect_ArtField_ = NULL;
+ CHECK(!java_lang_reflect_ArtField_.IsNull());
+ java_lang_reflect_ArtField_ = GcRoot<Class>(nullptr);
}
void ArtField::SetOffset(MemberOffset num_bytes) {
@@ -64,9 +64,8 @@ void ArtField::SetOffset(MemberOffset num_bytes) {
}
void ArtField::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_reflect_ArtField_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_reflect_ArtField_), arg, 0,
- kRootStickyClass);
+ if (!java_lang_reflect_ArtField_.IsNull()) {
+ java_lang_reflect_ArtField_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h
index 741c6eb8a8..f3dfa15004 100644
--- a/runtime/mirror/art_field.h
+++ b/runtime/mirror/art_field.h
@@ -19,11 +19,12 @@
#include <jni.h>
+#include "gc_root.h"
#include "modifiers.h"
#include "object.h"
#include "object_callbacks.h"
#include "primitive.h"
-#include "read_barrier.h"
+#include "read_barrier_option.h"
namespace art {
@@ -135,9 +136,8 @@ class MANAGED ArtField FINAL : public Object {
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
static Class* GetJavaLangReflectArtField() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_reflect_ArtField_ != nullptr);
- return ReadBarrier::BarrierForRoot<mirror::Class, kReadBarrierOption>(
- &java_lang_reflect_ArtField_);
+ DCHECK(!java_lang_reflect_ArtField_.IsNull());
+ return java_lang_reflect_ArtField_.Read<kReadBarrierOption>();
}
static void SetClass(Class* java_lang_reflect_ArtField);
@@ -180,7 +180,7 @@ class MANAGED ArtField FINAL : public Object {
// Offset of field within an instance or in the Class' static fields
uint32_t offset_;
- static Class* java_lang_reflect_ArtField_;
+ static GcRoot<Class> java_lang_reflect_ArtField_;
friend struct art::ArtFieldOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtField);
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 01b05a6e6f..0dd158822b 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -19,6 +19,8 @@
#include "art_method.h"
+#include "art_field.h"
+#include "class.h"
#include "class_linker.h"
#include "dex_cache.h"
#include "dex_file.h"
@@ -41,9 +43,8 @@ inline uint32_t ArtMethod::ClassSize() {
template<ReadBarrierOption kReadBarrierOption>
inline Class* ArtMethod::GetJavaLangReflectArtMethod() {
- DCHECK(java_lang_reflect_ArtMethod_ != nullptr);
- return ReadBarrier::BarrierForRoot<mirror::Class, kReadBarrierOption>(
- &java_lang_reflect_ArtMethod_);
+ DCHECK(!java_lang_reflect_ArtMethod_.IsNull());
+ return java_lang_reflect_ArtMethod_.Read<kReadBarrierOption>();
}
inline Class* ArtMethod::GetDeclaringClass() {
@@ -88,11 +89,60 @@ inline ObjectArray<ArtMethod>* ArtMethod::GetDexCacheResolvedMethods() {
OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_));
}
+inline ArtMethod* ArtMethod::GetDexCacheResolvedMethod(uint16_t method_index) {
+ ArtMethod* method = GetDexCacheResolvedMethods()->Get(method_index);
+ if (method != nullptr && !method->GetDeclaringClass()->IsErroneous()) {
+ return method;
+ } else {
+ return nullptr;
+ }
+}
+
+inline void ArtMethod::SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method) {
+ GetDexCacheResolvedMethods()->Set<false>(method_idx, new_method);
+}
+
+inline bool ArtMethod::HasDexCacheResolvedMethods() {
+ return GetDexCacheResolvedMethods() != nullptr;
+}
+
+inline bool ArtMethod::HasSameDexCacheResolvedMethods(ObjectArray<ArtMethod>* other_cache) {
+ return GetDexCacheResolvedMethods() == other_cache;
+}
+
+inline bool ArtMethod::HasSameDexCacheResolvedMethods(ArtMethod* other) {
+ return GetDexCacheResolvedMethods() == other->GetDexCacheResolvedMethods();
+}
+
+
inline ObjectArray<Class>* ArtMethod::GetDexCacheResolvedTypes() {
return GetFieldObject<ObjectArray<Class>>(
OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_));
}
+template <bool kWithCheck>
+inline Class* ArtMethod::GetDexCacheResolvedType(uint32_t type_index) {
+ Class* klass;
+ if (kWithCheck) {
+ klass = GetDexCacheResolvedTypes()->Get(type_index);
+ } else {
+ klass = GetDexCacheResolvedTypes()->GetWithoutChecks(type_index);
+ }
+ return (klass != nullptr && !klass->IsErroneous()) ? klass : nullptr;
+}
+
+inline bool ArtMethod::HasDexCacheResolvedTypes() {
+ return GetDexCacheResolvedTypes() != nullptr;
+}
+
+inline bool ArtMethod::HasSameDexCacheResolvedTypes(ObjectArray<Class>* other_cache) {
+ return GetDexCacheResolvedTypes() == other_cache;
+}
+
+inline bool ArtMethod::HasSameDexCacheResolvedTypes(ArtMethod* other) {
+ return GetDexCacheResolvedTypes() == other->GetDexCacheResolvedTypes();
+}
+
inline uint32_t ArtMethod::GetCodeSize() {
DCHECK(!IsRuntimeMethod() && !IsProxyMethod()) << PrettyMethod(this);
const void* code = EntryPointToCodePointer(GetEntryPointFromQuickCompiledCode());
@@ -397,7 +447,7 @@ inline const DexFile::CodeItem* ArtMethod::GetCodeItem() {
inline bool ArtMethod::IsResolvedTypeIdx(uint16_t type_idx) {
mirror::ArtMethod* method = GetInterfaceMethodIfProxy();
- return method->GetDexCacheResolvedTypes()->Get(type_idx) != nullptr;
+ return method->GetDexCacheResolvedType(type_idx) != nullptr;
}
inline int32_t ArtMethod::GetLineNumFromDexPC(uint32_t dex_pc) {
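The new accessors treat a resolved-but-erroneous class like an unresolved one. The caller-side effect, mirroring the MethodHelper::GetClassFromTypeIdx change earlier in this patch (method and type_idx are placeholders):

    // An erroneous class in the dex cache no longer counts as resolved, so callers
    // fall back to the class linker (or see the pending exception).
    mirror::Class* type = method->GetDexCacheResolvedType(type_idx);
    if (type == nullptr) {
      type = Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method);
      CHECK(type != nullptr || Thread::Current()->IsExceptionPending());
    }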
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 167f848824..8eacb1c3d7 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -47,7 +47,7 @@ extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Th
#endif
// TODO: get global references for these
-Class* ArtMethod::java_lang_reflect_ArtMethod_ = NULL;
+GcRoot<Class> ArtMethod::java_lang_reflect_ArtMethod_;
ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method) {
@@ -60,9 +60,8 @@ ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnabl
void ArtMethod::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_reflect_ArtMethod_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_reflect_ArtMethod_), arg, 0,
- kRootStickyClass);
+ if (!java_lang_reflect_ArtMethod_.IsNull()) {
+ java_lang_reflect_ArtMethod_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
@@ -80,14 +79,14 @@ InvokeType ArtMethod::GetInvokeType() {
}
void ArtMethod::SetClass(Class* java_lang_reflect_ArtMethod) {
- CHECK(java_lang_reflect_ArtMethod_ == NULL);
+ CHECK(java_lang_reflect_ArtMethod_.IsNull());
CHECK(java_lang_reflect_ArtMethod != NULL);
- java_lang_reflect_ArtMethod_ = java_lang_reflect_ArtMethod;
+ java_lang_reflect_ArtMethod_ = GcRoot<Class>(java_lang_reflect_ArtMethod);
}
void ArtMethod::ResetClass() {
- CHECK(java_lang_reflect_ArtMethod_ != NULL);
- java_lang_reflect_ArtMethod_ = NULL;
+ CHECK(!java_lang_reflect_ArtMethod_.IsNull());
+ java_lang_reflect_ArtMethod_ = GcRoot<Class>(nullptr);
}
void ArtMethod::SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) {
@@ -130,12 +129,11 @@ ArtMethod* ArtMethod::FindOverriddenMethod() {
Class* declaring_class = GetDeclaringClass();
Class* super_class = declaring_class->GetSuperClass();
uint16_t method_index = GetMethodIndex();
- ObjectArray<ArtMethod>* super_class_vtable = super_class->GetVTable();
ArtMethod* result = NULL;
// Did this method override a super class method? If so load the result from the super class'
// vtable
- if (super_class_vtable != NULL && method_index < super_class_vtable->GetLength()) {
- result = super_class_vtable->Get(method_index);
+ if (super_class->HasVTable() && method_index < super_class->GetVTableLength()) {
+ result = super_class->GetVTableEntry(method_index);
} else {
// Method didn't override superclass method so search interfaces
if (IsProxyMethod()) {
@@ -159,12 +157,12 @@ ArtMethod* ArtMethod::FindOverriddenMethod() {
}
}
}
-#ifndef NDEBUG
- StackHandleScope<2> hs(Thread::Current());
- MethodHelper result_mh(hs.NewHandle(result));
- MethodHelper this_mh(hs.NewHandle(this));
- DCHECK(result == NULL || this_mh.HasSameNameAndSignature(&result_mh));
-#endif
+ if (kIsDebugBuild) {
+ StackHandleScope<2> hs(Thread::Current());
+ MethodHelper result_mh(hs.NewHandle(result));
+ MethodHelper this_mh(hs.NewHandle(this));
+ DCHECK(result == nullptr || this_mh.HasSameNameAndSignature(&result_mh));
+ }
return result;
}
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 081bee1d88..4ebceff155 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_ART_METHOD_H_
#include "dex_file.h"
+#include "gc_root.h"
#include "invoke_type.h"
#include "modifiers.h"
#include "object.h"
@@ -215,13 +216,25 @@ class MANAGED ArtMethod FINAL : public Object {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_);
}
- ObjectArray<ArtMethod>* GetDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedMethods(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedMethods(ObjectArray<ArtMethod>* other_cache)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ObjectArray<Class>* GetDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ template <bool kWithCheck = true>
+ Class* GetDexCacheResolvedType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool HasSameDexCacheResolvedTypes(ObjectArray<Class>* other_cache)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Find the method that this method overrides
ArtMethod* FindOverriddenMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -514,9 +527,13 @@ class MANAGED ArtMethod FINAL : public Object {
// ifTable.
uint32_t method_index_;
- static Class* java_lang_reflect_ArtMethod_;
+ static GcRoot<Class> java_lang_reflect_ArtMethod_;
private:
+ ObjectArray<ArtMethod>* GetDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ObjectArray<Class>* GetDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
friend struct art::ArtMethodOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
};
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 329a984842..c3754d7967 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -161,6 +161,37 @@ inline void Class::SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method) {
CHECK(method == GetImTable()->Get(i));
}
+inline bool Class::HasVTable() {
+ return (GetVTable() != nullptr) || ShouldHaveEmbeddedImtAndVTable();
+}
+
+inline int32_t Class::GetVTableLength() {
+ if (ShouldHaveEmbeddedImtAndVTable()) {
+ return GetEmbeddedVTableLength();
+ }
+ return (GetVTable() != nullptr) ? GetVTable()->GetLength() : 0;
+}
+
+inline ArtMethod* Class::GetVTableEntry(uint32_t i) {
+ if (ShouldHaveEmbeddedImtAndVTable()) {
+ return GetEmbeddedVTableEntry(i);
+ }
+ return (GetVTable() != nullptr) ? GetVTable()->Get(i) : nullptr;
+}
+
+inline int32_t Class::GetEmbeddedVTableLength() {
+ return GetField32(EmbeddedVTableLengthOffset());
+}
+
+inline void Class::SetEmbeddedVTableLength(int32_t len) {
+ SetField32<false>(EmbeddedVTableLengthOffset(), len);
+}
+
+inline ArtMethod* Class::GetEmbeddedVTableEntry(uint32_t i) {
+ uint32_t offset = EmbeddedVTableOffset().Uint32Value() + i * sizeof(VTableEntry);
+ return GetFieldObject<mirror::ArtMethod>(MemberOffset(offset));
+}
+
inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) {
uint32_t offset = EmbeddedVTableOffset().Uint32Value() + i * sizeof(VTableEntry);
SetFieldObject<false>(MemberOffset(offset), method);
@@ -340,12 +371,12 @@ inline ArtMethod* Class::FindVirtualMethodForVirtual(ArtMethod* method) {
DCHECK(!method->GetDeclaringClass()->IsInterface() || method->IsMiranda());
// The argument method may be from a super class.
// Use the index to a potentially overridden one for this instance's class.
- return GetVTable()->Get(method->GetMethodIndex());
+ return GetVTableEntry(method->GetMethodIndex());
}
inline ArtMethod* Class::FindVirtualMethodForSuper(ArtMethod* method) {
DCHECK(!method->GetDeclaringClass()->IsInterface());
- return GetSuperClass()->GetVTable()->Get(method->GetMethodIndex());
+ return GetSuperClass()->GetVTableEntry(method->GetMethodIndex());
}
inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* method) {
@@ -534,13 +565,19 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
if (has_embedded_tables) {
uint32_t embedded_imt_size = kImtSize * sizeof(ImTableEntry);
uint32_t embedded_vtable_size = num_vtable_entries * sizeof(VTableEntry);
- size += embedded_imt_size + embedded_vtable_size;
+ size += embedded_imt_size +
+ sizeof(int32_t) /* vtable len */ +
+ embedded_vtable_size;
}
// Space used by reference statics.
size += num_ref_static_fields * sizeof(HeapReference<Object>);
// Possible pad for alignment.
- if (((size & 7) != 0) && (num_64bit_static_fields > 0) && (num_32bit_static_fields == 0)) {
+ if (((size & 7) != 0) && (num_64bit_static_fields > 0)) {
size += sizeof(uint32_t);
+ if (num_32bit_static_fields != 0) {
+ // Shuffle one 32 bit static field forward.
+ num_32bit_static_fields--;
+ }
}
// Space used for primitive static fields.
size += (num_32bit_static_fields * sizeof(uint32_t)) +
@@ -574,7 +611,10 @@ inline void Class::VisitEmbeddedImtAndVTable(const Visitor& visitor) {
pos += sizeof(ImTableEntry);
}
- count = ((GetVTable() != NULL) ? GetVTable()->GetLength() : 0);
+ // Skip vtable length.
+ pos += sizeof(int32_t);
+
+ count = GetEmbeddedVTableLength();
for (size_t i = 0; i < count; ++i) {
MemberOffset offset = MemberOffset(pos);
visitor(this, offset, true);
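Putting the new offsets together: an instantiable Class is now followed by the embedded IMT, a 32-bit vtable length (the new slot that VisitEmbeddedImtAndVTable skips and ComputeClassSize accounts for), and then the embedded vtable. A worked offset sketch, assuming kImtSize, ImTableEntry and VTableEntry are visible as declared in mirror/class.h:

    // Sketch: byte offset of embedded vtable entry |index| from the start of the Class,
    // matching EmbeddedVTableOffset() + index * sizeof(VTableEntry) above.
    size_t EmbeddedVTableEntryOffset(uint32_t index) {
      size_t imt_bytes = kImtSize * sizeof(mirror::Class::ImTableEntry);
      return sizeof(mirror::Class)   // ordinary Class fields
             + imt_bytes             // embedded interface method table
             + sizeof(int32_t)       // the new embedded vtable length slot
             + index * sizeof(mirror::Class::VTableEntry);
    }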
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index fadf80ebcf..f29ba73d56 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -36,24 +36,24 @@
namespace art {
namespace mirror {
-Class* Class::java_lang_Class_ = nullptr;
+GcRoot<Class> Class::java_lang_Class_;
void Class::SetClassClass(Class* java_lang_Class) {
- CHECK(java_lang_Class_ == nullptr)
- << ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(&java_lang_Class_)
+ CHECK(java_lang_Class_.IsNull())
+ << java_lang_Class_.Read()
<< " " << java_lang_Class;
CHECK(java_lang_Class != nullptr);
- java_lang_Class_ = java_lang_Class;
+ java_lang_Class_ = GcRoot<Class>(java_lang_Class);
}
void Class::ResetClass() {
- CHECK(java_lang_Class_ != nullptr);
- java_lang_Class_ = nullptr;
+ CHECK(!java_lang_Class_.IsNull());
+ java_lang_Class_ = GcRoot<Class>(nullptr);
}
void Class::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_Class_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_Class_), arg, 0, kRootStickyClass);
+ if (!java_lang_Class_.IsNull()) {
+ java_lang_Class_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
@@ -827,39 +827,67 @@ void Class::PopulateEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_
}
table = GetVTableDuringLinking();
- CHECK(table != nullptr);
+ CHECK(table != nullptr) << PrettyClass(this);
+ SetEmbeddedVTableLength(table->GetLength());
for (int32_t i = 0; i < table->GetLength(); i++) {
SetEmbeddedVTableEntry(i, table->Get(i));
}
+
+ SetImTable(nullptr);
+ // Keep java.lang.Object class's vtable around since it can easily be
+ // reused by array classes during their linking.
+ if (!IsObjectClass()) {
+ SetVTable(nullptr);
+ }
}
+// The pre-fence visitor for Class::CopyOf().
+class CopyClassVisitor {
+ public:
+ explicit CopyClassVisitor(Thread* self, Handle<mirror::Class>* orig,
+ size_t new_length, size_t copy_bytes)
+ : self_(self), orig_(orig), new_length_(new_length),
+ copy_bytes_(copy_bytes) {
+ }
+
+ void operator()(Object* obj, size_t usable_size) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(usable_size);
+ mirror::Class* new_class_obj = obj->AsClass();
+ mirror::Object::CopyObject(self_, new_class_obj, orig_->Get(), copy_bytes_);
+ new_class_obj->SetStatus(Class::kStatusResolving, self_);
+ new_class_obj->PopulateEmbeddedImtAndVTable();
+ new_class_obj->SetClassSize(new_length_);
+ }
+
+ private:
+ Thread* const self_;
+ Handle<mirror::Class>* const orig_;
+ const size_t new_length_;
+ const size_t copy_bytes_;
+ DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
+};
+
Class* Class::CopyOf(Thread* self, int32_t new_length) {
DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
// We may get copied by a compacting GC.
StackHandleScope<1> hs(self);
Handle<mirror::Class> h_this(hs.NewHandle(this));
gc::Heap* heap = Runtime::Current()->GetHeap();
- InitializeClassVisitor visitor(new_length);
+ // The num_bytes (3rd param) is sizeof(Class) as opposed to SizeOf()
+ // to skip copying the tail part that we will overwrite here.
+ CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class));
mirror::Object* new_class =
- kMovingClasses ? heap->AllocObject<true>(self, java_lang_Class_, new_length, visitor)
- : heap->AllocNonMovableObject<true>(self, java_lang_Class_, new_length, visitor);
+ kMovingClasses
+ ? heap->AllocObject<true>(self, java_lang_Class_.Read(), new_length, visitor)
+ : heap->AllocNonMovableObject<true>(self, java_lang_Class_.Read(), new_length, visitor);
if (UNLIKELY(new_class == nullptr)) {
CHECK(self->IsExceptionPending()); // Expect an OOME.
return NULL;
}
- mirror::Class* new_class_obj = new_class->AsClass();
- memcpy(new_class_obj, h_this.Get(), sizeof(Class));
-
- new_class_obj->SetStatus(kStatusResolving, self);
- new_class_obj->PopulateEmbeddedImtAndVTable();
- // Correct some fields.
- new_class_obj->SetLockWord(LockWord(), false);
- new_class_obj->SetClassSize(new_length);
-
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(new_class_obj);
- return new_class_obj;
+ return new_class->AsClass();
}
} // namespace mirror
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 648bddeca5..519685a92b 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_CLASS_H_
#include "dex_file.h"
+#include "gc_root.h"
#include "gc/allocator_type.h"
#include "invoke_type.h"
#include "modifiers.h"
@@ -25,7 +26,7 @@
#include "object_array.h"
#include "object_callbacks.h"
#include "primitive.h"
-#include "read_barrier.h"
+#include "read_barrier_option.h"
/*
* A magic value for refOffsets. Ignore the bits and walk the super
@@ -448,8 +449,14 @@ class MANAGED Class FINAL : public Object {
bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return !IsPrimitive() && GetSuperClass() == NULL;
}
+
+ bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
+ }
+
bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return (!IsPrimitive() && !IsInterface() && !IsAbstract()) || ((IsAbstract()) && IsArrayClass());
+ return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
+ ((IsAbstract()) && IsArrayClass());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -692,18 +699,34 @@ class MANAGED Class FINAL : public Object {
return MemberOffset(sizeof(Class));
}
- static MemberOffset EmbeddedVTableOffset() {
+ static MemberOffset EmbeddedVTableLengthOffset() {
return MemberOffset(sizeof(Class) + kImtSize * sizeof(mirror::Class::ImTableEntry));
}
+ static MemberOffset EmbeddedVTableOffset() {
+ return MemberOffset(sizeof(Class) + kImtSize * sizeof(mirror::Class::ImTableEntry) + sizeof(int32_t));
+ }
+
bool ShouldHaveEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return IsInstantiable();
}
+ bool HasVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
ArtMethod* GetEmbeddedImTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetEmbeddedImTableEntry(uint32_t i, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ int32_t GetVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ArtMethod* GetVTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ int32_t GetEmbeddedVTableLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void SetEmbeddedVTableLength(int32_t len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ ArtMethod* GetEmbeddedVTableEntry(uint32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PopulateEmbeddedImtAndVTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -920,9 +943,8 @@ class MANAGED Class FINAL : public Object {
}
static Class* GetJavaLangClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_Class_ != NULL);
- return ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &java_lang_Class_);
+ DCHECK(!java_lang_Class_.IsNull());
+ return java_lang_Class_.Read();
}
// Can't call this SetClass or else gets called instead of Object::SetClass in places.
@@ -1140,7 +1162,7 @@ class MANAGED Class FINAL : public Object {
uint32_t fields_[0];
// java.lang.Class
- static Class* java_lang_Class_;
+ static GcRoot<Class> java_lang_Class_;
friend struct art::ClassOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(Class);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 08cff999b1..d3fcb550c6 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -19,6 +19,8 @@
#include "dex_cache.h"
+#include "base/logging.h"
+#include "mirror/class.h"
#include "runtime.h"
namespace art {
@@ -41,6 +43,12 @@ inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx)
}
}
+inline void DexCache::SetResolvedType(uint32_t type_idx, Class* resolved) {
+ // TODO default transaction support.
+ DCHECK(resolved == nullptr || !resolved->IsErroneous());
+ GetResolvedTypes()->Set(type_idx, resolved);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index bfd603a185..3c947ab37b 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -17,7 +17,9 @@
#ifndef ART_RUNTIME_MIRROR_DEX_CACHE_H_
#define ART_RUNTIME_MIRROR_DEX_CACHE_H_
+#include "art_field.h"
#include "art_method.h"
+#include "class.h"
#include "object.h"
#include "object_array.h"
@@ -30,9 +32,6 @@ union JValue;
namespace mirror {
-class ArtField;
-class ArtMethod;
-class Class;
class String;
// C++ mirror of java.lang.DexCache.
@@ -103,11 +102,8 @@ class MANAGED DexCache FINAL : public Object {
return GetResolvedTypes()->Get(type_idx);
}
- void SetResolvedType(uint32_t type_idx, Class* resolved) ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // TODO default transaction support.
- GetResolvedTypes()->Set(type_idx, resolved);
- }
+ void SetResolvedType(uint32_t type_idx, Class* resolved)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ArtMethod* GetResolvedMethod(uint32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -118,7 +114,12 @@ class MANAGED DexCache FINAL : public Object {
ArtField* GetResolvedField(uint32_t field_idx) ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetResolvedFields()->Get(field_idx);
+ ArtField* field = GetResolvedFields()->Get(field_idx);
+ if (UNLIKELY(field == nullptr || field->GetDeclaringClass()->IsErroneous())) {
+ return nullptr;
+ } else {
+ return field;
+ }
}
void SetResolvedField(uint32_t field_idx, ArtField* resolved) ALWAYS_INLINE
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 961bc64819..3543654868 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -65,8 +65,8 @@ class CopyReferenceFieldsWithReadBarrierVisitor {
Object* const dest_obj_;
};
-static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src, size_t num_bytes)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+Object* Object::CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
+ size_t num_bytes) {
// Copy instance data. We assume memcpy copies by words.
// TODO: expose and use move32.
byte* src_bytes = reinterpret_cast<byte*>(src);
@@ -107,7 +107,7 @@ class CopyObjectVisitor {
void operator()(Object* obj, size_t usable_size) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
UNUSED(usable_size);
- CopyObject(self_, obj, orig_->Get(), num_bytes_);
+ Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
}
private:
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 4fae4704bf..a6b622719e 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -370,6 +370,13 @@ class MANAGED LOCKABLE Object {
// Generate an identity hash code.
static int32_t GenerateIdentityHashCode();
+ // A utility function that copies an object in a read barrier and
+ // write barrier-aware way. This is internally used by Clone() and
+ // Class::CopyOf().
+ static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
+ size_t num_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// The Class representing the type of the object.
HeapReference<Class> klass_;
// Monitor and hash code information.
@@ -386,6 +393,8 @@ class MANAGED LOCKABLE Object {
friend class art::ImageWriter;
friend class art::Monitor;
friend struct art::ObjectOffsets; // for verifying offset information
+ friend class CopyObjectVisitor; // for CopyObject().
+ friend class CopyClassVisitor; // for CopyObject().
DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index a7ea6c9c08..da3c36cb06 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -73,7 +73,12 @@ class ObjectTest : public CommonRuntimeTest {
}
};
-// Keep the assembly code in sync
+// Keep constants in sync.
+TEST_F(ObjectTest, Constants) {
+ EXPECT_EQ(kObjectReferenceSize, sizeof(mirror::HeapReference<mirror::Object>));
+}
+
+// Keep the assembly code constants in sync.
TEST_F(ObjectTest, AsmConstants) {
EXPECT_EQ(CLASS_OFFSET, Object::ClassOffset().Int32Value());
EXPECT_EQ(LOCK_WORD_OFFSET, Object::MonitorOffset().Int32Value());
diff --git a/runtime/mirror/reference.cc b/runtime/mirror/reference.cc
index 077cd4b913..c36bd980f9 100644
--- a/runtime/mirror/reference.cc
+++ b/runtime/mirror/reference.cc
@@ -19,23 +19,22 @@
namespace art {
namespace mirror {
-Class* Reference::java_lang_ref_Reference_ = nullptr;
+GcRoot<Class> Reference::java_lang_ref_Reference_;
void Reference::SetClass(Class* java_lang_ref_Reference) {
- CHECK(java_lang_ref_Reference_ == nullptr);
+ CHECK(java_lang_ref_Reference_.IsNull());
CHECK(java_lang_ref_Reference != nullptr);
- java_lang_ref_Reference_ = java_lang_ref_Reference;
+ java_lang_ref_Reference_ = GcRoot<Class>(java_lang_ref_Reference);
}
void Reference::ResetClass() {
- CHECK(java_lang_ref_Reference_ != nullptr);
- java_lang_ref_Reference_ = nullptr;
+ CHECK(!java_lang_ref_Reference_.IsNull());
+ java_lang_ref_Reference_ = GcRoot<Class>(nullptr);
}
void Reference::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_ref_Reference_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_ref_Reference_),
- arg, 0, kRootStickyClass);
+ if (!java_lang_ref_Reference_.IsNull()) {
+ java_lang_ref_Reference_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 07d47d31e7..7345448ed7 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -18,9 +18,10 @@
#define ART_RUNTIME_MIRROR_REFERENCE_H_
#include "class.h"
+#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
+#include "read_barrier_option.h"
#include "thread.h"
namespace art {
@@ -94,9 +95,8 @@ class MANAGED Reference : public Object {
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
static Class* GetJavaLangRefReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_ref_Reference_ != nullptr);
- return ReadBarrier::BarrierForRoot<mirror::Class, kReadBarrierOption>(
- &java_lang_ref_Reference_);
+ DCHECK(!java_lang_ref_Reference_.IsNull());
+ return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
static void SetClass(Class* klass);
static void ResetClass(void);
@@ -114,7 +114,7 @@ class MANAGED Reference : public Object {
HeapReference<Reference> queue_next_; // Note this is Java volatile:
HeapReference<Object> referent_; // Note this is Java volatile:
- static Class* java_lang_ref_Reference_;
+ static GcRoot<Class> java_lang_ref_Reference_;
friend struct art::ReferenceOffsets; // for verifying offset information
friend class gc::ReferenceProcessor;
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index b1de2b6f7d..1eb20f71a6 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -26,17 +26,17 @@
namespace art {
namespace mirror {
-Class* StackTraceElement::java_lang_StackTraceElement_ = NULL;
+GcRoot<Class> StackTraceElement::java_lang_StackTraceElement_;
void StackTraceElement::SetClass(Class* java_lang_StackTraceElement) {
- CHECK(java_lang_StackTraceElement_ == NULL);
+ CHECK(java_lang_StackTraceElement_.IsNull());
CHECK(java_lang_StackTraceElement != NULL);
- java_lang_StackTraceElement_ = java_lang_StackTraceElement;
+ java_lang_StackTraceElement_ = GcRoot<Class>(java_lang_StackTraceElement);
}
void StackTraceElement::ResetClass() {
- CHECK(java_lang_StackTraceElement_ != NULL);
- java_lang_StackTraceElement_ = NULL;
+ CHECK(!java_lang_StackTraceElement_.IsNull());
+ java_lang_StackTraceElement_ = GcRoot<Class>(nullptr);
}
StackTraceElement* StackTraceElement::Alloc(Thread* self, Handle<String> declaring_class,
@@ -68,9 +68,8 @@ void StackTraceElement::Init(Handle<String> declaring_class, Handle<String> meth
}
void StackTraceElement::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_StackTraceElement_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_StackTraceElement_), arg, 0,
- kRootStickyClass);
+ if (!java_lang_StackTraceElement_.IsNull()) {
+ java_lang_StackTraceElement_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 52b0927756..70acd1ce55 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -17,9 +17,9 @@
#ifndef ART_RUNTIME_MIRROR_STACK_TRACE_ELEMENT_H_
#define ART_RUNTIME_MIRROR_STACK_TRACE_ELEMENT_H_
+#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
namespace art {
@@ -57,9 +57,8 @@ class MANAGED StackTraceElement FINAL : public Object {
static void VisitRoots(RootCallback* callback, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Class* GetStackTraceElement() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_StackTraceElement_ != NULL);
- return ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &java_lang_StackTraceElement_);
+ DCHECK(!java_lang_StackTraceElement_.IsNull());
+ return java_lang_StackTraceElement_.Read();
}
private:
@@ -74,7 +73,7 @@ class MANAGED StackTraceElement FINAL : public Object {
int32_t line_number)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static Class* java_lang_StackTraceElement_;
+ static GcRoot<Class> java_lang_StackTraceElement_;
friend struct art::StackTraceElementOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(StackTraceElement);
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 5c57dcef45..e81e4312e7 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -31,7 +31,7 @@ namespace art {
namespace mirror {
// TODO: get global references for these
-Class* String::java_lang_String_ = NULL;
+GcRoot<Class> String::java_lang_String_;
int32_t String::FastIndexOf(int32_t ch, int32_t start) {
int32_t count = GetLength();
@@ -52,14 +52,14 @@ int32_t String::FastIndexOf(int32_t ch, int32_t start) {
}
void String::SetClass(Class* java_lang_String) {
- CHECK(java_lang_String_ == NULL);
+ CHECK(java_lang_String_.IsNull());
CHECK(java_lang_String != NULL);
- java_lang_String_ = java_lang_String;
+ java_lang_String_ = GcRoot<Class>(java_lang_String);
}
void String::ResetClass() {
- CHECK(java_lang_String_ != NULL);
- java_lang_String_ = NULL;
+ CHECK(!java_lang_String_.IsNull());
+ java_lang_String_ = GcRoot<Class>(nullptr);
}
int32_t String::GetHashCode() {
@@ -233,8 +233,8 @@ int32_t String::CompareTo(String* rhs) {
}
void String::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_String_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_String_), arg, 0, kRootStickyClass);
+ if (!java_lang_String_.IsNull()) {
+ java_lang_String_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 46bdd59b5c..66a5dd827d 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -19,9 +19,9 @@
#include <gtest/gtest.h>
+#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
namespace art {
@@ -111,9 +111,8 @@ class MANAGED String FINAL : public Object {
int32_t CompareTo(String* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Class* GetJavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_String_ != NULL);
- return ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &java_lang_String_);
+ DCHECK(!java_lang_String_.IsNull());
+ return java_lang_String_.Read();
}
static void SetClass(Class* java_lang_String);
@@ -160,7 +159,7 @@ class MANAGED String FINAL : public Object {
int32_t offset_;
- static Class* java_lang_String_;
+ static GcRoot<Class> java_lang_String_;
friend struct art::StringOffsets; // for verifying offset information
FRIEND_TEST(ObjectTest, StringLength); // for SetOffset and SetCount
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 1c3f1ed5bf..93ed4d4daf 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -30,7 +30,7 @@
namespace art {
namespace mirror {
-Class* Throwable::java_lang_Throwable_ = NULL;
+GcRoot<Class> Throwable::java_lang_Throwable_;
void Throwable::SetDetailMessage(String* new_detail_message) {
if (Runtime::Current()->IsActiveTransaction()) {
@@ -127,19 +127,19 @@ std::string Throwable::Dump() {
}
void Throwable::SetClass(Class* java_lang_Throwable) {
- CHECK(java_lang_Throwable_ == NULL);
+ CHECK(java_lang_Throwable_.IsNull());
CHECK(java_lang_Throwable != NULL);
- java_lang_Throwable_ = java_lang_Throwable;
+ java_lang_Throwable_ = GcRoot<Class>(java_lang_Throwable);
}
void Throwable::ResetClass() {
- CHECK(java_lang_Throwable_ != NULL);
- java_lang_Throwable_ = NULL;
+ CHECK(!java_lang_Throwable_.IsNull());
+ java_lang_Throwable_ = GcRoot<Class>(nullptr);
}
void Throwable::VisitRoots(RootCallback* callback, void* arg) {
- if (java_lang_Throwable_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&java_lang_Throwable_), arg, 0, kRootStickyClass);
+ if (!java_lang_Throwable_.IsNull()) {
+ java_lang_Throwable_.VisitRoot(callback, arg, 0, kRootStickyClass);
}
}
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index cf54ad69a9..f90812d2ec 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -17,9 +17,9 @@
#ifndef ART_RUNTIME_MIRROR_THROWABLE_H_
#define ART_RUNTIME_MIRROR_THROWABLE_H_
+#include "gc_root.h"
#include "object.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
#include "string.h"
namespace art {
@@ -47,9 +47,8 @@ class MANAGED Throwable : public Object {
bool IsCheckedException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static Class* GetJavaLangThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(java_lang_Throwable_ != NULL);
- return ReadBarrier::BarrierForRoot<mirror::Class, kWithReadBarrier>(
- &java_lang_Throwable_);
+ DCHECK(!java_lang_Throwable_.IsNull());
+ return java_lang_Throwable_.Read();
}
static void SetClass(Class* java_lang_Throwable);
@@ -72,7 +71,7 @@ class MANAGED Throwable : public Object {
HeapReference<Object> stack_trace_;
HeapReference<Object> suppressed_exceptions_;
- static Class* java_lang_Throwable_;
+ static GcRoot<Class> java_lang_Throwable_;
friend struct art::ThrowableOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(Throwable);
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 4b26edac61..433c1b2d6d 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -84,7 +84,7 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
num_waiters_(0),
owner_(owner),
lock_count_(0),
- obj_(obj),
+ obj_(GcRoot<mirror::Object>(obj)),
wait_set_(NULL),
hash_code_(hash_code),
locking_method_(NULL),
@@ -107,7 +107,7 @@ Monitor::Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_
num_waiters_(0),
owner_(owner),
lock_count_(0),
- obj_(obj),
+ obj_(GcRoot<mirror::Object>(obj)),
wait_set_(NULL),
hash_code_(hash_code),
locking_method_(NULL),
@@ -165,7 +165,9 @@ bool Monitor::Install(Thread* self) {
bool success = GetObject()->CasLockWordWeakSequentiallyConsistent(lw, fat);
// Lock profiling.
if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
- locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_);
+ // Do not abort on dex pc errors. This can easily happen when we want to dump a stack trace on
+ // abort.
+ locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_, false);
}
return success;
}
@@ -223,7 +225,7 @@ void Monitor::RemoveFromWaitSet(Thread *thread) {
}
void Monitor::SetObject(mirror::Object* object) {
- obj_ = object;
+ obj_ = GcRoot<mirror::Object>(object);
}
void Monitor::Lock(Thread* self) {
@@ -634,7 +636,7 @@ bool Monitor::Deflate(Thread* self, mirror::Object* obj) {
}
// The monitor is deflated, mark the object as nullptr so that we know to delete it during the
// next GC.
- monitor->obj_ = nullptr;
+ monitor->obj_ = GcRoot<mirror::Object>(nullptr);
}
return true;
}
@@ -680,6 +682,8 @@ void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWo
Thread* owner;
{
ScopedThreadStateChange tsc(self, kBlocked);
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
owner = thread_list->SuspendThreadByThreadId(owner_thread_id, false, &timed_out);
}
if (owner != nullptr) {
@@ -745,10 +749,10 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) {
contention_count++;
Runtime* runtime = Runtime::Current();
if (contention_count <= runtime->GetMaxSpinsBeforeThinkLockInflation()) {
- // TODO: Consider switch thread state to kBlocked when we are yielding.
+ // TODO: Consider switching the thread state to kBlocked when we are yielding.
// Use sched_yield instead of NanoSleep since NanoSleep can wait much longer than the
// parameter you pass in. This can cause thread suspension to take excessively long
- // make long pauses. See b/16307460.
+ // and make long pauses. See b/16307460.
sched_yield();
} else {
contention_count = 0;
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 0d0ad0b13d..26d43c953b 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -26,8 +26,9 @@
#include "atomic.h"
#include "base/mutex.h"
+#include "gc_root.h"
#include "object_callbacks.h"
-#include "read_barrier.h"
+#include "read_barrier_option.h"
#include "thread_state.h"
namespace art {
@@ -95,7 +96,7 @@ class Monitor {
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Object* GetObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ReadBarrier::BarrierForRoot<mirror::Object, kReadBarrierOption>(&obj_);
+ return obj_.Read<kReadBarrierOption>();
}
void SetObject(mirror::Object* object);
@@ -197,7 +198,7 @@ class Monitor {
// What object are we part of. This is a weak root. Do not access
// this directly, use GetObject() to read it so it will be guarded
// by a read barrier.
- mirror::Object* obj_;
+ GcRoot<mirror::Object> obj_;
// Threads currently waiting on this monitor.
Thread* wait_set_ GUARDED_BY(monitor_lock_);
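
The monitor.h and throwable.h hunks above replace raw mirror pointers that acted as GC roots with a GcRoot<> wrapper from the newly included gc_root.h, so reads go through Read() and root reporting through VisitRoot() instead of hand-written ReadBarrier calls. A rough sketch of the wrapper shape those call sites imply (the real class is defined in gc_root.h and may differ in detail):

    // Sketch only: the shape of GcRoot<T> implied by the call sites in this patch
    // (construction from a raw pointer, Read<>(), IsNull(), VisitRoot()).
    template<class MirrorType>
    class GcRoot {
     public:
      explicit GcRoot(MirrorType* ref = nullptr) : root_(ref) {}

      // Every read funnels through the read-barrier machinery, so callers no longer
      // invoke ReadBarrier::BarrierForRoot<>() by hand.
      template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
      MirrorType* Read() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
        return ReadBarrier::BarrierForRoot<MirrorType, kReadBarrierOption>(&root_);
      }

      // Roots report themselves to the GC through the usual RootCallback.
      void VisitRoot(RootCallback* callback, void* arg, uint32_t thread_id, RootType root_type) {
        callback(reinterpret_cast<mirror::Object**>(&root_), arg, thread_id, root_type);
      }

      bool IsNull() const { return root_ == nullptr; }

     private:
      MirrorType* root_;
    };
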
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index 440a6be07b..4964aa06c5 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -52,7 +52,7 @@ void MonitorPool::AllocateChunk() {
monitor_chunks_.StoreRelaxed(new_backing);
capacity_ = new_capacity;
old_chunk_arrays_.push_back(old_backing);
- LOG(INFO) << "Resizing to capacity " << capacity_;
+ VLOG(monitor) << "Resizing to capacity " << capacity_;
}
}
@@ -64,7 +64,7 @@ void MonitorPool::AllocateChunk() {
CHECK_EQ(0U, reinterpret_cast<uintptr_t>(chunk) % kMonitorAlignment);
// Add the chunk.
- *(monitor_chunks_.LoadRelaxed()+num_chunks_) = reinterpret_cast<uintptr_t>(chunk);
+ *(monitor_chunks_.LoadRelaxed() + num_chunks_) = reinterpret_cast<uintptr_t>(chunk);
num_chunks_++;
// Set up the free list
@@ -96,7 +96,7 @@ Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::O
// Enough space, or need to resize?
if (first_free_ == nullptr) {
- LOG(INFO) << "Allocating a new chunk.";
+ VLOG(monitor) << "Allocating a new chunk.";
AllocateChunk();
}
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 440d3d0b59..c3304e6670 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -275,8 +275,96 @@ static void CopyProfileFile(const char* oldfile, const char* newfile) {
}
}
-static jboolean IsDexOptNeededInternal(JNIEnv* env, const char* filename,
+// Java: dalvik.system.DexFile.UP_TO_DATE
+static const jbyte kUpToDate = 0;
+// Java: dalvik.system.DexFile.DEXOPT_NEEDED
+static const jbyte kPatchoatNeeded = 1;
+// Java: dalvik.system.DexFile.PATCHOAT_NEEDED
+static const jbyte kDexoptNeeded = 2;
+
+template <const bool kVerboseLogging, const bool kReasonLogging>
+static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char* filename,
+ InstructionSet target_instruction_set) {
+ std::string error_msg;
+ std::unique_ptr<const OatFile> oat_file(OatFile::Open(oat_filename, oat_filename, nullptr,
+ false, &error_msg));
+ if (oat_file.get() == nullptr) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded failed to open oat file '" << oat_filename
+ << "' for file location '" << filename << "': " << error_msg;
+ }
+ error_msg.clear();
+ return kDexoptNeeded;
+ }
+ bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
+ uint32_t location_checksum = 0;
+ const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename, nullptr,
+ kReasonLogging);
+ if (oat_dex_file != nullptr) {
+ // If it's not possible to read the classes.dex assume up-to-date as we won't be able to
+ // compile it anyway.
+ if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded found precompiled stripped file: "
+ << filename << " for " << oat_filename << ": " << error_msg;
+ }
+ if (ClassLinker::VerifyOatChecksums(oat_file.get(), target_instruction_set, &error_msg)) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " is up-to-date for " << filename;
+ }
+ return kUpToDate;
+ } else if (should_relocate_if_possible &&
+ ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " needs to be relocated for " << filename;
+ }
+ return kPatchoatNeeded;
+ } else {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " is out of date for " << filename;
+ }
+ return kDexoptNeeded;
+ }
+ // If we get here the file is out of date and we should use the system one to relocate.
+ } else {
+ if (ClassLinker::VerifyOatAndDexFileChecksums(oat_file.get(), filename, location_checksum,
+ target_instruction_set, &error_msg)) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " is up-to-date for " << filename;
+ }
+ return kUpToDate;
+ } else if (location_checksum == oat_dex_file->GetDexFileLocationChecksum()
+ && should_relocate_if_possible
+ && ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " needs to be relocated for " << filename;
+ }
+ return kPatchoatNeeded;
+ } else {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " is out of date for " << filename;
+ }
+ return kDexoptNeeded;
+ }
+ }
+ } else {
+ if (kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
+ << " does not contain " << filename;
+ }
+ return kDexoptNeeded;
+ }
+}
+
+static jbyte IsDexOptNeededInternal(JNIEnv* env, const char* filename,
const char* pkgname, const char* instruction_set, const jboolean defer) {
+ // TODO: Disable this logging.
const bool kVerboseLogging = false; // Spammy logging.
const bool kReasonLogging = true; // Logging of reason for returning JNI_TRUE.
@@ -285,7 +373,7 @@ static jboolean IsDexOptNeededInternal(JNIEnv* env, const char* filename,
ScopedLocalRef<jclass> fnfe(env, env->FindClass("java/io/FileNotFoundException"));
const char* message = (filename == nullptr) ? "<empty file name>" : filename;
env->ThrowNew(fnfe.get(), message);
- return JNI_FALSE;
+ return kUpToDate;
}
// Always treat elements of the bootclasspath as up-to-date. The
@@ -301,78 +389,45 @@ static jboolean IsDexOptNeededInternal(JNIEnv* env, const char* filename,
if (kVerboseLogging) {
LOG(INFO) << "DexFile_isDexOptNeeded ignoring boot class path file: " << filename;
}
- return JNI_FALSE;
+ return kUpToDate;
}
}
- const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
-
- // Check if we have an odex file next to the dex file.
- std::string odex_filename(DexFilenameToOdexFilename(filename, kRuntimeISA));
- std::string error_msg;
- std::unique_ptr<const OatFile> oat_file(OatFile::Open(odex_filename, odex_filename, NULL, false,
- &error_msg));
- if (oat_file.get() == nullptr) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded failed to open oat file '" << filename
- << "': " << error_msg;
- }
- error_msg.clear();
- } else {
- const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename, NULL,
- kReasonLogging);
- if (oat_dex_file != nullptr) {
- uint32_t location_checksum;
- // If its not possible to read the classes.dex assume up-to-date as we won't be able to
- // compile it anyway.
- if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded ignoring precompiled stripped file: "
- << filename << ": " << error_msg;
- }
- return JNI_FALSE;
- }
- if (ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename, location_checksum,
- target_instruction_set,
- &error_msg)) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded precompiled file " << odex_filename
- << " has an up-to-date checksum compared to " << filename;
- }
- return JNI_FALSE;
- } else {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded found precompiled file " << odex_filename
- << " with an out-of-date checksum compared to " << filename
- << ": " << error_msg;
- }
- error_msg.clear();
- }
- }
- }
+ bool force_system_only = false;
+ bool require_system_version = false;
// Check the profile file. We need to rerun dex2oat if the profile has changed significantly
// since the last time, or it's new.
// If the 'defer' argument is true then this will be retried later. In this case we
// need to make sure that the profile file copy is not made so that we will get the
// same result second time.
+ std::string profile_file;
+ std::string prev_profile_file;
+ bool should_copy_profile = false;
if (Runtime::Current()->GetProfilerOptions().IsEnabled() && (pkgname != nullptr)) {
- const std::string profile_file = GetDalvikCacheOrDie("profiles", false /* create_if_absent */)
+ profile_file = GetDalvikCacheOrDie("profiles", false /* create_if_absent */)
+ std::string("/") + pkgname;
- const std::string prev_profile_file = profile_file + std::string("@old");
+ prev_profile_file = profile_file + std::string("@old");
struct stat profstat, prevstat;
int e1 = stat(profile_file.c_str(), &profstat);
+ int e1_errno = errno;
int e2 = stat(prev_profile_file.c_str(), &prevstat);
+ int e2_errno = errno;
if (e1 < 0) {
- // No profile file, need to run dex2oat
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded profile file " << profile_file << " doesn't exist";
+ if (e1_errno != EACCES) {
+ // No profile file, need to run dex2oat, unless we find a file in system
+ if (kReasonLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeededInternal profile file " << profile_file << " doesn't exist. "
+ << "Will check odex to see if we can find a working version.";
+ }
+ // Force it to only accept system files/files with versions in system.
+ require_system_version = true;
+ } else {
+ LOG(INFO) << "DexFile_isDexOptNeededInternal recieved EACCES trying to stat profile file "
+ << profile_file;
}
- return JNI_TRUE;
- }
-
- if (e2 == 0) {
+ } else if (e2 == 0) {
// There is a previous profile file. Check if the profile has changed significantly.
// A change in profile is considered significant if X% (change_thr property) of the top K%
// (compile_thr property) samples has changed.
@@ -384,7 +439,7 @@ static jboolean IsDexOptNeededInternal(JNIEnv* env, const char* filename,
bool old_ok = old_profile.LoadFile(prev_profile_file);
if (!new_ok || !old_ok) {
if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded Ignoring invalid profiles: "
+ LOG(INFO) << "DexFile_isDexOptNeededInternal Ignoring invalid profiles: "
<< (new_ok ? "" : profile_file) << " " << (old_ok ? "" : prev_profile_file);
}
} else {
@@ -393,7 +448,7 @@ static jboolean IsDexOptNeededInternal(JNIEnv* env, const char* filename,
old_profile.GetTopKSamples(old_top_k, top_k_threshold);
if (new_top_k.empty()) {
if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded empty profile: " << profile_file;
+ LOG(INFO) << "DexFile_isDexOptNeededInternal empty profile: " << profile_file;
}
// If the new topK is empty we shouldn't optimize so we leave the change_percent at 0.0.
} else {
@@ -405,7 +460,7 @@ static jboolean IsDexOptNeededInternal(JNIEnv* env, const char* filename,
if (kVerboseLogging) {
std::set<std::string>::iterator end = diff.end();
for (std::set<std::string>::iterator it = diff.begin(); it != end; it++) {
- LOG(INFO) << "DexFile_isDexOptNeeded new in topK: " << *it;
+ LOG(INFO) << "DexFile_isDexOptNeededInternal new in topK: " << *it;
}
}
}
@@ -413,67 +468,85 @@ static jboolean IsDexOptNeededInternal(JNIEnv* env, const char* filename,
if (change_percent > change_threshold) {
if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded size of new profile file " << profile_file <<
+ LOG(INFO) << "DexFile_isDexOptNeededInternal size of new profile file " << profile_file <<
" is significantly different from old profile file " << prev_profile_file << " (top "
<< top_k_threshold << "% samples changed in proportion of " << change_percent << "%)";
}
- if (!defer) {
- CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
- }
- return JNI_TRUE;
+ should_copy_profile = !defer;
+ // Force us to only accept system files.
+ force_system_only = true;
}
- } else {
+ } else if (e2_errno == ENOENT) {
// Previous profile does not exist. Make a copy of the current one.
if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded previous profile doesn't exist: " << prev_profile_file;
- }
- if (!defer) {
- CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
+ LOG(INFO) << "DexFile_isDexOptNeededInternal previous profile doesn't exist: " << prev_profile_file;
}
+ should_copy_profile = !defer;
+ } else {
+ PLOG(INFO) << "Unable to stat previous profile file " << prev_profile_file;
}
}
- // Check if we have an oat file in the cache
- const std::string cache_dir(GetDalvikCacheOrDie(instruction_set));
- const std::string cache_location(
- GetDalvikCacheFilenameOrDie(filename, cache_dir.c_str()));
- oat_file.reset(OatFile::Open(cache_location, filename, NULL, false, &error_msg));
- if (oat_file.get() == nullptr) {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
- << " does not exist for " << filename << ": " << error_msg;
+ const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
+
+ // Get the filename for odex file next to the dex file.
+ std::string odex_filename(DexFilenameToOdexFilename(filename, target_instruction_set));
+ // Get the filename for the dalvik-cache file
+ std::string cache_dir;
+ bool have_android_data = false;
+ bool dalvik_cache_exists = false;
+ GetDalvikCache(instruction_set, false, &cache_dir, &have_android_data, &dalvik_cache_exists);
+ std::string cache_filename; // was cache_location
+ bool have_cache_filename = false;
+ if (dalvik_cache_exists) {
+ std::string error_msg;
+ have_cache_filename = GetDalvikCacheFilename(filename, cache_dir.c_str(), &cache_filename,
+ &error_msg);
+ if (!have_cache_filename && kVerboseLogging) {
+ LOG(INFO) << "DexFile_isDexOptNeededInternal failed to find cache file for dex file " << filename
+ << ": " << error_msg;
}
- return JNI_TRUE;
}
- uint32_t location_checksum;
- if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
- if (kReasonLogging) {
- LOG(ERROR) << "DexFile_isDexOptNeeded failed to compute checksum of " << filename
- << " (error " << error_msg << ")";
+ bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
+
+ jbyte dalvik_cache_decision = -1;
+ // Let's try the cache first, since we want to load from there (that's where the relocated
+ // versions will be).
+ if (have_cache_filename && !force_system_only) {
+ // We can use the dalvik-cache if we find a good file.
+ dalvik_cache_decision =
+ IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(cache_filename, filename,
+ target_instruction_set);
+ // We will only return DexOptNeeded if both the cache and system return it.
+ if (dalvik_cache_decision != kDexoptNeeded && !require_system_version) {
+ CHECK(!(dalvik_cache_decision == kPatchoatNeeded && !should_relocate_if_possible))
+ << "May not return PatchoatNeeded when patching is disabled.";
+ return dalvik_cache_decision;
}
- return JNI_TRUE;
+ // We couldn't find a suitable one in the cache. We should now try the system.
}
- if (!ClassLinker::VerifyOatFileChecksums(oat_file.get(), filename, location_checksum,
- target_instruction_set, &error_msg)) {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
- << " has out-of-date checksum compared to " << filename
- << " (error " << error_msg << ")";
- }
- return JNI_TRUE;
+ jbyte system_decision =
+ IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(odex_filename, filename,
+ target_instruction_set);
+ CHECK(!(system_decision == kPatchoatNeeded && !should_relocate_if_possible))
+ << "May not return PatchoatNeeded when patching is disabled.";
+
+ if (require_system_version && system_decision == kPatchoatNeeded
+ && dalvik_cache_decision == kUpToDate) {
+ // We have a version from system relocated to the cache. Return it.
+ return dalvik_cache_decision;
}
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
- << " is up-to-date for " << filename;
+ if (should_copy_profile && system_decision == kDexoptNeeded) {
+ CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
}
- CHECK(error_msg.empty()) << error_msg;
- return JNI_FALSE;
+
+ return system_decision;
}
-static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
+static jbyte DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
jstring javaPkgname, jstring javaInstructionSet, jboolean defer) {
ScopedUtfChars filename(env, javaFilename);
NullableScopedUtfChars pkgname(env, javaPkgname);
@@ -487,8 +560,8 @@ static jboolean DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring java
static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename) {
const char* instruction_set = GetInstructionSetString(kRuntimeISA);
ScopedUtfChars filename(env, javaFilename);
- return IsDexOptNeededInternal(env, filename.c_str(), nullptr /* pkgname */,
- instruction_set, false /* defer */);
+ return kUpToDate != IsDexOptNeededInternal(env, filename.c_str(), nullptr /* pkgname */,
+ instruction_set, false /* defer */);
}
@@ -497,7 +570,7 @@ static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, defineClassNative, "(Ljava/lang/String;Ljava/lang/ClassLoader;J)Ljava/lang/Class;"),
NATIVE_METHOD(DexFile, getClassNameList, "(J)[Ljava/lang/String;"),
NATIVE_METHOD(DexFile, isDexOptNeeded, "(Ljava/lang/String;)Z"),
- NATIVE_METHOD(DexFile, isDexOptNeededInternal, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)Z"),
+ NATIVE_METHOD(DexFile, isDexOptNeededInternal, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Z)B"),
NATIVE_METHOD(DexFile, openDexFileNative, "(Ljava/lang/String;Ljava/lang/String;I)J"),
};
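
With the change above, isDexOptNeededInternal reports a three-way decision (kUpToDate, kPatchoatNeeded, kDexoptNeeded) instead of a boolean, and the legacy boolean entry point is kept by comparing the result against kUpToDate. As a minimal illustration of how a caller could act on the returned byte (RunPatchoat and RunDex2oat are hypothetical placeholders, not functions from this change):

    // Illustration only: acting on the jbyte decision on the caller side.
    static void HandleDexOptDecision(jbyte decision) {
      switch (decision) {
        case kUpToDate:
          break;  // The existing oat file can be used as-is.
        case kPatchoatNeeded:
          RunPatchoat();  // Only relocation is needed, no recompilation.
          break;
        case kDexoptNeeded:
          RunDex2oat();   // The oat file is missing or out of date; recompile.
          break;
        default:
          LOG(FATAL) << "Unexpected dexopt decision " << static_cast<int>(decision);
      }
    }
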
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index b0b64aac16..a9ef8fc32e 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -167,7 +167,7 @@ static jboolean VMRuntime_is64Bit(JNIEnv* env, jobject) {
}
static jboolean VMRuntime_isCheckJniEnabled(JNIEnv* env, jobject) {
- return Runtime::Current()->GetJavaVM()->check_jni ? JNI_TRUE : JNI_FALSE;
+ return Runtime::Current()->GetJavaVM()->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
}
static void VMRuntime_setTargetSdkVersionNative(JNIEnv*, jobject, jint target_sdk_version) {
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index cf310648e5..5f718ba213 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -35,7 +35,12 @@ static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject p
// Suspend thread to build stack trace.
soa.Self()->TransitionFromRunnableToSuspended(kNative);
bool timed_out;
- Thread* thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ Thread* thread;
+ {
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
+ thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ }
if (thread != nullptr) {
// Must be runnable to create returned array.
CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kNative);
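
The block above is the same locking idiom added in Monitor::InflateThinLocked earlier and in the Thread and DdmVmInternal hunks below: the new thread_list_suspend_thread_lock_ is held only around the request to suspend, never around the work done on the suspended thread. Sketched in isolation (DoSomethingWithSuspendedThread is a placeholder):

    bool timed_out;
    Thread* thread;
    {
      // Hold the suspend-thread lock only while requesting the suspension.
      MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
      thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
    }
    if (thread != nullptr) {
      DoSomethingWithSuspendedThread(thread);
      Runtime::Current()->GetThreadList()->Resume(thread, false);
    }
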
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 820bd0420f..df6055dac3 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -17,6 +17,7 @@
#include <stdlib.h>
#include "debugger.h"
+#include "java_vm_ext.h"
#include "jni_internal.h"
#include "JNIHelp.h"
#include "thread-inl.h"
@@ -47,7 +48,7 @@ static void EnableDebugger() {
}
static void EnableDebugFeatures(uint32_t debug_flags) {
- // Must match values in dalvik.system.Zygote.
+ // Must match values in com.android.internal.os.Zygote.
enum {
DEBUG_ENABLE_DEBUGGER = 1,
DEBUG_ENABLE_CHECKJNI = 1 << 1,
@@ -59,7 +60,7 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
if ((debug_flags & DEBUG_ENABLE_CHECKJNI) != 0) {
Runtime* runtime = Runtime::Current();
JavaVMExt* vm = runtime->GetJavaVM();
- if (!vm->check_jni) {
+ if (!vm->IsCheckJniEnabled()) {
LOG(INFO) << "Late-enabling -Xcheck:jni";
vm->SetCheckJniEnabled(true);
// There's only one thread running at this point, so only one JNIEnv to fix up.
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index e577c2c960..124bdf5475 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -71,7 +71,10 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean
jthrowable cnfe = reinterpret_cast<jthrowable>(env->NewObject(WellKnownClasses::java_lang_ClassNotFoundException,
WellKnownClasses::java_lang_ClassNotFoundException_init,
javaName, cause.get()));
- env->Throw(cnfe);
+ if (cnfe != nullptr) {
+ // Make sure allocation didn't fail with an OOME.
+ env->Throw(cnfe);
+ }
return nullptr;
}
if (initialize) {
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
index 51cd5b80d5..c1c6c26047 100644
--- a/runtime/native/java_lang_DexCache.cc
+++ b/runtime/native/java_lang_DexCache.cc
@@ -15,6 +15,7 @@
*/
#include "dex_file.h"
+#include "jni_internal.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "scoped_fast_native_object_access.h"
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index 496a1b251e..a85eec7464 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -38,11 +38,15 @@ static void Runtime_gc(JNIEnv*, jclass) {
}
static void Runtime_nativeExit(JNIEnv*, jclass, jint status) {
+ LOG(INFO) << "System.exit called, status: " << status;
Runtime::Current()->CallExitHook(status);
exit(status);
}
-static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader, jstring javaLdLibraryPath) {
+static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader,
+ jstring javaLdLibraryPath) {
+ // TODO: this returns NULL on success, or an error message describing the failure. This
+ // should be refactored in terms of suppressed exceptions.
ScopedUtfChars filename(env, javaFilename);
if (filename.c_str() == NULL) {
return NULL;
@@ -63,14 +67,10 @@ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, job
}
}
- std::string detail;
+ std::string error_msg;
{
- ScopedObjectAccess soa(env);
- StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ClassLoader> classLoader(
- hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
JavaVMExt* vm = Runtime::Current()->GetJavaVM();
- bool success = vm->LoadNativeLibrary(filename.c_str(), classLoader, &detail);
+ bool success = vm->LoadNativeLibrary(env, filename.c_str(), javaLoader, &error_msg);
if (success) {
return nullptr;
}
@@ -78,7 +78,7 @@ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, job
// Don't let a pending exception from JNI_OnLoad cause a CheckJNI issue with NewStringUTF.
env->ExceptionClear();
- return env->NewStringUTF(detail.c_str());
+ return env->NewStringUTF(error_msg.c_str());
}
static jlong Runtime_maxMemory(JNIEnv*, jclass) {
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index bae67f20e8..8f83f96318 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -116,18 +116,25 @@ static void Thread_nativeInterrupt(JNIEnv* env, jobject java_thread) {
static void Thread_nativeSetName(JNIEnv* env, jobject peer, jstring java_name) {
ScopedUtfChars name(env, java_name);
+ Thread* self;
{
ScopedObjectAccess soa(env);
if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
soa.Self()->SetThreadName(name.c_str());
return;
}
+ self = soa.Self();
}
// Suspend thread to avoid it from killing itself while we set its name. We don't just hold the
// thread list lock to avoid this, as setting the thread name causes mutator to lock/unlock
// in the DDMS send code.
bool timed_out;
- Thread* thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ Thread* thread;
+ {
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
+ thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+ }
if (thread != NULL) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc
index f2b8a03c13..fefddae761 100644
--- a/runtime/native/java_lang_VMClassLoader.cc
+++ b/runtime/native/java_lang_VMClassLoader.cc
@@ -62,25 +62,28 @@ static jint VMClassLoader_getBootClassPathSize(JNIEnv*, jclass) {
*/
static jstring VMClassLoader_getBootClassPathResource(JNIEnv* env, jclass, jstring javaName, jint index) {
ScopedUtfChars name(env, javaName);
- if (name.c_str() == NULL) {
- return NULL;
+ if (name.c_str() == nullptr) {
+ return nullptr;
}
const std::vector<const DexFile*>& path = Runtime::Current()->GetClassLinker()->GetBootClassPath();
if (index < 0 || size_t(index) >= path.size()) {
- return NULL;
+ return nullptr;
}
const DexFile* dex_file = path[index];
- const std::string& location(dex_file->GetLocation());
+
+ // For multidex locations, e.g., x.jar:classes2.dex, we want to look into x.jar.
+ const std::string& location(dex_file->GetBaseLocation());
+
std::string error_msg;
std::unique_ptr<ZipArchive> zip_archive(ZipArchive::Open(location.c_str(), &error_msg));
if (zip_archive.get() == nullptr) {
LOG(WARNING) << "Failed to open zip archive '" << location << "': " << error_msg;
- return NULL;
+ return nullptr;
}
std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(name.c_str(), &error_msg));
- if (zip_entry.get() == NULL) {
- return NULL;
+ if (zip_entry.get() == nullptr) {
+ return nullptr;
}
std::string url;
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
index 163ae20628..8b2aecbbb1 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
@@ -16,6 +16,7 @@
#include "base/logging.h"
#include "debugger.h"
+#include "jni_internal.h"
#include "scoped_fast_native_object_access.h"
#include "ScopedPrimitiveArray.h"
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index e17e60a7ce..45ef9ae727 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -61,7 +61,12 @@ static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint th
}
// Suspend thread to build stack trace.
- Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
+ Thread* thread;
+ {
+ // Take suspend thread lock to avoid races with threads trying to suspend this one.
+ MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
+ thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
+ }
if (thread != nullptr) {
{
ScopedObjectAccess soa(env);
diff --git a/runtime/native_bridge.cc b/runtime/native_bridge.cc
new file mode 100644
index 0000000000..d0b516bf35
--- /dev/null
+++ b/runtime/native_bridge.cc
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "native_bridge.h"
+
+#include <dlfcn.h>
+#include <stdio.h>
+#include "jni.h"
+
+#include "base/mutex.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class-inl.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedLocalRef.h"
+#include "thread.h"
+
+#ifdef HAVE_ANDROID_OS
+#include "cutils/properties.h"
+#endif
+
+
+namespace art {
+
+// The symbol name exposed by native-bridge with the type of NativeBridgeCallbacks.
+static constexpr const char* kNativeBridgeInterfaceSymbol = "NativeBridgeItf";
+
+// The library name we are supposed to load.
+static std::string native_bridge_library_string = "";
+
+// Whether a native bridge is available (loaded and ready).
+static bool available = false;
+// Whether we have already initialized (or tried to).
+static bool initialized = false;
+
+struct NativeBridgeCallbacks;
+static NativeBridgeCallbacks* callbacks = nullptr;
+
+// ART interfaces to native-bridge.
+struct NativeBridgeArtCallbacks {
+ // Get shorty of a Java method. The shorty is supposed to be persistent in memory.
+ //
+ // Parameters:
+ // env [IN] pointer to JNIenv.
+ // mid [IN] Java methodID.
+ // Returns:
+ // short descriptor for method.
+ const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
+
+ // Get the number of native methods for the specified class.
+ //
+ // Parameters:
+ // env [IN] pointer to JNIenv.
+ // clazz [IN] Java class object.
+ // Returns:
+ // number of native methods.
+ uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
+
+ // Get at most 'method_count' native methods for the specified class 'clazz'. Results are output
+ // via 'methods' [OUT]. The signature pointer in JNINativeMethod is reused as the method shorty.
+ //
+ // Parameters:
+ // env [IN] pointer to JNIenv.
+ // clazz [IN] Java class object.
+ // methods [OUT] array of method with the name, shorty, and fnPtr.
+ // method_count [IN] max number of elements in methods.
+ // Returns:
+ // number of methods actually written to 'methods'.
+ uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count);
+};
+
+// Native-bridge interfaces to ART
+struct NativeBridgeCallbacks {
+ // Initialize native-bridge. Native-bridge's internal implementation must ensure MT safety and
+ // that the native-bridge is initialized only once. Thus it is OK to call this interface for an
+ // already initialized native-bridge.
+ //
+ // Parameters:
+ // art_cbs [IN] the pointer to NativeBridgeArtCallbacks.
+ // Returns:
+ // true iff initialization was successful.
+ bool (*initialize)(NativeBridgeArtCallbacks* art_cbs);
+
+ // Load a shared library that is supported by the native-bridge.
+ //
+ // Parameters:
+ // libpath [IN] path to the shared library
+ // flag [IN] the standard RTLD_XXX defined in bionic dlfcn.h
+ // Returns:
+ // The opaque handle of the shared library if successful, otherwise NULL
+ void* (*loadLibrary)(const char* libpath, int flag);
+
+ // Get a native-bridge trampoline for the specified native method. The trampoline has the same
+ // signature as the native method.
+ //
+ // Parameters:
+ // handle [IN] the handle returned from loadLibrary
+ // shorty [IN] short descriptor of native method
+ // len [IN] length of shorty
+ // Returns:
+ // address of trampoline if successful, otherwise NULL
+ void* (*getTrampoline)(void* handle, const char* name, const char* shorty, uint32_t len);
+
+ // Check whether native library is valid and is for an ABI that is supported by native-bridge.
+ //
+ // Parameters:
+ // libpath [IN] path to the shared library
+ // Returns:
+ // TRUE if library is supported by native-bridge, FALSE otherwise
+ bool (*isSupported)(const char* libpath);
+};
+
+static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
+ ScopedObjectAccess soa(env);
+ StackHandleScope<1> scope(soa.Self());
+ mirror::ArtMethod* m = soa.DecodeMethod(mid);
+ MethodHelper mh(scope.NewHandle(m));
+ return mh.GetShorty();
+}
+
+static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
+ if (clazz == nullptr)
+ return 0;
+
+ ScopedObjectAccess soa(env);
+ mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
+
+ uint32_t native_method_count = 0;
+ for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetDirectMethod(i);
+ if (m->IsNative()) {
+ native_method_count++;
+ }
+ }
+ for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetVirtualMethod(i);
+ if (m->IsNative()) {
+ native_method_count++;
+ }
+ }
+ return native_method_count;
+}
+
+static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count) {
+ if ((clazz == nullptr) || (methods == nullptr)) {
+ return 0;
+ }
+ ScopedObjectAccess soa(env);
+ mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
+
+ uint32_t count = 0;
+ for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetDirectMethod(i);
+ if (m->IsNative()) {
+ if (count < method_count) {
+ methods[count].name = m->GetName();
+ methods[count].signature = m->GetShorty();
+ methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ count++;
+ } else {
+ LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+ }
+ }
+ }
+ for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetVirtualMethod(i);
+ if (m->IsNative()) {
+ if (count < method_count) {
+ methods[count].name = m->GetName();
+ methods[count].signature = m->GetShorty();
+ methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ count++;
+ } else {
+ LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+ }
+ }
+ }
+ return count;
+}
+
+static NativeBridgeArtCallbacks NativeBridgeArtItf = {
+ GetMethodShorty,
+ GetNativeMethodCount,
+ GetNativeMethods
+};
+
+void SetNativeBridgeLibraryString(const std::string& nb_library_string) {
+ // This is called when the runtime starts and nothing is working concurrently
+ // so we don't need a lock here.
+
+ native_bridge_library_string = nb_library_string;
+
+ if (native_bridge_library_string.empty()) {
+ initialized = true;
+ available = false;
+ }
+}
+
+static bool NativeBridgeInitialize() {
+ // TODO: Missing annotalysis static lock ordering of DEFAULT_MUTEX_ACQUIRED, place lock into
+ // global order or remove.
+ static Mutex lock("native bridge lock");
+ MutexLock mu(Thread::Current(), lock);
+
+ if (initialized) {
+ // Somebody did it before.
+ return available;
+ }
+
+ available = false;
+
+ void* handle = dlopen(native_bridge_library_string.c_str(), RTLD_LAZY);
+ if (handle != nullptr) {
+ callbacks = reinterpret_cast<NativeBridgeCallbacks*>(dlsym(handle,
+ kNativeBridgeInterfaceSymbol));
+
+ if (callbacks != nullptr) {
+ available = callbacks->initialize(&NativeBridgeArtItf);
+ }
+
+ if (!available) {
+ dlclose(handle);
+ }
+ }
+
+ initialized = true;
+
+ return available;
+}
+
+void* NativeBridgeLoadLibrary(const char* libpath, int flag) {
+ if (NativeBridgeInitialize()) {
+ return callbacks->loadLibrary(libpath, flag);
+ }
+ return nullptr;
+}
+
+void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty,
+ uint32_t len) {
+ if (NativeBridgeInitialize()) {
+ return callbacks->getTrampoline(handle, name, shorty, len);
+ }
+ return nullptr;
+}
+
+bool NativeBridgeIsSupported(const char* libpath) {
+ if (NativeBridgeInitialize()) {
+ return callbacks->isSupported(libpath);
+ }
+ return false;
+}
+
+}; // namespace art
diff --git a/runtime/native_bridge.h b/runtime/native_bridge.h
new file mode 100644
index 0000000000..be647fc1eb
--- /dev/null
+++ b/runtime/native_bridge.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_BRIDGE_H_
+#define ART_RUNTIME_NATIVE_BRIDGE_H_
+
+#include <string>
+
+namespace art {
+
+// Initialize the native bridge, if any. Should be called by Runtime::Init(). An empty string
+// signals that we do not want to load a native bridge.
+void SetNativeBridgeLibraryString(const std::string& native_bridge_library_string);
+
+// Load a shared library that is supported by the native-bridge.
+void* NativeBridgeLoadLibrary(const char* libpath, int flag);
+
+// Get a native-bridge trampoline for specified native method.
+void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty, uint32_t len);
+
+// True if native library is valid and is for an ABI that is supported by native-bridge.
+bool NativeBridgeIsSupported(const char* libpath);
+
+}; // namespace art
+
+#endif // ART_RUNTIME_NATIVE_BRIDGE_H_
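
Taken together, native_bridge.cc and this header suggest the intended call pattern: probe with NativeBridgeIsSupported when a plain dlopen fails, load through the bridge, and resolve native methods via trampolines keyed by the method shorty. A sketch under those assumptions (OpenNativeLibraryWithBridge is illustrative, not an ART symbol; the actual wiring lives in the JavaVMExt::LoadNativeLibrary changes referenced elsewhere in this patch):

    // Sketch under assumptions; OpenNativeLibraryWithBridge() is illustrative.
    void* OpenNativeLibraryWithBridge(const char* path) {
      void* handle = dlopen(path, RTLD_LAZY);
      if (handle == nullptr && art::NativeBridgeIsSupported(path)) {
        // The library targets a foreign ABI that the bridge can translate.
        handle = art::NativeBridgeLoadLibrary(path, RTLD_LAZY);
        // Native methods from such a library are later resolved through trampolines
        // keyed by the method shorty rather than through dlsym(), e.g.:
        //   void* fn = art::NativeBridgeGetTrampoline(handle, "nativeFoo", "V", 1);
      }
      return handle;
    }
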
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index 65498deaab..e9ad3531b6 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -32,6 +32,11 @@ class NoopCompilerCallbacks FINAL : public CompilerCallbacks {
void ClassRejected(ClassReference ref) OVERRIDE {}
+ // This is only used by compilers which need to be able to run without relocation even when it
+ // would normally be enabled. For example, the patchoat executable and dex2oat --image both need
+ // to disable relocation since they write out the images directly.
+ bool IsRelocationPossible() OVERRIDE { return false; }
+
private:
DISALLOW_COPY_AND_ASSIGN(NoopCompilerCallbacks);
};
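
IsRelocationPossible() lets image-writing tools veto relocation regardless of the -Xrelocate/-Xnorelocate options added in parsed_options.cc below; Runtime::ShouldRelocate(), used by the DexFile code above, presumably combines the two. A guess at that wiring, not taken from this patch (the member names are assumptions):

    // Assumed wiring, for illustration only.
    bool Runtime::ShouldRelocate() const {
      return must_relocate_ &&
             (compiler_callbacks_ == nullptr || compiler_callbacks_->IsRelocationPossible());
    }
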
diff --git a/runtime/oat.cc b/runtime/oat.cc
index 1421baffcf..0a8c35b561 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -23,7 +23,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '3', '7', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '3', '8', '\0' };
static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
size_t estimate = 0U;
@@ -67,6 +67,8 @@ OatHeader::OatHeader(InstructionSet instruction_set,
const SafeMap<std::string, std::string>* variable_data) {
memcpy(magic_, kOatMagic, sizeof(kOatMagic));
memcpy(version_, kOatVersion, sizeof(kOatVersion));
+ executable_offset_ = 0;
+ image_patch_delta_ = 0;
adler32_checksum_ = adler32(0L, Z_NULL, 0);
@@ -98,7 +100,6 @@ OatHeader::OatHeader(InstructionSet instruction_set,
UpdateChecksum(&key_value_store_, key_value_store_size_);
}
- executable_offset_ = 0;
interpreter_to_interpreter_bridge_offset_ = 0;
interpreter_to_compiled_code_bridge_offset_ = 0;
jni_dlsym_lookup_offset_ = 0;
@@ -118,6 +119,12 @@ bool OatHeader::IsValid() const {
if (memcmp(version_, kOatVersion, sizeof(kOatVersion)) != 0) {
return false;
}
+ if (!IsAligned<kPageSize>(executable_offset_)) {
+ return false;
+ }
+ if (!IsAligned<kPageSize>(image_patch_delta_)) {
+ return false;
+ }
return true;
}
@@ -355,6 +362,26 @@ void OatHeader::SetQuickToInterpreterBridgeOffset(uint32_t offset) {
UpdateChecksum(&quick_to_interpreter_bridge_offset_, sizeof(offset));
}
+int32_t OatHeader::GetImagePatchDelta() const {
+ CHECK(IsValid());
+ return image_patch_delta_;
+}
+
+void OatHeader::RelocateOat(off_t delta) {
+ CHECK(IsValid());
+ CHECK_ALIGNED(delta, kPageSize);
+ image_patch_delta_ += delta;
+ if (image_file_location_oat_data_begin_ != 0) {
+ image_file_location_oat_data_begin_ += delta;
+ }
+}
+
+void OatHeader::SetImagePatchDelta(int32_t off) {
+ CHECK(IsValid());
+ CHECK_ALIGNED(off, kPageSize);
+ image_patch_delta_ = off;
+}
+
uint32_t OatHeader::GetImageFileLocationOatChecksum() const {
CHECK(IsValid());
return image_file_location_oat_checksum_;
diff --git a/runtime/oat.h b/runtime/oat.h
index fbed596d33..6d5fefe2ce 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -88,6 +88,10 @@ class PACKED(4) OatHeader {
uint32_t GetQuickToInterpreterBridgeOffset() const;
void SetQuickToInterpreterBridgeOffset(uint32_t offset);
+ int32_t GetImagePatchDelta() const;
+ void RelocateOat(off_t delta);
+ void SetImagePatchDelta(int32_t off);
+
InstructionSet GetInstructionSet() const;
const InstructionSetFeatures& GetInstructionSetFeatures() const;
uint32_t GetImageFileLocationOatChecksum() const;
@@ -129,6 +133,9 @@ class PACKED(4) OatHeader {
uint32_t quick_resolution_trampoline_offset_;
uint32_t quick_to_interpreter_bridge_offset_;
+ // The amount that the image this oat is associated with has been patched.
+ int32_t image_patch_delta_;
+
uint32_t image_file_location_oat_checksum_;
uint32_t image_file_location_oat_data_begin_;
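
The new image_patch_delta_ field records how far the associated image has been moved, and IsValid() now insists that both it and executable_offset_ are page aligned. A small illustration of how a relocation pass might use the accessors (the delta value is made up):

    // Illustration only.
    void ApplyRelocation(OatHeader* header) {
      const off_t delta = 4 * kPageSize;  // Deltas must stay page aligned.
      const int32_t before = header->GetImagePatchDelta();
      header->RelocateOat(delta);  // Also shifts image_file_location_oat_data_begin_ if set.
      CHECK_EQ(header->GetImagePatchDelta(), before + static_cast<int32_t>(delta));
    }
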
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 9cefcb6b1a..971daf8bbf 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -18,12 +18,12 @@
#include <dlfcn.h>
#include <sstream>
+#include <string.h>
#include "base/bit_vector.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "elf_file.h"
-#include "implicit_check_options.h"
#include "oat.h"
#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
@@ -80,36 +80,7 @@ OatFile* OatFile::Open(const std::string& filename,
}
ret.reset(OpenElfFile(file.get(), location, requested_base, false, executable, error_msg));
}
-
- if (ret.get() == nullptr) {
- return nullptr;
- }
-
- // Embedded options check. Right now only implicit checks.
- // TODO: Refactor to somewhere else?
- const char* implicit_checks_value = ret->GetOatHeader().
- GetStoreValueByKey(ImplicitCheckOptions::kImplicitChecksOatHeaderKey);
-
- if (implicit_checks_value == nullptr) {
- *error_msg = "Did not find implicit checks value.";
- return nullptr;
- }
-
- bool explicit_null_checks, explicit_so_checks, explicit_suspend_checks;
- if (ImplicitCheckOptions::Parse(implicit_checks_value, &explicit_null_checks,
- &explicit_so_checks, &explicit_suspend_checks)) {
- // Check whether the runtime agrees with the recorded checks.
- if (ImplicitCheckOptions::CheckRuntimeSupport(executable, explicit_null_checks,
- explicit_so_checks, explicit_suspend_checks,
- error_msg)) {
- return ret.release();
- } else {
- return nullptr;
- }
- } else {
- *error_msg = "Failed parsing implicit check options.";
- return nullptr;
- }
+ return ret.release();
}
OatFile* OatFile::OpenWritable(File* file, const std::string& location, std::string* error_msg) {
@@ -117,6 +88,11 @@ OatFile* OatFile::OpenWritable(File* file, const std::string& location, std::str
return OpenElfFile(file, location, NULL, true, false, error_msg);
}
+OatFile* OatFile::OpenReadable(File* file, const std::string& location, std::string* error_msg) {
+ CheckLocation(location);
+ return OpenElfFile(file, location, NULL, false, false, error_msg);
+}
+
OatFile* OatFile::OpenDlopen(const std::string& elf_filename,
const std::string& location,
byte* requested_base,
@@ -145,7 +121,8 @@ OatFile* OatFile::OpenElfFile(File* file,
}
OatFile::OatFile(const std::string& location)
- : location_(location), begin_(NULL), end_(NULL), dlopen_handle_(NULL) {
+ : location_(location), begin_(NULL), end_(NULL), dlopen_handle_(NULL),
+ secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
}
@@ -325,12 +302,12 @@ bool OatFile::Setup(std::string* error_msg) {
return false;
}
+ // Create the OatDexFile and add it to the owning map indexed by the dex file location.
OatDexFile* oat_dex_file = new OatDexFile(this,
dex_file_location,
dex_file_checksum,
dex_file_pointer,
methods_offsets_pointer);
- // Use a StringPiece backed by the oat_dex_file's internal std::string as the key.
StringPiece key(oat_dex_file->GetDexFileLocation());
oat_dex_files_.Put(key, oat_dex_file);
}
@@ -354,30 +331,79 @@ const byte* OatFile::End() const {
const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
const uint32_t* dex_location_checksum,
bool warn_if_not_found) const {
- Table::const_iterator it = oat_dex_files_.find(dex_location);
- if (it != oat_dex_files_.end()) {
- const OatFile::OatDexFile* oat_dex_file = it->second;
- if (dex_location_checksum == NULL ||
- oat_dex_file->GetDexFileLocationChecksum() == *dex_location_checksum) {
- return oat_dex_file;
+ // NOTE: We assume here that the canonical location for a given dex_location never
+ // changes. If it does (i.e. some symlink used by the filename changes) we may return
+ // an incorrect OatDexFile. As long as we have a checksum to check, we shall return
+ // an identical file or fail; otherwise we may see some unpredictable failures.
+
+ // TODO: Additional analysis of usage patterns to see if this can be simplified
+ // without any performance loss, for example by not doing the first lock-free lookup.
+
+ const OatFile::OatDexFile* oat_dex_file = nullptr;
+ StringPiece key(dex_location);
+ // Try to find the key cheaply in the oat_dex_files_ map which holds dex locations
+ // directly mentioned in the oat file and doesn't require locking.
+ auto primary_it = oat_dex_files_.find(key);
+ if (primary_it != oat_dex_files_.end()) {
+ oat_dex_file = primary_it->second;
+ DCHECK(oat_dex_file != nullptr);
+ } else {
+ // This dex_location is not one of the dex locations directly mentioned in the
+ // oat file. The correct lookup is via the canonical location but first see in
+ // the secondary_oat_dex_files_ whether we've looked up this location before.
+ MutexLock mu(Thread::Current(), secondary_lookup_lock_);
+ auto secondary_lb = secondary_oat_dex_files_.lower_bound(key);
+ if (secondary_lb != secondary_oat_dex_files_.end() && key == secondary_lb->first) {
+ oat_dex_file = secondary_lb->second; // May be nullptr.
+ } else {
+ // We haven't seen this dex_location before, we must check the canonical location.
+ if (UNLIKELY(oat_dex_files_by_canonical_location_.empty())) {
+ // Lazily fill in the oat_dex_files_by_canonical_location_.
+ for (const auto& entry : oat_dex_files_) {
+ const std::string& dex_location = entry.second->GetDexFileLocation();
+ string_cache_.emplace_back(DexFile::GetDexCanonicalLocation(dex_location.c_str()));
+ StringPiece canonical_location_key(string_cache_.back());
+ oat_dex_files_by_canonical_location_.Put(canonical_location_key, entry.second);
+ }
+ }
+ std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
+ StringPiece canonical_key(dex_canonical_location);
+ auto canonical_it = oat_dex_files_by_canonical_location_.find(canonical_key);
+ if (canonical_it != oat_dex_files_by_canonical_location_.end()) {
+ oat_dex_file = canonical_it->second;
+ } // else keep nullptr.
+
+ // Copy the key to the string_cache_ and store the result in secondary map.
+ string_cache_.emplace_back(key.data(), key.length());
+ StringPiece key_copy(string_cache_.back());
+ secondary_oat_dex_files_.PutBefore(secondary_lb, key_copy, oat_dex_file);
}
}
+ if (oat_dex_file != nullptr &&
+ (dex_location_checksum == nullptr ||
+ oat_dex_file->GetDexFileLocationChecksum() == *dex_location_checksum)) {
+ return oat_dex_file;
+ }
if (warn_if_not_found) {
+ std::string dex_canonical_location = DexFile::GetDexCanonicalLocation(dex_location);
std::string checksum("<unspecified>");
if (dex_location_checksum != NULL) {
checksum = StringPrintf("0x%08x", *dex_location_checksum);
}
LOG(WARNING) << "Failed to find OatDexFile for DexFile " << dex_location
+ << " ( canonical path " << dex_canonical_location << ")"
<< " with checksum " << checksum << " in OatFile " << GetLocation();
if (kIsDebugBuild) {
for (Table::const_iterator it = oat_dex_files_.begin(); it != oat_dex_files_.end(); ++it) {
LOG(WARNING) << "OatFile " << GetLocation()
<< " contains OatDexFile " << it->second->GetDexFileLocation()
+ << " (canonical path " << it->first << ")"
<< " with checksum 0x" << std::hex << it->second->GetDexFileLocationChecksum();
}
}
}
+
return NULL;
}
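
The rewritten GetOatDexFile() keeps the lock-free primary map for locations literally recorded in the oat file and adds, under secondary_lookup_lock_, a cache of previous lookups plus a lazily built map keyed by canonical location, so symlinked or otherwise aliased dex paths still resolve. A usage sketch with made-up values:

    // Usage sketch: a location that is not literally recorded in the oat file can
    // still resolve through the canonical-location map, and the result of the
    // lookup (hit or miss) is cached for the next query.
    uint32_t checksum = 0x12345678;  // e.g. from DexFile::GetChecksum().
    const OatFile::OatDexFile* oat_dex_file =
        oat_file->GetOatDexFile("/data/app/example/base.apk", &checksum,
                                /* warn_if_not_found */ false);
    if (oat_dex_file == nullptr) {
      // Either no entry matched, even after canonicalizing the location, or the
      // recorded checksum differs from 'checksum'.
    }
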
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 44f4466724..9710a2addd 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -17,9 +17,11 @@
#ifndef ART_RUNTIME_OAT_FILE_H_
#define ART_RUNTIME_OAT_FILE_H_
+#include <list>
#include <string>
#include <vector>
+#include "base/mutex.h"
#include "base/stringpiece.h"
#include "dex_file.h"
#include "invoke_type.h"
@@ -52,6 +54,8 @@ class OatFile {
// ImageWriter which wants to open a writable version from an existing
// file descriptor for patching.
static OatFile* OpenWritable(File* file, const std::string& location, std::string* error_msg);
+ // Opens an oat file from an already opened File. Maps it PROT_READ, MAP_PRIVATE.
+ static OatFile* OpenReadable(File* file, const std::string& location, std::string* error_msg);
// Open an oat file backed by a std::vector with the given location.
static OatFile* OpenMemory(std::vector<uint8_t>& oat_contents,
@@ -225,7 +229,8 @@ class OatFile {
const OatDexFile* GetOatDexFile(const char* dex_location,
const uint32_t* const dex_location_checksum,
- bool exception_if_not_found = true) const;
+ bool exception_if_not_found = true) const
+ LOCKS_EXCLUDED(secondary_lookup_lock_);
std::vector<const OatDexFile*> GetOatDexFiles() const;
@@ -277,10 +282,38 @@ class OatFile {
// dlopen handle during runtime.
void* dlopen_handle_;
- // NOTE: We use a StringPiece as the key type to avoid a memory allocation on every lookup
- // with a const char* key.
+ // NOTE: We use a StringPiece as the key type to avoid a memory allocation on every
+ // lookup with a const char* key. The StringPiece doesn't own its backing storage,
+ // therefore we're using the OatDexFile::dex_file_location_ as the backing storage
+ // for keys in oat_dex_files_ and the string_cache_ entries for the backing storage
+ // of keys in secondary_oat_dex_files_ and oat_dex_files_by_canonical_location_.
typedef SafeMap<StringPiece, const OatDexFile*> Table;
- Table oat_dex_files_;
+
+ // Map each plain dex file location retrieved from the oat file to its OatDexFile.
+ // This map doesn't change after it's constructed in Setup() and therefore doesn't
+ // need any locking and provides the cheapest dex file lookup for GetOatDexFile()
+ // for a very frequent use case. Never contains a nullptr value.
+ Table oat_dex_files_; // Owns the OatDexFile* values.
+
+ // Lock guarding all members needed for secondary lookup in GetOatDexFile().
+ mutable Mutex secondary_lookup_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+ // If the primary oat_dex_files_ lookup fails, use a secondary map. This map stores
+ // the results of all previous secondary lookups, whether successful (non-null) or
+ // failed (null). If it doesn't contain an entry we need to calculate the canonical
+ // location and use oat_dex_files_by_canonical_location_.
+ mutable Table secondary_oat_dex_files_ GUARDED_BY(secondary_lookup_lock_);
+
+ // Map the canonical location to an OatDexFile. This lazily constructed map is used
+ // when we're doing the secondary lookup for a given location for the first time.
+ mutable Table oat_dex_files_by_canonical_location_ GUARDED_BY(secondary_lookup_lock_);
+
+ // Cache of strings. Contains the backing storage for keys in the secondary_oat_dex_files_
+ // and the lazily initialized oat_dex_files_by_canonical_location_.
+ // NOTE: We're keeping references to contained strings in form of StringPiece and adding
+ // new strings to the end. The adding of a new element must not touch any previously stored
+ // elements. std::list<> and std::deque<> satisfy this requirement, std::vector<> doesn't.
+ mutable std::list<std::string> string_cache_ GUARDED_BY(secondary_lookup_lock_);
friend class OatClass;
friend class OatDexFile;
diff --git a/runtime/object_callbacks.h b/runtime/object_callbacks.h
index 0e6f4d80ae..592deed1a7 100644
--- a/runtime/object_callbacks.h
+++ b/runtime/object_callbacks.h
@@ -24,6 +24,8 @@
// For size_t.
#include <stdlib.h>
+#include "base/macros.h"
+
namespace art {
namespace mirror {
class Class;
@@ -57,8 +59,7 @@ typedef void (RootCallback)(mirror::Object** root, void* arg, uint32_t thread_id
// A callback for visiting an object in the heap.
typedef void (ObjectCallback)(mirror::Object* obj, void* arg);
// A callback used for marking an object, returns the new address of the object if the object moved.
-typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg)
- __attribute__((warn_unused_result));
+typedef mirror::Object* (MarkObjectCallback)(mirror::Object* obj, void* arg) WARN_UNUSED;
// A callback for verifying roots.
typedef void (VerifyRootCallback)(const mirror::Object* root, void* arg, size_t vreg,
const StackVisitor* visitor, RootType root_type);
@@ -68,13 +69,12 @@ typedef void (DelayReferenceReferentCallback)(mirror::Class* klass, mirror::Refe
// A callback for testing if an object is marked, returns nullptr if not marked, otherwise the new
// address the object (if the object didn't move, returns the object input parameter).
-typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg)
- __attribute__((warn_unused_result));
+typedef mirror::Object* (IsMarkedCallback)(mirror::Object* object, void* arg) WARN_UNUSED;
// Returns true if the object in the heap reference is marked, if it is marked and has moved the
// callback updates the heap reference contain the new value.
typedef bool (IsHeapReferenceMarkedCallback)(mirror::HeapReference<mirror::Object>* object,
- void* arg) __attribute__((warn_unused_result));
+ void* arg) WARN_UNUSED;
typedef void (ProcessMarkStackCallback)(void* arg);
} // namespace art
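
The typedefs above now spell the attribute through WARN_UNUSED from base/macros.h (newly included in this file); presumably the macro is simply:

    // Assumption about the macro's definition:
    #define WARN_UNUSED __attribute__((warn_unused_result))
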
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 577691c90b..12f9f33f5f 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -177,6 +177,7 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
}
// -Xcheck:jni is off by default for regular builds but on by default in debug builds.
check_jni_ = kIsDebugBuild;
+ force_copy_ = false;
heap_initial_size_ = gc::Heap::kDefaultInitialSize;
heap_maximum_size_ = gc::Heap::kDefaultMaximumSize;
@@ -221,6 +222,7 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
compiler_callbacks_ = nullptr;
is_zygote_ = false;
+ must_relocate_ = kDefaultMustRelocate;
if (kPoisonHeapReferences) {
// kPoisonHeapReferences currently works only with the interpreter only.
// TODO: make it work with the compiler.
@@ -265,38 +267,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
verify_ = true;
image_isa_ = kRuntimeISA;
- // Default to explicit checks. Switch off with -implicit-checks:.
- // or setprop dalvik.vm.implicit_checks check1,check2,...
-#ifdef HAVE_ANDROID_OS
- {
- char buf[PROP_VALUE_MAX];
- property_get("dalvik.vm.implicit_checks", buf, "null,stack");
- std::string checks(buf);
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else if (val == "null") {
- explicit_checks_ &= ~kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ &= ~kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ &= ~kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = 0;
- }
- }
- }
-#else
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
-#endif
-
for (size_t i = 0; i < options.size(); ++i) {
if (true && options[0].first == "-Xzygote") {
LOG(INFO) << "option[" << i << "]=" << options[i].first;
@@ -312,6 +282,7 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
Exit(0);
} else if (StartsWith(option, "-Xbootclasspath:")) {
boot_class_path_string_ = option.substr(strlen("-Xbootclasspath:")).data();
+ LOG(INFO) << "setting boot class path to " << boot_class_path_string_;
} else if (option == "-classpath" || option == "-cp") {
// TODO: support -Djava.class.path
i++;
@@ -330,6 +301,8 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
}
} else if (StartsWith(option, "-Xcheck:jni")) {
check_jni_ = true;
+ } else if (StartsWith(option, "-Xjniopts:forcecopy")) {
+ force_copy_ = true;
} else if (StartsWith(option, "-Xrunjdwp:") || StartsWith(option, "-agentlib:jdwp=")) {
std::string tail(option.substr(option[1] == 'X' ? 10 : 15));
// TODO: move parsing logic out of Dbg
@@ -421,6 +394,7 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
ignore_max_footprint_ = true;
} else if (option == "-XX:LowMemoryMode") {
low_memory_mode_ = true;
+ // TODO Might want to turn off must_relocate here.
} else if (option == "-XX:UseTLAB") {
use_tlab_ = true;
} else if (option == "-XX:EnableHSpaceCompactForOOM") {
@@ -439,6 +413,14 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
reinterpret_cast<const char*>(options[i].second));
} else if (option == "-Xzygote") {
is_zygote_ = true;
+ } else if (StartsWith(option, "-Xpatchoat:")) {
+ if (!ParseStringAfterChar(option, ':', &patchoat_executable_)) {
+ return false;
+ }
+ } else if (option == "-Xrelocate") {
+ must_relocate_ = true;
+ } else if (option == "-Xnorelocate") {
+ must_relocate_ = false;
} else if (option == "-Xint") {
interpreter_only_ = true;
} else if (StartsWith(option, "-Xgc:")) {
@@ -589,54 +571,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
if (!ParseUnsignedInteger(option, ':', &profiler_options_.max_stack_depth_)) {
return false;
}
- } else if (StartsWith(option, "-implicit-checks:")) {
- std::string checks;
- if (!ParseStringAfterChar(option, ':', &checks)) {
- return false;
- }
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else if (val == "null") {
- explicit_checks_ &= ~kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ &= ~kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ &= ~kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = 0;
- } else {
- return false;
- }
- }
- } else if (StartsWith(option, "-explicit-checks:")) {
- std::string checks;
- if (!ParseStringAfterChar(option, ':', &checks)) {
- return false;
- }
- std::vector<std::string> checkvec;
- Split(checks, ',', checkvec);
- for (auto& str : checkvec) {
- std::string val = Trim(str);
- if (val == "none") {
- explicit_checks_ = 0;
- } else if (val == "null") {
- explicit_checks_ |= kExplicitNullCheck;
- } else if (val == "suspend") {
- explicit_checks_ |= kExplicitSuspendCheck;
- } else if (val == "stack") {
- explicit_checks_ |= kExplicitStackOverflowCheck;
- } else if (val == "all") {
- explicit_checks_ = kExplicitNullCheck | kExplicitSuspendCheck |
- kExplicitStackOverflowCheck;
- } else {
- return false;
- }
- }
} else if (StartsWith(option, "-Xcompiler:")) {
if (!ParseStringAfterChar(option, ':', &compiler_executable_)) {
return false;
@@ -665,6 +599,10 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
Usage("Unknown -Xverify option %s\n", verify_mode.c_str());
return false;
}
+ } else if (StartsWith(option, "-XX:NativeBridge=")) {
+ if (!ParseStringAfterChar(option, '=', &native_bridge_library_string_)) {
+ return false;
+ }
} else if (StartsWith(option, "-ea") ||
StartsWith(option, "-da") ||
StartsWith(option, "-enableassertions") ||
@@ -678,7 +616,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
StartsWith(option, "-Xint:") ||
StartsWith(option, "-Xdexopt:") ||
(option == "-Xnoquithandler") ||
- StartsWith(option, "-Xjniopts:") ||
StartsWith(option, "-Xjnigreflimit:") ||
(option == "-Xgenregmap") ||
(option == "-Xnogenregmap") ||
@@ -837,6 +774,8 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xcompiler:filename\n");
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
+ UsageMessage(stream, " -Xpatchoat:filename\n");
+ UsageMessage(stream, " -X[no]relocate\n");
UsageMessage(stream, "\n");
UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
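The new -Xpatchoat:, -Xrelocate/-Xnorelocate and -XX:NativeBridge= options handled above all lean on the same split-at-delimiter parsing. A minimal sketch of that pattern (an illustration under the assumption that the helper simply splits at the first delimiter, not ART's ParseStringAfterChar itself):

#include <string>

// Returns true and writes everything after the first `delim` into *out,
// e.g. "-Xpatchoat:/system/bin/patchoat" yields "/system/bin/patchoat".
static bool SplitAfterChar(const std::string& option, char delim, std::string* out) {
  std::string::size_type pos = option.find(delim);
  if (pos == std::string::npos) {
    return false;
  }
  *out = option.substr(pos + 1);
  return true;
}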
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index b1de62a54f..c328ca7ef5 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -44,9 +44,13 @@ class ParsedOptions {
std::string class_path_string_;
std::string image_;
bool check_jni_;
+ bool force_copy_;
std::string jni_trace_;
+ std::string native_bridge_library_string_;
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
+ bool must_relocate_;
+ std::string patchoat_executable_;
bool interpreter_only_;
bool is_explicit_gc_disabled_;
bool use_tlab_;
@@ -93,10 +97,6 @@ class ParsedOptions {
bool verify_;
InstructionSet image_isa_;
- static constexpr uint32_t kExplicitNullCheck = 1;
- static constexpr uint32_t kExplicitSuspendCheck = 2;
- static constexpr uint32_t kExplicitStackOverflowCheck = 4;
- uint32_t explicit_checks_;
// Whether or not we use homogeneous space compaction to avoid OOM errors. If enabled,
// the heap will attempt to create an extra space which enables compacting from a malloc space to
// another malloc space when we are about to throw OOM.
diff --git a/runtime/primitive.cc b/runtime/primitive.cc
index 16ca0fe1f6..a639f93f45 100644
--- a/runtime/primitive.cc
+++ b/runtime/primitive.cc
@@ -30,6 +30,7 @@ static const char* kTypeNames[] = {
"PrimDouble",
"PrimVoid",
};
+
std::ostream& operator<<(std::ostream& os, const Primitive::Type& type) {
int32_t int_type = static_cast<int32_t>(type);
if (type >= Primitive::kPrimNot && type <= Primitive::kPrimVoid) {
diff --git a/runtime/primitive.h b/runtime/primitive.h
index b436bd2165..a36e9cb31b 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -21,12 +21,10 @@
#include "base/logging.h"
#include "base/macros.h"
-#include "mirror/object_reference.h"
namespace art {
-namespace mirror {
-class Object;
-} // namespace mirror
+
+static constexpr size_t kObjectReferenceSize = 4;
class Primitive {
public:
@@ -79,7 +77,7 @@ class Primitive {
case kPrimFloat: return 4;
case kPrimLong:
case kPrimDouble: return 8;
- case kPrimNot: return sizeof(mirror::HeapReference<mirror::Object>);
+ case kPrimNot: return kObjectReferenceSize;
default:
LOG(FATAL) << "Invalid type " << static_cast<int>(type);
return 0;
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index bd6656dda1..308142157c 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -17,14 +17,14 @@
#include <jni.h>
#include <vector>
-#include "common_compiler_test.h"
+#include "common_runtime_test.h"
#include "field_helper.h"
#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
-class ProxyTest : public CommonCompilerTest {
+class ProxyTest : public CommonRuntimeTest {
public:
// Generate a proxy class with the given name and interfaces. This is a simplification from what
// libcore does to fit to our test needs. We do not check for duplicated interfaces or methods and
@@ -103,6 +103,12 @@ class ProxyTest : public CommonCompilerTest {
soa.Self()->AssertNoPendingException();
return proxyClass;
}
+
+ protected:
+ void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
+ options->push_back(std::make_pair(StringPrintf("-Ximage:%s", GetLibCoreOatFileName().c_str()),
+ nullptr));
+ }
};
// Creates a proxy class and check ClassHelper works correctly.
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 982553d3af..c4d51cb173 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -48,7 +48,12 @@ enum InlineMethodOpcode : uint16_t {
kIntrinsicMinMaxFloat,
kIntrinsicMinMaxDouble,
kIntrinsicSqrt,
- kIntrinsicGet,
+ kIntrinsicCeil,
+ kIntrinsicFloor,
+ kIntrinsicRint,
+ kIntrinsicRoundFloat,
+ kIntrinsicRoundDouble,
+ kIntrinsicReferenceGet,
kIntrinsicCharAt,
kIntrinsicCompareTo,
kIntrinsicIsEmptyOrLength,
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 6581f9b627..41d69894d5 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -194,6 +194,10 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
}
private:
+ static VRegKind GetVRegKind(uint16_t reg, const std::vector<int32_t>& kinds) {
+ return static_cast<VRegKind>(kinds.at(reg * 2));
+ }
+
bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
CHECK(code_item != nullptr);
@@ -210,9 +214,9 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
&m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
m->GetAccessFlags(), false, true, true);
verifier.Verify();
- std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
+ const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc));
for (uint16_t reg = 0; reg < num_regs; ++reg) {
- VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
+ VRegKind kind = GetVRegKind(reg, kinds);
switch (kind) {
case kUndefined:
new_frame->SetVReg(reg, 0xEBADDE09);
@@ -224,6 +228,36 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
new_frame->SetVRegReference(reg,
reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
break;
+ case kLongLoVReg:
+ if (GetVRegKind(reg + 1, kinds) == kLongHiVReg) {
+ // Treat it as a "long" register pair.
+ new_frame->SetVRegLong(reg, GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg));
+ } else {
+ new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+ }
+ break;
+ case kLongHiVReg:
+ if (GetVRegKind(reg - 1, kinds) == kLongLoVReg) {
+ // Nothing to do: we treated it as a "long" register pair.
+ } else {
+ new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+ }
+ break;
+ case kDoubleLoVReg:
+ if (GetVRegKind(reg + 1, kinds) == kDoubleHiVReg) {
+ // Treat it as a "double" register pair.
+ new_frame->SetVRegLong(reg, GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg));
+ } else {
+ new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+ }
+ break;
+ case kDoubleHiVReg:
+ if (GetVRegKind(reg - 1, kinds) == kDoubleLoVReg) {
+ // Nothing to do: we treated it as a "double" register pair.
+ } else {
+ new_frame->SetVReg(reg, GetVReg(m, reg, kind));
+ }
+ break;
default:
new_frame->SetVReg(reg, GetVReg(m, reg, kind));
break;
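The long/double cases above only form a 64-bit value when the verifier reported matching lo/hi kinds for adjacent registers; otherwise each half is restored as a plain 32-bit vreg. A small sketch of that pairing test (illustrative only, not a helper from the patch):

// True when two adjacent vreg kinds describe the halves of one wide value.
static bool IsWidePair(VRegKind lo, VRegKind hi) {
  return (lo == kLongLoVReg && hi == kLongHiVReg) ||
         (lo == kDoubleLoVReg && hi == kDoubleHiVReg);
}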
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index cd35863543..70aba9bbf1 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -24,7 +24,6 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/string-inl.h"
-#include "read_barrier.h"
#include "thread.h"
#include "utils.h"
@@ -46,14 +45,13 @@ void ReferenceTable::Add(mirror::Object* obj) {
LOG(FATAL) << "ReferenceTable '" << name_ << "' "
<< "overflowed (" << max_size_ << " entries)";
}
- entries_.push_back(obj);
+ entries_.push_back(GcRoot<mirror::Object>(obj));
}
void ReferenceTable::Remove(mirror::Object* obj) {
// We iterate backwards on the assumption that references are LIFO.
for (int i = entries_.size() - 1; i >= 0; --i) {
- mirror::Object* entry =
- ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries_[i]);
+ mirror::Object* entry = entries_[i].Read();
if (entry == obj) {
entries_.erase(entries_.begin() + i);
return;
@@ -71,10 +69,12 @@ static size_t GetElementCount(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::
}
struct ObjectComparator {
- bool operator()(mirror::Object* obj1, mirror::Object* obj2)
+ bool operator()(GcRoot<mirror::Object> root1, GcRoot<mirror::Object> root2)
// TODO: enable analysis when analysis can work with the STL.
NO_THREAD_SAFETY_ANALYSIS {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ mirror::Object* obj1 = root1.Read<kWithoutReadBarrier>();
+ mirror::Object* obj2 = root2.Read<kWithoutReadBarrier>();
// Ensure null references and cleared jweaks appear at the end.
if (obj1 == NULL) {
return true;
@@ -163,8 +163,7 @@ void ReferenceTable::Dump(std::ostream& os, Table& entries) {
}
os << " Last " << (count - first) << " entries (of " << count << "):\n";
for (int idx = count - 1; idx >= first; --idx) {
- mirror::Object* ref =
- ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries[idx]);
+ mirror::Object* ref = entries[idx].Read();
if (ref == NULL) {
continue;
}
@@ -200,17 +199,17 @@ void ReferenceTable::Dump(std::ostream& os, Table& entries) {
// Make a copy of the table and sort it.
Table sorted_entries;
for (size_t i = 0; i < entries.size(); ++i) {
- mirror::Object* entry =
- ReadBarrier::BarrierForRoot<mirror::Object, kWithReadBarrier>(&entries[i]);
- sorted_entries.push_back(entry);
+ mirror::Object* entry = entries[i].Read();
+ sorted_entries.push_back(GcRoot<mirror::Object>(entry));
}
std::sort(sorted_entries.begin(), sorted_entries.end(), ObjectComparator());
// Remove any uninteresting stuff from the list. The sort moved them all to the end.
- while (!sorted_entries.empty() && sorted_entries.back() == NULL) {
+ while (!sorted_entries.empty() && sorted_entries.back().IsNull()) {
sorted_entries.pop_back();
}
- while (!sorted_entries.empty() && sorted_entries.back() == kClearedJniWeakGlobal) {
+ while (!sorted_entries.empty() &&
+ sorted_entries.back().Read<kWithoutReadBarrier>() == kClearedJniWeakGlobal) {
sorted_entries.pop_back();
}
if (sorted_entries.empty()) {
@@ -222,8 +221,8 @@ void ReferenceTable::Dump(std::ostream& os, Table& entries) {
size_t equiv = 0;
size_t identical = 0;
for (size_t idx = 1; idx < count; idx++) {
- mirror::Object* prev = sorted_entries[idx-1];
- mirror::Object* current = sorted_entries[idx];
+ mirror::Object* prev = sorted_entries[idx-1].Read<kWithoutReadBarrier>();
+ mirror::Object* current = sorted_entries[idx].Read<kWithoutReadBarrier>();
size_t element_count = GetElementCount(prev);
if (current == prev) {
// Same reference, added more than once.
@@ -238,13 +237,15 @@ void ReferenceTable::Dump(std::ostream& os, Table& entries) {
}
}
// Handle the last entry.
- DumpSummaryLine(os, sorted_entries.back(), GetElementCount(sorted_entries.back()), identical, equiv);
+ DumpSummaryLine(os, sorted_entries.back().Read<kWithoutReadBarrier>(),
+ GetElementCount(sorted_entries.back().Read<kWithoutReadBarrier>()),
+ identical, equiv);
}
void ReferenceTable::VisitRoots(RootCallback* visitor, void* arg, uint32_t tid,
RootType root_type) {
- for (auto& ref : entries_) {
- visitor(&ref, arg, tid, root_type);
+ for (GcRoot<mirror::Object>& root : entries_) {
+ root.VisitRoot(visitor, arg, tid, root_type);
}
}
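The ReferenceTable changes replace per-call-site read barriers with GcRoot, whose Read() applies the barrier internally. A toy, self-contained illustration of that pattern (not the ART class, just its shape as used above):

// Toy stand-in for GcRoot<T>: the barrier lives inside Read(), so callers
// such as ReferenceTable::Remove() no longer spell it out themselves.
template <typename T>
class GcRootSketch {
 public:
  explicit GcRootSketch(T* ref) : ref_(ref) {}
  T* Read() const {
    // A real implementation would apply the read barrier here.
    return ref_;
  }
  bool IsNull() const { return ref_ == nullptr; }
 private:
  T* ref_;
};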
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index 1cd0999f26..876544238a 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -23,6 +23,7 @@
#include <vector>
#include "base/mutex.h"
+#include "gc_root.h"
#include "object_callbacks.h"
namespace art {
@@ -50,7 +51,7 @@ class ReferenceTable {
void VisitRoots(RootCallback* visitor, void* arg, uint32_t tid, RootType root_type);
private:
- typedef std::vector<mirror::Object*> Table;
+ typedef std::vector<GcRoot<mirror::Object>> Table;
static void Dump(std::ostream& os, Table& entries)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
friend class IndirectReferenceTable; // For Dump.
diff --git a/runtime/reference_table_test.cc b/runtime/reference_table_test.cc
index d2877f98d1..db98e1fc9d 100644
--- a/runtime/reference_table_test.cc
+++ b/runtime/reference_table_test.cc
@@ -17,7 +17,7 @@
#include "reference_table.h"
#include "common_runtime_test.h"
-#include "mirror/array.h"
+#include "mirror/array-inl.h"
#include "mirror/string.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 0af4117783..0169cccbf0 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -20,7 +20,7 @@
#include "common_throws.h"
#include "dex_file-inl.h"
#include "jni_internal.h"
-#include "method_helper.h"
+#include "method_helper-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -347,7 +347,7 @@ class ArgArray {
std::unique_ptr<uint32_t[]> large_arg_array_;
};
-static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
+static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t* args)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::TypeList* params = m->GetParameterTypeList();
if (params == nullptr) {
@@ -375,11 +375,11 @@ static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
self->ClearException();
++error_count;
} else if (!param_type->IsPrimitive()) {
- // TODO: check primitives are in range.
// TODO: There is a compaction bug here since GetClassFromTypeIdx can cause thread suspension,
// this is a hard to fix problem since the args can contain Object*, we need to save and
// restore them by using a visitor similar to the ones used in the trampoline entrypoints.
- mirror::Object* argument = reinterpret_cast<mirror::Object*>(args[i + offset]);
+ mirror::Object* argument =
+ (reinterpret_cast<StackReference<mirror::Object>*>(&args[i + offset]))->AsMirrorPtr();
if (argument != nullptr && !argument->InstanceOf(param_type)) {
LOG(ERROR) << "JNI ERROR (app bug): attempt to pass an instance of "
<< PrettyTypeOf(argument) << " as argument " << (i + 1)
@@ -388,13 +388,40 @@ static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
}
} else if (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble()) {
offset++;
+ } else {
+ int32_t arg = static_cast<int32_t>(args[i + offset]);
+ if (param_type->IsPrimitiveBoolean()) {
+ if (arg != JNI_TRUE && arg != JNI_FALSE) {
+ LOG(ERROR) << "JNI ERROR (app bug): expected jboolean (0/1) but got value of "
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ ++error_count;
+ }
+ } else if (param_type->IsPrimitiveByte()) {
+ if (arg < -128 || arg > 127) {
+ LOG(ERROR) << "JNI ERROR (app bug): expected jbyte but got value of "
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ ++error_count;
+ }
+ } else if (param_type->IsPrimitiveChar()) {
+ if (args[i + offset] > 0xFFFF) {
+ LOG(ERROR) << "JNI ERROR (app bug): expected jchar but got value of "
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ ++error_count;
+ }
+ } else if (param_type->IsPrimitiveShort()) {
+ if (arg < -32768 || arg > 32767) {
+ LOG(ERROR) << "JNI ERROR (app bug): expected jshort but got value of "
+ << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+ ++error_count;
+ }
+ }
}
}
- if (error_count > 0) {
+ if (UNLIKELY(error_count > 0)) {
// TODO: pass the JNI function name (such as "CallVoidMethodV") through so we can call JniAbort
// with an argument.
- JniAbortF(nullptr, "bad arguments passed to %s (see above for details)",
- PrettyMethod(h_m.Get()).c_str());
+ vm->JniAbortF(nullptr, "bad arguments passed to %s (see above for details)",
+ PrettyMethod(h_m.Get()).c_str());
}
}
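The new -Xcheck:jni argument validation above assumes narrow integral arguments arrive widened to 32 bits and simply range-checks them. The bounds reduce to the following predicates (a sketch with hypothetical helper names, not the ART code):

#include <cstdint>

// Range predicates matching the checks above (assumed 32-bit widened args).
static bool FitsJBoolean(int32_t v) { return v == 0 || v == 1; }
static bool FitsJByte(int32_t v)    { return v >= -128 && v <= 127; }
static bool FitsJChar(uint32_t v)   { return v <= 0xFFFF; }
static bool FitsJShort(int32_t v)   { return v >= -32768 && v <= 32767; }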
@@ -411,7 +438,7 @@ static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa,
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
uint32_t* args = arg_array->GetArray();
if (UNLIKELY(soa.Env()->check_jni)) {
- CheckMethodArguments(method, args);
+ CheckMethodArguments(soa.Vm(), method, args);
}
method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
}
@@ -567,11 +594,6 @@ bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) {
return true;
}
-static std::string PrettyDescriptor(Primitive::Type type) {
- std::string descriptor_string(Primitive::Descriptor(type));
- return PrettyDescriptor(descriptor_string);
-}
-
bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
Primitive::Type srcType, Primitive::Type dstType,
const JValue& src, JValue* dst) {
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 2c54c067fd..61370c650e 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_REFLECTION_H_
#define ART_RUNTIME_REFLECTION_H_
+#include "base/mutex.h"
#include "jni.h"
#include "primitive.h"
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index f776bcd336..ac9026b605 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -41,32 +41,29 @@ inline QuickMethodFrameInfo Runtime::GetRuntimeMethodFrameInfo(mirror::ArtMethod
inline mirror::ArtMethod* Runtime::GetResolutionMethod() {
CHECK(HasResolutionMethod());
- return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(&resolution_method_);
+ return resolution_method_.Read();
}
inline mirror::ArtMethod* Runtime::GetImtConflictMethod() {
CHECK(HasImtConflictMethod());
- return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(&imt_conflict_method_);
+ return imt_conflict_method_.Read();
}
inline mirror::ObjectArray<mirror::ArtMethod>* Runtime::GetDefaultImt()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(HasDefaultImt());
- return ReadBarrier::BarrierForRoot<mirror::ObjectArray<mirror::ArtMethod>, kWithReadBarrier>(
- &default_imt_);
+ return default_imt_.Read();
}
inline mirror::ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(HasCalleeSaveMethod(type));
- return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(
- &callee_save_methods_[type]);
+ return callee_save_methods_[type].Read();
}
inline mirror::ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ReadBarrier::BarrierForRoot<mirror::ArtMethod, kWithReadBarrier>(
- &callee_save_methods_[type]);
+ return callee_save_methods_[type].Read();
}
} // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 0ddd2aed4a..d677729b7e 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -63,6 +63,7 @@
#include "mirror/stack_trace_element.h"
#include "mirror/throwable.h"
#include "monitor.h"
+#include "native_bridge.h"
#include "parsed_options.h"
#include "oat_file.h"
#include "quick/quick_method_frame_info.h"
@@ -94,13 +95,10 @@ const char* Runtime::kDefaultInstructionSetFeatures =
Runtime* Runtime::instance_ = NULL;
Runtime::Runtime()
- : pre_allocated_OutOfMemoryError_(nullptr),
- resolution_method_(nullptr),
- imt_conflict_method_(nullptr),
- default_imt_(nullptr),
- instruction_set_(kNone),
+ : instruction_set_(kNone),
compiler_callbacks_(nullptr),
is_zygote_(false),
+ must_relocate_(false),
is_concurrent_gc_enabled_(true),
is_explicit_gc_disabled_(false),
default_stack_size_(0),
@@ -141,20 +139,13 @@ Runtime::Runtime()
suspend_handler_(nullptr),
stack_overflow_handler_(nullptr),
verify_(false),
- target_sdk_version_(0) {
- for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- callee_save_methods_[i] = nullptr;
- }
+ target_sdk_version_(0),
+ implicit_null_checks_(false),
+ implicit_so_checks_(false),
+ implicit_suspend_checks_(false) {
}
Runtime::~Runtime() {
- if (method_trace_ && Thread::Current() == nullptr) {
- // We need a current thread to shutdown method tracing: re-attach it now.
- JNIEnv* unused_env;
- if (GetJavaVM()->AttachCurrentThread(&unused_env, nullptr) != JNI_OK) {
- LOG(ERROR) << "Could not attach current thread before runtime shutdown.";
- }
- }
if (dump_gc_performance_on_shutdown_) {
// This can't be called from the Heap destructor below because it
// could call RosAlloc::InspectAll() which needs the thread_list
@@ -348,7 +339,7 @@ jobject CreateSystemClassLoader() {
ScopedObjectAccess soa(Thread::Current());
ClassLinker* cl = Runtime::Current()->GetClassLinker();
- StackHandleScope<3> hs(soa.Self());
+ StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> class_loader_class(
hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
CHECK(cl->EnsureInitialized(class_loader_class, true, true));
@@ -358,15 +349,12 @@ jobject CreateSystemClassLoader() {
CHECK(getSystemClassLoader != NULL);
JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
- Handle<mirror::ClassLoader> class_loader(
- hs.NewHandle(down_cast<mirror::ClassLoader*>(result.GetL())));
- CHECK(class_loader.Get() != nullptr);
JNIEnv* env = soa.Self()->GetJniEnv();
ScopedLocalRef<jobject> system_class_loader(env,
- soa.AddLocalReference<jobject>(class_loader.Get()));
+ soa.AddLocalReference<jobject>(result.GetL()));
CHECK(system_class_loader.get() != nullptr);
- soa.Self()->SetClassLoaderOverride(class_loader.Get());
+ soa.Self()->SetClassLoaderOverride(system_class_loader.get());
Handle<mirror::Class> thread_class(
hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
@@ -377,11 +365,21 @@ jobject CreateSystemClassLoader() {
CHECK(contextClassLoader != NULL);
// We can't run in a transaction yet.
- contextClassLoader->SetObject<false>(soa.Self()->GetPeer(), class_loader.Get());
+ contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
+ soa.Decode<mirror::ClassLoader*>(system_class_loader.get()));
return env->NewGlobalRef(system_class_loader.get());
}
+std::string Runtime::GetPatchoatExecutable() const {
+ if (!patchoat_executable_.empty()) {
+ return patchoat_executable_;
+ }
+ std::string patchoat_executable_(GetAndroidRoot());
+ patchoat_executable_ += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
+ return patchoat_executable_;
+}
+
std::string Runtime::GetCompilerExecutable() const {
if (!compiler_executable_.empty()) {
return compiler_executable_;
@@ -554,6 +552,8 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
properties_ = options->properties_;
compiler_callbacks_ = options->compiler_callbacks_;
+ patchoat_executable_ = options->patchoat_executable_;
+ must_relocate_ = options->must_relocate_;
is_zygote_ = options->is_zygote_;
is_explicit_gc_disabled_ = options->is_explicit_gc_disabled_;
@@ -581,41 +581,6 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
GetInstrumentation()->ForceInterpretOnly();
}
- bool implicit_checks_supported = false;
- switch (kRuntimeISA) {
- case kArm:
- case kThumb2:
- implicit_checks_supported = true;
- break;
- default:
- break;
- }
-
- if (!options->interpreter_only_ && implicit_checks_supported &&
- (options->explicit_checks_ != (ParsedOptions::kExplicitSuspendCheck |
- ParsedOptions::kExplicitNullCheck |
- ParsedOptions::kExplicitStackOverflowCheck) || kEnableJavaStackTraceHandler)) {
- fault_manager.Init();
-
- // These need to be in a specific order. The null point check handler must be
- // after the suspend check and stack overflow check handlers.
- if ((options->explicit_checks_ & ParsedOptions::kExplicitSuspendCheck) == 0) {
- suspend_handler_ = new SuspensionHandler(&fault_manager);
- }
-
- if ((options->explicit_checks_ & ParsedOptions::kExplicitStackOverflowCheck) == 0) {
- stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
- }
-
- if ((options->explicit_checks_ & ParsedOptions::kExplicitNullCheck) == 0) {
- null_pointer_handler_ = new NullPointerHandler(&fault_manager);
- }
-
- if (kEnableJavaStackTraceHandler) {
- new JavaStackTraceHandler(&fault_manager);
- }
- }
-
heap_ = new gc::Heap(options->heap_initial_size_,
options->heap_growth_limit_,
options->heap_min_free_,
@@ -648,6 +613,43 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
BlockSignals();
InitPlatformSignalHandlers();
+ // Change the implicit checks flags based on runtime architecture.
+ switch (kRuntimeISA) {
+ case kArm:
+ case kThumb2:
+ case kX86:
+ case kArm64:
+ case kX86_64:
+ implicit_null_checks_ = true;
+ implicit_so_checks_ = true;
+ break;
+ default:
+ // Keep the defaults.
+ break;
+ }
+
+ if (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_) {
+ fault_manager.Init();
+
+ // These need to be in a specific order. The null pointer check handler must be
+ // after the suspend check and stack overflow check handlers.
+ if (implicit_suspend_checks_) {
+ suspend_handler_ = new SuspensionHandler(&fault_manager);
+ }
+
+ if (implicit_so_checks_) {
+ stack_overflow_handler_ = new StackOverflowHandler(&fault_manager);
+ }
+
+ if (implicit_null_checks_) {
+ null_pointer_handler_ = new NullPointerHandler(&fault_manager);
+ }
+
+ if (kEnableJavaStackTraceHandler) {
+ new JavaStackTraceHandler(&fault_manager);
+ }
+ }
+
java_vm_ = new JavaVMExt(this, options.get());
Thread::Startup();
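With implicit checks, compiled code omits the explicit test and a fault handler decides what a SIGSEGV means; the registration order above matters because the null-pointer handler accepts almost any managed-code fault and must therefore run last. A conceptual sketch of that dispatch (not ART's fault_manager, just the ordering idea):

#include <vector>

struct FaultHandlerSketch {
  virtual bool Handle(void* fault_addr) = 0;
  virtual ~FaultHandlerSketch() {}
};

// Handlers are consulted in registration order: suspend, stack overflow,
// then null check. The first one that claims the fault wins.
bool Dispatch(const std::vector<FaultHandlerSketch*>& handlers, void* fault_addr) {
  for (FaultHandlerSketch* h : handlers) {
    if (h->Handle(fault_addr)) {
      return true;
    }
  }
  return false;  // Not a managed-code fault; fall back to normal crash handling.
}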
@@ -700,9 +702,12 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
self->ThrowNewException(ThrowLocation(), "Ljava/lang/OutOfMemoryError;",
"OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
"no stack available");
- pre_allocated_OutOfMemoryError_ = self->GetException(NULL);
+ pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException(NULL));
self->ClearException();
+ // Look for a native bridge.
+ SetNativeBridgeLibraryString(options->native_bridge_library_string_);
+
VLOG(startup) << "Runtime::Init exiting";
return true;
}
@@ -729,13 +734,9 @@ void Runtime::InitNativeMethods() {
{
std::string mapped_name(StringPrintf(OS_SHARED_LIB_FORMAT_STR, "javacore"));
std::string reason;
- self->TransitionFromSuspendedToRunnable();
- StackHandleScope<1> hs(self);
- auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
- if (!instance_->java_vm_->LoadNativeLibrary(mapped_name, class_loader, &reason)) {
+ if (!instance_->java_vm_->LoadNativeLibrary(env, mapped_name, nullptr, &reason)) {
LOG(FATAL) << "LoadNativeLibrary failed for \"" << mapped_name << "\": " << reason;
}
- self->TransitionFromRunnableToSuspended(kNative);
}
// Initialize well known classes that may invoke runtime native methods.
@@ -912,8 +913,7 @@ void Runtime::DetachCurrentThread() {
}
mirror::Throwable* Runtime::GetPreAllocatedOutOfMemoryError() {
- mirror::Throwable* oome = ReadBarrier::BarrierForRoot<mirror::Throwable, kWithReadBarrier>(
- &pre_allocated_OutOfMemoryError_);
+ mirror::Throwable* oome = pre_allocated_OutOfMemoryError_.Read();
if (oome == NULL) {
LOG(ERROR) << "Failed to return pre-allocated OOME";
}
@@ -952,23 +952,21 @@ void Runtime::VisitConcurrentRoots(RootCallback* callback, void* arg, VisitRootF
void Runtime::VisitNonThreadRoots(RootCallback* callback, void* arg) {
java_vm_->VisitRoots(callback, arg);
- if (pre_allocated_OutOfMemoryError_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&pre_allocated_OutOfMemoryError_), arg, 0,
- kRootVMInternal);
- DCHECK(pre_allocated_OutOfMemoryError_ != nullptr);
+ if (!pre_allocated_OutOfMemoryError_.IsNull()) {
+ pre_allocated_OutOfMemoryError_.VisitRoot(callback, arg, 0, kRootVMInternal);
+ DCHECK(!pre_allocated_OutOfMemoryError_.IsNull());
}
- callback(reinterpret_cast<mirror::Object**>(&resolution_method_), arg, 0, kRootVMInternal);
- DCHECK(resolution_method_ != nullptr);
+ resolution_method_.VisitRoot(callback, arg, 0, kRootVMInternal);
+ DCHECK(!resolution_method_.IsNull());
if (HasImtConflictMethod()) {
- callback(reinterpret_cast<mirror::Object**>(&imt_conflict_method_), arg, 0, kRootVMInternal);
+ imt_conflict_method_.VisitRoot(callback, arg, 0, kRootVMInternal);
}
if (HasDefaultImt()) {
- callback(reinterpret_cast<mirror::Object**>(&default_imt_), arg, 0, kRootVMInternal);
+ default_imt_.VisitRoot(callback, arg, 0, kRootVMInternal);
}
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
- if (callee_save_methods_[i] != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&callee_save_methods_[i]), arg, 0,
- kRootVMInternal);
+ if (!callee_save_methods_[i].IsNull()) {
+ callee_save_methods_[i].VisitRoot(callback, arg, 0, kRootVMInternal);
}
}
{
@@ -1106,7 +1104,7 @@ void Runtime::SetInstructionSet(InstructionSet instruction_set) {
void Runtime::SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type) {
DCHECK_LT(static_cast<int>(type), static_cast<int>(kLastCalleeSaveType));
- callee_save_methods_[type] = method;
+ callee_save_methods_[type] = GcRoot<mirror::ArtMethod>(method);
}
const std::vector<const DexFile*>& Runtime::GetCompileTimeClassPath(jobject class_loader) {
@@ -1222,37 +1220,6 @@ void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::strin
argv->push_back("--compiler-filter=interpret-only");
}
- argv->push_back("--runtime-arg");
- std::string checkstr = "-implicit-checks";
-
- int nchecks = 0;
- char checksep = ':';
-
- if (!ExplicitNullChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "null";
- ++nchecks;
- }
- if (!ExplicitSuspendChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "suspend";
- ++nchecks;
- }
-
- if (!ExplicitStackOverflowChecks()) {
- checkstr += checksep;
- checksep = ',';
- checkstr += "stack";
- ++nchecks;
- }
-
- if (nchecks == 0) {
- checkstr += ":none";
- }
- argv->push_back(checkstr);
-
// Make the dex2oat instruction set match that of the launching runtime. If we have multiple
// architecture support, dex2oat may be compiled as a different instruction-set than that
// currently being executed.
diff --git a/runtime/runtime.h b/runtime/runtime.h
index fccccbdfd7..a85c2e4eda 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -21,10 +21,13 @@
#include <stdio.h>
#include <iosfwd>
+#include <set>
#include <string>
#include <utility>
#include <vector>
+#include "compiler_callbacks.h"
+#include "gc_root.h"
#include "instrumentation.h"
#include "instruction_set.h"
#include "jobject_comparator.h"
@@ -54,7 +57,6 @@ namespace verifier {
class MethodVerifier;
}
class ClassLinker;
-class CompilerCallbacks;
class DexFile;
class InternTable;
class JavaVMExt;
@@ -91,6 +93,18 @@ class Runtime {
return compiler_callbacks_ != nullptr;
}
+ bool CanRelocate() const {
+ return !IsCompiler() || compiler_callbacks_->IsRelocationPossible();
+ }
+
+ bool ShouldRelocate() const {
+ return must_relocate_ && CanRelocate();
+ }
+
+ bool MustRelocateIfPossible() const {
+ return must_relocate_;
+ }
+
CompilerCallbacks* GetCompilerCallbacks() {
return compiler_callbacks_;
}
@@ -104,6 +118,7 @@ class Runtime {
}
std::string GetCompilerExecutable() const;
+ std::string GetPatchoatExecutable() const;
const std::vector<std::string>& GetCompilerOptions() const {
return compiler_options_;
@@ -173,7 +188,7 @@ class Runtime {
void DetachCurrentThread() LOCKS_EXCLUDED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void DumpLockHolders(std::ostream& os);
~Runtime();
@@ -268,11 +283,11 @@ class Runtime {
mirror::ArtMethod* GetResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasResolutionMethod() const {
- return resolution_method_ != nullptr;
+ return !resolution_method_.IsNull();
}
void SetResolutionMethod(mirror::ArtMethod* method) {
- resolution_method_ = method;
+ resolution_method_ = GcRoot<mirror::ArtMethod>(method);
}
mirror::ArtMethod* CreateResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -281,11 +296,11 @@ class Runtime {
mirror::ArtMethod* GetImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasImtConflictMethod() const {
- return imt_conflict_method_ != nullptr;
+ return !imt_conflict_method_.IsNull();
}
void SetImtConflictMethod(mirror::ArtMethod* method) {
- imt_conflict_method_ = method;
+ imt_conflict_method_ = GcRoot<mirror::ArtMethod>(method);
}
mirror::ArtMethod* CreateImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -295,11 +310,11 @@ class Runtime {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasDefaultImt() const {
- return default_imt_ != nullptr;
+ return !default_imt_.IsNull();
}
void SetDefaultImt(mirror::ObjectArray<mirror::ArtMethod>* imt) {
- default_imt_ = imt;
+ default_imt_ = GcRoot<mirror::ObjectArray<mirror::ArtMethod>>(imt);
}
mirror::ObjectArray<mirror::ArtMethod>* CreateDefaultImt(ClassLinker* cl)
@@ -314,7 +329,7 @@ class Runtime {
};
bool HasCalleeSaveMethod(CalleeSaveType type) const {
- return callee_save_methods_[type] != NULL;
+ return !callee_save_methods_[type].IsNull();
}
mirror::ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
@@ -474,21 +489,23 @@ class Runtime {
static constexpr int kProfileForground = 0;
static constexpr int kProfileBackgrouud = 1;
- mirror::ArtMethod* callee_save_methods_[kLastCalleeSaveType];
- mirror::Throwable* pre_allocated_OutOfMemoryError_;
- mirror::ArtMethod* resolution_method_;
- mirror::ArtMethod* imt_conflict_method_;
- mirror::ObjectArray<mirror::ArtMethod>* default_imt_;
+ GcRoot<mirror::ArtMethod> callee_save_methods_[kLastCalleeSaveType];
+ GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
+ GcRoot<mirror::ArtMethod> resolution_method_;
+ GcRoot<mirror::ArtMethod> imt_conflict_method_;
+ GcRoot<mirror::ObjectArray<mirror::ArtMethod>> default_imt_;
InstructionSet instruction_set_;
QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType];
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
+ bool must_relocate_;
bool is_concurrent_gc_enabled_;
bool is_explicit_gc_disabled_;
std::string compiler_executable_;
+ std::string patchoat_executable_;
std::vector<std::string> compiler_options_;
std::vector<std::string> image_compiler_options_;
@@ -589,6 +606,11 @@ class Runtime {
// Specifies target SDK version to allow workarounds for certain API levels.
int32_t target_sdk_version_;
+ // Implicit checks flags.
+ bool implicit_null_checks_; // NullPointer checks are implicit.
+ bool implicit_so_checks_; // StackOverflow checks are implicit.
+ bool implicit_suspend_checks_; // Thread suspension checks are implicit.
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index 941fd0e2de..f7e238c21e 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -52,6 +52,7 @@ class SafeMap {
return *this;
}
+ allocator_type get_allocator() const { return map_.get_allocator(); }
key_compare key_comp() const { return map_.key_comp(); }
value_compare value_comp() const { return map_.value_comp(); }
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index d69162334d..ae3eaf2e22 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -18,7 +18,9 @@
#define ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_
#include "base/casts.h"
-#include "jni_internal-inl.h"
+#include "java_vm_ext.h"
+#include "jni_env_ext-inl.h"
+#include "read_barrier.h"
#include "thread-inl.h"
#include "verify_object.h"
@@ -113,6 +115,10 @@ class ScopedObjectAccessAlreadyRunnable {
return vm_;
}
+ bool ForceCopy() const {
+ return vm_->ForceCopy();
+ }
+
/*
* Add a local reference for an object to the indirect reference table associated with the
* current stack frame. When the native function returns, the reference will be discarded.
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 71e566efc3..2d0060eb69 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -144,8 +144,8 @@ size_t StackVisitor::GetNativePcOffset() const {
bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
- if (cur_quick_frame_ != NULL) {
- DCHECK(context_ != NULL); // You can't reliably read registers without a context.
+ if (cur_quick_frame_ != nullptr) {
+ DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
DCHECK(m == GetMethod());
const void* code_pointer = m->GetQuickOatCodePointer();
DCHECK(code_pointer != nullptr);
@@ -158,14 +158,12 @@ bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
uintptr_t ptr_val;
- bool success = false;
- bool target64 = (kRuntimeISA == kArm64) || (kRuntimeISA == kX86_64);
- if (is_float) {
- success = GetFPR(reg, &ptr_val);
- } else {
- success = GetGPR(reg, &ptr_val);
+ bool success = is_float ? GetFPR(reg, &ptr_val) : GetGPR(reg, &ptr_val);
+ if (!success) {
+ return false;
}
- if (success && target64) {
+ bool target64 = Is64BitInstructionSet(kRuntimeISA);
+ if (target64) {
bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
int64_t value_long = static_cast<int64_t>(ptr_val);
@@ -176,10 +174,11 @@ bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
}
}
*val = ptr_val;
- return success;
+ return true;
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
*val = *GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
return true;
@@ -190,10 +189,64 @@ bool StackVisitor::GetVReg(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
}
}
+bool StackVisitor::GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+ VRegKind kind_hi, uint64_t* val) const {
+ if (kind_lo == kLongLoVReg) {
+ DCHECK_EQ(kind_hi, kLongHiVReg);
+ } else if (kind_lo == kDoubleLoVReg) {
+ DCHECK_EQ(kind_hi, kDoubleHiVReg);
+ } else {
+ LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
+ }
+ if (cur_quick_frame_ != nullptr) {
+ DCHECK(context_ != nullptr); // You can't reliably read registers without a context.
+ DCHECK(m == GetMethod());
+ const void* code_pointer = m->GetQuickOatCodePointer();
+ DCHECK(code_pointer != nullptr);
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ uint32_t vmap_offset_lo, vmap_offset_hi;
+ // TODO: IsInContext stops before spotting floating point registers.
+ if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
+ vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
+ bool is_float = (kind_lo == kDoubleLoVReg);
+ uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
+ uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
+ uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
+ uintptr_t ptr_val_lo, ptr_val_hi;
+ bool success = is_float ? GetFPR(reg_lo, &ptr_val_lo) : GetGPR(reg_lo, &ptr_val_lo);
+ success &= is_float ? GetFPR(reg_hi, &ptr_val_hi) : GetGPR(reg_hi, &ptr_val_hi);
+ if (!success) {
+ return false;
+ }
+ bool target64 = Is64BitInstructionSet(kRuntimeISA);
+ if (target64) {
+ int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
+ int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
+ ptr_val_lo = static_cast<uintptr_t>(value_long_lo & 0xFFFFFFFF);
+ ptr_val_hi = static_cast<uintptr_t>(value_long_hi >> 32);
+ }
+ *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
+ return true;
+ } else {
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+ *val = *reinterpret_cast<uint64_t*>(addr);
+ return true;
+ }
+ } else {
+ *val = cur_shadow_frame_->GetVRegLong(vreg);
+ return true;
+ }
+}
+
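GetVRegPair above reads the two halves separately and then recombines them into one 64-bit value; the recombination step amounts to (sketch of the expression used above):

// How the 64-bit result is assembled from the lo/hi vreg halves.
static uint64_t CombineVRegHalves(uint32_t lo, uint32_t hi) {
  return (static_cast<uint64_t>(hi) << 32) | lo;
}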
bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value,
VRegKind kind) {
- if (cur_quick_frame_ != NULL) {
- DCHECK(context_ != NULL); // You can't reliably write registers without a context.
+ if (cur_quick_frame_ != nullptr) {
+ DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
DCHECK(m == GetMethod());
const void* code_pointer = m->GetQuickOatCodePointer();
DCHECK(code_pointer != nullptr);
@@ -205,7 +258,7 @@ bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_val
bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
const uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
- bool target64 = (kRuntimeISA == kArm64) || (kRuntimeISA == kX86_64);
+ bool target64 = Is64BitInstructionSet(kRuntimeISA);
// Deal with 32 or 64-bit wide registers in a way that builds on all targets.
if (target64) {
bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
@@ -234,11 +287,11 @@ bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_val
}
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
- DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
- int offset = GetVRegOffset(code_item, frame_info.CoreSpillMask(), frame_info.FpSpillMask(),
- frame_info.FrameSizeInBytes(), vreg, kRuntimeISA);
- byte* vreg_addr = reinterpret_cast<byte*>(GetCurrentQuickFrame()) + offset;
- *reinterpret_cast<uint32_t*>(vreg_addr) = new_value;
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+ *addr = new_value;
return true;
}
} else {
@@ -247,6 +300,68 @@ bool StackVisitor::SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_val
}
}
+bool StackVisitor::SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ VRegKind kind_lo, VRegKind kind_hi) {
+ if (kind_lo == kLongLoVReg) {
+ DCHECK_EQ(kind_hi, kLongHiVReg);
+ } else if (kind_lo == kDoubleLoVReg) {
+ DCHECK_EQ(kind_hi, kDoubleHiVReg);
+ } else {
+ LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
+ }
+ if (cur_quick_frame_ != nullptr) {
+ DCHECK(context_ != nullptr); // You can't reliably write registers without a context.
+ DCHECK(m == GetMethod());
+ const void* code_pointer = m->GetQuickOatCodePointer();
+ DCHECK(code_pointer != nullptr);
+ const VmapTable vmap_table(m->GetVmapTable(code_pointer));
+ QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
+ uint32_t vmap_offset_lo, vmap_offset_hi;
+ // TODO: IsInContext stops before spotting floating point registers.
+ if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
+ vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
+ bool is_float = (kind_lo == kDoubleLoVReg);
+ uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
+ uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
+ uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
+ uintptr_t new_value_lo = static_cast<uintptr_t>(new_value & 0xFFFFFFFF);
+ uintptr_t new_value_hi = static_cast<uintptr_t>(new_value >> 32);
+ bool target64 = Is64BitInstructionSet(kRuntimeISA);
+ // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
+ if (target64) {
+ uintptr_t old_reg_val_lo, old_reg_val_hi;
+ bool success = is_float ? GetFPR(reg_lo, &old_reg_val_lo) : GetGPR(reg_lo, &old_reg_val_lo);
+ success &= is_float ? GetFPR(reg_hi, &old_reg_val_hi) : GetGPR(reg_hi, &old_reg_val_hi);
+ if (!success) {
+ return false;
+ }
+ uint64_t new_vreg_portion_lo = static_cast<uint64_t>(new_value_lo);
+ uint64_t new_vreg_portion_hi = static_cast<uint64_t>(new_value_hi) << 32;
+ uint64_t old_reg_val_lo_as_wide = static_cast<uint64_t>(old_reg_val_lo);
+ uint64_t old_reg_val_hi_as_wide = static_cast<uint64_t>(old_reg_val_hi);
+ uint64_t mask_lo = static_cast<uint64_t>(0xffffffff) << 32;
+ uint64_t mask_hi = 0xffffffff;
+ new_value_lo = static_cast<uintptr_t>((old_reg_val_lo_as_wide & mask_lo) | new_vreg_portion_lo);
+ new_value_hi = static_cast<uintptr_t>((old_reg_val_hi_as_wide & mask_hi) | new_vreg_portion_hi);
+ }
+ bool success = is_float ? SetFPR(reg_lo, new_value_lo) : SetGPR(reg_lo, new_value_lo);
+ success &= is_float ? SetFPR(reg_hi, new_value_hi) : SetGPR(reg_hi, new_value_hi);
+ return success;
+ } else {
+ const DexFile::CodeItem* code_item = m->GetCodeItem();
+ DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
+ // its instructions?
+ uint32_t* addr = GetVRegAddr(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
+ frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
+ *reinterpret_cast<uint64_t*>(addr) = new_value;
+ return true;
+ }
+ } else {
+ cur_shadow_frame_->SetVRegLong(vreg, new_value);
+ return true;
+ }
+}
+
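On 64-bit targets SetVRegPair must preserve the half of each physical register that it does not own; the masking above amounts to the following (sketch restating the logic, with assumed helper names):

// Splice the new low half into reg_lo, keeping reg_lo's upper 32 bits, and
// the new high half into reg_hi, keeping reg_hi's lower 32 bits.
static uint64_t SpliceLow(uint64_t old_reg_lo, uint32_t new_lo) {
  return (old_reg_lo & 0xffffffff00000000ULL) | new_lo;
}
static uint64_t SpliceHigh(uint64_t old_reg_hi, uint32_t new_hi) {
  return (old_reg_hi & 0x00000000ffffffffULL) | (static_cast<uint64_t>(new_hi) << 32);
}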
uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
DCHECK(cur_quick_frame_ != NULL) << "This is a quick frame routine";
return context_->GetGPRAddress(reg);
diff --git a/runtime/stack.h b/runtime/stack.h
index ef498ef06f..578f569c43 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -568,9 +568,26 @@ class StackVisitor {
return val;
}
+ bool GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
+ uint64_t* val) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ uint64_t GetVRegPair(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
+ VRegKind kind_hi) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint64_t val;
+ bool success = GetVRegPair(m, vreg, kind_lo, kind_hi, &val);
+ CHECK(success) << "Failed to read vreg pair " << vreg
+ << " of kind [" << kind_lo << "," << kind_hi << "]";
+ return val;
+ }
+
bool SetVReg(mirror::ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool SetVRegPair(mirror::ArtMethod* m, uint16_t vreg, uint64_t new_value,
+ VRegKind kind_lo, VRegKind kind_hi)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
uintptr_t* GetGPRAddress(uint32_t reg) const;
// This is a fast-path for getting/setting values in a quick frame.
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
new file mode 100644
index 0000000000..7d3a48fd88
--- /dev/null
+++ b/runtime/stack_map.h
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_STACK_MAP_H_
+#define ART_RUNTIME_STACK_MAP_H_
+
+#include "base/bit_vector.h"
+#include "memory_region.h"
+
+namespace art {
+
+/**
+ * Classes in the following file are wrappers around stack map information backed
+ * by a MemoryRegion. As such they read and write to the region; they don't have
+ * their own fields.
+ */
+
+/**
+ * Inline information for a specific PC. The information is of the form:
+ * [inlining_depth, [method_dex reference]+]
+ */
+class InlineInfo {
+ public:
+ explicit InlineInfo(MemoryRegion region) : region_(region) {}
+
+ uint8_t GetDepth() const {
+ return region_.Load<uint8_t>(kDepthOffset);
+ }
+
+ void SetDepth(uint8_t depth) {
+ region_.Store<uint8_t>(kDepthOffset, depth);
+ }
+
+ uint32_t GetMethodReferenceIndexAtDepth(uint8_t depth) const {
+ return region_.Load<uint32_t>(kFixedSize + depth * SingleEntrySize());
+ }
+
+ void SetMethodReferenceIndexAtDepth(uint8_t depth, uint32_t index) {
+ region_.Store<uint32_t>(kFixedSize + depth * SingleEntrySize(), index);
+ }
+
+ static size_t SingleEntrySize() {
+ return sizeof(uint32_t);
+ }
+
+ private:
+ static constexpr int kDepthOffset = 0;
+ static constexpr int kFixedSize = kDepthOffset + sizeof(uint8_t);
+
+ static constexpr uint32_t kNoInlineInfo = -1;
+
+ MemoryRegion region_;
+
+ template<typename T> friend class CodeInfo;
+ template<typename T> friend class StackMap;
+ template<typename T> friend class StackMapStream;
+};
+
+/**
+ * Information on dex register values for a specific PC. The information is
+ * of the form:
+ * [location_kind, register_value]+.
+ *
+ * The location_kind for a Dex register can either be:
+ * - Constant: register_value holds the constant,
+ * - Stack: register_value holds the stack offset,
+ * - Register: register_value holds the register number.
+ */
+class DexRegisterMap {
+ public:
+ explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
+
+ enum LocationKind {
+ kInStack,
+ kInRegister,
+ kConstant
+ };
+
+ LocationKind GetLocationKind(uint16_t register_index) const {
+ return region_.Load<LocationKind>(
+ kFixedSize + register_index * SingleEntrySize());
+ }
+
+ void SetRegisterInfo(uint16_t register_index, LocationKind kind, int32_t value) {
+ size_t entry = kFixedSize + register_index * SingleEntrySize();
+ region_.Store<LocationKind>(entry, kind);
+ region_.Store<int32_t>(entry + sizeof(LocationKind), value);
+ }
+
+ int32_t GetValue(uint16_t register_index) const {
+ return region_.Load<int32_t>(
+ kFixedSize + sizeof(LocationKind) + register_index * SingleEntrySize());
+ }
+
+ static size_t SingleEntrySize() {
+ return sizeof(LocationKind) + sizeof(int32_t);
+ }
+
+ private:
+ static constexpr int kFixedSize = 0;
+
+ MemoryRegion region_;
+
+ template <typename T> friend class CodeInfo;
+ template <typename T> friend class StackMapStream;
+};
+
+/**
+ * A Stack Map holds compilation information for a specific PC necessary for:
+ * - Mapping it to a dex PC,
+ * - Knowing which stack entries are objects,
+ * - Knowing which registers hold objects,
+ * - Knowing the inlining information,
+ * - Knowing the values of dex registers.
+ *
+ * The information is of the form:
+ * [dex_pc, native_pc, dex_register_map_offset, inlining_info_offset, register_mask, stack_mask].
+ *
+ * Note that register_mask is fixed size, but stack_mask is variable size, depending on the
+ * stack size of a method.
+ */
+template <typename T>
+class StackMap {
+ public:
+ explicit StackMap(MemoryRegion region) : region_(region) {}
+
+ uint32_t GetDexPc() const {
+ return region_.Load<uint32_t>(kDexPcOffset);
+ }
+
+ void SetDexPc(uint32_t dex_pc) {
+ region_.Store<uint32_t>(kDexPcOffset, dex_pc);
+ }
+
+ T GetNativePc() const {
+ return region_.Load<T>(kNativePcOffset);
+ }
+
+ void SetNativePc(T native_pc) {
+ return region_.Store<T>(kNativePcOffset, native_pc);
+ }
+
+ uint32_t GetDexRegisterMapOffset() const {
+ return region_.Load<uint32_t>(kDexRegisterMapOffsetOffset);
+ }
+
+ void SetDexRegisterMapOffset(uint32_t offset) {
+ return region_.Store<uint32_t>(kDexRegisterMapOffsetOffset, offset);
+ }
+
+ uint32_t GetInlineDescriptorOffset() const {
+ return region_.Load<uint32_t>(kInlineDescriptorOffsetOffset);
+ }
+
+ void SetInlineDescriptorOffset(uint32_t offset) {
+ return region_.Store<uint32_t>(kInlineDescriptorOffsetOffset, offset);
+ }
+
+ uint32_t GetRegisterMask() const {
+ return region_.Load<uint32_t>(kRegisterMaskOffset);
+ }
+
+ void SetRegisterMask(uint32_t mask) {
+ region_.Store<uint32_t>(kRegisterMaskOffset, mask);
+ }
+
+ MemoryRegion GetStackMask() const {
+ return region_.Subregion(kStackMaskOffset, StackMaskSize());
+ }
+
+ void SetStackMask(const BitVector& sp_map) {
+ MemoryRegion region = GetStackMask();
+ for (size_t i = 0; i < region.size_in_bits(); i++) {
+ region.StoreBit(i, sp_map.IsBitSet(i));
+ }
+ }
+
+ bool HasInlineInfo() const {
+ return GetInlineDescriptorOffset() != InlineInfo::kNoInlineInfo;
+ }
+
+ bool Equals(const StackMap& other) {
+ return region_.pointer() == other.region_.pointer()
+ && region_.size() == other.region_.size();
+ }
+
+ private:
+ static constexpr int kDexPcOffset = 0;
+ static constexpr int kNativePcOffset = kDexPcOffset + sizeof(uint32_t);
+ static constexpr int kDexRegisterMapOffsetOffset = kNativePcOffset + sizeof(T);
+ static constexpr int kInlineDescriptorOffsetOffset =
+ kDexRegisterMapOffsetOffset + sizeof(uint32_t);
+ static constexpr int kRegisterMaskOffset = kInlineDescriptorOffsetOffset + sizeof(uint32_t);
+ static constexpr int kFixedSize = kRegisterMaskOffset + sizeof(uint32_t);
+ static constexpr int kStackMaskOffset = kFixedSize;
+
+ size_t StackMaskSize() const { return region_.size() - kFixedSize; }
+
+ MemoryRegion region_;
+
+ template <typename U> friend class CodeInfo;
+ template <typename U> friend class StackMapStream;
+};
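For concreteness, with T = uint32_t (32-bit native pcs) and sizeof(uint32_t) = 4, the fixed part of a StackMap computed by the offsets above lays out as:

    kDexPcOffset                  = 0
    kNativePcOffset               = 4
    kDexRegisterMapOffsetOffset   = 8
    kInlineDescriptorOffsetOffset = 12
    kRegisterMaskOffset           = 16
    kFixedSize = kStackMaskOffset = 20

so a method whose stack mask needs 4 bytes gets CodeInfo::StackMapSize() = 20 + 4 = 24 bytes per stack map.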
+
+
+/**
+ * Wrapper around all compiler information collected for a method.
+ * The information is of the form:
+ * [number_of_stack_maps, stack_mask_size, StackMap+, DexRegisterMap+, InlineInfo*].
+ */
+template <typename T>
+class CodeInfo {
+ public:
+ explicit CodeInfo(MemoryRegion region) : region_(region) {}
+
+ StackMap<T> GetStackMapAt(size_t i) const {
+ size_t size = StackMapSize();
+ return StackMap<T>(GetStackMaps().Subregion(i * size, size));
+ }
+
+ uint32_t GetStackMaskSize() const {
+ return region_.Load<uint32_t>(kStackMaskSizeOffset);
+ }
+
+ void SetStackMaskSize(uint32_t size) {
+ region_.Store<uint32_t>(kStackMaskSizeOffset, size);
+ }
+
+ size_t GetNumberOfStackMaps() const {
+ return region_.Load<uint32_t>(kNumberOfStackMapsOffset);
+ }
+
+ void SetNumberOfStackMaps(uint32_t number_of_stack_maps) {
+ region_.Store<uint32_t>(kNumberOfStackMapsOffset, number_of_stack_maps);
+ }
+
+ size_t StackMapSize() const {
+ return StackMap<T>::kFixedSize + GetStackMaskSize();
+ }
+
+ DexRegisterMap GetDexRegisterMapOf(StackMap<T> stack_map, uint32_t number_of_dex_registers) {
+ uint32_t offset = stack_map.GetDexRegisterMapOffset();
+ return DexRegisterMap(region_.Subregion(offset,
+ DexRegisterMap::kFixedSize + number_of_dex_registers * DexRegisterMap::SingleEntrySize()));
+ }
+
+ InlineInfo GetInlineInfoOf(StackMap<T> stack_map) {
+ uint32_t offset = stack_map.GetInlineDescriptorOffset();
+ uint8_t depth = region_.Load<uint8_t>(offset);
+ return InlineInfo(region_.Subregion(offset,
+ InlineInfo::kFixedSize + depth * InlineInfo::SingleEntrySize()));
+ }
+
+ StackMap<T> GetStackMapForDexPc(uint32_t dex_pc) {
+ for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap<T> stack_map = GetStackMapAt(i);
+ if (stack_map.GetDexPc() == dex_pc) {
+ return stack_map;
+ }
+ }
+ LOG(FATAL) << "Unreachable";
+ return StackMap<T>(MemoryRegion());
+ }
+
+ StackMap<T> GetStackMapForNativePc(T native_pc) {
+ // TODO: stack maps are sorted by native pc, we can do a binary search.
+ for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap<T> stack_map = GetStackMapAt(i);
+ if (stack_map.GetNativePc() == native_pc) {
+ return stack_map;
+ }
+ }
+ LOG(FATAL) << "Unreachable";
+ return StackMap<T>(MemoryRegion());
+ }
+
+ private:
+ static constexpr int kNumberOfStackMapsOffset = 0;
+ static constexpr int kStackMaskSizeOffset = kNumberOfStackMapsOffset + sizeof(uint32_t);
+ static constexpr int kFixedSize = kStackMaskSizeOffset + sizeof(uint32_t);
+
+ MemoryRegion GetStackMaps() const {
+ return region_.size() == 0
+ ? MemoryRegion()
+ : region_.Subregion(kFixedSize, StackMapSize() * GetNumberOfStackMaps());
+ }
+
+ MemoryRegion region_;
+ template<typename U> friend class StackMapStream;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_STACK_MAP_H_
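An end-to-end sketch of how a runtime consumer of this header might read the information back (the code_info_region, native_pc and number_of_dex_registers below are assumed inputs, not part of the patch):

    // Given the CodeInfo region emitted for a method, find the stack map
    // covering a native pc and inspect where dex register 0 lives.
    CodeInfo<uint32_t> code_info(code_info_region);
    StackMap<uint32_t> map = code_info.GetStackMapForNativePc(native_pc);
    uint32_t dex_pc = map.GetDexPc();
    DexRegisterMap dex_registers =
        code_info.GetDexRegisterMapOf(map, number_of_dex_registers);
    if (dex_registers.GetLocationKind(0) == DexRegisterMap::kInStack) {
      int32_t stack_offset = dex_registers.GetValue(0);
      // ... use dex_pc and stack_offset ...
    }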
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 38f1307321..bd399e7734 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -24,7 +24,7 @@
#include "base/casts.h"
#include "base/mutex-inl.h"
#include "gc/heap.h"
-#include "jni_internal.h"
+#include "jni_env_ext.h"
namespace art {
@@ -57,26 +57,24 @@ inline ThreadState Thread::SetState(ThreadState new_state) {
}
inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
-#ifdef NDEBUG
- UNUSED(check_locks); // Keep GCC happy about unused parameters.
-#else
- CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
- if (check_locks) {
- bool bad_mutexes_held = false;
- for (int i = kLockLevelCount - 1; i >= 0; --i) {
- // We expect no locks except the mutator_lock_.
- if (i != kMutatorLock) {
- BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
- LOG(ERROR) << "holding \"" << held_mutex->GetName()
- << "\" at point where thread suspension is expected";
- bad_mutexes_held = true;
+ if (kIsDebugBuild) {
+ CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
+ if (check_locks) {
+ bool bad_mutexes_held = false;
+ for (int i = kLockLevelCount - 1; i >= 0; --i) {
+ // We expect no locks except the mutator_lock_ or thread list suspend thread lock.
+ if (i != kMutatorLock && i != kThreadListSuspendThreadLock) {
+ BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
+ if (held_mutex != NULL) {
+ LOG(ERROR) << "holding \"" << held_mutex->GetName()
+ << "\" at point where thread suspension is expected";
+ bad_mutexes_held = true;
+ }
}
}
+ CHECK(!bad_mutexes_held);
}
- CHECK(!bad_mutexes_held);
}
-#endif
}
inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 9fa158d5e0..8e6da74e5e 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -76,6 +76,8 @@ namespace art {
bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
+const size_t Thread::kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
+ GetStackOverflowReservedBytes(kRuntimeISA);
static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
@@ -219,7 +221,7 @@ static size_t FixStackSize(size_t stack_size) {
// It's likely that callers are trying to ensure they have at least a certain amount of
// stack space, so we should add our reserved space on top of what they requested, rather
// than implicitly take it away from them.
- stack_size += kRuntimeStackOverflowReservedBytes;
+ stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
} else {
// If we are going to use implicit stack checks, allocate space for the protected
// region at the bottom of the stack.
@@ -232,47 +234,95 @@ static size_t FixStackSize(size_t stack_size) {
return stack_size;
}
+// Global variable to prevent the compiler from optimizing away the page reads for the stack.
+byte dont_optimize_this;
+
// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below the stack_end_. Just below that
// is the StackOverflow reserved region used when creating the StackOverflow
// exception.
+//
+// There is a little complexity here that deserves a special mention. When running on the
+// host (glibc), the process's main thread's stack is allocated with a special flag
+// to prevent memory being allocated when it's not needed. This flag makes the
+// kernel only allocate memory for the stack by growing down in memory. Because we
+// want to put an mprotected region far away from that at the stack top, we need
+// to make sure the pages for the stack are mapped in before we call mprotect. We do
+// this by reading every page from the stack bottom (highest address) to the stack top.
+// We then madvise this away.
void Thread::InstallImplicitProtection(bool is_main_stack) {
byte* pregion = tlsPtr_.stack_end;
+ byte* stack_lowmem = tlsPtr_.stack_begin;
+ byte* stack_top = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(&pregion) &
+ ~(kPageSize - 1)); // Page containing current top of stack.
+
+ const bool running_on_intel = (kRuntimeISA == kX86) || (kRuntimeISA == kX86_64);
+
+ if (running_on_intel) {
+ // On Intel, we need to map in the main stack. This must be done by reading from the
+ // current stack pointer downwards as the stack is mapped using VM_GROWSDOWN
+ // in the kernel. Any access more than a page below the current SP will cause
+ // a segv.
+ if (is_main_stack) {
+ // First we need to unprotect the protected region because this may
+ // be called more than once for a particular stack and we will crash
+ // if we try to read the protected page.
+ mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_READ);
+
+ // Read every page from the high address to the low.
+ for (byte* p = stack_top; p > stack_lowmem; p -= kPageSize) {
+ dont_optimize_this = *p;
+ }
+ }
+ }
+ // Check and place a marker word at the lowest usable address in the stack. This
+ // is used to prevent installing the protection twice.
constexpr uint32_t kMarker = 0xdadadada;
uintptr_t *marker = reinterpret_cast<uintptr_t*>(pregion);
if (*marker == kMarker) {
- // The region has already been set up.
+ // The region has already been set up. But on the main stack on the host we have
+ // removed the protected region in order to read the stack memory. We need to put
+ // this back again.
+ if (is_main_stack && running_on_intel) {
+ mprotect(pregion - kStackOverflowProtectedSize, kStackOverflowProtectedSize, PROT_NONE);
+ madvise(stack_lowmem, stack_top - stack_lowmem, MADV_DONTNEED);
+ }
return;
}
// Add marker so that we can detect a second attempt to do this.
*marker = kMarker;
- pregion -= kStackOverflowProtectedSize;
-
- // Touch the pages in the region to map them in. Otherwise mprotect fails. Only
- // need to do this on the main stack. We only need to touch one byte per page.
- if (is_main_stack) {
- byte* start = pregion;
- byte* end = pregion + kStackOverflowProtectedSize;
- while (start < end) {
- *start = static_cast<byte>(0);
- start += kPageSize;
+ if (!running_on_intel) {
+ // On non-Intel targets, stacks are mapped cleanly. The protected region for the
+ // main stack just needs to be mapped in. We do this by writing one byte per page.
+ for (byte* p = pregion - kStackOverflowProtectedSize; p < pregion; p += kPageSize) {
+ *p = 0;
}
}
+ pregion -= kStackOverflowProtectedSize;
+
VLOG(threads) << "installing stack protected region at " << std::hex <<
static_cast<void*>(pregion) << " to " <<
static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
+
if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. Reason:"
- << strerror(errno);
+ << strerror(errno) << kStackOverflowProtectedSize;
}
// Tell the kernel that we won't be needing these pages any more.
+ // NB. madvise will probably write zeroes into the memory (on linux it does).
if (is_main_stack) {
- madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED);
+ if (running_on_intel) {
+ // On the host, it's the whole stack (minus a page to prevent overwrite of stack top).
+ madvise(stack_lowmem, stack_top - stack_lowmem - kPageSize, MADV_DONTNEED);
+ } else {
+ // On Android, just the protected region.
+ madvise(pregion, kStackOverflowProtectedSize, MADV_DONTNEED);
+ }
}
}
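Stripped of the marker word and madvise bookkeeping, the non-Intel path of this function boils down to the usual touch-then-protect pattern (a sketch only, assuming stack_end points at the lowest usable stack address):

    byte* pregion = stack_end - kStackOverflowProtectedSize;
    // Touch one byte per page so the pages are mapped in before mprotect.
    for (byte* p = pregion; p < stack_end; p += kPageSize) {
      *p = 0;
    }
    if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
      LOG(FATAL) << "Unable to protect stack guard region: " << strerror(errno);
    }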
@@ -488,7 +538,7 @@ void Thread::InitStackHwm() {
tlsPtr_.stack_begin = reinterpret_cast<byte*>(read_stack_base);
tlsPtr_.stack_size = read_stack_size;
- if (read_stack_size <= kRuntimeStackOverflowReservedBytes) {
+ if (read_stack_size <= GetStackOverflowReservedBytes(kRuntimeISA)) {
LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << read_stack_size
<< " bytes)";
}
@@ -533,13 +583,17 @@ void Thread::InitStackHwm() {
// Install the protected region if we are doing implicit overflow checks.
if (implicit_stack_check) {
if (is_main_thread) {
- // The main thread has a 16K protected region at the bottom. We need
+ size_t guardsize;
+ pthread_attr_t attributes;
+ CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), "guard size query");
+ CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, &guardsize), "guard size query");
+ CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), "guard size query");
+ // The main thread might have a protected region at the bottom. We need
// to install our own region so we need to move the limits
// of the stack to make room for it.
- constexpr uint32_t kDelta = 16 * KB;
- tlsPtr_.stack_begin += kDelta;
- tlsPtr_.stack_end += kDelta;
- tlsPtr_.stack_size -= kDelta;
+ tlsPtr_.stack_begin += guardsize;
+ tlsPtr_.stack_end += guardsize;
+ tlsPtr_.stack_size -= guardsize;
}
InstallImplicitProtection(is_main_thread);
}
@@ -1086,7 +1140,7 @@ void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
if (UNLIKELY(IsExceptionPending())) {
ScopedObjectAccess soa(Thread::Current());
mirror::Throwable* exception = GetException(nullptr);
- LOG(FATAL) << "Throwing new exception " << msg << " with unexpected pending exception: "
+ LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
<< exception->Dump();
}
}
@@ -1109,6 +1163,21 @@ void Thread::Destroy() {
Thread* self = this;
DCHECK_EQ(self, Thread::Current());
+ if (tlsPtr_.jni_env != nullptr) {
+ // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
+ tlsPtr_.jni_env->monitors.VisitRoots(MonitorExitVisitor, self, 0, kRootVMInternal);
+ // Release locally held global references; releasing them may require the mutator lock.
+ if (tlsPtr_.jpeer != nullptr) {
+ // If pthread_create fails we don't have a jni env here.
+ tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
+ tlsPtr_.jpeer = nullptr;
+ }
+ if (tlsPtr_.class_loader_override != nullptr) {
+ tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
+ tlsPtr_.class_loader_override = nullptr;
+ }
+ }
+
if (tlsPtr_.opeer != nullptr) {
ScopedObjectAccess soa(self);
// We may need to call user-supplied managed code, do this before final clean-up.
@@ -1136,22 +1205,16 @@ void Thread::Destroy() {
ObjectLock<mirror::Object> locker(self, h_obj);
locker.NotifyAll();
}
+ tlsPtr_.opeer = nullptr;
}
- // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
- if (tlsPtr_.jni_env != nullptr) {
- tlsPtr_.jni_env->monitors.VisitRoots(MonitorExitVisitor, self, 0, kRootVMInternal);
- }
+ Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
}
Thread::~Thread() {
- if (tlsPtr_.jni_env != nullptr && tlsPtr_.jpeer != nullptr) {
- // If pthread_create fails we don't have a jni env here.
- tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
- tlsPtr_.jpeer = nullptr;
- }
- tlsPtr_.opeer = nullptr;
-
+ CHECK(tlsPtr_.class_loader_override == nullptr);
+ CHECK(tlsPtr_.jpeer == nullptr);
+ CHECK(tlsPtr_.opeer == nullptr);
bool initialized = (tlsPtr_.jni_env != nullptr); // Did Thread::Init run?
if (initialized) {
delete tlsPtr_.jni_env;
@@ -1183,7 +1246,7 @@ Thread::~Thread() {
delete tlsPtr_.name;
delete tlsPtr_.stack_trace_sample;
- Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
+ Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
TearDownAlternateSignalStack();
}
@@ -1293,11 +1356,10 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
result = kInvalidIndirectRefObject;
}
} else if (kind == kGlobal) {
- JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
- result = vm->globals.SynchronizedGet(const_cast<Thread*>(this), &vm->globals_lock, ref);
+ result = tlsPtr_.jni_env->vm->DecodeGlobal(const_cast<Thread*>(this), ref);
} else {
DCHECK_EQ(kind, kWeakGlobal);
- result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
+ result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
if (result == kClearedJniWeakGlobal) {
// This is a special case where it's okay to return nullptr.
return nullptr;
@@ -1305,7 +1367,8 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
}
if (UNLIKELY(result == nullptr)) {
- JniAbortF(nullptr, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
+ tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p",
+ ToStr<IndirectRefKind>(kind).c_str(), obj);
}
return result;
}
@@ -1345,6 +1408,13 @@ void Thread::NotifyLocked(Thread* self) {
}
}
+void Thread::SetClassLoaderOverride(jobject class_loader_override) {
+ if (tlsPtr_.class_loader_override != nullptr) {
+ GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
+ }
+ tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
+}
+
class CountStackDepthVisitor : public StackVisitor {
public:
explicit CountStackDepthVisitor(Thread* thread)
@@ -1879,6 +1949,8 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
+ QUICK_ENTRY_POINT_INFO(pA64Load)
+ QUICK_ENTRY_POINT_INFO(pA64Store)
#undef QUICK_ENTRY_POINT_INFO
os << offset;
@@ -1916,10 +1988,13 @@ Context* Thread::GetLongJumpContext() {
return result;
}
+// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
+// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
- CurrentMethodVisitor(Thread* thread, Context* context)
+ CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0) {}
+ : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0),
+ abort_on_error_(abort_on_error) {}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
@@ -1930,16 +2005,17 @@ struct CurrentMethodVisitor FINAL : public StackVisitor {
this_object_ = GetThisObject();
}
method_ = m;
- dex_pc_ = GetDexPc();
+ dex_pc_ = GetDexPc(abort_on_error_);
return false;
}
mirror::Object* this_object_;
mirror::ArtMethod* method_;
uint32_t dex_pc_;
+ const bool abort_on_error_;
};
-mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
- CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr);
+mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
+ CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
visitor.WalkStack(false);
if (dex_pc != nullptr) {
*dex_pc = visitor.dex_pc_;
@@ -1949,7 +2025,7 @@ mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
ThrowLocation Thread::GetCurrentLocationForThrow() {
Context* context = GetLongJumpContext();
- CurrentMethodVisitor visitor(this, context);
+ CurrentMethodVisitor visitor(this, context, true);
visitor.WalkStack(false);
ReleaseLongJumpContext(context);
return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
@@ -2113,11 +2189,6 @@ class RootCallbackVisitor {
const uint32_t tid_;
};
-void Thread::SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
- VerifyObject(class_loader_override);
- tlsPtr_.class_loader_override = class_loader_override;
-}
-
void Thread::VisitRoots(RootCallback* visitor, void* arg) {
uint32_t thread_id = GetThreadId();
if (tlsPtr_.opeer != nullptr) {
@@ -2127,10 +2198,6 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), arg, thread_id, kRootNativeStack);
}
tlsPtr_.throw_location.VisitRoots(visitor, arg);
- if (tlsPtr_.class_loader_override != nullptr) {
- visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.class_loader_override), arg, thread_id,
- kRootNativeStack);
- }
if (tlsPtr_.monitor_enter_object != nullptr) {
visitor(&tlsPtr_.monitor_enter_object, arg, thread_id, kRootNativeStack);
}
@@ -2193,7 +2260,7 @@ void Thread::SetStackEndForStackOverflow() {
if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
// However, we seem to have already extended to use the full stack.
LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
- << kRuntimeStackOverflowReservedBytes << ")?";
+ << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
DumpStack(LOG(ERROR));
LOG(FATAL) << "Recursive stack overflow.";
}
diff --git a/runtime/thread.h b/runtime/thread.h
index d08c2fce82..c2b200bf1a 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -104,8 +104,7 @@ class Thread {
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
static constexpr size_t kStackOverflowProtectedSize = 16 * KB;
- static constexpr size_t kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
- kRuntimeStackOverflowReservedBytes;
+ static const size_t kStackOverflowImplicitCheckSize;
// Creates a new native thread corresponding to the given managed peer.
// Used to implement Thread.start.
@@ -323,7 +322,9 @@ class Thread {
tlsPtr_.long_jump_context = context;
}
- mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc) const
+ // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
+ // abort the runtime iff abort_on_error is true.
+ mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -430,12 +431,11 @@ class Thread {
tlsPtr_.wait_next = next;
}
- mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jobject GetClassLoaderOverride() {
return tlsPtr_.class_loader_override;
}
- void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetClassLoaderOverride(jobject class_loader_override);
// Create the internal representation of a stack trace, that is more time
// and space efficient to compute than the StackTraceElement[].
@@ -551,6 +551,16 @@ class Thread {
return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
}
+ byte* GetStackEndForInterpreter(bool implicit_overflow_check) const {
+ if (implicit_overflow_check) {
+ // The interpreter needs the extra overflow bytes that stack_end does
+ // not include.
+ return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
+ } else {
+ return tlsPtr_.stack_end;
+ }
+ }
+
byte* GetStackEnd() const {
return tlsPtr_.stack_end;
}
@@ -567,7 +577,7 @@ class Thread {
// overflow region.
tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
} else {
- tlsPtr_.stack_end = tlsPtr_.stack_begin + kRuntimeStackOverflowReservedBytes;
+ tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
}
}
@@ -1029,7 +1039,7 @@ class Thread {
// Needed to get the right ClassLoader in JNI_OnLoad, but also
// useful for testing.
- mirror::ClassLoader* class_loader_override;
+ jobject class_loader_override;
// Thread local, lazily allocated, long jump context. Used to deliver exceptions.
Context* long_jump_context;
diff --git a/runtime/thread_linux.cc b/runtime/thread_linux.cc
index ee66ccc29a..9aacb301ec 100644
--- a/runtime/thread_linux.cc
+++ b/runtime/thread_linux.cc
@@ -32,11 +32,18 @@ static void SigAltStack(stack_t* new_stack, stack_t* old_stack) {
}
}
+// The default SIGSTKSZ on Linux is 8K. If we do any logging in a signal
+// handler, this is too small. We allocate 16K instead.
+static constexpr int kHostAltSigStackSize = 16*1024; // 16K signal stack.
+
void Thread::SetUpAlternateSignalStack() {
// Create and set an alternate signal stack.
+#ifdef HAVE_ANDROID_OS
+ LOG(FATAL) << "Invalid use of alternate signal stack on Android";
+#endif
stack_t ss;
- ss.ss_sp = new uint8_t[SIGSTKSZ];
- ss.ss_size = SIGSTKSZ;
+ ss.ss_sp = new uint8_t[kHostAltSigStackSize];
+ ss.ss_size = kHostAltSigStackSize;
ss.ss_flags = 0;
CHECK(ss.ss_sp != NULL);
SigAltStack(&ss, NULL);
@@ -56,7 +63,7 @@ void Thread::TearDownAlternateSignalStack() {
// Tell the kernel to stop using it.
ss.ss_sp = NULL;
ss.ss_flags = SS_DISABLE;
- ss.ss_size = SIGSTKSZ; // Avoid ENOMEM failure with Mac OS' buggy libc.
+ ss.ss_size = kHostAltSigStackSize; // Avoid ENOMEM failure with Mac OS' buggy libc.
SigAltStack(&ss, NULL);
// Free it.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index b649b626ca..5077a89ee9 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -170,16 +170,7 @@ static void UnsafeLogFatalForThreadSuspendAllTimeout() {
// individual thread requires polling. delay_us is the requested sleep and total_delay_us
// accumulates the total time spent sleeping for timeouts. The first sleep is just a yield,
// subsequently sleeps increase delay_us from 1ms to 500ms by doubling.
-static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us,
- bool holding_locks) {
- if (!holding_locks) {
- for (int i = kLockLevelCount - 1; i >= 0; --i) {
- BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
- if (held_mutex != NULL) {
- LOG(FATAL) << "Holding " << held_mutex->GetName() << " while sleeping for thread suspension";
- }
- }
- }
+static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us) {
useconds_t new_delay_us = (*delay_us) * 2;
CHECK_GE(new_delay_us, *delay_us);
if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s.
@@ -244,7 +235,7 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
useconds_t total_delay_us = 0;
do {
useconds_t delay_us = 100;
- ThreadSuspendSleep(self, &delay_us, &total_delay_us, true);
+ ThreadSuspendSleep(self, &delay_us, &total_delay_us);
} while (!thread->IsSuspended());
// Shouldn't need to wait for longer than 1000 microseconds.
constexpr useconds_t kLongWaitThresholdUS = 1000;
@@ -303,16 +294,19 @@ size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
void ThreadList::SuspendAll() {
Thread* self = Thread::Current();
- DCHECK(self != nullptr);
- VLOG(threads) << *self << " SuspendAll starting...";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " SuspendAll starting...";
+ } else {
+ VLOG(threads) << "Thread[null] SuspendAll starting...";
+ }
ATRACE_BEGIN("Suspending mutator threads");
uint64_t start_time = NanoTime();
Locks::mutator_lock_->AssertNotHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
- if (kDebugLocking) {
+ if (kDebugLocking && self != nullptr) {
CHECK_NE(self->GetState(), kRunnable);
}
{
@@ -353,14 +347,21 @@ void ThreadList::SuspendAll() {
ATRACE_END();
ATRACE_BEGIN("Mutator threads suspended");
- VLOG(threads) << *self << " SuspendAll complete";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " SuspendAll complete";
+ } else {
+ VLOG(threads) << "Thread[null] SuspendAll complete";
+ }
}
void ThreadList::ResumeAll() {
Thread* self = Thread::Current();
- DCHECK(self != nullptr);
- VLOG(threads) << *self << " ResumeAll starting";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll starting";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll starting";
+ }
ATRACE_END();
ATRACE_BEGIN("Resuming mutator threads");
@@ -386,11 +387,20 @@ void ThreadList::ResumeAll() {
// Broadcast a notification to all suspended threads, some or all of
// which may choose to wake up. No need to wait for them.
- VLOG(threads) << *self << " ResumeAll waking others";
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll waking others";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll waking others";
+ }
Thread::resume_cond_->Broadcast(self);
}
ATRACE_END();
- VLOG(threads) << *self << " ResumeAll complete";
+
+ if (self != nullptr) {
+ VLOG(threads) << *self << " ResumeAll complete";
+ } else {
+ VLOG(threads) << "Thread[null] ResumeAll complete";
+ }
}
void ThreadList::Resume(Thread* thread, bool for_debugger) {
@@ -444,6 +454,11 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
while (true) {
Thread* thread;
{
+ // Note: this will transition to runnable and potentially suspend. We ensure only one thread
+ // is requesting another suspend, to avoid deadlock, by requiring this function be called
+ // holding Locks::thread_list_suspend_thread_lock_. It's important that this thread suspends rather
+ // than requests thread suspension, to avoid potential cycles of threads requesting each other's
+ // suspension.
ScopedObjectAccess soa(self);
MutexLock mu(self, *Locks::thread_list_lock_);
thread = Thread::FromManagedThread(soa, peer);
@@ -483,7 +498,7 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
}
// Release locks and come out of runnable state.
}
- ThreadSuspendSleep(self, &delay_us, &total_delay_us, false);
+ ThreadSuspendSleep(self, &delay_us, &total_delay_us);
}
}
@@ -502,9 +517,14 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspe
CHECK_NE(thread_id, kInvalidThreadId);
while (true) {
{
- Thread* thread = NULL;
+ // Note: this will transition to runnable and potentially suspend. We ensure only one thread
+ // is requesting another suspend, to avoid deadlock, by requiring this function be called
+ // holding Locks::thread_list_suspend_thread_lock_. It's important that this thread suspends rather
+ // than requests thread suspension, to avoid potential cycles of threads requesting each other's
+ // suspension.
ScopedObjectAccess soa(self);
MutexLock mu(self, *Locks::thread_list_lock_);
+ Thread* thread = nullptr;
for (const auto& it : list_) {
if (it->GetThreadId() == thread_id) {
thread = it;
@@ -550,7 +570,7 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspe
}
// Release locks and come out of runnable state.
}
- ThreadSuspendSleep(self, &delay_us, &total_delay_us, false);
+ ThreadSuspendSleep(self, &delay_us, &total_delay_us);
}
}
@@ -781,6 +801,8 @@ void ThreadList::Register(Thread* self) {
void ThreadList::Unregister(Thread* self) {
DCHECK_EQ(self, Thread::Current());
+ CHECK_NE(self->GetState(), kRunnable);
+ Locks::mutator_lock_->AssertNotHeld(self);
VLOG(threads) << "ThreadList::Unregister() " << *self;
@@ -795,14 +817,18 @@ void ThreadList::Unregister(Thread* self) {
// Note: deliberately not using MutexLock that could hold a stale self pointer.
Locks::thread_list_lock_->ExclusiveLock(self);
CHECK(Contains(self));
- // Note: we don't take the thread_suspend_count_lock_ here as to be suspending a thread other
- // than yourself you need to hold the thread_list_lock_ (see Thread::ModifySuspendCount).
+ Locks::thread_suspend_count_lock_->ExclusiveLock(self);
+ bool removed = false;
if (!self->IsSuspended()) {
list_.remove(self);
+ removed = true;
+ }
+ Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
+ if (removed) {
delete self;
self = nullptr;
}
- Locks::thread_list_lock_->ExclusiveUnlock(self);
}
// Release the thread ID after the thread is finished and deleted to avoid cases where we can
// temporarily have multiple threads with the same thread id. When this occurs, it causes
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index d46987a8b8..1b67ac0588 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -68,6 +68,7 @@ class ThreadList {
// is set to true.
static Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
bool* timed_out)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
@@ -77,6 +78,7 @@ class ThreadList {
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
// thread, that may be terminating. If the suspension times out then *timeout is set to true.
Thread* SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspension, bool* timed_out)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
LOCKS_EXCLUDED(Locks::mutator_lock_,
Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index f51b8c435a..ca5e150b89 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -178,29 +178,29 @@ bool Trace::UseWallClock() {
(clock_source_ == kTraceClockSourceDual);
}
-static void MeasureClockOverhead(Trace* trace) {
- if (trace->UseThreadCpuClock()) {
+void Trace::MeasureClockOverhead() {
+ if (UseThreadCpuClock()) {
Thread::Current()->GetCpuMicroTime();
}
- if (trace->UseWallClock()) {
+ if (UseWallClock()) {
MicroTime();
}
}
// Compute an average time taken to measure clocks.
-static uint32_t GetClockOverheadNanoSeconds(Trace* trace) {
+uint32_t Trace::GetClockOverheadNanoSeconds() {
Thread* self = Thread::Current();
uint64_t start = self->GetCpuMicroTime();
for (int i = 4000; i > 0; i--) {
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
- MeasureClockOverhead(trace);
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
+ MeasureClockOverhead();
}
uint64_t elapsed_us = self->GetCpuMicroTime() - start;
@@ -444,7 +444,8 @@ TracingMode Trace::GetMethodTracingMode() {
Trace::Trace(File* trace_file, int buffer_size, int flags, bool sampling_enabled)
: trace_file_(trace_file), buf_(new uint8_t[buffer_size]()), flags_(flags),
sampling_enabled_(sampling_enabled), clock_source_(default_clock_source_),
- buffer_size_(buffer_size), start_time_(MicroTime()), cur_offset_(0), overflow_(false) {
+ buffer_size_(buffer_size), start_time_(MicroTime()),
+ clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false) {
// Set up the beginning of the trace.
uint16_t trace_version = GetTraceVersion(clock_source_);
memset(buf_.get(), 0, kTraceHeaderLength);
@@ -480,7 +481,6 @@ void Trace::FinishTracing() {
uint64_t elapsed = MicroTime() - start_time_;
size_t final_offset = cur_offset_.LoadRelaxed();
- uint32_t clock_overhead_ns = GetClockOverheadNanoSeconds(this);
if ((flags_ & kTraceCountAllocs) != 0) {
Runtime::Current()->SetStatsEnabled(false);
@@ -506,7 +506,7 @@ void Trace::FinishTracing() {
os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
os << StringPrintf("num-method-calls=%zd\n", num_records);
- os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns);
+ os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_);
os << StringPrintf("vm=art\n");
if ((flags_ & kTraceCountAllocs) != 0) {
os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
diff --git a/runtime/trace.h b/runtime/trace.h
index d7836b8965..45a02dab3c 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -68,6 +68,8 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
bool UseWallClock();
bool UseThreadCpuClock();
+ void MeasureClockOverhead();
+ uint32_t GetClockOverheadNanoSeconds();
void CompareAndUpdateStackTrace(Thread* thread, std::vector<mirror::ArtMethod*>* stack_trace)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -155,6 +157,9 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
// Time trace was created.
const uint64_t start_time_;
+ // Clock overhead.
+ const uint32_t clock_overhead_ns_;
+
// Offset into buf_.
AtomicInteger cur_offset_;
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 8b1ad39edc..48d6cdf263 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -281,6 +281,11 @@ std::string PrettyDescriptor(const std::string& descriptor) {
return result;
}
+std::string PrettyDescriptor(Primitive::Type type) {
+ std::string descriptor_string(Primitive::Descriptor(type));
+ return PrettyDescriptor(descriptor_string);
+}
+
std::string PrettyField(mirror::ArtField* f, bool with_type) {
if (f == NULL) {
return "null";
@@ -1154,22 +1159,55 @@ const char* GetAndroidRoot() {
}
const char* GetAndroidData() {
+ std::string error_msg;
+ const char* dir = GetAndroidDataSafe(&error_msg);
+ if (dir != nullptr) {
+ return dir;
+ } else {
+ LOG(FATAL) << error_msg;
+ return "";
+ }
+}
+
+const char* GetAndroidDataSafe(std::string* error_msg) {
const char* android_data = getenv("ANDROID_DATA");
if (android_data == NULL) {
if (OS::DirectoryExists("/data")) {
android_data = "/data";
} else {
- LOG(FATAL) << "ANDROID_DATA not set and /data does not exist";
- return "";
+ *error_msg = "ANDROID_DATA not set and /data does not exist";
+ return nullptr;
}
}
if (!OS::DirectoryExists(android_data)) {
- LOG(FATAL) << "Failed to find ANDROID_DATA directory " << android_data;
- return "";
+ *error_msg = StringPrintf("Failed to find ANDROID_DATA directory %s", android_data);
+ return nullptr;
}
return android_data;
}
+void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
+ bool* have_android_data, bool* dalvik_cache_exists) {
+ CHECK(subdir != nullptr);
+ std::string error_msg;
+ const char* android_data = GetAndroidDataSafe(&error_msg);
+ if (android_data == nullptr) {
+ *have_android_data = false;
+ *dalvik_cache_exists = false;
+ return;
+ } else {
+ *have_android_data = true;
+ }
+ const std::string dalvik_cache_root(StringPrintf("%s/dalvik-cache/", android_data));
+ *dalvik_cache = dalvik_cache_root + subdir;
+ *dalvik_cache_exists = OS::DirectoryExists(dalvik_cache->c_str());
+ if (create_if_absent && !*dalvik_cache_exists && strcmp(android_data, "/data") != 0) {
+ // Don't create the system's /data/dalvik-cache/... because it needs special permissions.
+ *dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
+ (mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
+ }
+}
+
std::string GetDalvikCacheOrDie(const char* subdir, const bool create_if_absent) {
CHECK(subdir != nullptr);
const char* android_data = GetAndroidData();
@@ -1196,17 +1234,29 @@ std::string GetDalvikCacheOrDie(const char* subdir, const bool create_if_absent)
return dalvik_cache;
}
-std::string GetDalvikCacheFilenameOrDie(const char* location, const char* cache_location) {
+bool GetDalvikCacheFilename(const char* location, const char* cache_location,
+ std::string* filename, std::string* error_msg) {
if (location[0] != '/') {
- LOG(FATAL) << "Expected path in location to be absolute: "<< location;
+ *error_msg = StringPrintf("Expected path in location to be absolute: %s", location);
+ return false;
}
std::string cache_file(&location[1]); // skip leading slash
- if (!EndsWith(location, ".dex") && !EndsWith(location, ".art")) {
+ if (!EndsWith(location, ".dex") && !EndsWith(location, ".art") && !EndsWith(location, ".oat")) {
cache_file += "/";
cache_file += DexFile::kClassesDex;
}
std::replace(cache_file.begin(), cache_file.end(), '/', '@');
- return StringPrintf("%s/%s", cache_location, cache_file.c_str());
+ *filename = StringPrintf("%s/%s", cache_location, cache_file.c_str());
+ return true;
+}
+
+std::string GetDalvikCacheFilenameOrDie(const char* location, const char* cache_location) {
+ std::string ret;
+ std::string error_msg;
+ if (!GetDalvikCacheFilename(location, cache_location, &ret, &error_msg)) {
+ LOG(FATAL) << error_msg;
+ }
+ return ret;
}
static void InsertIsaDirectory(const InstructionSet isa, std::string* filename) {
@@ -1309,4 +1359,29 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
return true;
}
+void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* dst) {
+ size_t encoded_size = UnsignedLeb128Size(data);
+ size_t cur_index = dst->size();
+ dst->resize(dst->size() + encoded_size);
+ uint8_t* write_pos = &((*dst)[cur_index]);
+ uint8_t* write_pos_after = EncodeUnsignedLeb128(write_pos, data);
+ DCHECK_EQ(static_cast<size_t>(write_pos_after - write_pos), encoded_size);
+}
+
+void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* dst) {
+ size_t encoded_size = SignedLeb128Size(data);
+ size_t cur_index = dst->size();
+ dst->resize(dst->size() + encoded_size);
+ uint8_t* write_pos = &((*dst)[cur_index]);
+ uint8_t* write_pos_after = EncodeSignedLeb128(write_pos, data);
+ DCHECK_EQ(static_cast<size_t>(write_pos_after - write_pos), encoded_size);
+}
+
+void PushWord(std::vector<uint8_t>* buf, int data) {
+ buf->push_back(data & 0xff);
+ buf->push_back((data >> 8) & 0xff);
+ buf->push_back((data >> 16) & 0xff);
+ buf->push_back((data >> 24) & 0xff);
+}
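As a quick check of the byte order these helpers produce (a sketch, not part of the patch):

    std::vector<uint8_t> buf;
    PushWord(&buf, 0x12345678);
    // buf is now {0x78, 0x56, 0x34, 0x12}: words are emitted little-endian.
    EncodeUnsignedLeb128(300, &buf);
    // 300 (0x12C) appends the two LEB128 bytes 0xAC, 0x02.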
+
} // namespace art
diff --git a/runtime/utils.h b/runtime/utils.h
index 2cb3af7aec..f6773be289 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -24,13 +24,10 @@
#include <vector>
#include "base/logging.h"
+#include "base/mutex.h"
#include "globals.h"
#include "instruction_set.h"
-#include "base/mutex.h"
-
-#ifdef HAVE_ANDROID_OS
-#include "cutils/properties.h"
-#endif
+#include "primitive.h"
namespace art {
@@ -167,8 +164,7 @@ struct TypeIdentity {
// For rounding integers.
template<typename T>
-static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n)
- __attribute__((warn_unused_result));
+static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
template<typename T>
static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) {
@@ -178,8 +174,7 @@ static constexpr T RoundDown(T x, typename TypeIdentity<T>::type n) {
}
template<typename T>
-static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n)
- __attribute__((warn_unused_result));
+static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) WARN_UNUSED;
template<typename T>
static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) {
@@ -188,7 +183,7 @@ static constexpr T RoundUp(T x, typename TypeIdentity<T>::type n) {
// For aligning pointers.
template<typename T>
-static inline T* AlignDown(T* x, uintptr_t n) __attribute__((warn_unused_result));
+static inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
static inline T* AlignDown(T* x, uintptr_t n) {
@@ -196,7 +191,7 @@ static inline T* AlignDown(T* x, uintptr_t n) {
}
template<typename T>
-static inline T* AlignUp(T* x, uintptr_t n) __attribute__((warn_unused_result));
+static inline T* AlignUp(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
static inline T* AlignUp(T* x, uintptr_t n) {
@@ -281,6 +276,7 @@ std::string PrettyDescriptor(mirror::String* descriptor)
std::string PrettyDescriptor(const std::string& descriptor);
std::string PrettyDescriptor(mirror::Class* klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+std::string PrettyDescriptor(Primitive::Type type);
// Returns a human-readable signature for 'f'. Something like "a.b.C.f" or
// "int a.b.C.f" (depending on the value of 'with_type').
@@ -441,11 +437,22 @@ const char* GetAndroidRoot();
// Find $ANDROID_DATA, /data, or abort.
const char* GetAndroidData();
+// Find $ANDROID_DATA, /data, or return nullptr.
+const char* GetAndroidDataSafe(std::string* error_msg);
// Returns the dalvik-cache location, or dies trying. subdir will be
// appended to the cache location.
std::string GetDalvikCacheOrDie(const char* subdir, bool create_if_absent = true);
-
+// Look up the dalvik cache for the given subdir and store its path in dalvik_cache.
+// have_android_data will be set to true if we have an ANDROID_DATA that exists,
+// dalvik_cache_exists will be true if there is a dalvik-cache directory that is present.
+void GetDalvikCache(const char* subdir, bool create_if_absent, std::string* dalvik_cache,
+ bool* have_android_data, bool* dalvik_cache_exists);
+
+// Returns true and stores in filename the absolute dalvik-cache path for a DexFile or OatFile,
+// rooted at cache_location; on failure, returns false and sets error_msg.
+bool GetDalvikCacheFilename(const char* file_location, const char* cache_location,
+ std::string* filename, std::string* error_msg);
// Returns the absolute dalvik-cache path for a DexFile or OatFile, or
// dies trying. The path returned will be rooted at cache_location.
std::string GetDalvikCacheFilenameOrDie(const char* file_location,
@@ -488,6 +495,11 @@ class VoidFunctor {
}
};
+void PushWord(std::vector<uint8_t>* buf, int32_t data);
+
+void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* buf);
+void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* buf);
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 7cd5980c44..d6c90e1d45 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -350,6 +350,8 @@ TEST_F(UtilsTest, GetDalvikCacheFilenameOrDie) {
GetDalvikCacheFilenameOrDie("/system/framework/core.jar", "/foo").c_str());
EXPECT_STREQ("/foo/system@framework@boot.art",
GetDalvikCacheFilenameOrDie("/system/framework/boot.art", "/foo").c_str());
+ EXPECT_STREQ("/foo/system@framework@boot.oat",
+ GetDalvikCacheFilenameOrDie("/system/framework/boot.oat", "/foo").c_str());
}
TEST_F(UtilsTest, GetSystemImageFilename) {
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 62ecf4b49f..d4fe1061e2 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -66,9 +66,9 @@ inline bool MethodVerifier::HasFailures() const {
return !failure_messages_.empty();
}
-inline const RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
+inline RegType& MethodVerifier::ResolveCheckedClass(uint32_t class_idx) {
DCHECK(!HasFailures());
- const RegType& result = ResolveClassAndCheckAccess(class_idx);
+ RegType& result = ResolveClassAndCheckAccess(class_idx);
DCHECK(!HasFailures());
return result;
}
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index a1b86e05f5..329b4dc3cd 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -30,7 +30,7 @@
#include "indenter.h"
#include "intern_table.h"
#include "leb128.h"
-#include "method_helper.h"
+#include "method_helper-inl.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
@@ -1175,7 +1175,7 @@ bool MethodVerifier::SetTypesFromSignature() {
// If this is a constructor for a class other than java.lang.Object, mark the first ("this")
// argument as uninitialized. This restricts field access until the superclass constructor is
// called.
- const RegType& declaring_class = GetDeclaringClass();
+ RegType& declaring_class = GetDeclaringClass();
if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
reg_line->SetRegisterType(arg_start + cur_arg,
reg_types_.UninitializedThisArgument(declaring_class));
@@ -1207,7 +1207,7 @@ bool MethodVerifier::SetTypesFromSignature() {
// it's effectively considered initialized the instant we reach here (in the sense that we
// can return without doing anything or call virtual methods).
{
- const RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
+ RegType& reg_type = ResolveClassAndCheckAccess(iterator.GetTypeIdx());
if (!reg_type.IsNonZeroReferenceTypes()) {
DCHECK(HasFailures());
return false;
@@ -1241,8 +1241,8 @@ bool MethodVerifier::SetTypesFromSignature() {
return false;
}
- const RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
- const RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
+ RegType& lo_half = descriptor[0] == 'J' ? reg_types_.LongLo() : reg_types_.DoubleLo();
+ RegType& hi_half = descriptor[0] == 'J' ? reg_types_.LongHi() : reg_types_.DoubleHi();
reg_line->SetRegisterTypeWide(arg_start + cur_arg, lo_half, hi_half);
cur_arg++;
break;
@@ -1536,7 +1536,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* This statement can only appear as the first instruction in an exception handler. We verify
* that as part of extracting the exception type from the catch block list.
*/
- const RegType& res_type = GetCaughtExceptionType();
+ RegType& res_type = GetCaughtExceptionType();
work_line_->SetRegisterType(inst->VRegA_11x(), res_type);
break;
}
@@ -1550,7 +1550,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::RETURN:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
/* check the method signature */
- const RegType& return_type = GetMethodReturnType();
+ RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory1Types()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type "
<< return_type;
@@ -1558,7 +1558,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// Compilers may generate synthetic functions that write byte values into boolean fields.
// Also, it may use integer values for boolean, byte, short, and character return types.
const uint32_t vregA = inst->VRegA_11x();
- const RegType& src_type = work_line_->GetRegisterType(vregA);
+ RegType& src_type = work_line_->GetRegisterType(vregA);
bool use_src = ((return_type.IsBoolean() && src_type.IsByte()) ||
((return_type.IsBoolean() || return_type.IsByte() ||
return_type.IsShort() || return_type.IsChar()) &&
@@ -1575,7 +1575,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::RETURN_WIDE:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
/* check the method signature */
- const RegType& return_type = GetMethodReturnType();
+ RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory2Types()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-wide not expected";
} else {
@@ -1590,7 +1590,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
break;
case Instruction::RETURN_OBJECT:
if (!IsConstructor() || work_line_->CheckConstructorReturn()) {
- const RegType& return_type = GetMethodReturnType();
+ RegType& return_type = GetMethodReturnType();
if (!return_type.IsReferenceTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-object not expected";
} else {
@@ -1598,7 +1598,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
DCHECK(!return_type.IsZero());
DCHECK(!return_type.IsUninitializedReference());
const uint32_t vregA = inst->VRegA_11x();
- const RegType& reg_type = work_line_->GetRegisterType(vregA);
+ RegType& reg_type = work_line_->GetRegisterType(vregA);
// Disallow returning uninitialized values and verify that the reference in vAA is an
// instance of the "return_type"
if (reg_type.IsUninitializedTypes()) {
@@ -1645,29 +1645,29 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
/* could be long or double; resolved upon use */
case Instruction::CONST_WIDE_16: {
int64_t val = static_cast<int16_t>(inst->VRegB_21s());
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_21s(), lo, hi);
break;
}
case Instruction::CONST_WIDE_32: {
int64_t val = static_cast<int32_t>(inst->VRegB_31i());
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_31i(), lo, hi);
break;
}
case Instruction::CONST_WIDE: {
int64_t val = inst->VRegB_51l();
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_51l(), lo, hi);
break;
}
case Instruction::CONST_WIDE_HIGH16: {
int64_t val = static_cast<uint64_t>(inst->VRegB_21h()) << 48;
- const RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& lo = reg_types_.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& hi = reg_types_.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
work_line_->SetRegisterTypeWide(inst->VRegA_21h(), lo, hi);
break;
}
@@ -1680,7 +1680,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::CONST_CLASS: {
// Get type from instruction if unresolved then we need an access check
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
- const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
// Register holds class, ie its type is class, on error it will hold Conflict.
work_line_->SetRegisterType(inst->VRegA_21c(),
res_type.IsConflict() ? res_type
@@ -1726,8 +1726,17 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
*/
const bool is_checkcast = (inst->Opcode() == Instruction::CHECK_CAST);
const uint32_t type_idx = (is_checkcast) ? inst->VRegB_21c() : inst->VRegC_22c();
- const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+ RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) {
+ // If this is a primitive type, fail HARD.
+ mirror::Class* klass = (*dex_cache_)->GetResolvedType(type_idx);
+ if (klass != nullptr && klass->IsPrimitive()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "using primitive type "
+ << dex_file_->StringByTypeIdx(type_idx) << " in instanceof in "
+ << GetDeclaringClass();
+ break;
+ }
+
DCHECK_NE(failures_.size(), 0U);
if (!is_checkcast) {
work_line_->SetRegisterType(inst->VRegA_22c(), reg_types_.Boolean());
@@ -1736,7 +1745,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
// TODO: check Compiler::CanAccessTypeWithoutChecks returns false when res_type is unresolved
uint32_t orig_type_reg = (is_checkcast) ? inst->VRegA_21c() : inst->VRegB_22c();
- const RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
+ RegType& orig_type = work_line_->GetRegisterType(orig_type_reg);
if (!res_type.IsNonZeroReferenceTypes()) {
if (is_checkcast) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "check-cast on unexpected class " << res_type;
@@ -1759,18 +1768,20 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
break;
}
case Instruction::ARRAY_LENGTH: {
- const RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
+ RegType& res_type = work_line_->GetRegisterType(inst->VRegB_12x());
if (res_type.IsReferenceTypes()) {
if (!res_type.IsArrayTypes() && !res_type.IsZero()) { // ie not an array or null
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
} else {
work_line_->SetRegisterType(inst->VRegA_12x(), reg_types_.Integer());
}
+ } else {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array-length on non-array " << res_type;
}
break;
}
case Instruction::NEW_INSTANCE: {
- const RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
+ RegType& res_type = ResolveClassAndCheckAccess(inst->VRegB_21c());
if (res_type.IsConflict()) {
DCHECK_NE(failures_.size(), 0U);
break; // bad class
@@ -1782,7 +1793,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
<< "new-instance on primitive, interface or abstract class" << res_type;
// Soft failure so carry on to set register type.
}
- const RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
+ RegType& uninit_type = reg_types_.Uninitialized(res_type, work_insn_idx_);
// Any registers holding previous allocations from this address that have not yet been
// initialized must be marked invalid.
work_line_->MarkUninitRefsAsInvalid(uninit_type);
@@ -1835,7 +1846,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
work_line_->SetRegisterType(inst->VRegA_23x(), reg_types_.Integer());
break;
case Instruction::THROW: {
- const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
+ RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
Fail(res_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS : VERIFY_ERROR_BAD_CLASS_SOFT)
<< "thrown class " << res_type << " not instanceof Throwable";
@@ -1856,14 +1867,14 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::FILL_ARRAY_DATA: {
/* Similar to the verification done for APUT */
- const RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
+ RegType& array_type = work_line_->GetRegisterType(inst->VRegA_31t());
/* array_type can be null if the reg type is Zero */
if (!array_type.IsZero()) {
if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
<< array_type;
} else {
- const RegType& component_type = reg_types_.GetComponentType(array_type,
+ RegType& component_type = reg_types_.GetComponentType(array_type,
class_loader_->Get());
DCHECK(!component_type.IsConflict());
if (component_type.IsNonZeroReferenceTypes()) {
@@ -1891,8 +1902,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
case Instruction::IF_EQ:
case Instruction::IF_NE: {
- const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
- const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+ RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
bool mismatch = false;
if (reg_type1.IsZero()) { // zero then integral or reference expected
mismatch = !reg_type2.IsReferenceTypes() && !reg_type2.IsIntegralTypes();
@@ -1911,8 +1922,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::IF_GE:
case Instruction::IF_GT:
case Instruction::IF_LE: {
- const RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
- const RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
+ RegType& reg_type1 = work_line_->GetRegisterType(inst->VRegA_22t());
+ RegType& reg_type2 = work_line_->GetRegisterType(inst->VRegB_22t());
if (!reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to 'if' (" << reg_type1 << ","
<< reg_type2 << ") must be integral";
@@ -1921,7 +1932,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
case Instruction::IF_EQZ:
case Instruction::IF_NEZ: {
- const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+ RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
<< " unexpected as arg to if-eqz/if-nez";
@@ -1967,11 +1978,12 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// type is assignable to the original then allow optimization. This check is performed to
// ensure that subsequent merges don't lose type information - such as becoming an
// interface from a class that would lose information relevant to field checks.
- const RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
- const RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
+ RegType& orig_type = work_line_->GetRegisterType(instance_of_inst->VRegB_22c());
+ RegType& cast_type = ResolveClassAndCheckAccess(instance_of_inst->VRegC_22c());
if (!orig_type.Equals(cast_type) &&
!cast_type.IsUnresolvedTypes() && !orig_type.IsUnresolvedTypes() &&
+ cast_type.HasClass() && // Could be conflict type, make sure it has a class.
!cast_type.GetClass()->IsInterface() &&
(orig_type.IsZero() ||
orig_type.IsStrictlyAssignableFrom(cast_type.Merge(orig_type, &reg_types_)))) {
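The extra cast_type.HasClass() term guards the following GetClass()->IsInterface() call: a conflict or unresolved cast type carries no class, so the probe has to be short-circuited away. A small self-contained sketch of that evaluation order, with a simplified record standing in for RegType, might be:

    #include <cassert>

    struct FakeClass { bool is_interface; };

    // Simplified stand-in for RegType: a conflict type has no class attached.
    struct FakeRegType {
      const FakeClass* klass = nullptr;
      bool HasClass() const { return klass != nullptr; }
      const FakeClass* GetClass() const { assert(klass != nullptr); return klass; }
    };

    // HasClass() must be evaluated first; && short-circuits, so a class-less
    // (conflict) cast type never reaches the IsInterface() probe.
    bool CanNarrowBranchType(const FakeRegType& cast_type) {
      return cast_type.HasClass() && !cast_type.GetClass()->is_interface;
    }

    int main() {
      FakeRegType conflict;  // no class attached
      FakeClass plain{/*is_interface=*/false};
      FakeRegType resolved;
      resolved.klass = &plain;
      return (!CanNarrowBranchType(conflict) && CanNarrowBranchType(resolved)) ? 0 : 1;
    }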
@@ -2022,7 +2034,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
case Instruction::IF_GEZ:
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
- const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
+ RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
<< " unexpected as arg to if-ltz/if-gez/if-gtz/if-lez";
@@ -2171,7 +2183,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
inst->Opcode() == Instruction::INVOKE_SUPER_RANGE);
mirror::ArtMethod* called_method = VerifyInvocationArgs(inst, METHOD_VIRTUAL, is_range,
is_super);
- const RegType* return_type = nullptr;
+ RegType* return_type = nullptr;
if (called_method != nullptr) {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
@@ -2227,7 +2239,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* allowing the latter only if the "this" argument is the same as the "this" argument to
* this method (which implies that we're in a constructor ourselves).
*/
- const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
if (this_type.IsConflict()) // failure.
break;
@@ -2238,7 +2250,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
/* must be in same class or in superclass */
- // const RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
+ // RegType& this_super_klass = this_type.GetSuperClass(&reg_types_);
// TODO: re-enable constructor type verification
// if (this_super_klass.IsConflict()) {
// Unknown super class, fail so we re-check at runtime.
@@ -2259,7 +2271,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
*/
work_line_->MarkRefsAsInitialized(this_type);
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(),
return_type_descriptor, false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2285,7 +2297,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
} else {
descriptor = called_method->GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2313,7 +2325,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
/* Get the type of the "this" arg, which should either be a sub-interface of called
* interface or Object (see comments in RegType::JoinClass).
*/
- const RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& this_type = work_line_->GetInvocationThis(inst, is_range);
if (this_type.IsZero()) {
/* null pointer always passes (and always fails at runtime) */
} else {
@@ -2343,7 +2355,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
} else {
descriptor = abs_method->GetReturnTypeDescriptor();
}
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2609,7 +2621,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
if (called_method != NULL) {
const char* descriptor = called_method->GetReturnTypeDescriptor();
- const RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
+ RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
false);
if (!return_type.IsLowHalf()) {
work_line_->SetResultRegisterType(return_type);
@@ -2763,12 +2775,30 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* "try" block when they throw, control transfers out of the method.)
*/
if ((opcode_flags & Instruction::kThrow) != 0 && insn_flags_[work_insn_idx_].IsInTry()) {
- bool within_catch_all = false;
+ bool has_catch_all_handler = false;
CatchHandlerIterator iterator(*code_item_, work_insn_idx_);
+ // Need the linker to try and resolve the handled class to check if it's Throwable.
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+
for (; iterator.HasNext(); iterator.Next()) {
- if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
- within_catch_all = true;
+ uint16_t handler_type_idx = iterator.GetHandlerTypeIndex();
+ if (handler_type_idx == DexFile::kDexNoIndex16) {
+ has_catch_all_handler = true;
+ } else {
+ // It is also a catch-all if it is java.lang.Throwable.
+ mirror::Class* klass = linker->ResolveType(*dex_file_, handler_type_idx, *dex_cache_,
+ *class_loader_);
+ if (klass != nullptr) {
+ if (klass == mirror::Throwable::GetJavaLangThrowable()) {
+ has_catch_all_handler = true;
+ }
+ } else {
+ // Clear exception.
+ Thread* self = Thread::Current();
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ }
}
/*
* Merge registers into the "catch" block. We want to use the "savedRegs" rather than
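The rewritten loop above no longer treats only the typeless handler entry as a catch-all: a handler whose type resolves to java.lang.Throwable now counts as well, and handlers that fail to resolve simply clear the pending exception and are skipped. A compact standalone model of that decision, with dex-cache resolution replaced by a precomputed descriptor, could read:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Stand-in for one entry of a dex catch-handler list: the type index plus
    // the descriptor it would resolve to ("" if resolution fails).
    struct Handler {
      uint16_t type_idx;
      std::string resolved_descriptor;
    };

    constexpr uint16_t kDexNoIndex16 = 0xFFFF;

    // A handler is a catch-all either when it has no type index at all or when
    // its type resolves to java.lang.Throwable; unresolvable handlers are
    // skipped (the real verifier also clears the pending resolution exception).
    bool HasCatchAllHandler(const std::vector<Handler>& handlers) {
      for (const Handler& h : handlers) {
        if (h.type_idx == kDexNoIndex16) return true;
        if (h.resolved_descriptor == "Ljava/lang/Throwable;") return true;
      }
      return false;
    }

    int main() {
      std::vector<Handler> handlers = {
          {42, "Ljava/io/IOException;"},
          {7, "Ljava/lang/Throwable;"},  // explicit Throwable handler counts as catch-all
      };
      return HasCatchAllHandler(handlers) ? 0 : 1;
    }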
@@ -2784,7 +2814,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
* If the monitor stack depth is nonzero, there must be a "catch all" handler for this
* instruction. This does apply to monitor-exit because of async exception handling.
*/
- if (work_line_->MonitorStackDepth() > 0 && !within_catch_all) {
+ if (work_line_->MonitorStackDepth() > 0 && !has_catch_all_handler) {
/*
* The state in work_line reflects the post-execution state. If the current instruction is a
* monitor-enter and the monitor stack was empty, we don't need a catch-all (if it throws,
@@ -2875,11 +2905,11 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
return true;
} // NOLINT(readability/fn_size)
-const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
+RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
- const RegType& referrer = GetDeclaringClass();
+ RegType& referrer = GetDeclaringClass();
mirror::Class* klass = (*dex_cache_)->GetResolvedType(class_idx);
- const RegType& result =
+ RegType& result =
klass != NULL ? reg_types_.FromClass(descriptor, klass,
klass->CannotBeAssignedFromOtherTypes())
: reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
@@ -2902,8 +2932,8 @@ const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
return result;
}
-const RegType& MethodVerifier::GetCaughtExceptionType() {
- const RegType* common_super = NULL;
+RegType& MethodVerifier::GetCaughtExceptionType() {
+ RegType* common_super = NULL;
if (code_item_->tries_size_ != 0) {
const byte* handlers_ptr = DexFile::GetCatchHandlerData(*code_item_, 0);
uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
@@ -2914,7 +2944,7 @@ const RegType& MethodVerifier::GetCaughtExceptionType() {
if (iterator.GetHandlerTypeIndex() == DexFile::kDexNoIndex16) {
common_super = &reg_types_.JavaLangThrowable(false);
} else {
- const RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
+ RegType& exception = ResolveClassAndCheckAccess(iterator.GetHandlerTypeIndex());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception)) {
if (exception.IsUnresolvedTypes()) {
// We don't know enough about the type. Fail here and let runtime handle it.
@@ -2949,7 +2979,7 @@ const RegType& MethodVerifier::GetCaughtExceptionType() {
mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_method_idx,
MethodType method_type) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx);
- const RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
+ RegType& klass_type = ResolveClassAndCheckAccess(method_id.class_idx_);
if (klass_type.IsConflict()) {
std::string append(" in attempt to access method ");
append += dex_file_->GetMethodName(method_id);
@@ -2960,7 +2990,7 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth
return NULL; // Can't resolve Class so no more to do here
}
mirror::Class* klass = klass_type.GetClass();
- const RegType& referrer = GetDeclaringClass();
+ RegType& referrer = GetDeclaringClass();
mirror::ArtMethod* res_method = (*dex_cache_)->GetResolvedMethod(dex_method_idx);
if (res_method == NULL) {
const char* name = dex_file_->GetMethodName(method_id);
@@ -3067,7 +3097,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(T* it, const
* rigorous check here (which is okay since we have to do it at runtime).
*/
if (method_type != METHOD_STATIC) {
- const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
CHECK(have_pending_hard_failure_);
return nullptr;
@@ -3081,14 +3111,14 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(T* it, const
} else {
// Check whether the name of the called method is "<init>"
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- if (strcmp(dex_file_->GetMethodName(dex_file_->GetMethodId(method_idx)), "init") != 0) {
+ if (strcmp(dex_file_->GetMethodName(dex_file_->GetMethodId(method_idx)), "<init>") != 0) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized";
return nullptr;
}
}
}
if (method_type != METHOD_INTERFACE && !actual_arg_type.IsZero()) {
- const RegType* res_method_class;
+ RegType* res_method_class;
if (res_method != nullptr) {
mirror::Class* klass = res_method->GetDeclaringClass();
res_method_class = &reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
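The strcmp change above is a genuine fix: the uninitialized-this check used to compare the invoked method's name against "init" rather than the constructor name "<init>", so it could never match. A trivial standalone version of the corrected comparison:

    #include <cstring>

    // An uninitialized 'this' may only flow into a constructor call, so the
    // invoked method's name must be exactly "<init>" (not "init").
    bool IsConstructorName(const char* method_name) {
      return std::strcmp(method_name, "<init>") == 0;
    }

    int main() {
      return (IsConstructorName("<init>") && !IsConstructorName("init")) ? 0 : 1;
    }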
@@ -3129,12 +3159,12 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(T* it, const
return nullptr;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
+ RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + static_cast<uint32_t>(sig_registers) :
arg[sig_registers];
if (reg_type.IsIntegralTypes()) {
- const RegType& src_type = work_line_->GetRegisterType(get_reg);
+ RegType& src_type = work_line_->GetRegisterType(get_reg);
if (!src_type.IsIntegralTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << get_reg << " has type " << src_type
<< " but expected " << reg_type;
@@ -3217,7 +3247,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst,
// has a vtable entry for the target method.
if (is_super) {
DCHECK(method_type == METHOD_VIRTUAL);
- const RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
+ RegType& super = GetDeclaringClass().GetSuperClass(&reg_types_);
if (super.IsUnresolvedTypes()) {
Fail(VERIFY_ERROR_NO_METHOD) << "unknown super class in invoke-super from "
<< PrettyMethod(dex_method_idx_, *dex_file_)
@@ -3225,7 +3255,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst,
return nullptr;
}
mirror::Class* super_klass = super.GetClass();
- if (res_method->GetMethodIndex() >= super_klass->GetVTable()->GetLength()) {
+ if (res_method->GetMethodIndex() >= super_klass->GetVTableLength()) {
Fail(VERIFY_ERROR_NO_METHOD) << "invalid invoke-super from "
<< PrettyMethod(dex_method_idx_, *dex_file_)
<< " to super " << super
@@ -3245,25 +3275,26 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst
RegisterLine* reg_line, bool is_range) {
DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
- const RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
+ RegType& actual_arg_type = reg_line->GetInvocationThis(inst, is_range);
if (!actual_arg_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
return nullptr;
}
- mirror::ObjectArray<mirror::ArtMethod>* vtable = nullptr;
mirror::Class* klass = actual_arg_type.GetClass();
+ mirror::Class* dispatch_class;
if (klass->IsInterface()) {
// Derive Object.class from Class.class.getSuperclass().
mirror::Class* object_klass = klass->GetClass()->GetSuperClass();
CHECK(object_klass->IsObjectClass());
- vtable = object_klass->GetVTable();
+ dispatch_class = object_klass;
} else {
- vtable = klass->GetVTable();
+ dispatch_class = klass;
}
- CHECK(vtable != nullptr) << PrettyDescriptor(klass);
+ CHECK(dispatch_class->HasVTable()) << PrettyDescriptor(dispatch_class);
uint16_t vtable_index = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- CHECK_LT(static_cast<int32_t>(vtable_index), vtable->GetLength()) << PrettyDescriptor(klass);
- mirror::ArtMethod* res_method = vtable->Get(vtable_index);
+ CHECK_LT(static_cast<int32_t>(vtable_index), dispatch_class->GetVTableLength())
+ << PrettyDescriptor(klass);
+ mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
CHECK(!Thread::Current()->IsExceptionPending());
return res_method;
}
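GetQuickInvokedMethod now queries the class directly through HasVTable/GetVTableLength/GetVTableEntry instead of fetching a separate vtable object array, still falling back to java.lang.Object's table when the receiver's static type is an interface. A simplified standalone model of that dispatch-class selection and bounds check, using local structs in place of mirror::Class, might be:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Simplified stand-in for mirror::Class: a name, an interface flag, and
    // the method names that would sit in its vtable.
    struct FakeClass {
      std::string name;
      bool is_interface;
      std::vector<std::string> vtable;  // dispatch table
    };

    // Quickened virtual invokes index straight into a vtable; if the receiver's
    // static type is an interface, dispatch falls back to java.lang.Object.
    const std::string* LookupQuickTarget(const FakeClass& receiver,
                                         const FakeClass& object_class,
                                         uint16_t vtable_index) {
      const FakeClass& dispatch_class = receiver.is_interface ? object_class : receiver;
      if (vtable_index >= dispatch_class.vtable.size()) {
        return nullptr;  // the real code CHECKs instead of returning null
      }
      return &dispatch_class.vtable[vtable_index];
    }

    int main() {
      FakeClass object{"java.lang.Object", false, {"toString", "hashCode", "equals"}};
      FakeClass runnable{"java.lang.Runnable", true, {}};
      const std::string* target = LookupQuickTarget(runnable, object, 1);
      return (target != nullptr && *target == "hashCode") ? 0 : 1;
    }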
@@ -3282,7 +3313,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instructio
// We use vAA as our expected arg count, rather than res_method->insSize, because we need to
// match the call to the signature. Also, we might be calling through an abstract method
// definition (which doesn't have register count values).
- const RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
+ RegType& actual_arg_type = work_line_->GetInvocationThis(inst, is_range);
if (actual_arg_type.IsConflict()) { // GetInvocationThis failed.
return NULL;
}
@@ -3306,7 +3337,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instructio
}
if (!actual_arg_type.IsZero()) {
mirror::Class* klass = res_method->GetDeclaringClass();
- const RegType& res_method_class =
+ RegType& res_method_class =
reg_types_.FromClass(klass->GetDescriptor().c_str(), klass,
klass->CannotBeAssignedFromOtherTypes());
if (!res_method_class.IsAssignableFrom(actual_arg_type)) {
@@ -3342,7 +3373,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instructio
<< " missing signature component";
return NULL;
}
- const RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+ RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
return res_method;
@@ -3370,7 +3401,7 @@ void MethodVerifier::VerifyNewArray(const Instruction* inst, bool is_filled, boo
DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
type_idx = inst->VRegB_3rc();
}
- const RegType& res_type = ResolveClassAndCheckAccess(type_idx);
+ RegType& res_type = ResolveClassAndCheckAccess(type_idx);
if (res_type.IsConflict()) { // bad class
DCHECK_NE(failures_.size(), 0U);
} else {
@@ -3381,12 +3412,12 @@ void MethodVerifier::VerifyNewArray(const Instruction* inst, bool is_filled, boo
/* make sure "size" register is valid type */
work_line_->VerifyRegisterType(inst->VRegB_22c(), reg_types_.Integer());
/* set register type to array class */
- const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+ RegType& precise_type = reg_types_.FromUninitialized(res_type);
work_line_->SetRegisterType(inst->VRegA_22c(), precise_type);
} else {
// Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
// the list and fail. It's legal, if silly, for arg_count to be zero.
- const RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
+ RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
uint32_t arg[5];
if (!is_range) {
@@ -3400,19 +3431,19 @@ void MethodVerifier::VerifyNewArray(const Instruction* inst, bool is_filled, boo
}
}
// filled-array result goes into "result" register
- const RegType& precise_type = reg_types_.FromUninitialized(res_type);
+ RegType& precise_type = reg_types_.FromUninitialized(res_type);
work_line_->SetResultRegisterType(precise_type);
}
}
}
void MethodVerifier::VerifyAGet(const Instruction* inst,
- const RegType& insn_type, bool is_primitive) {
- const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+ RegType& insn_type, bool is_primitive) {
+ RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
- const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+ RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
if (array_type.IsZero()) {
// Null array class; this code path will fail at runtime. Infer a merge-able type from the
// instruction type. TODO: have a proper notion of bottom here.
@@ -3428,7 +3459,7 @@ void MethodVerifier::VerifyAGet(const Instruction* inst,
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
} else {
/* verify the class */
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+ RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
if (!component_type.IsReferenceTypes() && !is_primitive) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
<< " source for aget-object";
@@ -3455,12 +3486,12 @@ void MethodVerifier::VerifyAGet(const Instruction* inst,
}
}
-void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+void MethodVerifier::VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
const uint32_t vregA) {
// Primitive assignability rules are weaker than regular assignability rules.
bool instruction_compatible;
bool value_compatible;
- const RegType& value_type = work_line_->GetRegisterType(vregA);
+ RegType& value_type = work_line_->GetRegisterType(vregA);
if (target_type.IsIntegralTypes()) {
instruction_compatible = target_type.Equals(insn_type);
value_compatible = value_type.IsIntegralTypes();
@@ -3469,10 +3500,12 @@ void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegTyp
value_compatible = value_type.IsFloatTypes();
} else if (target_type.IsLong()) {
instruction_compatible = insn_type.IsLong();
- value_compatible = value_type.IsLongTypes();
+ RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+ value_compatible = value_type.IsLongTypes() && value_type.CheckWidePair(value_type_hi);
} else if (target_type.IsDouble()) {
instruction_compatible = insn_type.IsLong(); // no put-double, so expect put-long
- value_compatible = value_type.IsDoubleTypes();
+ RegType& value_type_hi = work_line_->GetRegisterType(vregA + 1);
+ value_compatible = value_type.IsDoubleTypes() && value_type.CheckWidePair(value_type_hi);
} else {
instruction_compatible = false; // reference with primitive store
value_compatible = false; // unused
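For long and double stores the hunk above strengthens value_compatible: the low half in vregA is only acceptable when vregA+1 holds the matching high half (the CheckWidePair call). A self-contained sketch of that pair check over a simplified register line:

    #include <cstdint>
    #include <vector>

    // Simplified register-line slots: wide values occupy two adjacent vregs.
    enum class Slot { kInt, kLongLo, kLongHi, kDoubleLo, kDoubleHi, kConflict };

    // For a 64-bit store it is not enough that vregA holds a long/double low
    // half; vregA+1 must hold the matching high half.
    bool WideValueCompatible(const std::vector<Slot>& regs, uint32_t vreg_a, bool is_double) {
      if (vreg_a + 1 >= regs.size()) return false;
      const Slot lo = regs[vreg_a];
      const Slot hi = regs[vreg_a + 1];
      return is_double ? (lo == Slot::kDoubleLo && hi == Slot::kDoubleHi)
                       : (lo == Slot::kLongLo && hi == Slot::kLongHi);
    }

    int main() {
      // v2/v3 hold a proper long pair; v4's high half has been clobbered.
      std::vector<Slot> regs = {Slot::kInt, Slot::kInt, Slot::kLongLo, Slot::kLongHi,
                                Slot::kLongLo, Slot::kConflict};
      return (WideValueCompatible(regs, 2, false) && !WideValueCompatible(regs, 4, false)) ? 0 : 1;
    }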
@@ -3493,19 +3526,19 @@ void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegTyp
}
void MethodVerifier::VerifyAPut(const Instruction* inst,
- const RegType& insn_type, bool is_primitive) {
- const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
+ RegType& insn_type, bool is_primitive) {
+ RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
if (!index_type.IsArrayIndexTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Invalid reg type for array index (" << index_type << ")";
} else {
- const RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
+ RegType& array_type = work_line_->GetRegisterType(inst->VRegB_23x());
if (array_type.IsZero()) {
// Null array type; this code path will fail at runtime. Infer a merge-able type from the
// instruction type.
} else if (!array_type.IsArrayTypes()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
} else {
- const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+ RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
const uint32_t vregA = inst->VRegA_23x();
if (is_primitive) {
VerifyPrimitivePut(component_type, insn_type, vregA);
@@ -3527,7 +3560,7 @@ void MethodVerifier::VerifyAPut(const Instruction* inst,
mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
- const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+ RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
if (klass_type.IsConflict()) { // bad class
AppendToLastFailMessage(StringPrintf(" in attempt to access static field %d (%s) in %s",
field_idx, dex_file_->GetFieldName(field_id),
@@ -3559,10 +3592,10 @@ mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
return field;
}
-mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_idx) {
+mirror::ArtField* MethodVerifier::GetInstanceField(RegType& obj_type, int field_idx) {
const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
// Check access to class
- const RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
+ RegType& klass_type = ResolveClassAndCheckAccess(field_id.class_idx_);
if (klass_type.IsConflict()) {
AppendToLastFailMessage(StringPrintf(" in attempt to access instance field %d (%s) in %s",
field_idx, dex_file_->GetFieldName(field_id),
@@ -3596,7 +3629,7 @@ mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int
return field;
} else {
mirror::Class* klass = field->GetDeclaringClass();
- const RegType& field_klass =
+ RegType& field_klass =
reg_types_.FromClass(dex_file_->GetFieldDeclaringClassDescriptor(field_id),
klass, klass->CannotBeAssignedFromOtherTypes());
if (obj_type.IsUninitializedTypes() &&
@@ -3621,17 +3654,17 @@ mirror::ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int
}
}
-void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyISGet(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static) {
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::ArtField* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
- const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+ RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
- const RegType* field_type = nullptr;
+ RegType* field_type = nullptr;
if (field != NULL) {
Thread* self = Thread::Current();
mirror::Class* field_type_class;
@@ -3687,17 +3720,17 @@ void MethodVerifier::VerifyISGet(const Instruction* inst, const RegType& insn_ty
}
}
-void MethodVerifier::VerifyISPut(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyISPut(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static) {
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
mirror::ArtField* field;
if (is_static) {
field = GetStaticField(field_idx);
} else {
- const RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
+ RegType& object_type = work_line_->GetRegisterType(inst->VRegB_22c());
field = GetInstanceField(object_type, field_idx);
}
- const RegType* field_type = nullptr;
+ RegType* field_type = nullptr;
if (field != NULL) {
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3749,7 +3782,7 @@ mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
inst->Opcode() == Instruction::IPUT_QUICK ||
inst->Opcode() == Instruction::IPUT_WIDE_QUICK ||
inst->Opcode() == Instruction::IPUT_OBJECT_QUICK);
- const RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
+ RegType& object_type = reg_line->GetRegisterType(inst->VRegB_22c());
if (!object_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << object_type << "'";
return nullptr;
@@ -3764,7 +3797,7 @@ mirror::ArtField* MethodVerifier::GetQuickFieldAccess(const Instruction* inst,
return f;
}
-void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive) {
DCHECK(Runtime::Current()->IsStarted());
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3779,7 +3812,7 @@ void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& ins
FieldHelper fh(h_field);
field_type_class = fh.GetType(can_load_classes_);
}
- const RegType* field_type;
+ RegType* field_type;
if (field_type_class != nullptr) {
field_type = &reg_types_.FromClass(field->GetTypeDescriptor(), field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
@@ -3824,7 +3857,7 @@ void MethodVerifier::VerifyIGetQuick(const Instruction* inst, const RegType& ins
}
}
-void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+void MethodVerifier::VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive) {
DCHECK(Runtime::Current()->IsStarted());
mirror::ArtField* field = GetQuickFieldAccess(inst, work_line_.get());
@@ -3834,7 +3867,7 @@ void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& ins
}
const char* descriptor = field->GetTypeDescriptor();
mirror::ClassLoader* loader = field->GetDeclaringClass()->GetClassLoader();
- const RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
+ RegType& field_type = reg_types_.FromDescriptor(loader, descriptor, false);
if (field != NULL) {
if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) {
Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << PrettyField(field)
@@ -3847,7 +3880,7 @@ void MethodVerifier::VerifyIPutQuick(const Instruction* inst, const RegType& ins
// Primitive field assignability rules are weaker than regular assignability rules
bool instruction_compatible;
bool value_compatible;
- const RegType& value_type = work_line_->GetRegisterType(vregA);
+ RegType& value_type = work_line_->GetRegisterType(vregA);
if (field_type.IsIntegralTypes()) {
instruction_compatible = insn_type.IsIntegralTypes();
value_compatible = value_type.IsIntegralTypes();
@@ -3965,7 +3998,7 @@ InstructionFlags* MethodVerifier::CurrentInsnFlags() {
return &insn_flags_[work_insn_idx_];
}
-const RegType& MethodVerifier::GetMethodReturnType() {
+RegType& MethodVerifier::GetMethodReturnType() {
if (return_type_ == nullptr) {
if (mirror_method_ != NULL) {
Thread* self = Thread::Current();
@@ -3995,7 +4028,7 @@ const RegType& MethodVerifier::GetMethodReturnType() {
return *return_type_;
}
-const RegType& MethodVerifier::GetDeclaringClass() {
+RegType& MethodVerifier::GetDeclaringClass() {
if (declaring_class_ == NULL) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
const char* descriptor
@@ -4016,7 +4049,7 @@ std::vector<int32_t> MethodVerifier::DescribeVRegs(uint32_t dex_pc) {
DCHECK(line != nullptr) << "No register line at DEX pc " << StringPrintf("0x%x", dex_pc);
std::vector<int32_t> result;
for (size_t i = 0; i < line->NumRegs(); ++i) {
- const RegType& type = line->GetRegisterType(i);
+ RegType& type = line->GetRegisterType(i);
if (type.IsConstant()) {
result.push_back(type.IsPreciseConstant() ? kConstant : kImpreciseConstant);
result.push_back(type.ConstantValue());
@@ -4056,7 +4089,7 @@ std::vector<int32_t> MethodVerifier::DescribeVRegs(uint32_t dex_pc) {
return result;
}
-const RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
+RegType& MethodVerifier::DetermineCat1Constant(int32_t value, bool precise) {
if (precise) {
// Precise constant type.
return reg_types_.FromCat1Const(value, true);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 757c41993c..e63a90c2ba 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -230,7 +230,7 @@ class MethodVerifier {
bool HasCheckCasts() const;
bool HasVirtualOrInterfaceInvokes() const;
bool HasFailures() const;
- const RegType& ResolveCheckedClass(uint32_t class_idx)
+ RegType& ResolveCheckedClass(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
@@ -471,34 +471,34 @@ class MethodVerifier {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Helper to perform verification on puts of primitive type.
- void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+ void VerifyPrimitivePut(RegType& target_type, RegType& insn_type,
const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an aget instruction. The destination register's type will be set to
// be that of component type of the array unless the array type is unknown, in which case a
// bottom type inferred from the type of instruction is used. is_primitive is false for an
// aget-object.
- void VerifyAGet(const Instruction* inst, const RegType& insn_type,
+ void VerifyAGet(const Instruction* inst, RegType& insn_type,
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an aput instruction.
- void VerifyAPut(const Instruction* inst, const RegType& insn_type,
+ void VerifyAPut(const Instruction* inst, RegType& insn_type,
bool is_primitive) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup instance field and fail for resolution violations
- mirror::ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
+ mirror::ArtField* GetInstanceField(RegType& obj_type, int field_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Lookup static field and fail for resolution violations
mirror::ArtField* GetStaticField(int field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iget or sget instruction.
- void VerifyISGet(const Instruction* inst, const RegType& insn_type,
+ void VerifyISGet(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iput or sput instruction.
- void VerifyISPut(const Instruction* inst, const RegType& insn_type,
+ void VerifyISPut(const Instruction* inst, RegType& insn_type,
bool is_primitive, bool is_static)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -508,18 +508,18 @@ class MethodVerifier {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iget-quick instruction.
- void VerifyIGetQuick(const Instruction* inst, const RegType& insn_type,
+ void VerifyIGetQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Perform verification of an iput-quick instruction.
- void VerifyIPutQuick(const Instruction* inst, const RegType& insn_type,
+ void VerifyIPutQuick(const Instruction* inst, RegType& insn_type,
bool is_primitive)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Resolves a class based on an index and performs access checks to ensure the referrer can
// access the resolved class.
- const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
+ RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -527,7 +527,7 @@ class MethodVerifier {
* address, determine the Join of all exceptions that can land here. Fails if no matching
* exception handler can be found or if the Join of exception types fails.
*/
- const RegType& GetCaughtExceptionType()
+ RegType& GetCaughtExceptionType()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -613,14 +613,14 @@ class MethodVerifier {
}
// Return the register type for the method.
- const RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& GetMethodReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get a type representing the declaring class of the method.
- const RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
InstructionFlags* CurrentInsnFlags();
- const RegType& DetermineCat1Constant(int32_t value, bool precise)
+ RegType& DetermineCat1Constant(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
RegTypeCache reg_types_;
@@ -641,7 +641,7 @@ class MethodVerifier {
// Its object representation if known.
mirror::ArtMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_);
const uint32_t method_access_flags_; // Method's access flags.
- const RegType* return_type_; // Lazily computed return type of the method.
+ RegType* return_type_; // Lazily computed return type of the method.
const DexFile* const dex_file_; // The dex file containing the method.
// The dex_cache for the declaring class of the method.
Handle<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
@@ -649,7 +649,7 @@ class MethodVerifier {
Handle<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
const DexFile::ClassDef* const class_def_; // The class def of the declaring class of the method.
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
- const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
+ RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
std::unique_ptr<InstructionFlags[]> insn_flags_;
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index f0729e4271..6422cdf979 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -81,7 +81,7 @@ Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t
: PrimitiveType(klass, descriptor, cache_id) {
}
-std::string PreciseConstType::Dump() const {
+std::string PreciseConstType::Dump() {
std::stringstream result;
uint32_t val = ConstantValue();
if (val == 0) {
@@ -98,47 +98,47 @@ std::string PreciseConstType::Dump() const {
return result.str();
}
-std::string BooleanType::Dump() const {
+std::string BooleanType::Dump() {
return "Boolean";
}
-std::string ConflictType::Dump() const {
+std::string ConflictType::Dump() {
return "Conflict";
}
-std::string ByteType::Dump() const {
+std::string ByteType::Dump() {
return "Byte";
}
-std::string ShortType::Dump() const {
+std::string ShortType::Dump() {
return "Short";
}
-std::string CharType::Dump() const {
+std::string CharType::Dump() {
return "Char";
}
-std::string FloatType::Dump() const {
+std::string FloatType::Dump() {
return "Float";
}
-std::string LongLoType::Dump() const {
+std::string LongLoType::Dump() {
return "Long (Low Half)";
}
-std::string LongHiType::Dump() const {
+std::string LongHiType::Dump() {
return "Long (High Half)";
}
-std::string DoubleLoType::Dump() const {
+std::string DoubleLoType::Dump() {
return "Double (Low Half)";
}
-std::string DoubleHiType::Dump() const {
+std::string DoubleHiType::Dump() {
return "Double (High Half)";
}
-std::string IntegerType::Dump() const {
+std::string IntegerType::Dump() {
return "Integer";
}
@@ -361,7 +361,7 @@ void BooleanType::Destroy() {
}
}
-std::string UndefinedType::Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+std::string UndefinedType::Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return "Undefined";
}
@@ -391,7 +391,7 @@ PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::stri
DCHECK(klass->IsInstantiable());
}
-std::string UnresolvedMergedType::Dump() const {
+std::string UnresolvedMergedType::Dump() {
std::stringstream result;
std::set<uint16_t> types = GetMergedTypes();
result << "UnresolvedMergedReferences(";
@@ -405,59 +405,59 @@ std::string UnresolvedMergedType::Dump() const {
return result.str();
}
-std::string UnresolvedSuperClass::Dump() const {
+std::string UnresolvedSuperClass::Dump() {
std::stringstream result;
uint16_t super_type_id = GetUnresolvedSuperClassChildId();
result << "UnresolvedSuperClass(" << reg_type_cache_->GetFromId(super_type_id).Dump() << ")";
return result.str();
}
-std::string UnresolvedReferenceType::Dump() const {
+std::string UnresolvedReferenceType::Dump() {
std::stringstream result;
result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor());
return result.str();
}
-std::string UnresolvedUninitializedRefType::Dump() const {
+std::string UnresolvedUninitializedRefType::Dump() {
std::stringstream result;
result << "Unresolved And Uninitialized Reference" << ": " << PrettyDescriptor(GetDescriptor());
result << " Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string UnresolvedUninitializedThisRefType::Dump() const {
+std::string UnresolvedUninitializedThisRefType::Dump() {
std::stringstream result;
result << "Unresolved And Uninitialized This Reference" << PrettyDescriptor(GetDescriptor());
return result.str();
}
-std::string ReferenceType::Dump() const {
+std::string ReferenceType::Dump() {
std::stringstream result;
result << "Reference" << ": " << PrettyDescriptor(GetClass());
return result.str();
}
-std::string PreciseReferenceType::Dump() const {
+std::string PreciseReferenceType::Dump() {
std::stringstream result;
result << "Precise Reference" << ": "<< PrettyDescriptor(GetClass());
return result.str();
}
-std::string UninitializedReferenceType::Dump() const {
+std::string UninitializedReferenceType::Dump() {
std::stringstream result;
result << "Uninitialized Reference" << ": " << PrettyDescriptor(GetClass());
result << " Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string UninitializedThisReferenceType::Dump() const {
+std::string UninitializedThisReferenceType::Dump() {
std::stringstream result;
result << "Uninitialized This Reference" << ": " << PrettyDescriptor(GetClass());
result << "Allocation PC: " << GetAllocationPc();
return result.str();
}
-std::string ImpreciseConstType::Dump() const {
+std::string ImpreciseConstType::Dump() {
std::stringstream result;
uint32_t val = ConstantValue();
if (val == 0) {
@@ -472,7 +472,7 @@ std::string ImpreciseConstType::Dump() const {
}
return result.str();
}
-std::string PreciseConstLoType::Dump() const {
+std::string PreciseConstLoType::Dump() {
std::stringstream result;
int32_t val = ConstantValueLo();
@@ -486,7 +486,7 @@ std::string PreciseConstLoType::Dump() const {
return result.str();
}
-std::string ImpreciseConstLoType::Dump() const {
+std::string ImpreciseConstLoType::Dump() {
std::stringstream result;
int32_t val = ConstantValueLo();
@@ -500,7 +500,7 @@ std::string ImpreciseConstLoType::Dump() const {
return result.str();
}
-std::string PreciseConstHiType::Dump() const {
+std::string PreciseConstHiType::Dump() {
std::stringstream result;
int32_t val = ConstantValueHi();
result << "Precise ";
@@ -513,7 +513,7 @@ std::string PreciseConstHiType::Dump() const {
return result.str();
}
-std::string ImpreciseConstHiType::Dump() const {
+std::string ImpreciseConstHiType::Dump() {
std::stringstream result;
int32_t val = ConstantValueHi();
result << "Imprecise ";
@@ -530,7 +530,7 @@ ConstantType::ConstantType(uint32_t constant, uint16_t cache_id)
: RegType(NULL, "", cache_id), constant_(constant) {
}
-const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+RegType& UndefinedType::Merge(RegType& incoming_type, RegTypeCache* reg_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (incoming_type.IsUndefined()) {
return *this; // Undefined MERGE Undefined => Undefined
@@ -538,7 +538,7 @@ const RegType& UndefinedType::Merge(const RegType& incoming_type, RegTypeCache*
return reg_types->Conflict();
}
-const RegType& RegType::HighHalf(RegTypeCache* cache) const {
+RegType& RegType::HighHalf(RegTypeCache* cache) const {
DCHECK(IsLowHalf());
if (IsLongLo()) {
return cache->LongHi();
@@ -586,12 +586,10 @@ bool UnresolvedType::IsNonZeroReferenceTypes() const {
}
std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
std::pair<uint16_t, uint16_t> refs = GetTopMergedTypes();
- const RegType& _left(reg_type_cache_->GetFromId(refs.first));
- RegType& __left(const_cast<RegType&>(_left));
- UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&__left);
+ RegType& _left(reg_type_cache_->GetFromId(refs.first));
+ UnresolvedMergedType* left = down_cast<UnresolvedMergedType*>(&_left);
- RegType& _right(
- const_cast<RegType&>(reg_type_cache_->GetFromId(refs.second)));
+ RegType& _right(reg_type_cache_->GetFromId(refs.second));
UnresolvedMergedType* right = down_cast<UnresolvedMergedType*>(&_right);
std::set<uint16_t> types;
@@ -614,7 +612,7 @@ std::set<uint16_t> UnresolvedMergedType::GetMergedTypes() const {
return types;
}
-const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
+RegType& RegType::GetSuperClass(RegTypeCache* cache) {
if (!IsUnresolvedTypes()) {
mirror::Class* super_klass = GetClass()->GetSuperClass();
if (super_klass != NULL) {
@@ -635,7 +633,7 @@ const RegType& RegType::GetSuperClass(RegTypeCache* cache) const {
}
}
-bool RegType::CanAccess(const RegType& other) const {
+bool RegType::CanAccess(RegType& other) {
if (Equals(other)) {
return true; // Trivial accessibility.
} else {
@@ -651,7 +649,7 @@ bool RegType::CanAccess(const RegType& other) const {
}
}
-bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const {
+bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) {
if ((access_flags & kAccPublic) != 0) {
return true;
}
@@ -662,7 +660,7 @@ bool RegType::CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
}
}
-bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
// Primitive arrays will always resolve
DCHECK(descriptor_[1] == 'L' || descriptor_[1] == '[');
@@ -675,11 +673,11 @@ bool RegType::IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lo
}
}
-bool RegType::IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return IsReference() && GetClass()->IsObjectClass();
}
-bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+bool RegType::IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (IsUnresolvedTypes() && !IsUnresolvedMergedReference() && !IsUnresolvedSuperClass()) {
return descriptor_[0] == '[';
} else if (HasClass()) {
@@ -689,7 +687,7 @@ bool RegType::IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
}
}
-bool RegType::IsJavaLangObjectArray() const {
+bool RegType::IsJavaLangObjectArray() {
if (HasClass()) {
mirror::Class* type = GetClass();
return type->IsArrayClass() && type->GetComponentType()->IsObjectClass();
@@ -697,7 +695,7 @@ bool RegType::IsJavaLangObjectArray() const {
return false;
}
-bool RegType::IsInstantiableTypes() const {
+bool RegType::IsInstantiableTypes() {
return IsUnresolvedTypes() || (IsNonZeroReferenceTypes() && GetClass()->IsInstantiable());
}
@@ -705,7 +703,7 @@ ImpreciseConstType::ImpreciseConstType(uint32_t constat, uint16_t cache_id)
: ConstantType(constat, cache_id) {
}
-static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
+static bool AssignableFrom(RegType& lhs, RegType& rhs, bool strict)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (lhs.Equals(rhs)) {
return true;
@@ -753,11 +751,11 @@ static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
}
}
-bool RegType::IsAssignableFrom(const RegType& src) const {
+bool RegType::IsAssignableFrom(RegType& src) {
return AssignableFrom(*this, src, false);
}
-bool RegType::IsStrictlyAssignableFrom(const RegType& src) const {
+bool RegType::IsStrictlyAssignableFrom(RegType& src) {
return AssignableFrom(*this, src, true);
}
@@ -775,11 +773,11 @@ int32_t ConstantType::ConstantValueHi() const {
}
}
-static const RegType& SelectNonConstant(const RegType& a, const RegType& b) {
+static RegType& SelectNonConstant(RegType& a, RegType& b) {
return a.IsConstantTypes() ? b : a;
}
-const RegType& RegType::Merge(const RegType& incoming_type, RegTypeCache* reg_types) const {
+RegType& RegType::Merge(RegType& incoming_type, RegTypeCache* reg_types) {
DCHECK(!Equals(incoming_type)); // Trivial equality handled by caller
if (IsConflict()) {
return *this; // Conflict MERGE * => Conflict
@@ -958,16 +956,16 @@ mirror::Class* RegType::ClassJoin(mirror::Class* s, mirror::Class* t) {
void RegType::CheckInvariants() const {
if (IsConstant() || IsConstantLo() || IsConstantHi()) {
CHECK(descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
- if (klass_ != NULL) {
+ if (!klass_.IsNull()) {
CHECK(!descriptor_.empty()) << *this;
}
}
void RegType::VisitRoots(RootCallback* callback, void* arg) {
- if (klass_ != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&klass_), arg, 0, kRootUnknown);
+ if (!klass_.IsNull()) {
+ klass_.VisitRoot(callback, arg, 0, kRootUnknown);
}
}
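The klass_ field is now a GcRoot<mirror::Class>, so null checks go through IsNull() and root visiting goes through VisitRoot() instead of poking a raw pointer. A deliberately minimal imitation of that wrapper (the real GcRoot takes extra VisitRoot arguments, as the hunk shows, and may apply read barriers on Read(); both are omitted here) shows why a moving collector can update the reference through the visit callback:

    // Minimal stand-in for ART's GcRoot<T>: it hides the raw pointer so every
    // read and every root visit goes through a single interception point.
    using RootCallback = void (*)(void** root, void* arg);

    template <typename T>
    class MiniGcRoot {
     public:
      explicit MiniGcRoot(T* ref = nullptr) : ref_(ref) {}
      T* Read() const { return ref_; }
      bool IsNull() const { return ref_ == nullptr; }
      // A moving collector may rewrite the pointer through the callback.
      void VisitRoot(RootCallback callback, void* arg) {
        callback(reinterpret_cast<void**>(&ref_), arg);
      }
     private:
      T* ref_;
    };

    struct FakeClass { int id; };

    // Example callback: pretend the GC moved the object and update the root.
    static FakeClass moved_copy{7};
    void MovingGcCallback(void** root, void* /*arg*/) { *root = &moved_copy; }

    int main() {
      FakeClass original{7};
      MiniGcRoot<FakeClass> klass(&original);
      if (!klass.IsNull()) {
        klass.VisitRoot(MovingGcCallback, nullptr);  // root now points at the moved copy
      }
      return (klass.Read() == &moved_copy) ? 0 : 1;
    }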
@@ -978,36 +976,37 @@ void UninitializedThisReferenceType::CheckInvariants() const {
void UnresolvedUninitializedThisRefType::CheckInvariants() const {
CHECK_EQ(GetAllocationPc(), 0U) << *this;
CHECK(!descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
void UnresolvedUninitializedRefType::CheckInvariants() const {
CHECK(!descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
void UnresolvedMergedType::CheckInvariants() const {
// Unresolved merged types: merged types should be defined.
CHECK(descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
CHECK_NE(merged_types_.first, 0U) << *this;
CHECK_NE(merged_types_.second, 0U) << *this;
}
void UnresolvedReferenceType::CheckInvariants() const {
CHECK(!descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
}
void UnresolvedSuperClass::CheckInvariants() const {
// Unresolved merged types: merged types should be defined.
CHECK(descriptor_.empty()) << *this;
- CHECK(klass_ == NULL) << *this;
+ CHECK(klass_.IsNull()) << *this;
CHECK_NE(unresolved_child_id_, 0U) << *this;
}
std::ostream& operator<<(std::ostream& os, const RegType& rhs) {
- os << rhs.Dump();
+ RegType& rhs_non_const = const_cast<RegType&>(rhs);
+ os << rhs_non_const.Dump();
return os;
}
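Dump() is no longer const after this change, but operator<< still takes a const RegType&, hence the const_cast before the call. A standalone sketch of the same pattern, assuming the non-const method does not actually mutate the object:

    #include <iostream>
    #include <string>

    class Widget {
     public:
      // Non-const on purpose, mirroring Dump() after this change.
      std::string Dump() { return "Widget"; }
    };

    // Streaming still accepts a const reference, so it sheds constness before
    // calling the non-const Dump(). Safe only because Dump() does not modify
    // any observable state.
    std::ostream& operator<<(std::ostream& os, const Widget& w) {
      Widget& non_const = const_cast<Widget&>(w);
      os << non_const.Dump();
      return os;
    }

    int main() {
      const Widget w{};
      std::cout << w << "\n";
      return 0;
    }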
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index e985f3a2de..1682d4e5d0 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -25,6 +25,8 @@
#include "jni.h"
#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc_root.h"
#include "globals.h"
#include "object_callbacks.h"
#include "primitive.h"
@@ -107,7 +109,7 @@ class RegType {
return IsLowHalf();
}
// Check this is the low half, and that type_h is its matching high-half.
- inline bool CheckWidePair(const RegType& type_h) const {
+ inline bool CheckWidePair(RegType& type_h) const {
if (IsLowHalf()) {
return ((IsPreciseConstantLo() && type_h.IsPreciseConstantHi()) ||
(IsPreciseConstantLo() && type_h.IsImpreciseConstantHi()) ||
@@ -119,7 +121,7 @@ class RegType {
return false;
}
// The high half that corresponds to this low half
- const RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& HighHalf(RegTypeCache* cache) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsConstantBoolean() const {
return IsConstant() && (ConstantValue() >= 0) && (ConstantValue() <= 1);
@@ -198,55 +200,54 @@ class RegType {
virtual bool HasClass() const {
return false;
}
- bool IsJavaLangObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsObjectArrayTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsJavaLangObject() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Primitive::Type GetPrimitiveType() const;
- bool IsJavaLangObjectArray() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsInstantiableTypes() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsJavaLangObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsInstantiableTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const std::string& GetDescriptor() const {
DCHECK(HasClass() || (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
!IsUnresolvedSuperClass()));
return descriptor_;
}
- mirror::Class* GetClass() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!IsUnresolvedReference());
- DCHECK(klass_ != NULL) << Dump();
+ DCHECK(!klass_.IsNull()) << Dump();
DCHECK(HasClass());
- return klass_;
+ return klass_.Read();
}
uint16_t GetId() const {
return cache_id_;
}
- const RegType& GetSuperClass(RegTypeCache* cache) const
+ RegType& GetSuperClass(RegTypeCache* cache)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Can this type access other?
- bool CanAccess(const RegType& other) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool CanAccess(RegType& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type access a member with the given properties?
- bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
+ bool CanAccessMember(mirror::Class* klass, uint32_t access_flags)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type be assigned by src?
// Note: Object and interface types may always be assigned to one another, see comment on
// ClassJoin.
- bool IsAssignableFrom(const RegType& src) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Can this type be assigned by src? Variant of IsAssignableFrom that doesn't allow assignment to
// an interface from an Object.
- bool IsStrictlyAssignableFrom(const RegType& src) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool IsStrictlyAssignableFrom(RegType& src) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Are these RegTypes the same?
- bool Equals(const RegType& other) const {
+ bool Equals(RegType& other) const {
return GetId() == other.GetId();
}
// Compute the merge of this register from one edge (path) with incoming_type from another.
- virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+ virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -275,7 +276,7 @@ class RegType {
protected:
RegType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+ : descriptor_(descriptor), klass_(GcRoot<mirror::Class>(klass)), cache_id_(cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -285,7 +286,7 @@ class RegType {
const std::string descriptor_;
- mirror::Class* klass_; // Non-const only due to moving classes.
+ GcRoot<mirror::Class> klass_;
const uint16_t cache_id_;
friend class RegTypeCache;
@@ -301,7 +302,7 @@ class ConflictType : public RegType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the singleton Conflict instance.
static ConflictType* GetInstance();
@@ -331,7 +332,7 @@ class UndefinedType : public RegType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the singleton Undefined instance.
static UndefinedType* GetInstance();
@@ -350,7 +351,7 @@ class UndefinedType : public RegType {
: RegType(klass, descriptor, cache_id) {
}
- virtual const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
+ virtual RegType& Merge(RegType& incoming_type, RegTypeCache* reg_types)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static UndefinedType* instance_;
@@ -373,7 +374,7 @@ class IntegerType : public Cat1Type {
bool IsInteger() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static IntegerType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -392,7 +393,7 @@ class BooleanType : public Cat1Type {
bool IsBoolean() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static BooleanType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -412,7 +413,7 @@ class ByteType : public Cat1Type {
bool IsByte() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static ByteType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -431,7 +432,7 @@ class ShortType : public Cat1Type {
bool IsShort() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static ShortType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -450,7 +451,7 @@ class CharType : public Cat1Type {
bool IsChar() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static CharType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -469,7 +470,7 @@ class FloatType : public Cat1Type {
bool IsFloat() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static FloatType* CreateInstance(mirror::Class* klass, const std::string& descriptor,
uint16_t cache_id)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -491,7 +492,7 @@ class Cat2Type : public PrimitiveType {
class LongLoType : public Cat2Type {
public:
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsLongLo() const {
return true;
}
@@ -513,7 +514,7 @@ class LongLoType : public Cat2Type {
class LongHiType : public Cat2Type {
public:
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsLongHi() const {
return true;
}
@@ -532,7 +533,7 @@ class LongHiType : public Cat2Type {
class DoubleLoType : public Cat2Type {
public:
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDoubleLo() const {
return true;
}
@@ -554,7 +555,7 @@ class DoubleLoType : public Cat2Type {
class DoubleHiType : public Cat2Type {
public:
- std::string Dump() const;
+ std::string Dump();
virtual bool IsDoubleHi() const {
return true;
}
@@ -621,7 +622,7 @@ class PreciseConstType : public ConstantType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class PreciseConstLoType : public ConstantType {
@@ -633,7 +634,7 @@ class PreciseConstLoType : public ConstantType {
bool IsPreciseConstantLo() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class PreciseConstHiType : public ConstantType {
@@ -645,7 +646,7 @@ class PreciseConstHiType : public ConstantType {
bool IsPreciseConstantHi() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstType : public ConstantType {
@@ -655,7 +656,7 @@ class ImpreciseConstType : public ConstantType {
bool IsImpreciseConstant() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstLoType : public ConstantType {
@@ -666,7 +667,7 @@ class ImpreciseConstLoType : public ConstantType {
bool IsImpreciseConstantLo() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
class ImpreciseConstHiType : public ConstantType {
@@ -677,7 +678,7 @@ class ImpreciseConstHiType : public ConstantType {
bool IsImpreciseConstantHi() const {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Common parent of all uninitialized types. Uninitialized types are created by "new" dex
@@ -718,7 +719,7 @@ class UninitializedReferenceType : public UninitializedType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Similar to UnresolvedReferenceType but not yet having been passed to a constructor.
@@ -737,7 +738,7 @@ class UnresolvedUninitializedRefType : public UninitializedType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -762,7 +763,7 @@ class UninitializedThisReferenceType : public UninitializedType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -782,7 +783,7 @@ class UnresolvedUninitializedThisRefType : public UninitializedType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -807,7 +808,7 @@ class ReferenceType : public RegType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// A type of register holding a reference to an Object of type GetClass and only an object of that
@@ -829,7 +830,7 @@ class PreciseReferenceType : public RegType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
// Common parent of unresolved types.
@@ -857,7 +858,7 @@ class UnresolvedReferenceType : public UnresolvedType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
@@ -883,7 +884,7 @@ class UnresolvedSuperClass : public UnresolvedType {
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -918,7 +919,7 @@ class UnresolvedMergedType : public UnresolvedType {
return true;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
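The reg_type.h change above replaces the raw mirror::Class* field with a GcRoot<mirror::Class> handle so a moving collector can relocate classes held by the verifier, which appears to be why the const qualifiers come off these signatures: the stored roots have to stay updatable. The sketch below is illustrative only, assuming a plain pointer-backed root with the Read()/IsNull() surface the patch relies on; it is not the actual ART implementation.

  // Illustrative sketch of a GC-root handle (assumed, simplified).
  template <typename MirrorType>
  class GcRoot {
   public:
    explicit GcRoot(MirrorType* ref = nullptr) : root_(ref) {}
    // Always re-read through the root; the referent may have moved.
    MirrorType* Read() const { return root_; }
    bool IsNull() const { return root_ == nullptr; }
    // A root visitor rewrites the slot when the collector moves the object.
    void Assign(MirrorType* ref) { root_ = ref; }
   private:
    MirrorType* root_;
  };

Because the referent can move, lookups such as the FromClass() cache scan later in this patch compare cur_entry->klass_.Read() == klass rather than a previously cached raw pointer.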
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index fc9e5c98f7..fdf96a86e1 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -24,14 +24,14 @@
namespace art {
namespace verifier {
-inline const art::verifier::RegType& RegTypeCache::GetFromId(uint16_t id) const {
+inline RegType& RegTypeCache::GetFromId(uint16_t id) const {
DCHECK_LT(id, entries_.size());
RegType* result = entries_[id];
DCHECK(result != NULL);
return *result;
}
-inline const ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
+inline ConstantType& RegTypeCache::FromCat1Const(int32_t value, bool precise) {
// We only expect 0 to be a precise constant.
DCHECK(value != 0 || precise);
if (precise && (value >= kMinSmallConstant) && (value <= kMaxSmallConstant)) {
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 91fba4d2bb..c0e4351c15 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -65,8 +65,8 @@ void RegTypeCache::FillPrimitiveAndSmallConstantTypes() {
DCHECK_EQ(entries_.size(), primitive_count_);
}
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
- bool precise) {
+RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
DCHECK(RegTypeCache::primitive_initialized_);
if (descriptor[1] == '\0') {
switch (descriptor[0]) {
@@ -97,7 +97,7 @@ const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const c
}
};
-const RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
+RegType& RegTypeCache::RegTypeFromPrimitiveType(Primitive::Type prim_type) const {
CHECK(RegTypeCache::primitive_initialized_);
switch (prim_type) {
case Primitive::kPrimBoolean:
@@ -156,8 +156,8 @@ mirror::Class* RegTypeCache::ResolveClass(const char* descriptor, mirror::ClassL
return klass;
}
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
- bool precise) {
+RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+ bool precise) {
// Try looking up the class in the cache first.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
if (MatchDescriptor(i, descriptor, precise)) {
@@ -185,7 +185,7 @@ const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descr
} else {
entry = new ReferenceType(klass, descriptor, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
} else { // Class not resolved.
// We tried loading the class and failed, this might get an exception raised
@@ -198,7 +198,7 @@ const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descr
}
if (IsValidDescriptor(descriptor)) {
RegType* entry = new UnresolvedReferenceType(descriptor, entries_.size());
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
} else {
// The descriptor is broken, return the unknown type as there's nothing sensible that
@@ -208,7 +208,7 @@ const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descr
}
}
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
DCHECK(klass != nullptr);
if (klass->IsPrimitive()) {
// Note: precise isn't used for primitive classes. A char is assignable to an int. All
@@ -218,7 +218,7 @@ const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* kl
// Look for the reference in the list of entries to have.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
- if (cur_entry->klass_ == klass && MatchingPrecisionForClass(cur_entry, precise)) {
+ if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
return *cur_entry;
}
}
@@ -229,7 +229,7 @@ const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* kl
} else {
entry = new ReferenceType(klass, descriptor, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
}
@@ -311,17 +311,15 @@ void RegTypeCache::CreatePrimitiveAndSmallConstantTypes() {
}
}
-const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
+RegType& RegTypeCache::FromUnresolvedMerge(RegType& left, RegType& right) {
std::set<uint16_t> types;
if (left.IsUnresolvedMergedReference()) {
- RegType& non_const(const_cast<RegType&>(left));
- types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+ types = (down_cast<UnresolvedMergedType*>(&left))->GetMergedTypes();
} else {
types.insert(left.GetId());
}
if (right.IsUnresolvedMergedReference()) {
- RegType& non_const(const_cast<RegType&>(right));
- std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&non_const))->GetMergedTypes();
+ std::set<uint16_t> right_types = (down_cast<UnresolvedMergedType*>(&right))->GetMergedTypes();
types.insert(right_types.begin(), right_types.end());
} else {
types.insert(right.GetId());
@@ -339,7 +337,7 @@ const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegT
}
// Create entry.
RegType* entry = new UnresolvedMergedType(left.GetId(), right.GetId(), this, entries_.size());
- entries_.push_back(entry);
+ AddEntry(entry);
if (kIsDebugBuild) {
UnresolvedMergedType* tmp_entry = down_cast<UnresolvedMergedType*>(entry);
std::set<uint16_t> check_types = tmp_entry->GetMergedTypes();
@@ -348,7 +346,7 @@ const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegT
return *entry;
}
-const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
+RegType& RegTypeCache::FromUnresolvedSuperClass(RegType& child) {
// Check if entry already exists.
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
@@ -363,11 +361,11 @@ const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
}
}
RegType* entry = new UnresolvedSuperClass(child.GetId(), this, entries_.size());
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
+UninitializedType& RegTypeCache::Uninitialized(RegType& type, uint32_t allocation_pc) {
UninitializedType* entry = NULL;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
@@ -393,11 +391,11 @@ const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32
}
entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
+RegType& RegTypeCache::FromUninitialized(RegType& uninit_type) {
RegType* entry;
if (uninit_type.IsUnresolvedTypes()) {
@@ -435,48 +433,48 @@ const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
return Conflict();
}
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const ImpreciseConstType& RegTypeCache::ByteConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
+ImpreciseConstType& RegTypeCache::ByteConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::min(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::CharConstant() {
+ImpreciseConstType& RegTypeCache::CharConstant() {
int32_t jchar_max = static_cast<int32_t>(std::numeric_limits<jchar>::max());
- const ConstantType& result = FromCat1Const(jchar_max, false);
+ ConstantType& result = FromCat1Const(jchar_max, false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::ShortConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
+ImpreciseConstType& RegTypeCache::ShortConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::min(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::IntConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
+ImpreciseConstType& RegTypeCache::IntConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jint>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::PosByteConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
+ImpreciseConstType& RegTypeCache::PosByteConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jbyte>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const ImpreciseConstType& RegTypeCache::PosShortConstant() {
- const ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
+ImpreciseConstType& RegTypeCache::PosShortConstant() {
+ ConstantType& result = FromCat1Const(std::numeric_limits<jshort>::max(), false);
DCHECK(result.IsImpreciseConstant());
- return *down_cast<const ImpreciseConstType*>(&result);
+ return *down_cast<ImpreciseConstType*>(&result);
}
-const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
+UninitializedType& RegTypeCache::UninitializedThisArgument(RegType& type) {
UninitializedType* entry;
const std::string& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
@@ -498,14 +496,14 @@ const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType&
}
entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
- if (cur_entry->klass_ == NULL && cur_entry->IsConstant() &&
+ if (cur_entry->klass_.IsNull() && cur_entry->IsConstant() &&
cur_entry->IsPreciseConstant() == precise &&
(down_cast<ConstantType*>(cur_entry))->ConstantValue() == value) {
return *down_cast<ConstantType*>(cur_entry);
@@ -517,11 +515,11 @@ const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool p
} else {
entry = new ImpreciseConstType(value, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantLo() && (cur_entry->IsPrecise() == precise) &&
@@ -535,11 +533,11 @@ const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
} else {
entry = new ImpreciseConstLoType(value, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
+ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
RegType* cur_entry = entries_[i];
if (cur_entry->IsConstantHi() && (cur_entry->IsPrecise() == precise) &&
@@ -553,11 +551,11 @@ const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
} else {
entry = new ImpreciseConstHiType(value, entries_.size());
}
- entries_.push_back(entry);
+ AddEntry(entry);
return *entry;
}
-const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
+RegType& RegTypeCache::GetComponentType(RegType& array, mirror::ClassLoader* loader) {
if (!array.IsArrayTypes()) {
return Conflict();
} else if (array.IsUnresolvedTypes()) {
@@ -566,8 +564,15 @@ const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::Clas
return FromDescriptor(loader, component.c_str(), false);
} else {
mirror::Class* klass = array.GetClass()->GetComponentType();
- return FromClass(klass->GetDescriptor().c_str(), klass,
- klass->CannotBeAssignedFromOtherTypes());
+ if (klass->IsErroneous()) {
+ // Arrays may have erroneous component types, use unresolved in that case.
+ // We assume that the primitive classes are not erroneous, so we know it is a
+ // reference type.
+ return FromDescriptor(loader, klass->GetDescriptor().c_str(), false);
+ } else {
+ return FromClass(klass->GetDescriptor().c_str(), klass,
+ klass->CannotBeAssignedFromOtherTypes());
+ }
}
}
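Consolidating the hunk above, the resolved-array branch of GetComponentType now reads as follows; this is the post-patch control flow restated in one place, with the rationale from the added comment: a component class that exists but is erroneous must not be handed to FromClass(), and since primitive classes cannot be erroneous the descriptor fallback always yields a reference type.

  // Post-patch shape of the resolved-array path (restated, no new behavior).
  mirror::Class* klass = array.GetClass()->GetComponentType();
  if (klass->IsErroneous()) {
    // Broken component class: fall back to the descriptor, producing an
    // unresolved reference type instead of touching the erroneous class.
    return FromDescriptor(loader, klass->GetDescriptor().c_str(), false);
  }
  return FromClass(klass->GetDescriptor().c_str(), klass,
                   klass->CannotBeAssignedFromOtherTypes());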
@@ -586,5 +591,9 @@ void RegTypeCache::VisitRoots(RootCallback* callback, void* arg) {
}
}
+void RegTypeCache::AddEntry(RegType* new_entry) {
+ entries_.push_back(new_entry);
+}
+
} // namespace verifier
} // namespace art
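AddEntry() is a thin wrapper today, but it funnels every insertion into entries_ through a single call site, so later bookkeeping only needs to be added in one place. Below is a hedged sketch of the kind of extension this enables; only the push_back exists in the patch, the checks are illustrative assumptions.

  // Hypothetical hardened AddEntry(); the DCHECKs are assumptions, not
  // part of this change.
  void RegTypeCache::AddEntry(RegType* new_entry) {
    DCHECK(new_entry != nullptr);
    // Entries are constructed with cache_id == entries_.size(), so the id
    // should always match the slot it is about to occupy.
    DCHECK_EQ(new_entry->GetId(), static_cast<uint16_t>(entries_.size()));
    entries_.push_back(new_entry);
  }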
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 70d5f0731c..d46cf2cc6e 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -49,99 +49,99 @@ class RegTypeCache {
}
}
static void ShutDown();
- const art::verifier::RegType& GetFromId(uint16_t id) const;
- const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+ RegType& GetFromId(uint16_t id) const;
+ RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
+ RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat1Const(int32_t value, bool precise)
+ ConstantType& FromCat1Const(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
+ ConstantType& FromCat2ConstLo(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
+ ConstantType& FromCat2ConstHi(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
+ RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
+ RegType& FromUnresolvedMerge(RegType& left, RegType& right)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromUnresolvedSuperClass(const RegType& child)
+ RegType& FromUnresolvedSuperClass(RegType& child)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& JavaLangString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// String is final and therefore always precise.
return From(NULL, "Ljava/lang/String;", true);
}
- const RegType& JavaLangThrowable(bool precise)
+ RegType& JavaLangThrowable(bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Throwable;", precise);
}
- const ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ConstantType& Zero() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FromCat1Const(0, true);
}
- const ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ConstantType& One() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return FromCat1Const(1, true);
}
size_t GetCacheSize() {
return entries_.size();
}
- const RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& Boolean() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return *BooleanType::GetInstance();
}
- const RegType& Byte() {
+ RegType& Byte() {
return *ByteType::GetInstance();
}
- const RegType& Char() {
+ RegType& Char() {
return *CharType::GetInstance();
}
- const RegType& Short() {
+ RegType& Short() {
return *ShortType::GetInstance();
}
- const RegType& Integer() {
+ RegType& Integer() {
return *IntegerType::GetInstance();
}
- const RegType& Float() {
+ RegType& Float() {
return *FloatType::GetInstance();
}
- const RegType& LongLo() {
+ RegType& LongLo() {
return *LongLoType::GetInstance();
}
- const RegType& LongHi() {
+ RegType& LongHi() {
return *LongHiType::GetInstance();
}
- const RegType& DoubleLo() {
+ RegType& DoubleLo() {
return *DoubleLoType::GetInstance();
}
- const RegType& DoubleHi() {
+ RegType& DoubleHi() {
return *DoubleHiType::GetInstance();
}
- const RegType& Undefined() {
+ RegType& Undefined() {
return *UndefinedType::GetInstance();
}
- const RegType& Conflict() {
+ RegType& Conflict() {
return *ConflictType::GetInstance();
}
- const RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& JavaLangClass(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Class;", precise);
}
- const RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ RegType& JavaLangObject(bool precise) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return From(NULL, "Ljava/lang/Object;", precise);
}
- const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
+ UninitializedType& Uninitialized(RegType& type, uint32_t allocation_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Create an uninitialized 'this' argument for the given type.
- const UninitializedType& UninitializedThisArgument(const RegType& type)
+ UninitializedType& UninitializedThisArgument(RegType& type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& FromUninitialized(const RegType& uninit_type)
+ RegType& FromUninitialized(RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
+ ImpreciseConstType& ByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& CharConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& ShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& IntConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& PosByteConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ImpreciseConstType& PosShortConstant() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ RegType& GetComponentType(RegType& array, mirror::ClassLoader* loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
+ RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -151,9 +151,11 @@ class RegTypeCache {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool MatchDescriptor(size_t idx, const char* descriptor, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
+ ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void AddEntry(RegType* new_entry);
+
template <class Type>
static Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 9dc0df13fb..e27558a613 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -33,21 +33,21 @@ TEST_F(RegTypeTest, ConstLoHi) {
// Tests creating primitive types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
- const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
- const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
- const RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
+ RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
+ RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
+ RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
+ RegType& ref_type_const_3 = cache.FromCat1Const(30, false);
EXPECT_TRUE(ref_type_const_0.Equals(ref_type_const_1));
EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_2));
EXPECT_FALSE(ref_type_const_0.Equals(ref_type_const_3));
- const RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
- const RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
+ RegType& ref_type_const_wide_0 = cache.FromCat2ConstHi(50, true);
+ RegType& ref_type_const_wide_1 = cache.FromCat2ConstHi(50, true);
EXPECT_TRUE(ref_type_const_wide_0.Equals(ref_type_const_wide_1));
- const RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
- const RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
- const RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
+ RegType& ref_type_const_wide_2 = cache.FromCat2ConstLo(50, true);
+ RegType& ref_type_const_wide_3 = cache.FromCat2ConstLo(50, true);
+ RegType& ref_type_const_wide_4 = cache.FromCat2ConstLo(55, true);
EXPECT_TRUE(ref_type_const_wide_2.Equals(ref_type_const_wide_3));
EXPECT_FALSE(ref_type_const_wide_2.Equals(ref_type_const_wide_4));
}
@@ -56,11 +56,11 @@ TEST_F(RegTypeTest, Pairs) {
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
int64_t val = static_cast<int32_t>(1234);
- const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
- const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
- const RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
- const RegType& long_lo = cache.LongLo();
- const RegType& long_hi = cache.LongHi();
+ RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
+ RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
+ RegType& precise_const = cache.FromCat1Const(static_cast<int32_t>(val >> 32), true);
+ RegType& long_lo = cache.LongLo();
+ RegType& long_hi = cache.LongHi();
// Check sanity of types.
EXPECT_TRUE(precise_lo.IsLowHalf());
EXPECT_FALSE(precise_hi.IsLowHalf());
@@ -80,7 +80,7 @@ TEST_F(RegTypeTest, Primitives) {
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& bool_reg_type = cache.Boolean();
+ RegType& bool_reg_type = cache.Boolean();
EXPECT_FALSE(bool_reg_type.IsUndefined());
EXPECT_FALSE(bool_reg_type.IsConflict());
EXPECT_FALSE(bool_reg_type.IsZero());
@@ -112,7 +112,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_TRUE(bool_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(bool_reg_type.IsNonZeroReferenceTypes());
- const RegType& byte_reg_type = cache.Byte();
+ RegType& byte_reg_type = cache.Byte();
EXPECT_FALSE(byte_reg_type.IsUndefined());
EXPECT_FALSE(byte_reg_type.IsConflict());
EXPECT_FALSE(byte_reg_type.IsZero());
@@ -144,7 +144,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_TRUE(byte_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(byte_reg_type.IsNonZeroReferenceTypes());
- const RegType& char_reg_type = cache.Char();
+ RegType& char_reg_type = cache.Char();
EXPECT_FALSE(char_reg_type.IsUndefined());
EXPECT_FALSE(char_reg_type.IsConflict());
EXPECT_FALSE(char_reg_type.IsZero());
@@ -176,7 +176,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_TRUE(char_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(char_reg_type.IsNonZeroReferenceTypes());
- const RegType& short_reg_type = cache.Short();
+ RegType& short_reg_type = cache.Short();
EXPECT_FALSE(short_reg_type.IsUndefined());
EXPECT_FALSE(short_reg_type.IsConflict());
EXPECT_FALSE(short_reg_type.IsZero());
@@ -208,7 +208,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_TRUE(short_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(short_reg_type.IsNonZeroReferenceTypes());
- const RegType& int_reg_type = cache.Integer();
+ RegType& int_reg_type = cache.Integer();
EXPECT_FALSE(int_reg_type.IsUndefined());
EXPECT_FALSE(int_reg_type.IsConflict());
EXPECT_FALSE(int_reg_type.IsZero());
@@ -240,7 +240,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_TRUE(int_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(int_reg_type.IsNonZeroReferenceTypes());
- const RegType& long_reg_type = cache.LongLo();
+ RegType& long_reg_type = cache.LongLo();
EXPECT_FALSE(long_reg_type.IsUndefined());
EXPECT_FALSE(long_reg_type.IsConflict());
EXPECT_FALSE(long_reg_type.IsZero());
@@ -272,7 +272,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(long_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(long_reg_type.IsNonZeroReferenceTypes());
- const RegType& float_reg_type = cache.Float();
+ RegType& float_reg_type = cache.Float();
EXPECT_FALSE(float_reg_type.IsUndefined());
EXPECT_FALSE(float_reg_type.IsConflict());
EXPECT_FALSE(float_reg_type.IsZero());
@@ -304,7 +304,7 @@ TEST_F(RegTypeTest, Primitives) {
EXPECT_FALSE(float_reg_type.IsArrayIndexTypes());
EXPECT_FALSE(float_reg_type.IsNonZeroReferenceTypes());
- const RegType& double_reg_type = cache.DoubleLo();
+ RegType& double_reg_type = cache.DoubleLo();
EXPECT_FALSE(double_reg_type.IsUndefined());
EXPECT_FALSE(double_reg_type.IsConflict());
EXPECT_FALSE(double_reg_type.IsZero());
@@ -344,9 +344,9 @@ TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) {
// match the one that is imprecise.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& imprecise_obj = cache.JavaLangObject(false);
- const RegType& precise_obj = cache.JavaLangObject(true);
- const RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+ RegType& imprecise_obj = cache.JavaLangObject(false);
+ RegType& precise_obj = cache.JavaLangObject(true);
+ RegType& precise_obj_2 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
EXPECT_TRUE(precise_obj.Equals(precise_obj_2));
EXPECT_FALSE(imprecise_obj.Equals(precise_obj));
@@ -359,14 +359,14 @@ TEST_F(RegTypeReferenceTest, UnresolvedType) {
// a hit second time.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
- const RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_1 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.Equals(ref_type_1));
- const RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
+ RegType& unresolved_super_class = cache.FromUnresolvedSuperClass(ref_type_0);
EXPECT_TRUE(unresolved_super_class.IsUnresolvedSuperClass());
EXPECT_TRUE(unresolved_super_class.IsNonZeroReferenceTypes());
}
@@ -375,21 +375,21 @@ TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) {
// Tests creating uninitialized types from unresolved types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_0 = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
- const RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.Equals(ref_type));
// Create an uninitialized type of this unresolved type
- const RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
+ RegType& unresolved_unintialised = cache.Uninitialized(ref_type, 1101ull);
EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
EXPECT_TRUE(unresolved_unintialised.IsUninitializedTypes());
EXPECT_TRUE(unresolved_unintialised.IsNonZeroReferenceTypes());
// Create an uninitialized type of this unresolved type with different PC
- const RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
+ RegType& ref_type_unresolved_unintialised_1 = cache.Uninitialized(ref_type, 1102ull);
EXPECT_TRUE(unresolved_unintialised.IsUnresolvedAndUninitializedReference());
EXPECT_FALSE(unresolved_unintialised.Equals(ref_type_unresolved_unintialised_1));
// Create an uninitialized type of this unresolved type with the same PC
- const RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
+ RegType& unresolved_unintialised_2 = cache.Uninitialized(ref_type, 1101ull);
EXPECT_TRUE(unresolved_unintialised.Equals(unresolved_unintialised_2));
}
@@ -397,12 +397,12 @@ TEST_F(RegTypeReferenceTest, Dump) {
// Tests types for proper Dump messages.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
- const RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
- const RegType& resolved_ref = cache.JavaLangString();
- const RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
- const RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
- const RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
+ RegType& unresolved_ref = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& unresolved_ref_another = cache.FromDescriptor(NULL, "Ljava/lang/DoesNotExistEither;", true);
+ RegType& resolved_ref = cache.JavaLangString();
+ RegType& resolved_unintialiesd = cache.Uninitialized(resolved_ref, 10);
+ RegType& unresolved_unintialized = cache.Uninitialized(unresolved_ref, 12);
+ RegType& unresolved_merged = cache.FromUnresolvedMerge(unresolved_ref, unresolved_ref_another);
std::string expected = "Unresolved Reference: java.lang.DoesNotExist";
EXPECT_EQ(expected, unresolved_ref.Dump());
@@ -422,16 +422,16 @@ TEST_F(RegTypeReferenceTest, JavalangString) {
// The JavaLangObject method instead of FromDescriptor. String class is final.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type = cache.JavaLangString();
- const RegType& ref_type_2 = cache.JavaLangString();
- const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
+ RegType& ref_type = cache.JavaLangString();
+ RegType& ref_type_2 = cache.JavaLangString();
+ RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/String;", true);
EXPECT_TRUE(ref_type.Equals(ref_type_2));
EXPECT_TRUE(ref_type_2.Equals(ref_type_3));
EXPECT_TRUE(ref_type.IsPreciseReference());
// Create an uninitialized type out of this:
- const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
+ RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference());
EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference());
}
@@ -442,9 +442,9 @@ TEST_F(RegTypeReferenceTest, JavalangObject) {
// The JavaLangObject method instead of FromDescriptor. Object Class is not final.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache(true);
- const RegType& ref_type = cache.JavaLangObject(true);
- const RegType& ref_type_2 = cache.JavaLangObject(true);
- const RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
+ RegType& ref_type = cache.JavaLangObject(true);
+ RegType& ref_type_2 = cache.JavaLangObject(true);
+ RegType& ref_type_3 = cache.FromDescriptor(NULL, "Ljava/lang/Object;", true);
EXPECT_TRUE(ref_type.Equals(ref_type_2));
EXPECT_TRUE(ref_type_3.Equals(ref_type_2));
@@ -455,20 +455,19 @@ TEST_F(RegTypeReferenceTest, Merging) {
// String and object, LUB is object.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
- const RegType& string = cache_new.JavaLangString();
- const RegType& Object = cache_new.JavaLangObject(true);
+ RegType& string = cache_new.JavaLangString();
+ RegType& Object = cache_new.JavaLangObject(true);
EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
// Merge two unresolved types.
- const RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
+ RegType& ref_type_0 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
- const RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
+ RegType& ref_type_1 = cache_new.FromDescriptor(NULL, "Ljava/lang/DoesNotExistToo;", true);
EXPECT_FALSE(ref_type_0.Equals(ref_type_1));
- const RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
+ RegType& merged = ref_type_1.Merge(ref_type_0, &cache_new);
EXPECT_TRUE(merged.IsUnresolvedMergedReference());
- RegType& merged_nonconst = const_cast<RegType&>(merged);
- std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged_nonconst))->GetMergedTypes();
+ std::set<uint16_t> merged_ids = (down_cast<UnresolvedMergedType*>(&merged))->GetMergedTypes();
EXPECT_EQ(ref_type_0.GetId(), *(merged_ids.begin()));
EXPECT_EQ(ref_type_1.GetId(), *((++merged_ids.begin())));
}
@@ -479,27 +478,27 @@ TEST_F(RegTypeTest, MergingFloat) {
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- const RegType& float_type = cache_new.Float();
- const RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
- const RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
+ RegType& float_type = cache_new.Float();
+ RegType& precise_cst = cache_new.FromCat1Const(kTestConstantValue, true);
+ RegType& imprecise_cst = cache_new.FromCat1Const(kTestConstantValue, false);
{
// float MERGE precise cst => float.
- const RegType& merged = float_type.Merge(precise_cst, &cache_new);
+ RegType& merged = float_type.Merge(precise_cst, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// precise cst MERGE float => float.
- const RegType& merged = precise_cst.Merge(float_type, &cache_new);
+ RegType& merged = precise_cst.Merge(float_type, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// float MERGE imprecise cst => float.
- const RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
+ RegType& merged = float_type.Merge(imprecise_cst, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
{
// imprecise cst MERGE float => float.
- const RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
+ RegType& merged = imprecise_cst.Merge(float_type, &cache_new);
EXPECT_TRUE(merged.IsFloat());
}
}
@@ -510,50 +509,50 @@ TEST_F(RegTypeTest, MergingLong) {
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- const RegType& long_lo_type = cache_new.LongLo();
- const RegType& long_hi_type = cache_new.LongHi();
- const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
- const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
- const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
- const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+ RegType& long_lo_type = cache_new.LongLo();
+ RegType& long_hi_type = cache_new.LongHi();
+ RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+ RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+ RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+ RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
{
// lo MERGE precise cst lo => lo.
- const RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
+ RegType& merged = long_lo_type.Merge(precise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// precise cst lo MERGE lo => lo.
- const RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
+ RegType& merged = precise_cst_lo.Merge(long_lo_type, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// lo MERGE imprecise cst lo => lo.
- const RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
+ RegType& merged = long_lo_type.Merge(imprecise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// imprecise cst lo MERGE lo => lo.
- const RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
+ RegType& merged = imprecise_cst_lo.Merge(long_lo_type, &cache_new);
EXPECT_TRUE(merged.IsLongLo());
}
{
// hi MERGE precise cst hi => hi.
- const RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
+ RegType& merged = long_hi_type.Merge(precise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// precise cst hi MERGE hi => hi.
- const RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
+ RegType& merged = precise_cst_hi.Merge(long_hi_type, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// hi MERGE imprecise cst hi => hi.
- const RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
+ RegType& merged = long_hi_type.Merge(imprecise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
{
// imprecise cst hi MERGE hi => hi.
- const RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
+ RegType& merged = imprecise_cst_hi.Merge(long_hi_type, &cache_new);
EXPECT_TRUE(merged.IsLongHi());
}
}
@@ -564,50 +563,50 @@ TEST_F(RegTypeTest, MergingDouble) {
RegTypeCache cache_new(true);
constexpr int32_t kTestConstantValue = 10;
- const RegType& double_lo_type = cache_new.DoubleLo();
- const RegType& double_hi_type = cache_new.DoubleHi();
- const RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
- const RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
- const RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
- const RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
+ RegType& double_lo_type = cache_new.DoubleLo();
+ RegType& double_hi_type = cache_new.DoubleHi();
+ RegType& precise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, true);
+ RegType& imprecise_cst_lo = cache_new.FromCat2ConstLo(kTestConstantValue, false);
+ RegType& precise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, true);
+ RegType& imprecise_cst_hi = cache_new.FromCat2ConstHi(kTestConstantValue, false);
{
// lo MERGE precise cst lo => lo.
- const RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
+ RegType& merged = double_lo_type.Merge(precise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// precise cst lo MERGE lo => lo.
- const RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
+ RegType& merged = precise_cst_lo.Merge(double_lo_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// lo MERGE imprecise cst lo => lo.
- const RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
+ RegType& merged = double_lo_type.Merge(imprecise_cst_lo, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// imprecise cst lo MERGE lo => lo.
- const RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
+ RegType& merged = imprecise_cst_lo.Merge(double_lo_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleLo());
}
{
// hi MERGE precise cst hi => hi.
- const RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
+ RegType& merged = double_hi_type.Merge(precise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// precise cst hi MERGE hi => hi.
- const RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
+ RegType& merged = precise_cst_hi.Merge(double_hi_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// hi MERGE imprecise cst hi => hi.
- const RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
+ RegType& merged = double_hi_type.Merge(imprecise_cst_hi, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
{
// imprecise cst hi MERGE hi => hi.
- const RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
+ RegType& merged = imprecise_cst_hi.Merge(double_hi_type, &cache_new);
EXPECT_TRUE(merged.IsDoubleHi());
}
}
@@ -616,8 +615,8 @@ TEST_F(RegTypeTest, ConstPrecision) {
// Tests creating primitive types.
ScopedObjectAccess soa(Thread::Current());
RegTypeCache cache_new(true);
- const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
- const RegType& precise_const = cache_new.FromCat1Const(10, true);
+ RegType& imprecise_const = cache_new.FromCat1Const(10, false);
+ RegType& precise_const = cache_new.FromCat1Const(10, true);
EXPECT_TRUE(imprecise_const.IsImpreciseConstant());
EXPECT_TRUE(precise_const.IsPreciseConstant());
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 0989cd0c41..378c6d3d61 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -25,7 +25,7 @@
namespace art {
namespace verifier {
-inline const RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
+inline RegType& RegisterLine::GetRegisterType(uint32_t vsrc) const {
// The register index was validated during the static pass, so we don't need to check it here.
DCHECK_LT(vsrc, num_regs_);
return verifier_->GetRegTypeCache()->GetFromId(line_[vsrc]);
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 556056ce04..4d67cfb969 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -36,7 +36,7 @@ bool RegisterLine::CheckConstructorReturn() const {
return true;
}
-bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
+bool RegisterLine::SetRegisterType(uint32_t vdst, RegType& new_type) {
DCHECK_LT(vdst, num_regs_);
if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
@@ -53,8 +53,8 @@ bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
return true;
}
-bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1,
- const RegType& new_type2) {
+bool RegisterLine::SetRegisterTypeWide(uint32_t vdst, RegType& new_type1,
+ RegType& new_type2) {
DCHECK_LT(vdst + 1, num_regs_);
if (!new_type1.CheckWidePair(new_type2)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Invalid wide pair '"
@@ -75,21 +75,21 @@ void RegisterLine::SetResultTypeToUnknown() {
result_[1] = result_[0];
}
-void RegisterLine::SetResultRegisterType(const RegType& new_type) {
+void RegisterLine::SetResultRegisterType(RegType& new_type) {
DCHECK(!new_type.IsLowHalf());
DCHECK(!new_type.IsHighHalf());
result_[0] = new_type.GetId();
result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
}
-void RegisterLine::SetResultRegisterTypeWide(const RegType& new_type1,
- const RegType& new_type2) {
+void RegisterLine::SetResultRegisterTypeWide(RegType& new_type1,
+ RegType& new_type2) {
DCHECK(new_type1.CheckWidePair(new_type2));
result_[0] = new_type1.GetId();
result_[1] = new_type2.GetId();
}
-const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
+RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_range) {
const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
if (args_count < 1) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
@@ -97,7 +97,7 @@ const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_
}
/* Get the register holding the 'this' reference of the invocation */
const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- const RegType& this_type = GetRegisterType(this_reg);
+ RegType& this_type = GetRegisterType(this_reg);
if (!this_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
<< this_reg << " (type=" << this_type << ")";
@@ -107,9 +107,9 @@ const RegType& RegisterLine::GetInvocationThis(const Instruction* inst, bool is_
}
bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
- const RegType& check_type) {
+ RegType& check_type) {
// Verify the src register type against the check type, refining the type of the register
- const RegType& src_type = GetRegisterType(vsrc);
+ RegType& src_type = GetRegisterType(vsrc);
if (!(check_type.IsAssignableFrom(src_type))) {
enum VerifyError fail_type;
if (!check_type.IsNonZeroReferenceTypes() || !src_type.IsNonZeroReferenceTypes()) {
@@ -125,7 +125,7 @@ bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
return false;
}
if (check_type.IsLowHalf()) {
- const RegType& src_type_h = GetRegisterType(vsrc + 1);
+ RegType& src_type_h = GetRegisterType(vsrc + 1);
if (!src_type.CheckWidePair(src_type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
<< src_type << "/" << src_type_h;
@@ -139,17 +139,17 @@ bool RegisterLine::VerifyRegisterType(uint32_t vsrc,
return true;
}
-bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1,
- const RegType& check_type2) {
+bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1,
+ RegType& check_type2) {
DCHECK(check_type1.CheckWidePair(check_type2));
// Verify the src register type against the check type, refining the type of the register
- const RegType& src_type = GetRegisterType(vsrc);
+ RegType& src_type = GetRegisterType(vsrc);
if (!check_type1.IsAssignableFrom(src_type)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register v" << vsrc << " has type " << src_type
<< " but expected " << check_type1;
return false;
}
- const RegType& src_type_h = GetRegisterType(vsrc + 1);
+ RegType& src_type_h = GetRegisterType(vsrc + 1);
if (!src_type.CheckWidePair(src_type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register v" << vsrc << " has type "
<< src_type << "/" << src_type_h;
@@ -162,9 +162,9 @@ bool RegisterLine::VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_ty
return true;
}
-void RegisterLine::MarkRefsAsInitialized(const RegType& uninit_type) {
+void RegisterLine::MarkRefsAsInitialized(RegType& uninit_type) {
DCHECK(uninit_type.IsUninitializedTypes());
- const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
+ RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
size_t changed = 0;
for (uint32_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(i).Equals(uninit_type)) {
@@ -200,7 +200,7 @@ void RegisterLine::MarkAllRegistersAsConflictsExceptWide(uint32_t vsrc) {
}
}
-std::string RegisterLine::Dump() const {
+std::string RegisterLine::Dump() {
std::string result;
for (size_t i = 0; i < num_regs_; i++) {
result += StringPrintf("%zd:[", i);
@@ -213,7 +213,7 @@ std::string RegisterLine::Dump() const {
return result;
}
-void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
+void RegisterLine::MarkUninitRefsAsInvalid(RegType& uninit_type) {
for (size_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(i).Equals(uninit_type)) {
line_[i] = verifier_->GetRegTypeCache()->Conflict().GetId();
@@ -224,7 +224,7 @@ void RegisterLine::MarkUninitRefsAsInvalid(const RegType& uninit_type) {
void RegisterLine::CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat) {
DCHECK(cat == kTypeCategory1nr || cat == kTypeCategoryRef);
- const RegType& type = GetRegisterType(vsrc);
+ RegType& type = GetRegisterType(vsrc);
if (!SetRegisterType(vdst, type)) {
return;
}
@@ -238,8 +238,8 @@ void RegisterLine::CopyRegister1(uint32_t vdst, uint32_t vsrc, TypeCategory cat)
}
void RegisterLine::CopyRegister2(uint32_t vdst, uint32_t vsrc) {
- const RegType& type_l = GetRegisterType(vsrc);
- const RegType& type_h = GetRegisterType(vsrc + 1);
+ RegType& type_l = GetRegisterType(vsrc);
+ RegType& type_h = GetRegisterType(vsrc + 1);
if (!type_l.CheckWidePair(type_h)) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "copy2 v" << vdst << "<-v" << vsrc
@@ -250,7 +250,7 @@ void RegisterLine::CopyRegister2(uint32_t vdst, uint32_t vsrc) {
}
void RegisterLine::CopyResultRegister1(uint32_t vdst, bool is_reference) {
- const RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+ RegType& type = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
if ((!is_reference && !type.IsCategory1Types()) ||
(is_reference && !type.IsReferenceTypes())) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
@@ -267,8 +267,8 @@ void RegisterLine::CopyResultRegister1(uint32_t vdst, bool is_reference) {
* register to another register, and reset the result register.
*/
void RegisterLine::CopyResultRegister2(uint32_t vdst) {
- const RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
- const RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
+ RegType& type_l = verifier_->GetRegTypeCache()->GetFromId(result_[0]);
+ RegType& type_h = verifier_->GetRegTypeCache()->GetFromId(result_[1]);
if (!type_l.IsCategory2Types()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "copyRes2 v" << vdst << "<- result0" << " type=" << type_l;
@@ -281,40 +281,40 @@ void RegisterLine::CopyResultRegister2(uint32_t vdst) {
}
void RegisterLine::CheckUnaryOp(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type) {
+ RegType& dst_type,
+ RegType& src_type) {
if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
SetRegisterType(inst->VRegA_12x(), dst_type);
}
}
void RegisterLine::CheckUnaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1, const RegType& src_type2) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1, RegType& src_type2) {
if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
}
}
void RegisterLine::CheckUnaryOpToWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type) {
if (VerifyRegisterType(inst->VRegB_12x(), src_type)) {
SetRegisterTypeWide(inst->VRegA_12x(), dst_type1, dst_type2);
}
}
void RegisterLine::CheckUnaryOpFromWide(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2) {
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2) {
if (VerifyRegisterTypeWide(inst->VRegB_12x(), src_type1, src_type2)) {
SetRegisterType(inst->VRegA_12x(), dst_type);
}
}
void RegisterLine::CheckBinaryOp(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2,
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2,
bool check_boolean_op) {
const uint32_t vregB = inst->VRegB_23x();
const uint32_t vregC = inst->VRegC_23x();
@@ -333,9 +333,9 @@ void RegisterLine::CheckBinaryOp(const Instruction* inst,
}
void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2) {
if (VerifyRegisterTypeWide(inst->VRegB_23x(), src_type1_1, src_type1_2) &&
VerifyRegisterTypeWide(inst->VRegC_23x(), src_type2_1, src_type2_2)) {
SetRegisterTypeWide(inst->VRegA_23x(), dst_type1, dst_type2);
@@ -343,8 +343,8 @@ void RegisterLine::CheckBinaryOpWide(const Instruction* inst,
}
void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type) {
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type) {
if (VerifyRegisterTypeWide(inst->VRegB_23x(), long_lo_type, long_hi_type) &&
VerifyRegisterType(inst->VRegC_23x(), int_type)) {
SetRegisterTypeWide(inst->VRegA_23x(), long_lo_type, long_hi_type);
@@ -352,8 +352,8 @@ void RegisterLine::CheckBinaryOpWideShift(const Instruction* inst,
}
void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type1,
- const RegType& src_type2, bool check_boolean_op) {
+ RegType& dst_type, RegType& src_type1,
+ RegType& src_type2, bool check_boolean_op) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterType(vregA, src_type1) &&
@@ -371,9 +371,9 @@ void RegisterLine::CheckBinaryOp2addr(const Instruction* inst,
}
void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2) {
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterTypeWide(vregA, src_type1_1, src_type1_2) &&
@@ -383,8 +383,8 @@ void RegisterLine::CheckBinaryOp2addrWide(const Instruction* inst,
}
void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type) {
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type) {
const uint32_t vregA = inst->VRegA_12x();
const uint32_t vregB = inst->VRegB_12x();
if (VerifyRegisterTypeWide(vregA, long_lo_type, long_hi_type) &&
@@ -394,7 +394,7 @@ void RegisterLine::CheckBinaryOp2addrWideShift(const Instruction* inst,
}
void RegisterLine::CheckLiteralOp(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type,
+ RegType& dst_type, RegType& src_type,
bool check_boolean_op, bool is_lit16) {
const uint32_t vregA = is_lit16 ? inst->VRegA_22s() : inst->VRegA_22b();
const uint32_t vregB = is_lit16 ? inst->VRegB_22s() : inst->VRegB_22b();
@@ -413,7 +413,7 @@ void RegisterLine::CheckLiteralOp(const Instruction* inst,
}
void RegisterLine::PushMonitor(uint32_t reg_idx, int32_t insn_idx) {
- const RegType& reg_type = GetRegisterType(reg_idx);
+ RegType& reg_type = GetRegisterType(reg_idx);
if (!reg_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-enter on non-object (" << reg_type << ")";
} else if (monitors_.size() >= 32) {
@@ -425,7 +425,7 @@ void RegisterLine::PushMonitor(uint32_t reg_idx, int32_t insn_idx) {
}
void RegisterLine::PopMonitor(uint32_t reg_idx) {
- const RegType& reg_type = GetRegisterType(reg_idx);
+ RegType& reg_type = GetRegisterType(reg_idx);
if (!reg_type.IsReferenceTypes()) {
verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "monitor-exit on non-object (" << reg_type << ")";
} else if (monitors_.empty()) {
@@ -460,9 +460,9 @@ bool RegisterLine::MergeRegisters(const RegisterLine* incoming_line) {
DCHECK(incoming_line != nullptr);
for (size_t idx = 0; idx < num_regs_; idx++) {
if (line_[idx] != incoming_line->line_[idx]) {
- const RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
- const RegType& cur_type = GetRegisterType(idx);
- const RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
+ RegType& incoming_reg_type = incoming_line->GetRegisterType(idx);
+ RegType& cur_type = GetRegisterType(idx);
+ RegType& new_type = cur_type.Merge(incoming_reg_type, verifier_->GetRegTypeCache());
changed = changed || !cur_type.Equals(new_type);
line_[idx] = new_type.GetId();
}
@@ -508,7 +508,8 @@ void RegisterLine::WriteReferenceBitMap(std::vector<uint8_t>& data, size_t max_b
std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- os << rhs.Dump();
+ RegisterLine& rhs_non_const = const_cast<RegisterLine&>(rhs);
+ os << rhs_non_const.Dump();
return os;
}
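For context on the operator<< hunk above: once Dump() loses its const qualifier, a const RegisterLine& can no longer call it directly, so the stream operator casts away constness. The following is a minimal, self-contained sketch of that pattern; the simplified class body is an assumption for illustration, and only the cast mirrors the patch.

#include <iostream>
#include <string>

// Simplified stand-in for RegisterLine after this change: Dump() is non-const.
class RegisterLine {
 public:
  std::string Dump() { return "0:[Conflict]"; }  // non-const, as in the patch
};

// operator<< conventionally takes a const reference, so it has to cast away
// constness before it can call the non-const Dump().
std::ostream& operator<<(std::ostream& os, const RegisterLine& rhs) {
  RegisterLine& rhs_non_const = const_cast<RegisterLine&>(rhs);
  os << rhs_non_const.Dump();
  return os;
}

int main() {
  RegisterLine line;
  std::cout << line << std::endl;  // prints the dumped register line
  return 0;
}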
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 57c7517713..b0018d2048 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -81,26 +81,26 @@ class RegisterLine {
// Set the type of register N, verifying that the register is valid. If "newType" is the "Lo"
// part of a 64-bit value, register N+1 will be set to "newType+1".
// The register index was validated during the static pass, so we don't need to check it here.
- bool SetRegisterType(uint32_t vdst, const RegType& new_type)
+ bool SetRegisterType(uint32_t vdst, RegType& new_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool SetRegisterTypeWide(uint32_t vdst, const RegType& new_type1, const RegType& new_type2)
+ bool SetRegisterTypeWide(uint32_t vdst, RegType& new_type1, RegType& new_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/* Set the type of the "result" register. */
- void SetResultRegisterType(const RegType& new_type)
+ void SetResultRegisterType(RegType& new_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
+ void SetResultRegisterTypeWide(RegType& new_type1, RegType& new_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Get the type of register vsrc.
- const RegType& GetRegisterType(uint32_t vsrc) const;
+ RegType& GetRegisterType(uint32_t vsrc) const;
- bool VerifyRegisterType(uint32_t vsrc, const RegType& check_type)
+ bool VerifyRegisterType(uint32_t vsrc, RegType& check_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool VerifyRegisterTypeWide(uint32_t vsrc, const RegType& check_type1, const RegType& check_type2)
+ bool VerifyRegisterTypeWide(uint32_t vsrc, RegType& check_type1, RegType& check_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CopyFromLine(const RegisterLine* src) {
@@ -110,7 +110,7 @@ class RegisterLine {
reg_to_lock_depths_ = src->reg_to_lock_depths_;
}
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FillWithGarbage() {
memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -126,7 +126,7 @@ class RegisterLine {
* to prevent them from being used (otherwise, MarkRefsAsInitialized would mark the old ones and
* the new ones at the same time).
*/
- void MarkUninitRefsAsInvalid(const RegType& uninit_type)
+ void MarkUninitRefsAsInvalid(RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -134,7 +134,7 @@ class RegisterLine {
* reference type. This is called when an appropriate constructor is invoked -- all copies of
* the reference must be marked as initialized.
*/
- void MarkRefsAsInitialized(const RegType& uninit_type)
+ void MarkRefsAsInitialized(RegType& uninit_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -173,30 +173,30 @@ class RegisterLine {
* The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
* versions. We just need to make sure vA is >= 1 and then return vC.
*/
- const RegType& GetInvocationThis(const Instruction* inst, bool is_range)
+ RegType& GetInvocationThis(const Instruction* inst, bool is_range)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
* Verify types for a simple two-register instruction (e.g. "neg-int").
* "dst_type" is stored into vA, and "src_type" is verified against vB.
*/
- void CheckUnaryOp(const Instruction* inst, const RegType& dst_type,
- const RegType& src_type)
+ void CheckUnaryOp(const Instruction* inst, RegType& dst_type,
+ RegType& src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1, const RegType& src_type2)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1, RegType& src_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpToWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckUnaryOpFromWide(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2)
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -205,19 +205,19 @@ class RegisterLine {
* against vB/vC.
*/
void CheckBinaryOp(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
+ RegType& dst_type, RegType& src_type1, RegType& src_type2,
bool check_boolean_op)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOpWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOpWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type)
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -225,20 +225,20 @@ class RegisterLine {
* are verified against vA/vB, then "dst_type" is stored into vA.
*/
void CheckBinaryOp2addr(const Instruction* inst,
- const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2,
+ RegType& dst_type,
+ RegType& src_type1, RegType& src_type2,
bool check_boolean_op)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOp2addrWide(const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2)
+ RegType& dst_type1, RegType& dst_type2,
+ RegType& src_type1_1, RegType& src_type1_2,
+ RegType& src_type2_1, RegType& src_type2_2)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CheckBinaryOp2addrWideShift(const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
- const RegType& int_type)
+ RegType& long_lo_type, RegType& long_hi_type,
+ RegType& int_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
@@ -248,7 +248,7 @@ class RegisterLine {
* If "check_boolean_op" is set, we use the constant value in vC.
*/
void CheckLiteralOp(const Instruction* inst,
- const RegType& dst_type, const RegType& src_type,
+ RegType& dst_type, RegType& src_type,
bool check_boolean_op, bool is_lit16)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
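To make the reworked signatures above concrete, here is a hedged sketch of the verify-then-set flow that CheckBinaryOp and friends implement: the source vregs are checked against the expected types, and only on success is the destination vreg updated. The toy Kind/RegType types below are assumptions standing in for the real RegType and RegTypeCache machinery; only the control flow and the non-const reference parameters mirror the patch.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Toy stand-ins for the verifier types; the real RegType/Instruction are far richer.
enum class Kind { kInteger, kBoolean, kConflict };

struct RegType {
  Kind kind;
  bool IsAssignableFrom(const RegType& src) const { return kind == src.kind; }
};

class RegisterLine {
 public:
  explicit RegisterLine(size_t num_regs) : line_(num_regs, Kind::kConflict) {}

  // Mirrors the patched signatures: check_type is a non-const reference.
  bool VerifyRegisterType(uint32_t vsrc, RegType& check_type) {
    RegType src{line_[vsrc]};
    if (!check_type.IsAssignableFrom(src)) {
      std::cerr << "register v" << vsrc << " has unexpected type\n";
      return false;
    }
    return true;
  }

  bool SetRegisterType(uint32_t vdst, RegType& new_type) {
    line_[vdst] = new_type.kind;
    return true;
  }

  // Same flow as CheckBinaryOp in the patch: verify vB and vC, then set vA.
  void CheckBinaryOp(uint32_t vA, uint32_t vB, uint32_t vC,
                     RegType& dst_type, RegType& src_type1, RegType& src_type2) {
    if (VerifyRegisterType(vB, src_type1) && VerifyRegisterType(vC, src_type2)) {
      SetRegisterType(vA, dst_type);
    }
  }

 private:
  std::vector<Kind> line_;
};

int main() {
  RegisterLine line(4);
  RegType integer{Kind::kInteger};
  line.SetRegisterType(1, integer);
  line.SetRegisterType(2, integer);
  line.CheckBinaryOp(/*vA=*/0, /*vB=*/1, /*vC=*/2, integer, integer, integer);
  return 0;
}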
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index fdc6e3f487..3a6a72b841 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -47,6 +47,8 @@ jclass WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler;
jclass WellKnownClasses::java_lang_ThreadGroup;
jclass WellKnownClasses::java_lang_Throwable;
jclass WellKnownClasses::java_nio_DirectByteBuffer;
+jclass WellKnownClasses::java_util_Collections;
+jclass WellKnownClasses::libcore_util_EmptyArray;
jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk;
jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer;
@@ -150,6 +152,8 @@ void WellKnownClasses::Init(JNIEnv* env) {
java_lang_ThreadGroup = CacheClass(env, "java/lang/ThreadGroup");
java_lang_Throwable = CacheClass(env, "java/lang/Throwable");
java_nio_DirectByteBuffer = CacheClass(env, "java/nio/DirectByteBuffer");
+ java_util_Collections = CacheClass(env, "java/util/Collections");
+ libcore_util_EmptyArray = CacheClass(env, "libcore/util/EmptyArray");
org_apache_harmony_dalvik_ddmc_Chunk = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk");
org_apache_harmony_dalvik_ddmc_DdmServer = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer");
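The two new entries above follow the existing CacheClass pattern: resolve the class once during Init and keep it alive for the lifetime of the runtime. As a hedged sketch (not the runtime's actual CacheClass implementation), the usual JNI idiom looks like this: resolve a local reference with FindClass and promote it to a global reference so the cached jclass stays valid across calls.

#include <cstdlib>
#include <jni.h>

// Hedged sketch of the "cache a jclass" idiom: resolve once, pin with a
// global reference, and drop the temporary local reference.
static jclass CacheClassSketch(JNIEnv* env, const char* jni_class_name) {
  jclass local = env->FindClass(jni_class_name);
  if (local == nullptr) {
    std::abort();  // class missing from the boot classpath; fatal at startup
  }
  jclass global = reinterpret_cast<jclass>(env->NewGlobalRef(local));
  env->DeleteLocalRef(local);
  return global;
}

// Usage mirroring the entries added above (class names taken from the patch):
//   java_util_Collections = CacheClassSketch(env, "java/util/Collections");
//   libcore_util_EmptyArray = CacheClassSketch(env, "libcore/util/EmptyArray");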
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index f6c2930909..7639f50c69 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -60,7 +60,9 @@ struct WellKnownClasses {
static jclass java_lang_ThreadGroup;
static jclass java_lang_Thread$UncaughtExceptionHandler;
static jclass java_lang_Throwable;
+ static jclass java_util_Collections;
static jclass java_nio_DirectByteBuffer;
+ static jclass libcore_util_EmptyArray;
static jclass org_apache_harmony_dalvik_ddmc_Chunk;
static jclass org_apache_harmony_dalvik_ddmc_DdmServer;
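Finally, a short usage sketch of one of the cached classes from native code, assuming a valid JNIEnv: it calls java.util.Collections.emptyList() through the cached jclass. The function name GetEmptyList and the minimal error handling are illustrative assumptions; only the class and method signature are standard JNI.

#include <jni.h>

// Hedged sketch: invoke java.util.Collections.emptyList() via a cached jclass.
// collections_class stands in for WellKnownClasses::java_util_Collections.
static jobject GetEmptyList(JNIEnv* env, jclass collections_class) {
  jmethodID empty_list = env->GetStaticMethodID(
      collections_class, "emptyList", "()Ljava/util/List;");
  if (empty_list == nullptr) {
    return nullptr;  // lookup failed; a Java exception is now pending
  }
  return env->CallStaticObjectMethod(collections_class, empty_list);
}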