Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.bp | 2
-rw-r--r--  runtime/arch/arm/fault_handler_arm.cc | 18
-rw-r--r--  runtime/arch/arm/instruction_set_features_arm_test.cc | 24
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 6
-rw-r--r--  runtime/arch/arm64/fault_handler_arm64.cc | 15
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 14
-rw-r--r--  runtime/arch/mips/entrypoints_init_mips.cc | 164
-rw-r--r--  runtime/arch/mips/fault_handler_mips.cc | 4
-rw-r--r--  runtime/arch/mips/quick_entrypoints_mips.S | 157
-rw-r--r--  runtime/arch/mips64/asm_support_mips64.S | 6
-rw-r--r--  runtime/arch/mips64/entrypoints_init_mips64.cc | 76
-rw-r--r--  runtime/arch/mips64/fault_handler_mips64.cc | 4
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64.cc | 70
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64.h | 14
-rw-r--r--  runtime/arch/mips64/instruction_set_features_mips64_test.cc | 22
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 182
-rw-r--r--  runtime/arch/mips64/registers_mips64.cc | 9
-rw-r--r--  runtime/arch/mips64/registers_mips64.h | 39
-rw-r--r--  runtime/arch/x86/fault_handler_x86.cc | 21
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 19
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 19
-rw-r--r--  runtime/art_field.h | 4
-rw-r--r--  runtime/art_method-inl.h | 13
-rw-r--r--  runtime/art_method.cc | 13
-rw-r--r--  runtime/art_method.h | 8
-rw-r--r--  runtime/backtrace_helper.h | 86
-rw-r--r--  runtime/base/arena_allocator.cc | 126
-rw-r--r--  runtime/base/arena_allocator.h | 56
-rw-r--r--  runtime/base/bit_utils.h | 113
-rw-r--r--  runtime/base/histogram-inl.h | 3
-rw-r--r--  runtime/base/mutex.cc | 72
-rw-r--r--  runtime/base/mutex.h | 21
-rw-r--r--  runtime/base/scoped_arena_allocator.h | 11
-rw-r--r--  runtime/base/scoped_flock.cc | 5
-rw-r--r--  runtime/base/unix_file/fd_file.cc | 2
-rw-r--r--  runtime/bytecode_utils.h | 180
-rw-r--r--  runtime/cha.cc | 114
-rw-r--r--  runtime/cha.h | 23
-rw-r--r--  runtime/class_linker-inl.h | 15
-rw-r--r--  runtime/class_linker.cc | 321
-rw-r--r--  runtime/class_linker.h | 18
-rw-r--r--  runtime/class_linker_test.cc | 3
-rw-r--r--  runtime/class_table.cc | 22
-rw-r--r--  runtime/class_table.h | 15
-rw-r--r--  runtime/common_runtime_test.cc | 68
-rw-r--r--  runtime/common_runtime_test.h | 2
-rw-r--r--  runtime/common_throws.cc | 8
-rw-r--r--  runtime/common_throws.h | 6
-rw-r--r--  runtime/dex_file.cc | 15
-rw-r--r--  runtime/dex_file.h | 6
-rw-r--r--  runtime/dex_file_annotations.cc | 329
-rw-r--r--  runtime/dex_file_verifier.cc | 6
-rw-r--r--  runtime/dexopt_test.cc | 2
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 4
-rw-r--r--  runtime/entrypoints/entrypoint_utils.cc | 4
-rw-r--r--  runtime/entrypoints/quick/quick_throw_entrypoints.cc | 13
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 58
-rw-r--r--  runtime/entrypoints/runtime_asm_entrypoints.h | 6
-rw-r--r--  runtime/entrypoints_order_test.cc | 3
-rw-r--r--  runtime/fault_handler.cc | 208
-rw-r--r--  runtime/fault_handler.h | 5
-rw-r--r--  runtime/gc/accounting/read_barrier_table.h | 2
-rw-r--r--  runtime/gc/allocator/rosalloc.h | 2
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 46
-rw-r--r--  runtime/gc/collector/mark_compact.cc | 4
-rw-r--r--  runtime/gc/collector_type.h | 2
-rw-r--r--  runtime/gc/gc_cause.cc | 1
-rw-r--r--  runtime/gc/gc_cause.h | 4
-rw-r--r--  runtime/gc/heap.cc | 60
-rw-r--r--  runtime/gc/scoped_gc_critical_section.cc | 8
-rw-r--r--  runtime/gc/space/image_space.cc | 18
-rw-r--r--  runtime/gc/space/large_object_space.cc | 3
-rw-r--r--  runtime/gc/space/region_space-inl.h | 22
-rw-r--r--  runtime/gc/space/region_space.cc | 57
-rw-r--r--  runtime/gc/space/region_space.h | 45
-rw-r--r--  runtime/image.cc | 2
-rw-r--r--  runtime/imt_conflict_table.h | 16
-rw-r--r--  runtime/imtable.h | 8
-rw-r--r--  runtime/interpreter/interpreter.cc | 14
-rw-r--r--  runtime/interpreter/interpreter_common.h | 68
-rw-r--r--  runtime/interpreter/interpreter_intrinsics.cc | 481
-rw-r--r--  runtime/interpreter/interpreter_intrinsics.h | 41
-rw-r--r--  runtime/interpreter/mterp/mterp.cc | 19
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 66
-rw-r--r--  runtime/interpreter/unstarted_runtime_list.h | 5
-rw-r--r--  runtime/interpreter/unstarted_runtime_test.cc | 21
-rw-r--r--  runtime/java_vm_ext.cc | 4
-rw-r--r--  runtime/jdwp/jdwp.h | 42
-rw-r--r--  runtime/jdwp/jdwp_adb.cc | 6
-rw-r--r--  runtime/jdwp/jdwp_event.cc | 22
-rw-r--r--  runtime/jdwp/jdwp_main.cc | 6
-rw-r--r--  runtime/jdwp/object_registry.cc | 5
-rw-r--r--  runtime/jdwp/object_registry.h | 1
-rw-r--r--  runtime/jit/jit.cc | 8
-rw-r--r--  runtime/jit/jit.h | 12
-rw-r--r--  runtime/jit/jit_code_cache.cc | 32
-rw-r--r--  runtime/jit/jit_code_cache.h | 6
-rw-r--r--  runtime/jit/profile_compilation_info.cc | 363
-rw-r--r--  runtime/jit/profile_compilation_info.h | 101
-rw-r--r--  runtime/jit/profile_compilation_info_test.cc | 163
-rw-r--r--  runtime/jit/profile_saver.cc | 241
-rw-r--r--  runtime/jit/profile_saver.h | 46
-rw-r--r--  runtime/jit/profile_saver_options.h | 15
-rw-r--r--  runtime/jit/profiling_info.h | 10
-rw-r--r--  runtime/jvalue.h | 8
-rw-r--r--  runtime/linear_alloc.cc | 5
-rw-r--r--  runtime/linear_alloc.h | 1
-rw-r--r--  runtime/lock_word.h | 3
-rw-r--r--  runtime/method_handles.cc | 513
-rw-r--r--  runtime/method_info.h | 77
-rw-r--r--  runtime/mirror/array-inl.h | 4
-rw-r--r--  runtime/mirror/array.h | 7
-rw-r--r--  runtime/mirror/class-inl.h | 21
-rw-r--r--  runtime/mirror/class.cc | 6
-rw-r--r--  runtime/mirror/class.h | 14
-rw-r--r--  runtime/mirror/class_ext-inl.h | 47
-rw-r--r--  runtime/mirror/class_ext.cc | 12
-rw-r--r--  runtime/mirror/class_ext.h | 27
-rw-r--r--  runtime/mirror/dex_cache-inl.h | 78
-rw-r--r--  runtime/mirror/dex_cache.cc | 66
-rw-r--r--  runtime/mirror/dex_cache.h | 139
-rw-r--r--  runtime/mirror/dex_cache_test.cc | 3
-rw-r--r--  runtime/mirror/field.cc | 12
-rw-r--r--  runtime/mirror/object-inl.h | 10
-rw-r--r--  runtime/mirror/object.h | 9
-rw-r--r--  runtime/mirror/string.cc | 25
-rw-r--r--  runtime/mirror/string.h | 2
-rw-r--r--  runtime/monitor.h | 2
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 53
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 61
-rw-r--r--  runtime/native/java_lang_Class.cc | 52
-rw-r--r--  runtime/native/java_lang_DexCache.cc | 109
-rw-r--r--  runtime/native/java_lang_DexCache.h | 28
-rw-r--r--  runtime/native/java_lang_String.cc | 5
-rw-r--r--  runtime/native/java_lang_reflect_Executable.cc | 140
-rw-r--r--  runtime/native/java_lang_reflect_Field.cc | 8
-rw-r--r--  runtime/native/java_lang_reflect_Method.cc | 3
-rw-r--r--  runtime/native_bridge_art_interface.cc | 2
-rw-r--r--  runtime/native_stack_dump.cc | 2
-rw-r--r--  runtime/non_debuggable_classes.cc | 8
-rw-r--r--  runtime/non_debuggable_classes.h | 8
-rw-r--r--  runtime/nth_caller_visitor.h | 3
-rw-r--r--  runtime/oat.h | 2
-rw-r--r--  runtime/oat_file_assistant.cc | 88
-rw-r--r--  runtime/oat_file_assistant.h | 13
-rw-r--r--  runtime/oat_file_manager.cc | 271
-rw-r--r--  runtime/oat_quick_method_header.cc | 13
-rw-r--r--  runtime/oat_quick_method_header.h | 51
-rw-r--r--  runtime/openjdkjvmti/Android.bp | 18
-rw-r--r--  runtime/openjdkjvmti/OpenjdkJvmTi.cc | 175
-rw-r--r--  runtime/openjdkjvmti/events-inl.h | 21
-rw-r--r--  runtime/openjdkjvmti/events.h | 5
-rw-r--r--  runtime/openjdkjvmti/fixed_up_dex_file.cc | 145
-rw-r--r--  runtime/openjdkjvmti/fixed_up_dex_file.h | 82
-rw-r--r--  runtime/openjdkjvmti/include/jvmti.h (renamed from runtime/openjdkjvmti/jvmti.h) | 0
-rw-r--r--  runtime/openjdkjvmti/jvmti_weak_table-inl.h | 389
-rw-r--r--  runtime/openjdkjvmti/jvmti_weak_table.h | 215
-rw-r--r--  runtime/openjdkjvmti/object_tagging.cc | 336
-rw-r--r--  runtime/openjdkjvmti/object_tagging.h | 189
-rw-r--r--  runtime/openjdkjvmti/ti_class.cc | 56
-rw-r--r--  runtime/openjdkjvmti/ti_class_definition.cc | 132
-rw-r--r--  runtime/openjdkjvmti/ti_class_definition.h | 88
-rw-r--r--  runtime/openjdkjvmti/ti_heap.cc | 587
-rw-r--r--  runtime/openjdkjvmti/ti_heap.h | 3
-rw-r--r--  runtime/openjdkjvmti/ti_phase.cc | 13
-rw-r--r--  runtime/openjdkjvmti/ti_phase.h | 1
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.cc | 329
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.h | 20
-rw-r--r--  runtime/openjdkjvmti/ti_thread.cc | 42
-rw-r--r--  runtime/openjdkjvmti/ti_thread.h | 10
-rw-r--r--  runtime/openjdkjvmti/transform.cc | 86
-rw-r--r--  runtime/openjdkjvmti/transform.h | 12
-rw-r--r--  runtime/parsed_options.cc | 3
-rw-r--r--  runtime/primitive.cc | 2
-rw-r--r--  runtime/quick/inline_method_analyser.cc | 15
-rw-r--r--  runtime/reflection.cc | 4
-rw-r--r--  runtime/reflection_test.cc | 1
-rw-r--r--  runtime/runtime.cc | 47
-rw-r--r--  runtime/runtime.h | 7
-rw-r--r--  runtime/runtime_options.def | 4
-rw-r--r--  runtime/scoped_thread_state_change.h | 2
-rw-r--r--  runtime/stack.cc | 4
-rw-r--r--  runtime/stack.h | 16
-rw-r--r--  runtime/stack_map.cc | 10
-rw-r--r--  runtime/stack_map.h | 61
-rw-r--r--  runtime/thread-inl.h | 4
-rw-r--r--  runtime/thread.cc | 19
-rw-r--r--  runtime/thread.h | 22
-rw-r--r--  runtime/thread_list.cc | 2
-rw-r--r--  runtime/transaction.h | 2
-rw-r--r--  runtime/transaction_test.cc | 6
-rw-r--r--  runtime/type_lookup_table.h | 2
-rw-r--r--  runtime/utils.cc | 71
-rw-r--r--  runtime/utils.h | 14
-rw-r--r--  runtime/utils/dex_cache_arrays_layout-inl.h | 25
-rw-r--r--  runtime/utils/dex_cache_arrays_layout.h | 6
-rw-r--r--  runtime/vdex_file.cc | 26
-rw-r--r--  runtime/vdex_file.h | 10
-rw-r--r--  runtime/verifier/reg_type.cc | 7
-rw-r--r--  runtime/verifier/reg_type.h | 122
-rw-r--r--  runtime/verifier/verifier_deps.cc | 9
-rw-r--r--  runtime/well_known_classes.cc | 4
-rw-r--r--  runtime/well_known_classes.h | 2
203 files changed, 7205 insertions, 3259 deletions
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 9958814f58..6c3bc0450b 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -99,6 +99,7 @@ cc_defaults {
"intern_table.cc",
"interpreter/interpreter.cc",
"interpreter/interpreter_common.cc",
+ "interpreter/interpreter_intrinsics.cc",
"interpreter/interpreter_switch_impl.cc",
"interpreter/unstarted_runtime.cc",
"java_vm_ext.cc",
@@ -148,7 +149,6 @@ cc_defaults {
"native/dalvik_system_VMStack.cc",
"native/dalvik_system_ZygoteHooks.cc",
"native/java_lang_Class.cc",
- "native/java_lang_DexCache.cc",
"native/java_lang_Object.cc",
"native/java_lang_String.cc",
"native/java_lang_StringFactory.cc",
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index daa2dff060..923ff4ff29 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -47,24 +47,6 @@ static uint32_t GetInstructionSize(uint8_t* pc) {
return instr_size;
}
-void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context) {
- // Note that in this handler we set up the registers and return to
- // longjmp directly rather than going through an assembly language stub. The
- // reason for this is that longjmp is (currently) in ARM mode and that would
- // require switching modes in the stub - incurring an unwanted relocation.
-
- struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
- struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
-
- sc->arm_r0 = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
- sc->arm_r1 = 1;
- sc->arm_pc = reinterpret_cast<uintptr_t>(longjmp);
- VLOG(signals) << "longjmp address: " << reinterpret_cast<void*>(sc->arm_pc);
-}
-
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
diff --git a/runtime/arch/arm/instruction_set_features_arm_test.cc b/runtime/arch/arm/instruction_set_features_arm_test.cc
index 6d5dd6d50d..35823518ce 100644
--- a/runtime/arch/arm/instruction_set_features_arm_test.cc
+++ b/runtime/arch/arm/instruction_set_features_arm_test.cc
@@ -34,6 +34,18 @@ TEST(ArmInstructionSetFeaturesTest, ArmFeaturesFromVariant) {
EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", krait_features->GetFeatureString().c_str());
EXPECT_EQ(krait_features->AsBitmap(), 3U);
+ // Build features for a 32-bit ARM kryo processor.
+ std::unique_ptr<const InstructionSetFeatures> kryo_features(
+ InstructionSetFeatures::FromVariant(kArm, "kryo", &error_msg));
+ ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
+
+ ASSERT_EQ(kryo_features->GetInstructionSet(), kArm);
+ EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("div,atomic_ldrd_strd,armv8a", kryo_features->GetFeatureString().c_str());
+ EXPECT_EQ(kryo_features->AsBitmap(), 7U);
+
// Build features for a 32-bit ARM denver processor.
std::unique_ptr<const InstructionSetFeatures> denver_features(
InstructionSetFeatures::FromVariant(kArm, "denver", &error_msg));
@@ -86,6 +98,18 @@ TEST(ArmInstructionSetFeaturesTest, ArmAddFeaturesFromString) {
EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", krait_features->GetFeatureString().c_str());
EXPECT_EQ(krait_features->AsBitmap(), 3U);
+ // Build features for a 32-bit ARM with LPAE and div processor.
+ std::unique_ptr<const InstructionSetFeatures> kryo_features(
+ base_features->AddFeaturesFromString("atomic_ldrd_strd,div", &error_msg));
+ ASSERT_TRUE(kryo_features.get() != nullptr) << error_msg;
+
+ ASSERT_EQ(kryo_features->GetInstructionSet(), kArm);
+ EXPECT_TRUE(kryo_features->Equals(krait_features.get()));
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasDivideInstruction());
+ EXPECT_TRUE(kryo_features->AsArmInstructionSetFeatures()->HasAtomicLdrdAndStrd());
+ EXPECT_STREQ("div,atomic_ldrd_strd,-armv8a", kryo_features->GetFeatureString().c_str());
+ EXPECT_EQ(kryo_features->AsBitmap(), 3U);
+
// Build features for a 32-bit ARM processor with LPAE and div flipped.
std::unique_ptr<const InstructionSetFeatures> denver_features(
base_features->AddFeaturesFromString("div,atomic_ldrd_strd,armv8a", &error_msg));
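The expected bitmaps in the test above encode the ARM features as individual bits: krait (div, atomic_ldrd_strd, no armv8a) yields 3, while kryo additionally reports armv8a and yields 7, so armv8a contributes the extra bit of value 4. A minimal sketch of that encoding follows; the individual bit positions assumed for div and atomic_ldrd_strd are an assumption, since only their combined value is visible in the test.

#include <cassert>
#include <cstdint>

// Assumed bit assignment, consistent with the expected values 3U (krait) and 7U (kryo).
enum ArmFeatureBits : uint32_t {
  kDiv            = 1u << 0,  // assumed position
  kAtomicLdrdStrd = 1u << 1,  // assumed position
  kArmv8a         = 1u << 2,  // implied by the 7U - 3U difference
};

int main() {
  assert((kDiv | kAtomicLdrdStrd) == 3u);            // krait_features->AsBitmap()
  assert((kDiv | kAtomicLdrdStrd | kArmv8a) == 7u);  // kryo_features->AsBitmap()
  return 0;
}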
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 85310911be..029de4680c 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1487,6 +1487,7 @@ ENTRY art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
+ mov r0, r12 // Load interface method
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
@@ -1613,6 +1614,11 @@ ENTRY art_quick_to_interpreter_bridge
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
+/*
+ * Called to attempt to execute an obsolete method.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod
+
/*
* Routine that intercepts method calls and returns.
*/
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index c02be87e2d..193af58f11 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -39,21 +39,6 @@ extern "C" void art_quick_implicit_suspend();
namespace art {
-void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context) {
- // To match the case used in ARM we return directly to the longjmp function
- // rather than through a trivial assembly language stub.
-
- struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
- struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
- Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
-
- sc->regs[0] = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
- sc->regs[1] = 1;
- sc->pc = reinterpret_cast<uintptr_t>(longjmp);
-}
-
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 7cb50b7118..b2bbd0d560 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1966,6 +1966,7 @@ ENTRY art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
+ mov x0, xIP0 // Load interface method
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
@@ -2151,6 +2152,11 @@ ENTRY art_quick_to_interpreter_bridge
RETURN_OR_DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
+/*
+ * Called to attempt to execute an obsolete method.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod
+
//
// Instrumentation-related stubs
@@ -2378,9 +2384,8 @@ ENTRY \name
ret
.Lnot_marked_rb_\name:
// Check if the top two bits are one, if this is the case it is a forwarding address.
- mvn wIP0, wIP0
- cmp wzr, wIP0, lsr #30
- beq .Lret_forwarding_address\name
+ tst wIP0, wIP0, lsl #1
+ bmi .Lret_forwarding_address\name
.Lslow_rb_\name:
/*
* Allocate 44 stack slots * 8 = 352 bytes:
@@ -2451,10 +2456,9 @@ ENTRY \name
DECREASE_FRAME 352
ret
.Lret_forwarding_address\name:
- mvn wIP0, wIP0
// Shift left by the forwarding address shift. This clears out the state bits since they are
// in the top 2 bits of the lock word.
- lsl \wreg, wIP0, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
+ lsl \wreg, wIP0, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
ret
END \name
.endm
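The forwarding-address check above replaces the old mvn/cmp/beq sequence with a single tst/bmi pair: `tst wIP0, wIP0, lsl #1` sets the N flag to bit 31 of `wIP0 & (wIP0 << 1)`, which is one exactly when bits 31 and 30 of the lock word are both set, i.e. when it holds a forwarding address. A small self-contained sketch of that bit trick (the helper name is illustrative, not ART code):

#include <cassert>
#include <cstdint>

// Mirrors the flag computation of `tst w, w, lsl #1` followed by `bmi`:
// the branch is taken iff the sign bit of w & (w << 1) is set, i.e. iff the
// top two bits of the lock word are both one (the forwarding-address state).
bool IsForwardingAddress(uint32_t lock_word) {
  return static_cast<int32_t>(lock_word & (lock_word << 1)) < 0;
}

int main() {
  assert(IsForwardingAddress(0xC0000000u));   // bits 31 and 30 set
  assert(!IsForwardingAddress(0x80000000u));  // only bit 31 set
  assert(!IsForwardingAddress(0x40000000u));  // only bit 30 set
  return 0;
}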
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 36f9ea78e1..2349620c1d 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -32,6 +32,33 @@ namespace art {
// Cast entrypoints.
extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
+// Read barrier entrypoints.
+// art_quick_read_barrier_mark_regXX uses a non-standard calling
+// convention: it expects its input in register XX+1 and returns its
+// result in that same register, and saves and restores all
+// caller-save registers.
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg01(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg02(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg03(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg04(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg05(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg06(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg07(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg08(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg09(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg10(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg11(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg12(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg13(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg14(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg17(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg18(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg19(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg20(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg21(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
+
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
extern int32_t CmplDouble(double a, double b);
@@ -59,9 +86,71 @@ extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
extern "C" int64_t __divdi3(int64_t, int64_t);
extern "C" int64_t __moddi3(int64_t, int64_t);
-// No read barrier entrypoints for marking registers.
-void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints ATTRIBUTE_UNUSED,
- bool is_marking ATTRIBUTE_UNUSED) {}
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking) {
+ qpoints->pReadBarrierMarkReg01 = is_marking ? art_quick_read_barrier_mark_reg01 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg01),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg02 = is_marking ? art_quick_read_barrier_mark_reg02 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg02),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg03 = is_marking ? art_quick_read_barrier_mark_reg03 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg03),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg04 = is_marking ? art_quick_read_barrier_mark_reg04 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg04),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg05 = is_marking ? art_quick_read_barrier_mark_reg05 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg05),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg06 = is_marking ? art_quick_read_barrier_mark_reg06 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg06),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg07 = is_marking ? art_quick_read_barrier_mark_reg07 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg07),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg08 = is_marking ? art_quick_read_barrier_mark_reg08 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg08),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg09 = is_marking ? art_quick_read_barrier_mark_reg09 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg09),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg10 = is_marking ? art_quick_read_barrier_mark_reg10 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg10),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg11 = is_marking ? art_quick_read_barrier_mark_reg11 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg11),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg12 = is_marking ? art_quick_read_barrier_mark_reg12 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg12),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg13 = is_marking ? art_quick_read_barrier_mark_reg13 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg13),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg14 = is_marking ? art_quick_read_barrier_mark_reg14 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg14),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg17 = is_marking ? art_quick_read_barrier_mark_reg17 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg17),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg18 = is_marking ? art_quick_read_barrier_mark_reg18 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg18),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg19 = is_marking ? art_quick_read_barrier_mark_reg19 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg19),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg20 = is_marking ? art_quick_read_barrier_mark_reg20 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg20),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg21 = is_marking ? art_quick_read_barrier_mark_reg21 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg21),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg22 = is_marking ? art_quick_read_barrier_mark_reg22 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg22),
+ "Non-direct C stub marked direct.");
+ qpoints->pReadBarrierMarkReg29 = is_marking ? art_quick_read_barrier_mark_reg29 : nullptr;
+ static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg29),
+ "Non-direct C stub marked direct.");
+}
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Note: MIPS has asserts checking for the type of entrypoint. Don't move it
@@ -287,77 +376,19 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
static_assert(IsDirectEntrypoint(kQuickReadBarrierJni), "Direct C stub not marked direct.");
- // Read barriers (and these entry points in particular) are not
- // supported in the compiler on MIPS32.
+ UpdateReadBarrierEntrypoints(qpoints, /*is_marking*/ false);
+ // Cannot use the following registers to pass arguments:
+ // 0(ZERO), 1(AT), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
+ // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
qpoints->pReadBarrierMarkReg00 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg00),
"Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg01 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg01),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg02 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg02),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg03 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg03),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg04 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg04),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg05 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg05),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg06 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg06),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg07 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg07),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg08 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg08),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg09 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg09),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg10 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg10),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg11 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg11),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg12 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg12),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg13 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg13),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg14 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg14),
- "Non-direct C stub marked direct.");
qpoints->pReadBarrierMarkReg15 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg15),
"Non-direct C stub marked direct.");
qpoints->pReadBarrierMarkReg16 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg16),
"Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg17 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg17),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg18 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg18),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg19 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg19),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg20 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg20),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg21 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg21),
- "Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg22 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg22),
- "Non-direct C stub marked direct.");
qpoints->pReadBarrierMarkReg23 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg23),
"Non-direct C stub marked direct.");
@@ -376,9 +407,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pReadBarrierMarkReg28 = nullptr;
static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg28),
"Non-direct C stub marked direct.");
- qpoints->pReadBarrierMarkReg29 = nullptr;
- static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg29),
- "Non-direct C stub marked direct.");
qpoints->pReadBarrierSlow = artReadBarrierSlow;
static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct.");
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
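Per the comments above, only registers that can actually carry a reference get a mark entry point on MIPS32: UpdateReadBarrierEntrypoints installs reg01-reg14, reg17-reg22 and reg29 (architectural registers 2-15, 18-23 and 30), while the slots for AT, S0/S1, T8/T9, K0/K1, GP, SP and RA stay null. A quick sketch of that mapping, derived from the register numbering stated in the comments (illustrative helper, not part of the patch):

#include <cassert>

// art_quick_read_barrier_mark_regNN serves architectural register NN + 1; a stub
// exists only for registers that may hold a reference: V0/V1 (2-3), A0-A3 (4-7),
// T0-T7 (8-15), S2-S7 (18-23) and S8 (30).
bool HasMips32MarkEntrypoint(int reg) {
  return (reg >= 2 && reg <= 15) || (reg >= 18 && reg <= 23) || reg == 30;
}

int main() {
  assert(HasMips32MarkEntrypoint(2));    // V0 -> art_quick_read_barrier_mark_reg01
  assert(!HasMips32MarkEntrypoint(16));  // S0 is reserved (suspend register)
  assert(!HasMips32MarkEntrypoint(29));  // SP cannot pass a reference argument
  assert(HasMips32MarkEntrypoint(30));   // S8 -> art_quick_read_barrier_mark_reg29
  return 0;
}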
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
index 1792f31578..f9c19e87e6 100644
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ b/runtime/arch/mips/fault_handler_mips.cc
@@ -35,10 +35,6 @@ extern "C" void art_quick_throw_null_pointer_exception_from_signal();
namespace art {
-void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED) {
-}
-
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index ec8ae85722..722a67908f 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1763,6 +1763,7 @@ ENTRY art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
# Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
+ move $a0, $t7 # Load interface method.
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
@@ -1876,6 +1877,14 @@ ENTRY art_quick_to_interpreter_bridge
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
+ .extern artInvokeObsoleteMethod
+ENTRY art_invoke_obsolete_method_stub
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ la $t9, artInvokeObsoleteMethod
+ jalr $t9 # (Method* method, Thread* self)
+ move $a1, rSELF # pass Thread::Current
+END art_invoke_obsolete_method_stub
+
/*
* Routine that intercepts method calls and returns.
*/
@@ -2048,11 +2057,12 @@ ENTRY_NO_GP art_quick_indexof
lw $t0, MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
#endif
slt $t1, $a2, $zero # if fromIndex < 0
-#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
+#if defined(_MIPS_ARCH_MIPS32R6)
seleqz $a2, $a2, $t1 # fromIndex = 0;
#else
movn $a2, $zero, $t1 # fromIndex = 0;
#endif
+
#if (STRING_COMPRESSION_FEATURE)
srl $t0, $a3, 1 # $a3 holds count (with flag) and $t0 holds actual length
#endif
@@ -2196,6 +2206,151 @@ ENTRY_NO_GP art_quick_string_compareto
subu $v0, $t0, $t1 # return (this.charAt(i) - anotherString.charAt(i))
END art_quick_string_compareto
+ /*
+ * Create a function `name` calling the ReadBarrier::Mark routine,
+ * getting its argument and returning its result through register
+ * `reg`, saving and restoring all caller-save registers.
+ */
+.macro READ_BARRIER_MARK_REG name, reg
+ENTRY \name
+ /* TODO: optimizations: mark bit, forwarding. */
+ addiu $sp, $sp, -160 # includes 16 bytes of space for argument registers a0-a3
+ .cfi_adjust_cfa_offset 160
+
+ sw $ra, 156($sp)
+ .cfi_rel_offset 31, 156
+ sw $t8, 152($sp)
+ .cfi_rel_offset 24, 152
+ sw $t7, 148($sp)
+ .cfi_rel_offset 15, 148
+ sw $t6, 144($sp)
+ .cfi_rel_offset 14, 144
+ sw $t5, 140($sp)
+ .cfi_rel_offset 13, 140
+ sw $t4, 136($sp)
+ .cfi_rel_offset 12, 136
+ sw $t3, 132($sp)
+ .cfi_rel_offset 11, 132
+ sw $t2, 128($sp)
+ .cfi_rel_offset 10, 128
+ sw $t1, 124($sp)
+ .cfi_rel_offset 9, 124
+ sw $t0, 120($sp)
+ .cfi_rel_offset 8, 120
+ sw $a3, 116($sp)
+ .cfi_rel_offset 7, 116
+ sw $a2, 112($sp)
+ .cfi_rel_offset 6, 112
+ sw $a1, 108($sp)
+ .cfi_rel_offset 5, 108
+ sw $a0, 104($sp)
+ .cfi_rel_offset 4, 104
+ sw $v1, 100($sp)
+ .cfi_rel_offset 3, 100
+ sw $v0, 96($sp)
+ .cfi_rel_offset 2, 96
+
+ la $t9, artReadBarrierMark
+
+ sdc1 $f18, 88($sp)
+ sdc1 $f16, 80($sp)
+ sdc1 $f14, 72($sp)
+ sdc1 $f12, 64($sp)
+ sdc1 $f10, 56($sp)
+ sdc1 $f8, 48($sp)
+ sdc1 $f6, 40($sp)
+ sdc1 $f4, 32($sp)
+ sdc1 $f2, 24($sp)
+
+ .ifnc \reg, $a0
+ move $a0, \reg # pass obj from `reg` in a0
+ .endif
+ jalr $t9 # v0 <- artReadBarrierMark(obj)
+ sdc1 $f0, 16($sp) # in delay slot
+
+ lw $ra, 156($sp)
+ .cfi_restore 31
+ lw $t8, 152($sp)
+ .cfi_restore 24
+ lw $t7, 148($sp)
+ .cfi_restore 15
+ lw $t6, 144($sp)
+ .cfi_restore 14
+ lw $t5, 140($sp)
+ .cfi_restore 13
+ lw $t4, 136($sp)
+ .cfi_restore 12
+ lw $t3, 132($sp)
+ .cfi_restore 11
+ lw $t2, 128($sp)
+ .cfi_restore 10
+ lw $t1, 124($sp)
+ .cfi_restore 9
+ lw $t0, 120($sp)
+ .cfi_restore 8
+ lw $a3, 116($sp)
+ .cfi_restore 7
+ lw $a2, 112($sp)
+ .cfi_restore 6
+ lw $a1, 108($sp)
+ .cfi_restore 5
+ lw $a0, 104($sp)
+ .cfi_restore 4
+ lw $v1, 100($sp)
+ .cfi_restore 3
+
+ .ifnc \reg, $v0
+ move \reg, $v0 # `reg` <- v0
+ lw $v0, 96($sp)
+ .cfi_restore 2
+ .endif
+
+ ldc1 $f18, 88($sp)
+ ldc1 $f16, 80($sp)
+ ldc1 $f14, 72($sp)
+ ldc1 $f12, 64($sp)
+ ldc1 $f10, 56($sp)
+ ldc1 $f8, 48($sp)
+ ldc1 $f6, 40($sp)
+ ldc1 $f4, 32($sp)
+ ldc1 $f2, 24($sp)
+ ldc1 $f0, 16($sp)
+
+ jalr $zero, $ra
+ addiu $sp, $sp, 160
+ .cfi_adjust_cfa_offset -160
+END \name
+.endm
+
+// Note that art_quick_read_barrier_mark_regXX corresponds to register XX+1.
+// ZERO (register 0) is reserved.
+// AT (register 1) is reserved as a temporary/scratch register.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, $v0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, $v1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, $a0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, $a1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, $a2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, $a3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, $t0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, $t1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, $t2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, $t3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, $t4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, $t5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, $t6
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, $t7
+// S0 and S1 (registers 16 and 17) are reserved as suspended and thread registers.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, $s2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, $s3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, $s4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, $s5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, $s6
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, $s7
+// T8 and T9 (registers 24 and 25) are reserved as temporary/scratch registers.
+// K0, K1, GP, SP (registers 26 - 29) are reserved.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
+// RA (register 31) is reserved.
+
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
SETUP_SAVE_REFS_AND_ARGS_FRAME
diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S
index 35f20fbf44..ef82bd239d 100644
--- a/runtime/arch/mips64/asm_support_mips64.S
+++ b/runtime/arch/mips64/asm_support_mips64.S
@@ -70,14 +70,16 @@
// Macros to poison (negate) the reference for heap poisoning.
.macro POISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
- subu \rRef, $zero, \rRef
+ dsubu \rRef, $zero, \rRef
+ dext \rRef, \rRef, 0, 32
#endif // USE_HEAP_POISONING
.endm
// Macros to unpoison (negate) the reference for heap poisoning.
.macro UNPOISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
- subu \rRef, $zero, \rRef
+ dsubu \rRef, $zero, \rRef
+ dext \rRef, \rRef, 0, 32
#endif // USE_HEAP_POISONING
.endm
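The switch from subu to dsubu plus dext keeps the poisoned value as a zero-extended 32-bit quantity in a 64-bit register (a plain subu would sign-extend the negated reference on MIPS64). A rough C++ equivalent of the updated macros, written under that reading and only for illustration:

#include <cstdint>

// dsubu $zero, ref negates the value; dext ..., 0, 32 keeps only the low
// 32 bits, so the register holds the negated reference zero-extended to 64 bits.
uint64_t PoisonHeapRef(uint64_t ref) {
  return static_cast<uint32_t>(0u - static_cast<uint32_t>(ref));
}

// Unpoisoning applies the same negation: x -> -x is its own inverse mod 2^32.
uint64_t UnpoisonHeapRef(uint64_t poisoned) {
  return PoisonHeapRef(poisoned);
}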
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index bc17d47366..66405cbe50 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -32,6 +32,32 @@ namespace art {
// Cast entrypoints.
extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
+// Read barrier entrypoints.
+// art_quick_read_barrier_mark_regXX uses a non-standard calling
+// convention: it expects its input in register XX+1 and returns its
+// result in that same register, and saves and restores all
+// caller-save registers.
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg01(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg02(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg03(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg04(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg05(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg06(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg07(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg08(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg09(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg10(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg11(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg12(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg13(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg17(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg18(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg19(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg20(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg21(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*);
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
+
// Math entrypoints.
extern int32_t CmpgDouble(double a, double b);
extern int32_t CmplDouble(double a, double b);
@@ -60,8 +86,28 @@ extern "C" int64_t __divdi3(int64_t, int64_t);
extern "C" int64_t __moddi3(int64_t, int64_t);
// No read barrier entrypoints for marking registers.
-void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints ATTRIBUTE_UNUSED,
- bool is_marking ATTRIBUTE_UNUSED) {}
+void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking) {
+ qpoints->pReadBarrierMarkReg01 = is_marking ? art_quick_read_barrier_mark_reg01 : nullptr;
+ qpoints->pReadBarrierMarkReg02 = is_marking ? art_quick_read_barrier_mark_reg02 : nullptr;
+ qpoints->pReadBarrierMarkReg03 = is_marking ? art_quick_read_barrier_mark_reg03 : nullptr;
+ qpoints->pReadBarrierMarkReg04 = is_marking ? art_quick_read_barrier_mark_reg04 : nullptr;
+ qpoints->pReadBarrierMarkReg05 = is_marking ? art_quick_read_barrier_mark_reg05 : nullptr;
+ qpoints->pReadBarrierMarkReg06 = is_marking ? art_quick_read_barrier_mark_reg06 : nullptr;
+ qpoints->pReadBarrierMarkReg07 = is_marking ? art_quick_read_barrier_mark_reg07 : nullptr;
+ qpoints->pReadBarrierMarkReg08 = is_marking ? art_quick_read_barrier_mark_reg08 : nullptr;
+ qpoints->pReadBarrierMarkReg09 = is_marking ? art_quick_read_barrier_mark_reg09 : nullptr;
+ qpoints->pReadBarrierMarkReg10 = is_marking ? art_quick_read_barrier_mark_reg10 : nullptr;
+ qpoints->pReadBarrierMarkReg11 = is_marking ? art_quick_read_barrier_mark_reg11 : nullptr;
+ qpoints->pReadBarrierMarkReg12 = is_marking ? art_quick_read_barrier_mark_reg12 : nullptr;
+ qpoints->pReadBarrierMarkReg13 = is_marking ? art_quick_read_barrier_mark_reg13 : nullptr;
+ qpoints->pReadBarrierMarkReg17 = is_marking ? art_quick_read_barrier_mark_reg17 : nullptr;
+ qpoints->pReadBarrierMarkReg18 = is_marking ? art_quick_read_barrier_mark_reg18 : nullptr;
+ qpoints->pReadBarrierMarkReg19 = is_marking ? art_quick_read_barrier_mark_reg19 : nullptr;
+ qpoints->pReadBarrierMarkReg20 = is_marking ? art_quick_read_barrier_mark_reg20 : nullptr;
+ qpoints->pReadBarrierMarkReg21 = is_marking ? art_quick_read_barrier_mark_reg21 : nullptr;
+ qpoints->pReadBarrierMarkReg22 = is_marking ? art_quick_read_barrier_mark_reg22 : nullptr;
+ qpoints->pReadBarrierMarkReg29 = is_marking ? art_quick_read_barrier_mark_reg29 : nullptr;
+}
void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
DefaultInitEntryPoints(jpoints, qpoints);
@@ -103,38 +149,20 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
// Read barrier.
qpoints->pReadBarrierJni = ReadBarrierJni;
- // Read barriers (and these entry points in particular) are not
- // supported in the compiler on MIPS64.
+ UpdateReadBarrierEntrypoints(qpoints, /*is_marking*/ false);
+ // Cannot use the following registers to pass arguments:
+ // 0(ZERO), 1(AT), 15(T3), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
+ // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
qpoints->pReadBarrierMarkReg00 = nullptr;
- qpoints->pReadBarrierMarkReg01 = nullptr;
- qpoints->pReadBarrierMarkReg02 = nullptr;
- qpoints->pReadBarrierMarkReg03 = nullptr;
- qpoints->pReadBarrierMarkReg04 = nullptr;
- qpoints->pReadBarrierMarkReg05 = nullptr;
- qpoints->pReadBarrierMarkReg06 = nullptr;
- qpoints->pReadBarrierMarkReg07 = nullptr;
- qpoints->pReadBarrierMarkReg08 = nullptr;
- qpoints->pReadBarrierMarkReg09 = nullptr;
- qpoints->pReadBarrierMarkReg10 = nullptr;
- qpoints->pReadBarrierMarkReg11 = nullptr;
- qpoints->pReadBarrierMarkReg12 = nullptr;
- qpoints->pReadBarrierMarkReg13 = nullptr;
qpoints->pReadBarrierMarkReg14 = nullptr;
qpoints->pReadBarrierMarkReg15 = nullptr;
qpoints->pReadBarrierMarkReg16 = nullptr;
- qpoints->pReadBarrierMarkReg17 = nullptr;
- qpoints->pReadBarrierMarkReg18 = nullptr;
- qpoints->pReadBarrierMarkReg19 = nullptr;
- qpoints->pReadBarrierMarkReg20 = nullptr;
- qpoints->pReadBarrierMarkReg21 = nullptr;
- qpoints->pReadBarrierMarkReg22 = nullptr;
qpoints->pReadBarrierMarkReg23 = nullptr;
qpoints->pReadBarrierMarkReg24 = nullptr;
qpoints->pReadBarrierMarkReg25 = nullptr;
qpoints->pReadBarrierMarkReg26 = nullptr;
qpoints->pReadBarrierMarkReg27 = nullptr;
qpoints->pReadBarrierMarkReg28 = nullptr;
- qpoints->pReadBarrierMarkReg29 = nullptr;
qpoints->pReadBarrierSlow = artReadBarrierSlow;
qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
};
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
index 709cab587c..d668d3ab62 100644
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ b/runtime/arch/mips64/fault_handler_mips64.cc
@@ -35,10 +35,6 @@ extern "C" void art_quick_throw_null_pointer_exception_from_signal();
namespace art {
-void FaultManager::HandleNestedSignal(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
- void* context ATTRIBUTE_UNUSED) {
-}
-
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
index 5757906618..08d0bac2c3 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64.cc
@@ -30,22 +30,52 @@ using android::base::StringPrintf;
Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromVariant(
const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
+ bool msa = true;
if (variant != "default" && variant != "mips64r6") {
LOG(WARNING) << "Unexpected CPU variant for Mips64 using defaults: " << variant;
}
- return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures());
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
}
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap ATTRIBUTE_UNUSED) {
- return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures());
+Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
+ bool msa = (bitmap & kMsaBitfield) != 0;
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
}
Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCppDefines() {
- return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures());
+#if defined(_MIPS_ARCH_MIPS64R6)
+ const bool msa = true;
+#else
+ const bool msa = false;
+#endif
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
}
Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCpuInfo() {
- return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures());
+ // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that
+ // the kernel puts the appropriate feature flags in here. Sometimes it doesn't.
+ bool msa = false;
+
+ std::ifstream in("/proc/cpuinfo");
+ if (!in.fail()) {
+ while (!in.eof()) {
+ std::string line;
+ std::getline(in, line);
+ if (!in.eof()) {
+ LOG(INFO) << "cpuinfo line: " << line;
+ if (line.find("ASEs") != std::string::npos) {
+ LOG(INFO) << "found Application Specific Extensions";
+ if (line.find("msa") != std::string::npos) {
+ msa = true;
+ }
+ }
+ }
+ }
+ in.close();
+ } else {
+ LOG(ERROR) << "Failed to open /proc/cpuinfo";
+ }
+ return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
}
Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromHwcap() {
@@ -62,28 +92,40 @@ bool Mips64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) c
if (kMips64 != other->GetInstructionSet()) {
return false;
}
- return true;
+ const Mips64InstructionSetFeatures* other_as_mips64 = other->AsMips64InstructionSetFeatures();
+ return msa_ == other_as_mips64->msa_;
}
uint32_t Mips64InstructionSetFeatures::AsBitmap() const {
- return 0;
+ return (msa_ ? kMsaBitfield : 0);
}
std::string Mips64InstructionSetFeatures::GetFeatureString() const {
- return "default";
+ std::string result;
+ if (msa_) {
+ result += "msa";
+ } else {
+ result += "-msa";
+ }
+ return result;
}
std::unique_ptr<const InstructionSetFeatures>
Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
const std::vector<std::string>& features, std::string* error_msg) const {
- auto i = features.begin();
- if (i != features.end()) {
- // We don't have any features.
+ bool msa = msa_;
+ for (auto i = features.begin(); i != features.end(); i++) {
std::string feature = android::base::Trim(*i);
- *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
- return nullptr;
+ if (feature == "msa") {
+ msa = true;
+ } else if (feature == "-msa") {
+ msa = false;
+ } else {
+ *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
+ return nullptr;
+ }
}
- return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures());
+ return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures(msa));
}
} // namespace art
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
index c80c466dfc..d9f30c755e 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ b/runtime/arch/mips64/instruction_set_features_mips64.h
@@ -58,6 +58,11 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
std::string GetFeatureString() const OVERRIDE;
+ // Does it have MSA (MIPS SIMD Architecture) support.
+ bool HasMsa() const {
+ return msa_;
+ }
+
virtual ~Mips64InstructionSetFeatures() {}
protected:
@@ -67,9 +72,16 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures {
std::string* error_msg) const OVERRIDE;
private:
- Mips64InstructionSetFeatures() : InstructionSetFeatures() {
+ explicit Mips64InstructionSetFeatures(bool msa) : InstructionSetFeatures(), msa_(msa) {
}
+ // Bitmap positions for encoding features as a bitmap.
+ enum {
+ kMsaBitfield = 1,
+ };
+
+ const bool msa_;
+
DISALLOW_COPY_AND_ASSIGN(Mips64InstructionSetFeatures);
};
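With kMsaBitfield occupying bit 0, the AsBitmap() and FromBitmap() changes in the .cc file above round-trip the single MSA flag. A self-contained sketch of that encoding, with free functions standing in for the member functions:

#include <cassert>
#include <cstdint>

constexpr uint32_t kMsaBitfield = 1;

// cf. Mips64InstructionSetFeatures::AsBitmap().
uint32_t AsBitmap(bool msa) { return msa ? kMsaBitfield : 0u; }
// cf. the test in Mips64InstructionSetFeatures::FromBitmap().
bool MsaFromBitmap(uint32_t bitmap) { return (bitmap & kMsaBitfield) != 0; }

int main() {
  assert(MsaFromBitmap(AsBitmap(true)));    // "msa"  -> bitmap 1
  assert(!MsaFromBitmap(AsBitmap(false)));  // "-msa" -> bitmap 0
  return 0;
}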
diff --git a/runtime/arch/mips64/instruction_set_features_mips64_test.cc b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
index 380c4e5433..0ba0bd4c15 100644
--- a/runtime/arch/mips64/instruction_set_features_mips64_test.cc
+++ b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
@@ -20,15 +20,31 @@
namespace art {
-TEST(Mips64InstructionSetFeaturesTest, Mips64Features) {
+TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromDefaultVariant) {
std::string error_msg;
std::unique_ptr<const InstructionSetFeatures> mips64_features(
InstructionSetFeatures::FromVariant(kMips64, "default", &error_msg));
ASSERT_TRUE(mips64_features.get() != nullptr) << error_msg;
EXPECT_EQ(mips64_features->GetInstructionSet(), kMips64);
EXPECT_TRUE(mips64_features->Equals(mips64_features.get()));
- EXPECT_STREQ("default", mips64_features->GetFeatureString().c_str());
- EXPECT_EQ(mips64_features->AsBitmap(), 0U);
+ EXPECT_STREQ("msa", mips64_features->GetFeatureString().c_str());
+ EXPECT_EQ(mips64_features->AsBitmap(), 1U);
+}
+
+TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromR6Variant) {
+ std::string error_msg;
+ std::unique_ptr<const InstructionSetFeatures> mips64r6_features(
+ InstructionSetFeatures::FromVariant(kMips64, "mips64r6", &error_msg));
+ ASSERT_TRUE(mips64r6_features.get() != nullptr) << error_msg;
+ EXPECT_EQ(mips64r6_features->GetInstructionSet(), kMips64);
+ EXPECT_TRUE(mips64r6_features->Equals(mips64r6_features.get()));
+ EXPECT_STREQ("msa", mips64r6_features->GetFeatureString().c_str());
+ EXPECT_EQ(mips64r6_features->AsBitmap(), 1U);
+
+ std::unique_ptr<const InstructionSetFeatures> mips64_default_features(
+ InstructionSetFeatures::FromVariant(kMips64, "default", &error_msg));
+ ASSERT_TRUE(mips64_default_features.get() != nullptr) << error_msg;
+ EXPECT_TRUE(mips64r6_features->Equals(mips64_default_features.get()));
}
} // namespace art
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 28d7c77938..9402232996 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1715,6 +1715,7 @@ ENTRY art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
# Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
+ move $a0, $t0 # Load interface method.
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline
@@ -1817,6 +1818,13 @@ ENTRY art_quick_to_interpreter_bridge
DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge
+ .extern artInvokeObsoleteMethod
+ENTRY art_invoke_obsolete_method_stub
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+ jal artInvokeObsoleteMethod # (Method* method, Thread* self)
+ move $a1, rSELF # pass Thread::Current
+END art_invoke_obsolete_method_stub
+
/*
* Routine that intercepts method calls and returns.
*/
@@ -2052,6 +2060,180 @@ ENTRY_NO_GP art_quick_indexof
#endif
END art_quick_indexof
+ /*
+ * Create a function `name` calling the ReadBarrier::Mark routine,
+ * getting its argument and returning its result through register
+ * `reg`, saving and restoring all caller-save registers.
+ */
+.macro READ_BARRIER_MARK_REG name, reg
+ENTRY \name
+ /* TODO: optimizations: mark bit, forwarding. */
+ daddiu $sp, $sp, -320
+ .cfi_adjust_cfa_offset 320
+
+ sd $ra, 312($sp)
+ .cfi_rel_offset 31, 312
+ sd $t8, 304($sp) # save t8 holding caller's gp
+ .cfi_rel_offset 24, 304
+ sd $t3, 296($sp)
+ .cfi_rel_offset 15, 296
+ sd $t2, 288($sp)
+ .cfi_rel_offset 14, 288
+ sd $t1, 280($sp)
+ .cfi_rel_offset 13, 280
+ sd $t0, 272($sp)
+ .cfi_rel_offset 12, 272
+ sd $a7, 264($sp)
+ .cfi_rel_offset 11, 264
+ sd $a6, 256($sp)
+ .cfi_rel_offset 10, 256
+ sd $a5, 248($sp)
+ .cfi_rel_offset 9, 248
+ sd $a4, 240($sp)
+ .cfi_rel_offset 8, 240
+ sd $a3, 232($sp)
+ .cfi_rel_offset 7, 232
+ sd $a2, 224($sp)
+ .cfi_rel_offset 6, 224
+ sd $a1, 216($sp)
+ .cfi_rel_offset 5, 216
+ sd $a0, 208($sp)
+ .cfi_rel_offset 4, 208
+ sd $v1, 200($sp)
+ .cfi_rel_offset 3, 200
+ sd $v0, 192($sp)
+ .cfi_rel_offset 2, 192
+
+ dla $t9, artReadBarrierMark
+
+ sdc1 $f23, 184($sp)
+ sdc1 $f22, 176($sp)
+ sdc1 $f21, 168($sp)
+ sdc1 $f20, 160($sp)
+ sdc1 $f19, 152($sp)
+ sdc1 $f18, 144($sp)
+ sdc1 $f17, 136($sp)
+ sdc1 $f16, 128($sp)
+ sdc1 $f15, 120($sp)
+ sdc1 $f14, 112($sp)
+ sdc1 $f13, 104($sp)
+ sdc1 $f12, 96($sp)
+ sdc1 $f11, 88($sp)
+ sdc1 $f10, 80($sp)
+ sdc1 $f9, 72($sp)
+ sdc1 $f8, 64($sp)
+ sdc1 $f7, 56($sp)
+ sdc1 $f6, 48($sp)
+ sdc1 $f5, 40($sp)
+ sdc1 $f4, 32($sp)
+ sdc1 $f3, 24($sp)
+ sdc1 $f2, 16($sp)
+ sdc1 $f1, 8($sp)
+
+ .ifnc \reg, $a0
+ move $a0, \reg # pass obj from `reg` in a0
+ .endif
+ jalr $t9 # v0 <- artReadBarrierMark(obj)
+ sdc1 $f0, 0($sp) # in delay slot
+
+ ld $ra, 312($sp)
+ .cfi_restore 31
+ ld $t8, 304($sp) # restore t8 holding caller's gp
+ .cfi_restore 24
+ ld $t3, 296($sp)
+ .cfi_restore 15
+ ld $t2, 288($sp)
+ .cfi_restore 14
+ ld $t1, 280($sp)
+ .cfi_restore 13
+ ld $t0, 272($sp)
+ .cfi_restore 12
+ ld $a7, 264($sp)
+ .cfi_restore 11
+ ld $a6, 256($sp)
+ .cfi_restore 10
+ ld $a5, 248($sp)
+ .cfi_restore 9
+ ld $a4, 240($sp)
+ .cfi_restore 8
+ ld $a3, 232($sp)
+ .cfi_restore 7
+ ld $a2, 224($sp)
+ .cfi_restore 6
+ ld $a1, 216($sp)
+ .cfi_restore 5
+ ld $a0, 208($sp)
+ .cfi_restore 4
+ ld $v1, 200($sp)
+ .cfi_restore 3
+
+ .ifnc \reg, $v0
+ move \reg, $v0 # `reg` <- v0
+ ld $v0, 192($sp)
+ .cfi_restore 2
+ .endif
+
+ ldc1 $f23, 184($sp)
+ ldc1 $f22, 176($sp)
+ ldc1 $f21, 168($sp)
+ ldc1 $f20, 160($sp)
+ ldc1 $f19, 152($sp)
+ ldc1 $f18, 144($sp)
+ ldc1 $f17, 136($sp)
+ ldc1 $f16, 128($sp)
+ ldc1 $f15, 120($sp)
+ ldc1 $f14, 112($sp)
+ ldc1 $f13, 104($sp)
+ ldc1 $f12, 96($sp)
+ ldc1 $f11, 88($sp)
+ ldc1 $f10, 80($sp)
+ ldc1 $f9, 72($sp)
+ ldc1 $f8, 64($sp)
+ ldc1 $f7, 56($sp)
+ ldc1 $f6, 48($sp)
+ ldc1 $f5, 40($sp)
+ ldc1 $f4, 32($sp)
+ ldc1 $f3, 24($sp)
+ ldc1 $f2, 16($sp)
+ ldc1 $f1, 8($sp)
+ ldc1 $f0, 0($sp)
+
+ .cpreturn # restore caller's gp from t8
+ jalr $zero, $ra
+ daddiu $sp, $sp, 320
+ .cfi_adjust_cfa_offset -320
+END \name
+.endm
+
+// Note that art_quick_read_barrier_mark_regXX corresponds to register XX+1.
+// ZERO (register 0) is reserved.
+// AT (register 1) is reserved as a temporary/scratch register.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, $v0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, $v1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, $a0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, $a1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, $a2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, $a3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, $a4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, $a5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, $a6
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, $a7
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, $t0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, $t1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, $t2
+// T3 (register 15) is reserved as a temporary/scratch register.
+// S0 and S1 (registers 16 and 17) are reserved as suspended and thread registers.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, $s2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, $s3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, $s4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, $s5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, $s6
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, $s7
+// T8 and T9 (registers 24 and 25) are reserved as temporary/scratch registers.
+// K0, K1, GP, SP (registers 26 - 29) are reserved.
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
+// RA (register 31) is reserved.
+
.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
SETUP_SAVE_REFS_AND_ARGS_FRAME
diff --git a/runtime/arch/mips64/registers_mips64.cc b/runtime/arch/mips64/registers_mips64.cc
index 495920809f..1ee2cdd204 100644
--- a/runtime/arch/mips64/registers_mips64.cc
+++ b/runtime/arch/mips64/registers_mips64.cc
@@ -46,5 +46,14 @@ std::ostream& operator<<(std::ostream& os, const FpuRegister& rhs) {
return os;
}
+std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs) {
+ if (rhs >= W0 && rhs < kNumberOfVectorRegisters) {
+ os << "w" << static_cast<int>(rhs);
+ } else {
+ os << "VectorRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
} // namespace mips64
} // namespace art
diff --git a/runtime/arch/mips64/registers_mips64.h b/runtime/arch/mips64/registers_mips64.h
index 81fae72b44..30de2cc009 100644
--- a/runtime/arch/mips64/registers_mips64.h
+++ b/runtime/arch/mips64/registers_mips64.h
@@ -107,6 +107,45 @@ enum FpuRegister {
};
std::ostream& operator<<(std::ostream& os, const FpuRegister& rhs);
+// Values for vector registers.
+enum VectorRegister {
+ W0 = 0,
+ W1 = 1,
+ W2 = 2,
+ W3 = 3,
+ W4 = 4,
+ W5 = 5,
+ W6 = 6,
+ W7 = 7,
+ W8 = 8,
+ W9 = 9,
+ W10 = 10,
+ W11 = 11,
+ W12 = 12,
+ W13 = 13,
+ W14 = 14,
+ W15 = 15,
+ W16 = 16,
+ W17 = 17,
+ W18 = 18,
+ W19 = 19,
+ W20 = 20,
+ W21 = 21,
+ W22 = 22,
+ W23 = 23,
+ W24 = 24,
+ W25 = 25,
+ W26 = 26,
+ W27 = 27,
+ W28 = 28,
+ W29 = 29,
+ W30 = 30,
+ W31 = 31,
+ kNumberOfVectorRegisters = 32,
+ kNoVectorRegister = -1,
+};
+std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs);
+
} // namespace mips64
} // namespace art
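The new VectorRegister enum names the 32 MSA vector registers W0-W31, and the streaming operator prints in-range values as "wN" with a bracketed fallback otherwise. A trimmed, standalone sketch of that printing convention (the enum is abbreviated here so the example compiles outside the runtime):

    #include <iostream>

    // Valid MSA registers print as "wN"; out-of-range values fall back to "VectorRegister[N]".
    enum VectorRegister { W0 = 0, W5 = 5, kNumberOfVectorRegisters = 32, kNoVectorRegister = -1 };

    std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs) {
      if (rhs >= W0 && rhs < kNumberOfVectorRegisters) {
        os << "w" << static_cast<int>(rhs);
      } else {
        os << "VectorRegister[" << static_cast<int>(rhs) << "]";
      }
      return os;
    }

    int main() {
      std::cout << W5 << " " << kNoVectorRegister << std::endl;  // prints: w5 VectorRegister[-1]
      return 0;
    }
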
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index a4d6bb4444..f407ebf1d1 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -75,12 +75,6 @@ extern "C" void art_quick_throw_null_pointer_exception_from_signal();
extern "C" void art_quick_throw_stack_overflow();
extern "C" void art_quick_test_suspend();
-// Note this is different from the others (no underscore on 64 bit mac) due to
-// the way the symbol is defined in the .S file.
-// TODO: fix the symbols for 64 bit mac - there is a double underscore prefix for some
-// of them.
-extern "C" void art_nested_signal_return();
-
// Get the size of an instruction in bytes.
// Return 0 if the instruction is not handled.
static uint32_t GetInstructionSize(const uint8_t* pc) {
@@ -247,21 +241,6 @@ static uint32_t GetInstructionSize(const uint8_t* pc) {
return pc - startpc;
}
-void FaultManager::HandleNestedSignal(int, siginfo_t*, void* context) {
- // For the Intel architectures we need to go to an assembly language
- // stub. This is because the 32 bit call to longjmp is much different
- // from the 64 bit ABI call and pushing things onto the stack inside this
- // handler was unwieldy and ugly. The use of the stub means we can keep
- // this code the same for both 32 and 64 bit.
-
- Thread* self = Thread::Current();
- CHECK(self != nullptr); // This will cause a SIGABRT if self is null.
-
- struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
- uc->CTX_JMP_BUF = reinterpret_cast<uintptr_t>(*self->GetNestedSignalState());
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_nested_signal_return);
-}
-
void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
ArtMethod** out_method,
uintptr_t* out_return_pc, uintptr_t* out_sp) {
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 8c907e0790..6c0bcc9d88 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1806,6 +1806,7 @@ DEFINE_FUNCTION art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
+ movl %edi, %eax // Load interface method
POP EDI
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END_FUNCTION art_quick_imt_conflict_trampoline
@@ -1937,6 +1938,11 @@ DEFINE_FUNCTION art_quick_to_interpreter_bridge
END_FUNCTION art_quick_to_interpreter_bridge
/*
+ * Called by managed code, saves callee saves and then calls artInvokeObsoleteMethod
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod
+
+ /*
* Routine that intercepts method calls and returns.
*/
DEFINE_FUNCTION art_quick_instrumentation_entry
@@ -2136,19 +2142,6 @@ DEFINE_FUNCTION art_quick_string_compareto
ret
END_FUNCTION art_quick_string_compareto
-// Return from a nested signal:
-// Entry:
-// eax: address of jmp_buf in TLS
-
-DEFINE_FUNCTION art_nested_signal_return
- SETUP_GOT_NOSAVE ebx // sets %ebx for call into PLT
- movl LITERAL(1), %ecx
- PUSH ecx // second arg to longjmp (1)
- PUSH eax // first arg to longjmp (jmp_buf)
- call PLT_SYMBOL(longjmp)
- UNREACHABLE
-END_FUNCTION art_nested_signal_return
-
// Create a function `name` calling the ReadBarrier::Mark routine,
// getting its argument and returning its result through register
// `reg`, saving and restoring all caller-save registers.
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index f1be52eeb6..8e2acab3eb 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1662,6 +1662,7 @@ DEFINE_FUNCTION art_quick_imt_conflict_trampoline
.Lconflict_trampoline:
// Call the runtime stub to populate the ImtConflictTable and jump to the
// resolved method.
+ movq %r10, %rdi // Load interface method
INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
#endif // __APPLE__
END_FUNCTION art_quick_imt_conflict_trampoline
@@ -1901,6 +1902,12 @@ DEFINE_FUNCTION art_quick_to_interpreter_bridge
END_FUNCTION art_quick_to_interpreter_bridge
/*
+ * Called to catch an attempt to invoke an obsolete method.
+ * RDI = method being called.
+ */
+ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod
+
+ /*
* Routine that intercepts method calls and returns.
*/
DEFINE_FUNCTION art_quick_instrumentation_entry
@@ -2099,18 +2106,6 @@ DEFINE_FUNCTION art_quick_instance_of
ret
END_FUNCTION art_quick_instance_of
-
-// Return from a nested signal:
-// Entry:
-// rdi: address of jmp_buf in TLS
-
-DEFINE_FUNCTION art_nested_signal_return
- // first arg to longjmp is already in correct register
- movq LITERAL(1), %rsi // second arg to longjmp (1)
- call PLT_SYMBOL(longjmp)
- UNREACHABLE
-END_FUNCTION art_nested_signal_return
-
// Create a function `name` calling the ReadBarrier::Mark routine,
// getting its argument and returning its result through register
// `reg`, saving and restoring all caller-save registers.
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 75dd981136..666ed8a868 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -47,6 +47,10 @@ class ArtField FINAL {
void SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_class)
REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::CompressedReference<mirror::Object>* GetDeclaringClassAddressWithoutBarrier() {
+ return declaring_class_.AddressWithoutBarrier();
+ }
+
uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 685e26c78d..5cf0e0f90c 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -32,6 +32,7 @@
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
+#include "mirror/string.h"
#include "oat.h"
#include "obj_ptr-inl.h"
#include "quick/quick_method_frame_info.h"
@@ -56,8 +57,10 @@ inline mirror::Class* ArtMethod::GetDeclaringClass() {
if (!IsRuntimeMethod()) {
CHECK(result != nullptr) << this;
if (kCheckDeclaringClassState) {
- CHECK(result->IsIdxLoaded() || result->IsErroneous())
- << result->GetStatus() << " " << result->PrettyClass();
+ if (!(result->IsIdxLoaded() || result->IsErroneous())) {
+ LOG(FATAL_WITHOUT_ABORT) << "Class status: " << result->GetStatus();
+ LOG(FATAL) << result->PrettyClass();
+ }
}
} else {
CHECK(result == nullptr) << this;
@@ -347,7 +350,11 @@ inline const char* ArtMethod::GetDeclaringClassSourceFile() {
inline uint16_t ArtMethod::GetClassDefIndex() {
DCHECK(!IsProxyMethod());
- return GetDeclaringClass()->GetDexClassDefIndex();
+ if (LIKELY(!IsObsolete())) {
+ return GetDeclaringClass()->GetDexClassDefIndex();
+ } else {
+ return FindObsoleteDexClassDefIndex();
+ }
}
inline const DexFile::ClassDef& ArtMethod::GetClassDef() {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 9d74e7c92b..5a71be6eb9 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -104,6 +104,16 @@ mirror::DexCache* ArtMethod::GetObsoleteDexCache() {
UNREACHABLE();
}
+uint16_t ArtMethod::FindObsoleteDexClassDefIndex() {
+ DCHECK(!Runtime::Current()->IsAotCompiler()) << PrettyMethod();
+ DCHECK(IsObsolete());
+ const DexFile* dex_file = GetDexFile();
+ const dex::TypeIndex declaring_class_type = dex_file->GetMethodId(GetDexMethodIndex()).class_idx_;
+ const DexFile::ClassDef* class_def = dex_file->FindClassDef(declaring_class_type);
+ CHECK(class_def != nullptr);
+ return dex_file->GetIndexForClassDef(*class_def);
+}
+
mirror::String* ArtMethod::GetNameAsString(Thread* self) {
CHECK(!IsProxyMethod());
StackHandleScope<1> hs(self);
@@ -327,7 +337,8 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
// Ensure that we won't be accidentally calling quick compiled code when -Xint.
if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) {
CHECK(!runtime->UseJitCompilation());
- const void* oat_quick_code = (IsNative() || !IsInvokable() || IsProxyMethod())
+ const void* oat_quick_code =
+ (IsNative() || !IsInvokable() || IsProxyMethod() || IsObsolete())
? nullptr
: GetOatMethodQuickCode(runtime->GetClassLinker()->GetImagePointerSize());
CHECK(oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode())
diff --git a/runtime/art_method.h b/runtime/art_method.h
index cd1950c0e2..51b65760a1 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -73,6 +73,10 @@ class ArtMethod FINAL {
ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked()
REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::CompressedReference<mirror::Object>* GetDeclaringClassAddressWithoutBarrier() {
+ return declaring_class_.AddressWithoutBarrier();
+ }
+
void SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_class)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -691,7 +695,7 @@ class ArtMethod FINAL {
// Pointer to JNI function registered to this method, or a function to resolve the JNI function,
// or the profiling data for non-native methods, or an ImtConflictTable, or the
- // single-implementation of an abstract method.
+ // single-implementation of an abstract/interface method.
void* data_;
// Method dispatch from quick compiled code invokes this pointer which may cause bridging into
@@ -700,6 +704,8 @@ class ArtMethod FINAL {
} ptr_sized_fields_;
private:
+ uint16_t FindObsoleteDexClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_);
+
bool IsAnnotatedWith(jclass klass, uint32_t visibility);
static constexpr size_t PtrSizedFieldsOffset(PointerSize pointer_size) {
diff --git a/runtime/backtrace_helper.h b/runtime/backtrace_helper.h
new file mode 100644
index 0000000000..ace118c50b
--- /dev/null
+++ b/runtime/backtrace_helper.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BACKTRACE_HELPER_H_
+#define ART_RUNTIME_BACKTRACE_HELPER_H_
+
+#include <unwind.h>
+
+namespace art {
+
+// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
+class BacktraceCollector {
+ public:
+ BacktraceCollector(uintptr_t* out_frames, size_t max_depth, size_t skip_count)
+ : out_frames_(out_frames), max_depth_(max_depth), skip_count_(skip_count) {}
+
+ size_t NumFrames() const {
+ return num_frames_;
+ }
+
+ // Collect the backtrace, do not call more than once.
+ void Collect() {
+ _Unwind_Backtrace(&Callback, this);
+ }
+
+ private:
+ static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
+ auto* const state = reinterpret_cast<BacktraceCollector*>(arg);
+ const uintptr_t ip = _Unwind_GetIP(context);
+ // The first frames belong to the unwinder and to Collect() itself; skip the requested count.
+ if (ip != 0 && state->skip_count_ > 0) {
+ --state->skip_count_;
+ return _URC_NO_REASON;
+ }
+ // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
+ state->out_frames_[state->num_frames_] = ip;
+ state->num_frames_++;
+ return state->num_frames_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
+ }
+
+ uintptr_t* const out_frames_ = nullptr;
+ size_t num_frames_ = 0u;
+ const size_t max_depth_ = 0u;
+ size_t skip_count_ = 0u;
+};
+
+// A bounded sized backtrace.
+template <size_t kMaxFrames>
+class FixedSizeBacktrace {
+ public:
+ void Collect(size_t skip_count) {
+ BacktraceCollector collector(frames_, kMaxFrames, skip_count);
+ collector.Collect();
+ num_frames_ = collector.NumFrames();
+ }
+
+ uint64_t Hash() const {
+ uint64_t hash = 9314237;
+ for (size_t i = 0; i < num_frames_; ++i) {
+ hash = hash * 2654435761 + frames_[i];
+ hash += (hash >> 13) ^ (hash << 6);
+ }
+ return hash;
+ }
+
+ private:
+ uintptr_t frames_[kMaxFrames];
+ size_t num_frames_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_BACKTRACE_HELPER_H_
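FixedSizeBacktrace bundles collection and hashing so call sites can identify or deduplicate call stacks cheaply. A minimal usage sketch, assuming the header is included from runtime code; CurrentStackHash is a hypothetical helper, and the skip count of 1 drops its own frame:

    #include "backtrace_helper.h"

    // Hypothetical helper: returns a hash identifying the current call stack,
    // capped at 16 frames and skipping this function's own frame.
    static uint64_t CurrentStackHash() {
      art::FixedSizeBacktrace<16> backtrace;
      backtrace.Collect(/*skip_count=*/ 1u);
      return backtrace.Hash();
    }
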
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index db433194d3..e763e439b9 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -15,6 +15,7 @@
*/
#include <algorithm>
+#include <cstddef>
#include <iomanip>
#include <numeric>
@@ -27,7 +28,7 @@
namespace art {
-static constexpr size_t kMemoryToolRedZoneBytes = 8;
+constexpr size_t kMemoryToolRedZoneBytes = 8;
constexpr size_t Arena::kDefaultSize;
template <bool kCount>
@@ -165,26 +166,78 @@ void ArenaAllocatorMemoryTool::DoMakeInaccessible(void* ptr, size_t size) {
MEMORY_TOOL_MAKE_NOACCESS(ptr, size);
}
-Arena::Arena() : bytes_allocated_(0), next_(nullptr) {
+Arena::Arena() : bytes_allocated_(0), memory_(nullptr), size_(0), next_(nullptr) {
}
+class MallocArena FINAL : public Arena {
+ public:
+ explicit MallocArena(size_t size = Arena::kDefaultSize);
+ virtual ~MallocArena();
+ private:
+ static constexpr size_t RequiredOverallocation() {
+ return (alignof(std::max_align_t) < ArenaAllocator::kArenaAlignment)
+ ? ArenaAllocator::kArenaAlignment - alignof(std::max_align_t)
+ : 0u;
+ }
+
+ uint8_t* unaligned_memory_;
+};
+
MallocArena::MallocArena(size_t size) {
- memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
- CHECK(memory_ != nullptr); // Abort on OOM.
- DCHECK_ALIGNED(memory_, ArenaAllocator::kAlignment);
+ // We need to guarantee kArenaAlignment aligned allocation for the new arena.
+ // TODO: Use std::aligned_alloc() when it becomes available with C++17.
+ constexpr size_t overallocation = RequiredOverallocation();
+ unaligned_memory_ = reinterpret_cast<uint8_t*>(calloc(1, size + overallocation));
+ CHECK(unaligned_memory_ != nullptr); // Abort on OOM.
+ DCHECK_ALIGNED(unaligned_memory_, alignof(std::max_align_t));
+ if (overallocation == 0u) {
+ memory_ = unaligned_memory_;
+ } else {
+ memory_ = AlignUp(unaligned_memory_, ArenaAllocator::kArenaAlignment);
+ if (UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+ size_t head = memory_ - unaligned_memory_;
+ size_t tail = overallocation - head;
+ MEMORY_TOOL_MAKE_NOACCESS(unaligned_memory_, head);
+ MEMORY_TOOL_MAKE_NOACCESS(memory_ + size, tail);
+ }
+ }
+ DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
size_ = size;
}
MallocArena::~MallocArena() {
- free(reinterpret_cast<void*>(memory_));
+ constexpr size_t overallocation = RequiredOverallocation();
+ if (overallocation != 0u && UNLIKELY(RUNNING_ON_MEMORY_TOOL > 0)) {
+ size_t head = memory_ - unaligned_memory_;
+ size_t tail = overallocation - head;
+ MEMORY_TOOL_MAKE_UNDEFINED(unaligned_memory_, head);
+ MEMORY_TOOL_MAKE_UNDEFINED(memory_ + size_, tail);
+ }
+ free(reinterpret_cast<void*>(unaligned_memory_));
}
+class MemMapArena FINAL : public Arena {
+ public:
+ MemMapArena(size_t size, bool low_4gb, const char* name);
+ virtual ~MemMapArena();
+ void Release() OVERRIDE;
+
+ private:
+ std::unique_ptr<MemMap> map_;
+};
+
MemMapArena::MemMapArena(size_t size, bool low_4gb, const char* name) {
+ // Round up to a full page as that's the smallest unit of allocation for mmap()
+ // and we want to be able to use all memory that we actually allocate.
+ size = RoundUp(size, kPageSize);
std::string error_msg;
map_.reset(MemMap::MapAnonymous(
name, nullptr, size, PROT_READ | PROT_WRITE, low_4gb, false, &error_msg));
CHECK(map_.get() != nullptr) << error_msg;
memory_ = map_->Begin();
+ static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
+ "Arena should not need stronger alignment than kPageSize.");
+ DCHECK_ALIGNED(memory_, ArenaAllocator::kArenaAlignment);
size_ = map_->Size();
}
@@ -332,21 +385,32 @@ void* ArenaAllocator::AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind) {
ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
uint8_t* ret;
if (UNLIKELY(rounded_bytes > static_cast<size_t>(end_ - ptr_))) {
- ret = AllocFromNewArena(rounded_bytes);
- uint8_t* noaccess_begin = ret + bytes;
- uint8_t* noaccess_end;
- if (ret == arena_head_->Begin()) {
- DCHECK(ptr_ - rounded_bytes == ret);
- noaccess_end = end_;
- } else {
- // We're still using the old arena but `ret` comes from a new one just after it.
- DCHECK(arena_head_->next_ != nullptr);
- DCHECK(ret == arena_head_->next_->Begin());
- DCHECK_EQ(rounded_bytes, arena_head_->next_->GetBytesAllocated());
- noaccess_end = arena_head_->next_->End();
- }
- MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, noaccess_end - noaccess_begin);
+ ret = AllocFromNewArenaWithMemoryTool(rounded_bytes);
+ } else {
+ ret = ptr_;
+ ptr_ += rounded_bytes;
+ }
+ MEMORY_TOOL_MAKE_DEFINED(ret, bytes);
+ // Check that the memory is already zeroed out.
+ DCHECK(std::all_of(ret, ret + bytes, [](uint8_t val) { return val == 0u; }));
+ return ret;
+}
+
+void* ArenaAllocator::AllocWithMemoryToolAlign16(size_t bytes, ArenaAllocKind kind) {
+ // We mark all memory for a newly retrieved arena as inaccessible and then
+ // mark only the actually allocated memory as defined. That leaves red zones
+ // and padding between allocations marked as inaccessible.
+ size_t rounded_bytes = bytes + kMemoryToolRedZoneBytes;
+ DCHECK_ALIGNED(rounded_bytes, 8); // `bytes` is 16-byte aligned, red zone is 8-byte aligned.
+ uintptr_t padding =
+ RoundUp(reinterpret_cast<uintptr_t>(ptr_), 16) - reinterpret_cast<uintptr_t>(ptr_);
+ ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
+ uint8_t* ret;
+ if (UNLIKELY(padding + rounded_bytes > static_cast<size_t>(end_ - ptr_))) {
+ static_assert(kArenaAlignment >= 16, "Expecting sufficient alignment for new Arena.");
+ ret = AllocFromNewArenaWithMemoryTool(rounded_bytes);
} else {
+ ptr_ += padding; // Leave padding inaccessible.
ret = ptr_;
ptr_ += rounded_bytes;
}
@@ -386,6 +450,24 @@ uint8_t* ArenaAllocator::AllocFromNewArena(size_t bytes) {
return new_arena->Begin();
}
+uint8_t* ArenaAllocator::AllocFromNewArenaWithMemoryTool(size_t bytes) {
+ uint8_t* ret = AllocFromNewArena(bytes);
+ uint8_t* noaccess_begin = ret + bytes;
+ uint8_t* noaccess_end;
+ if (ret == arena_head_->Begin()) {
+ DCHECK(ptr_ - bytes == ret);
+ noaccess_end = end_;
+ } else {
+ // We're still using the old arena but `ret` comes from a new one just after it.
+ DCHECK(arena_head_->next_ != nullptr);
+ DCHECK(ret == arena_head_->next_->Begin());
+ DCHECK_EQ(bytes, arena_head_->next_->GetBytesAllocated());
+ noaccess_end = arena_head_->next_->End();
+ }
+ MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, noaccess_end - noaccess_begin);
+ return ret;
+}
+
bool ArenaAllocator::Contains(const void* ptr) const {
if (ptr >= begin_ && ptr < end_) {
return true;
@@ -398,7 +480,9 @@ bool ArenaAllocator::Contains(const void* ptr) const {
return false;
}
-MemStats::MemStats(const char* name, const ArenaAllocatorStats* stats, const Arena* first_arena,
+MemStats::MemStats(const char* name,
+ const ArenaAllocatorStats* stats,
+ const Arena* first_arena,
ssize_t lost_bytes_adjustment)
: name_(name),
stats_(stats),
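MallocArena now over-allocates by RequiredOverallocation() bytes whenever calloc's natural alignment (alignof(std::max_align_t)) is weaker than ArenaAllocator::kArenaAlignment, bumps memory_ up to the next 16-byte boundary, and keeps unaligned_memory_ around for free(); under a memory tool the skipped head and tail are poisoned. A standalone sketch of that arithmetic with assumed example values (8-byte malloc alignment, a calloc result of 0x1008):

    #include <cstdint>
    #include <cstdio>

    // Illustration of the alignment math used above: over-allocate by
    // (kArenaAlignment - malloc alignment) when needed, then round the
    // returned pointer up to the arena alignment.
    int main() {
      constexpr std::size_t kArenaAlignment = 16;
      constexpr std::size_t kMallocAlignment = 8;  // assumed, e.g. 32-bit targets
      constexpr std::size_t kOverallocation =
          kMallocAlignment < kArenaAlignment ? kArenaAlignment - kMallocAlignment : 0;

      std::uintptr_t unaligned = 0x1008;  // hypothetical calloc() result, only 8-aligned
      std::uintptr_t aligned = (unaligned + kArenaAlignment - 1) & ~(kArenaAlignment - 1);
      std::size_t head = aligned - unaligned;     // 8 bytes skipped at the front
      std::size_t tail = kOverallocation - head;  // 0 bytes left over at the back

      std::printf("overallocation=%zu aligned=%#zx head=%zu tail=%zu\n",
                  kOverallocation, static_cast<std::size_t>(aligned), head, tail);
      return 0;
    }
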
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 245ab3b24f..c39429ce06 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -34,7 +34,6 @@ class ArenaPool;
class ArenaAllocator;
class ArenaStack;
class ScopedArenaAllocator;
-class MemMap;
class MemStats;
template <typename T>
@@ -89,6 +88,7 @@ enum ArenaAllocKind {
kArenaAllocRegisterAllocator,
kArenaAllocRegisterAllocatorValidate,
kArenaAllocStackMapStream,
+ kArenaAllocVectorNode,
kArenaAllocCodeGenerator,
kArenaAllocAssembler,
kArenaAllocParallelMoveResolver,
@@ -243,22 +243,6 @@ class Arena {
DISALLOW_COPY_AND_ASSIGN(Arena);
};
-class MallocArena FINAL : public Arena {
- public:
- explicit MallocArena(size_t size = Arena::kDefaultSize);
- virtual ~MallocArena();
-};
-
-class MemMapArena FINAL : public Arena {
- public:
- MemMapArena(size_t size, bool low_4gb, const char* name);
- virtual ~MemMapArena();
- void Release() OVERRIDE;
-
- private:
- std::unique_ptr<MemMap> map_;
-};
-
class ArenaPool {
public:
explicit ArenaPool(bool use_malloc = true,
@@ -318,8 +302,31 @@ class ArenaAllocator
return ret;
}
+ // Returns zeroed memory.
+ void* AllocAlign16(size_t bytes, ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
+ // It is an error to request 16-byte aligned allocation of unaligned size.
+ DCHECK_ALIGNED(bytes, 16);
+ if (UNLIKELY(IsRunningOnMemoryTool())) {
+ return AllocWithMemoryToolAlign16(bytes, kind);
+ }
+ uintptr_t padding =
+ RoundUp(reinterpret_cast<uintptr_t>(ptr_), 16) - reinterpret_cast<uintptr_t>(ptr_);
+ ArenaAllocatorStats::RecordAlloc(bytes, kind);
+ if (UNLIKELY(padding + bytes > static_cast<size_t>(end_ - ptr_))) {
+ static_assert(kArenaAlignment >= 16, "Expecting sufficient alignment for new Arena.");
+ return AllocFromNewArena(bytes);
+ }
+ ptr_ += padding;
+ uint8_t* ret = ptr_;
+ DCHECK_ALIGNED(ret, 16);
+ ptr_ += bytes;
+ return ret;
+ }
+
// Realloc never frees the input pointer, it is the caller's job to do this if necessary.
- void* Realloc(void* ptr, size_t ptr_size, size_t new_size,
+ void* Realloc(void* ptr,
+ size_t ptr_size,
+ size_t new_size,
ArenaAllocKind kind = kArenaAllocMisc) ALWAYS_INLINE {
DCHECK_GE(new_size, ptr_size);
DCHECK_EQ(ptr == nullptr, ptr_size == 0u);
@@ -370,12 +377,17 @@ class ArenaAllocator
bool Contains(const void* ptr) const;
- static constexpr size_t kAlignment = 8;
+ // The alignment guaranteed for individual allocations.
+ static constexpr size_t kAlignment = 8u;
+
+ // The alignment required for the whole Arena rather than individual allocations.
+ static constexpr size_t kArenaAlignment = 16u;
private:
void* AllocWithMemoryTool(size_t bytes, ArenaAllocKind kind);
+ void* AllocWithMemoryToolAlign16(size_t bytes, ArenaAllocKind kind);
uint8_t* AllocFromNewArena(size_t bytes);
-
+ uint8_t* AllocFromNewArenaWithMemoryTool(size_t bytes);
void UpdateBytesAllocated();
@@ -395,7 +407,9 @@ class ArenaAllocator
class MemStats {
public:
- MemStats(const char* name, const ArenaAllocatorStats* stats, const Arena* first_arena,
+ MemStats(const char* name,
+ const ArenaAllocatorStats* stats,
+ const Arena* first_arena,
ssize_t lost_bytes_adjustment = 0);
void Dump(std::ostream& os) const;
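AllocAlign16 pads the bump pointer up to the next 16-byte boundary before handing out memory and only falls back to a new arena when the padded request no longer fits; the static_assert on kArenaAlignment guarantees a fresh arena starts out 16-byte aligned. A standalone sketch of the padding computation:

    #include <cassert>
    #include <cstdint>

    // Padding needed to bring a bump pointer up to 16-byte alignment; this is
    // the quantity that AllocAlign16() skips before returning `ptr_`.
    static std::uintptr_t PaddingFor16(std::uintptr_t ptr) {
      return ((ptr + 15u) & ~static_cast<std::uintptr_t>(15u)) - ptr;
    }

    int main() {
      assert(PaddingFor16(0x1000) == 0);   // already aligned
      assert(PaddingFor16(0x1001) == 15);  // worst case
      assert(PaddingFor16(0x1008) == 8);
      return 0;
    }
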
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index 4041f5e1ed..f536c72bae 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -27,6 +27,22 @@
namespace art {
+// Like sizeof, but count how many bits a type takes. Pass type explicitly.
+template <typename T>
+constexpr size_t BitSizeOf() {
+ static_assert(std::is_integral<T>::value, "T must be integral");
+ using unsigned_type = typename std::make_unsigned<T>::type;
+ static_assert(sizeof(T) == sizeof(unsigned_type), "Unexpected type size mismatch!");
+ static_assert(std::numeric_limits<unsigned_type>::radix == 2, "Unexpected radix!");
+ return std::numeric_limits<unsigned_type>::digits;
+}
+
+// Like sizeof, but count how many bits a type takes. Infers type from parameter.
+template <typename T>
+constexpr size_t BitSizeOf(T /*x*/) {
+ return BitSizeOf<T>();
+}
+
template<typename T>
constexpr int CLZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
@@ -37,6 +53,14 @@ constexpr int CLZ(T x) {
return (sizeof(T) == sizeof(uint32_t)) ? __builtin_clz(x) : __builtin_clzll(x);
}
+// Similar to CLZ except that on zero input it returns bitwidth and supports signed integers.
+template<typename T>
+constexpr int JAVASTYLE_CLZ(T x) {
+ static_assert(std::is_integral<T>::value, "T must be integral");
+ using unsigned_type = typename std::make_unsigned<T>::type;
+ return (x == 0) ? BitSizeOf<T>() : CLZ(static_cast<unsigned_type>(x));
+}
+
template<typename T>
constexpr int CTZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
@@ -48,12 +72,32 @@ constexpr int CTZ(T x) {
return (sizeof(T) == sizeof(uint32_t)) ? __builtin_ctz(x) : __builtin_ctzll(x);
}
+// Similar to CTZ except that on zero input it returns bitwidth and supports signed integers.
+template<typename T>
+constexpr int JAVASTYLE_CTZ(T x) {
+ static_assert(std::is_integral<T>::value, "T must be integral");
+ using unsigned_type = typename std::make_unsigned<T>::type;
+ return (x == 0) ? BitSizeOf<T>() : CTZ(static_cast<unsigned_type>(x));
+}
+
// Return the number of 1-bits in `x`.
template<typename T>
constexpr int POPCOUNT(T x) {
return (sizeof(T) == sizeof(uint32_t)) ? __builtin_popcount(x) : __builtin_popcountll(x);
}
+// Swap bytes.
+template<typename T>
+constexpr T BSWAP(T x) {
+ if (sizeof(T) == sizeof(uint16_t)) {
+ return __builtin_bswap16(x);
+ } else if (sizeof(T) == sizeof(uint32_t)) {
+ return __builtin_bswap32(x);
+ } else {
+ return __builtin_bswap64(x);
+ }
+}
+
// Find the bit position of the most significant bit (0-based), or -1 if there were no bits set.
template <typename T>
constexpr ssize_t MostSignificantBit(T value) {
@@ -169,22 +213,6 @@ inline bool IsAlignedParam(T* x, int n) {
#define DCHECK_ALIGNED_PARAM(value, alignment) \
DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value)
-// Like sizeof, but count how many bits a type takes. Pass type explicitly.
-template <typename T>
-constexpr size_t BitSizeOf() {
- static_assert(std::is_integral<T>::value, "T must be integral");
- using unsigned_type = typename std::make_unsigned<T>::type;
- static_assert(sizeof(T) == sizeof(unsigned_type), "Unexpected type size mismatch!");
- static_assert(std::numeric_limits<unsigned_type>::radix == 2, "Unexpected radix!");
- return std::numeric_limits<unsigned_type>::digits;
-}
-
-// Like sizeof, but count how many bits a type takes. Infers type from parameter.
-template <typename T>
-constexpr size_t BitSizeOf(T /*x*/) {
- return BitSizeOf<T>();
-}
-
inline uint16_t Low16Bits(uint32_t value) {
return static_cast<uint16_t>(value);
}
@@ -363,6 +391,59 @@ IterationRange<HighToLowBitIterator<T>> HighToLowBits(T bits) {
HighToLowBitIterator<T>(bits), HighToLowBitIterator<T>());
}
+// Returns value with bit set in lowest one-bit position or 0 if 0. (java.lang.X.lowestOneBit).
+template <typename T>
+inline static T LowestOneBitValue(T opnd) {
+ // Hacker's Delight, Section 2-1
+ return opnd & -opnd;
+}
+
+// Returns value with bit set in highest one-bit position or 0 if 0. (java.lang.X.highestOneBit).
+template <typename T>
+inline static T HighestOneBitValue(T opnd) {
+ using unsigned_type = typename std::make_unsigned<T>::type;
+ T res;
+ if (opnd == 0) {
+ res = 0;
+ } else {
+ int bit_position = BitSizeOf<T>() - (CLZ(static_cast<unsigned_type>(opnd)) + 1);
+ res = static_cast<T>(UINT64_C(1) << bit_position);
+ }
+ return res;
+}
+
+// Rotate bits.
+template <typename T, bool left>
+inline static T Rot(T opnd, int distance) {
+ int mask = BitSizeOf<T>() - 1;
+ int unsigned_right_shift = left ? (-distance & mask) : (distance & mask);
+ int signed_left_shift = left ? (distance & mask) : (-distance & mask);
+ using unsigned_type = typename std::make_unsigned<T>::type;
+ return (static_cast<unsigned_type>(opnd) >> unsigned_right_shift) | (opnd << signed_left_shift);
+}
+
+// TUNING: use rbit for arm/arm64
+inline static uint32_t ReverseBits32(uint32_t opnd) {
+ // Hacker's Delight 7-1
+ opnd = ((opnd >> 1) & 0x55555555) | ((opnd & 0x55555555) << 1);
+ opnd = ((opnd >> 2) & 0x33333333) | ((opnd & 0x33333333) << 2);
+ opnd = ((opnd >> 4) & 0x0F0F0F0F) | ((opnd & 0x0F0F0F0F) << 4);
+ opnd = ((opnd >> 8) & 0x00FF00FF) | ((opnd & 0x00FF00FF) << 8);
+ opnd = ((opnd >> 16)) | ((opnd) << 16);
+ return opnd;
+}
+
+// TUNING: use rbit for arm/arm64
+inline static uint64_t ReverseBits64(uint64_t opnd) {
+ // Hacker's Delight 7-1
+ opnd = (opnd & 0x5555555555555555L) << 1 | ((opnd >> 1) & 0x5555555555555555L);
+ opnd = (opnd & 0x3333333333333333L) << 2 | ((opnd >> 2) & 0x3333333333333333L);
+ opnd = (opnd & 0x0f0f0f0f0f0f0f0fL) << 4 | ((opnd >> 4) & 0x0f0f0f0f0f0f0f0fL);
+ opnd = (opnd & 0x00ff00ff00ff00ffL) << 8 | ((opnd >> 8) & 0x00ff00ff00ff00ffL);
+ opnd = (opnd << 48) | ((opnd & 0xffff0000L) << 16) | ((opnd >> 16) & 0xffff0000L) | (opnd >> 48);
+ return opnd;
+}
+
} // namespace art
#endif // ART_RUNTIME_BASE_BIT_UTILS_H_
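The new helpers mirror java.lang.Integer/Long semantics: CLZ/CTZ variants that tolerate zero, lowest/highest one bit, rotation and bit reversal. A few spot checks as a sketch, assuming the runtime include path so "base/bit_utils.h" resolves:

    #include <cassert>
    #include <cstdint>

    #include "base/bit_utils.h"  // assumes the runtime include path

    int main() {
      using namespace art;
      static_assert(JAVASTYLE_CLZ(0) == 32, "CLZ of zero is the bit width");
      static_assert(JAVASTYLE_CTZ(INT64_C(0)) == 64, "CTZ of zero is the bit width");
      assert(LowestOneBitValue(0x30) == 0x10);                 // java.lang.Integer.lowestOneBit
      assert(HighestOneBitValue(0x30) == 0x20);                // java.lang.Integer.highestOneBit
      assert((Rot<int32_t, /*left=*/ true>(0x1, 4)) == 0x10);  // rotate left by 4
      assert(ReverseBits32(0x1u) == 0x80000000u);              // java.lang.Integer.reverse
      return 0;
    }
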
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index ca9a694144..b28eb729d8 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -48,7 +48,8 @@ template <class Value> inline Histogram<Value>::Histogram(const char* name)
: kAdjust(0),
kInitialBucketCount(0),
name_(name),
- max_buckets_(0) {
+ max_buckets_(0),
+ sample_size_(0) {
}
template <class Value>
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 24846e5ceb..b0394a5255 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -46,7 +46,6 @@ Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
-Mutex* Locks::jdwp_event_list_lock_ = nullptr;
Mutex* Locks::jni_function_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
@@ -74,6 +73,7 @@ ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
+Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
struct AllMutexData {
// A guard for all_mutexes_ that's not a mutex (Mutexes must CAS to acquire and busy wait).
@@ -118,6 +118,26 @@ class ScopedAllMutexesLock FINAL {
const BaseMutex* const mutex_;
};
+class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
+ public:
+ explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
+ while (!Locks::expected_mutexes_on_weak_ref_access_guard_.CompareExchangeWeakAcquire(0,
+ mutex)) {
+ NanoSleep(100);
+ }
+ }
+
+ ~ScopedExpectedMutexesOnWeakRefAccessLock() {
+ while (!Locks::expected_mutexes_on_weak_ref_access_guard_.CompareExchangeWeakRelease(mutex_,
+ 0)) {
+ NanoSleep(100);
+ }
+ }
+
+ private:
+ const BaseMutex* const mutex_;
+};
+
// Scoped class that generates events at the beginning and end of lock contention.
class ScopedContentionRecorder FINAL : public ValueObject {
public:
@@ -999,7 +1019,6 @@ void Locks::Init() {
DCHECK(verifier_deps_lock_ != nullptr);
DCHECK(host_dlopen_handles_lock_ != nullptr);
DCHECK(intern_table_lock_ != nullptr);
- DCHECK(jdwp_event_list_lock_ != nullptr);
DCHECK(jni_function_table_lock_ != nullptr);
DCHECK(jni_libraries_lock_ != nullptr);
DCHECK(logging_lock_ != nullptr);
@@ -1042,10 +1061,6 @@ void Locks::Init() {
DCHECK(runtime_shutdown_lock_ == nullptr);
runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kJdwpEventListLock);
- DCHECK(jdwp_event_list_lock_ == nullptr);
- jdwp_event_list_lock_ = new Mutex("JDWP event list lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
DCHECK(profiler_lock_ == nullptr);
profiler_lock_ = new Mutex("profiler lock", current_lock_level);
@@ -1169,14 +1184,9 @@ void Locks::Init() {
#undef UPDATE_CURRENT_LOCK_LEVEL
// List of mutexes that we may hold when accessing a weak ref.
- dex_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
- expected_mutexes_on_weak_ref_access_.push_back(dex_lock_);
- classlinker_classes_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
- expected_mutexes_on_weak_ref_access_.push_back(classlinker_classes_lock_);
- jdwp_event_list_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
- expected_mutexes_on_weak_ref_access_.push_back(jdwp_event_list_lock_);
- jni_libraries_lock_->SetShouldRespondToEmptyCheckpointRequest(true);
- expected_mutexes_on_weak_ref_access_.push_back(jni_libraries_lock_);
+ AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock*/ false);
+ AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock*/ false);
+ AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock*/ false);
InitConditions();
}
@@ -1196,4 +1206,38 @@ bool Locks::IsSafeToCallAbortRacy() {
return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
}
+void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
+ if (need_lock) {
+ ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+ mutex->SetShouldRespondToEmptyCheckpointRequest(true);
+ expected_mutexes_on_weak_ref_access_.push_back(mutex);
+ } else {
+ mutex->SetShouldRespondToEmptyCheckpointRequest(true);
+ expected_mutexes_on_weak_ref_access_.push_back(mutex);
+ }
+}
+
+void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
+ if (need_lock) {
+ ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+ mutex->SetShouldRespondToEmptyCheckpointRequest(false);
+ std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+ auto it = std::find(list.begin(), list.end(), mutex);
+ DCHECK(it != list.end());
+ list.erase(it);
+ } else {
+ mutex->SetShouldRespondToEmptyCheckpointRequest(false);
+ std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+ auto it = std::find(list.begin(), list.end(), mutex);
+ DCHECK(it != list.end());
+ list.erase(it);
+ }
+}
+
+bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
+ ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
+ std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
+ return std::find(list.begin(), list.end(), mutex) != list.end();
+}
+
} // namespace art
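The new guard is not a Mutex at all: like ScopedAllMutexesLock above it, it spins on a single atomic pointer, which avoids introducing a new lock level for a list that is consulted while arbitrary other locks are held. A simplified, standalone sketch of the same CAS spin-guard pattern using std::atomic in place of art::Atomic:

    #include <atomic>
    #include <thread>

    // Simplified sketch: the "lock" is just an atomic owner pointer that is
    // swung from null to the owner on entry and back to null on exit.
    class SpinGuard {
     public:
      explicit SpinGuard(const void* owner) : owner_(owner) {
        const void* expected = nullptr;
        while (!gate_.compare_exchange_weak(expected, owner_, std::memory_order_acquire)) {
          expected = nullptr;         // compare_exchange_weak overwrites `expected` on failure
          std::this_thread::yield();  // stands in for NanoSleep(100)
        }
      }
      ~SpinGuard() {
        const void* expected = owner_;
        while (!gate_.compare_exchange_weak(expected, nullptr, std::memory_order_release)) {
          expected = owner_;
          std::this_thread::yield();
        }
      }

     private:
      static std::atomic<const void*> gate_;
      const void* const owner_;
    };

    std::atomic<const void*> SpinGuard::gate_{nullptr};
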
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index c59664b9cd..2414b5f937 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -62,10 +62,11 @@ enum LockLevel {
kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
+ kMarkSweepMarkStackLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
- kMarkSweepMarkStackLock,
+ kTaggingLockLevel,
kTransactionLogLock,
kJniFunctionTableLock,
kJniWeakGlobalsLock,
@@ -516,12 +517,12 @@ class SCOPED_CAPABILITY MutexLock {
// construction and releases it upon destruction.
class SCOPED_CAPABILITY ReaderMutexLock {
public:
- ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) :
+ ReaderMutexLock(Thread* self, ReaderWriterMutex& mu) ACQUIRE(mu) ALWAYS_INLINE :
self_(self), mu_(mu) {
mu_.SharedLock(self_);
}
- ~ReaderMutexLock() RELEASE() {
+ ~ReaderMutexLock() RELEASE() ALWAYS_INLINE {
mu_.SharedUnlock(self_);
}
@@ -583,6 +584,12 @@ class Locks {
// Checks for whether it is safe to call Abort() without using locks.
static bool IsSafeToCallAbortRacy() NO_THREAD_SAFETY_ANALYSIS;
+ // Add a mutex to expected_mutexes_on_weak_ref_access_.
+ static void AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
+ // Remove a mutex from expected_mutexes_on_weak_ref_access_.
+ static void RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock = true);
+ // Check if the given mutex is in expected_mutexes_on_weak_ref_access_.
+ static bool IsExpectedOnWeakRefAccess(BaseMutex* mutex);
// Guards allocation entrypoint instrumenting.
static Mutex* instrument_entrypoints_lock_;
@@ -630,12 +637,8 @@ class Locks {
// Guards shutdown of the runtime.
static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
- static Mutex* jdwp_event_list_lock_
- ACQUIRED_AFTER(runtime_shutdown_lock_)
- ACQUIRED_BEFORE(breakpoint_lock_);
-
// Guards background profiler global state.
- static Mutex* profiler_lock_ ACQUIRED_AFTER(jdwp_event_list_lock_);
+ static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
// Guards trace (ie traceview) requests.
static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
@@ -738,6 +741,8 @@ class Locks {
// encounter an unexpected mutex on accessing weak refs,
// Thread::CheckEmptyCheckpointFromWeakRefAccess will detect it.
static std::vector<BaseMutex*> expected_mutexes_on_weak_ref_access_;
+ static Atomic<const BaseMutex*> expected_mutexes_on_weak_ref_access_guard_;
+ class ScopedExpectedMutexesOnWeakRefAccessLock;
};
class Roles {
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 55044b34e5..1a0eb5ea07 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -39,8 +39,6 @@ enum class ArenaFreeTag : uint8_t {
kFree,
};
-static constexpr size_t kArenaAlignment = 8;
-
// Holds a list of Arenas for use by ScopedArenaAllocator stack.
// The memory is returned to the ArenaPool when the ArenaStack is destroyed.
class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryTool {
@@ -67,6 +65,9 @@ class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryToo
return *(reinterpret_cast<ArenaFreeTag*>(ptr) - 1);
}
+ // The alignment guaranteed for individual allocations.
+ static constexpr size_t kAlignment = 8u;
+
private:
struct Peak;
struct Current;
@@ -89,8 +90,8 @@ class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryToo
if (UNLIKELY(IsRunningOnMemoryTool())) {
return AllocWithMemoryTool(bytes, kind);
}
- // Add kArenaAlignment for the free or used tag. Required to preserve alignment.
- size_t rounded_bytes = RoundUp(bytes + (kIsDebugBuild ? kArenaAlignment : 0u), kArenaAlignment);
+ // Add kAlignment for the free or used tag. Required to preserve alignment.
+ size_t rounded_bytes = RoundUp(bytes + (kIsDebugBuild ? kAlignment : 0u), kAlignment);
uint8_t* ptr = top_ptr_;
if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
ptr = AllocateFromNextArena(rounded_bytes);
@@ -98,7 +99,7 @@ class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryToo
CurrentStats()->RecordAlloc(bytes, kind);
top_ptr_ = ptr + rounded_bytes;
if (kIsDebugBuild) {
- ptr += kArenaAlignment;
+ ptr += kAlignment;
ArenaTagForAllocation(ptr) = ArenaFreeTag::kUsed;
}
return ptr;
diff --git a/runtime/base/scoped_flock.cc b/runtime/base/scoped_flock.cc
index d4bb56b62a..5394e53fa3 100644
--- a/runtime/base/scoped_flock.cc
+++ b/runtime/base/scoped_flock.cc
@@ -116,7 +116,10 @@ ScopedFlock::ScopedFlock() { }
ScopedFlock::~ScopedFlock() {
if (file_.get() != nullptr) {
int flock_result = TEMP_FAILURE_RETRY(flock(file_->Fd(), LOCK_UN));
- CHECK_EQ(0, flock_result);
+ if (flock_result != 0) {
+ PLOG(FATAL) << "Unable to unlock file " << file_->GetPath();
+ UNREACHABLE();
+ }
int close_result = -1;
if (file_->ReadOnlyMode()) {
close_result = file_->Close();
diff --git a/runtime/base/unix_file/fd_file.cc b/runtime/base/unix_file/fd_file.cc
index ff2dd1b399..03fc959f6b 100644
--- a/runtime/base/unix_file/fd_file.cc
+++ b/runtime/base/unix_file/fd_file.cc
@@ -73,7 +73,7 @@ void FdFile::Destroy() {
}
if (auto_close_ && fd_ != -1) {
if (Close() != 0) {
- PLOG(WARNING) << "Failed to close file " << file_path_;
+ PLOG(WARNING) << "Failed to close file with fd=" << fd_ << " path=" << file_path_;
}
}
}
diff --git a/runtime/bytecode_utils.h b/runtime/bytecode_utils.h
new file mode 100644
index 0000000000..fa87b1d6da
--- /dev/null
+++ b/runtime/bytecode_utils.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BYTECODE_UTILS_H_
+#define ART_RUNTIME_BYTECODE_UTILS_H_
+
+#include "base/arena_object.h"
+#include "dex_file.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+
+namespace art {
+
+class CodeItemIterator : public ValueObject {
+ public:
+ explicit CodeItemIterator(const DexFile::CodeItem& code_item) : CodeItemIterator(code_item, 0u) {}
+ CodeItemIterator(const DexFile::CodeItem& code_item, uint32_t start_dex_pc)
+ : code_ptr_(code_item.insns_ + start_dex_pc),
+ code_end_(code_item.insns_ + code_item.insns_size_in_code_units_),
+ dex_pc_(start_dex_pc) {}
+
+ bool Done() const { return code_ptr_ >= code_end_; }
+ bool IsLast() const { return code_ptr_ + CurrentInstruction().SizeInCodeUnits() >= code_end_; }
+
+ const Instruction& CurrentInstruction() const { return *Instruction::At(code_ptr_); }
+ uint32_t CurrentDexPc() const { return dex_pc_; }
+
+ void Advance() {
+ DCHECK(!Done());
+ size_t instruction_size = CurrentInstruction().SizeInCodeUnits();
+ code_ptr_ += instruction_size;
+ dex_pc_ += instruction_size;
+ }
+
+ private:
+ const uint16_t* code_ptr_;
+ const uint16_t* const code_end_;
+ uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeItemIterator);
+};
+
+class DexSwitchTable : public ValueObject {
+ public:
+ DexSwitchTable(const Instruction& instruction, uint32_t dex_pc)
+ : instruction_(instruction),
+ dex_pc_(dex_pc),
+ sparse_(instruction.Opcode() == Instruction::SPARSE_SWITCH) {
+ int32_t table_offset = instruction.VRegB_31t();
+ const uint16_t* table = reinterpret_cast<const uint16_t*>(&instruction) + table_offset;
+ DCHECK_EQ(table[0], sparse_ ? static_cast<uint16_t>(Instruction::kSparseSwitchSignature)
+ : static_cast<uint16_t>(Instruction::kPackedSwitchSignature));
+ num_entries_ = table[1];
+ values_ = reinterpret_cast<const int32_t*>(&table[2]);
+ }
+
+ uint16_t GetNumEntries() const {
+ return num_entries_;
+ }
+
+ void CheckIndex(size_t index) const {
+ if (sparse_) {
+ // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
+ DCHECK_LT(index, 2 * static_cast<size_t>(num_entries_));
+ } else {
+ // In a packed table, we have the starting key and num_entries_ values.
+ DCHECK_LT(index, 1 + static_cast<size_t>(num_entries_));
+ }
+ }
+
+ int32_t GetEntryAt(size_t index) const {
+ CheckIndex(index);
+ return values_[index];
+ }
+
+ uint32_t GetDexPcForIndex(size_t index) const {
+ CheckIndex(index);
+ return dex_pc_ +
+ (reinterpret_cast<const int16_t*>(values_ + index) -
+ reinterpret_cast<const int16_t*>(&instruction_));
+ }
+
+ // Index of the first value in the table.
+ size_t GetFirstValueIndex() const {
+ if (sparse_) {
+ // In a sparse table, we have num_entries_ keys and num_entries_ values, in that order.
+ return num_entries_;
+ } else {
+ // In a packed table, we have the starting key and num_entries_ values.
+ return 1;
+ }
+ }
+
+ bool IsSparse() const { return sparse_; }
+
+ bool ShouldBuildDecisionTree() {
+ return IsSparse() || GetNumEntries() <= kSmallSwitchThreshold;
+ }
+
+ private:
+ const Instruction& instruction_;
+ const uint32_t dex_pc_;
+
+ // Whether this is a sparse-switch table (or a packed-switch one).
+ const bool sparse_;
+
+ // This can't be const as it needs to be computed off of the given instruction, and complicated
+ // expressions in the initializer list seemed very ugly.
+ uint16_t num_entries_;
+
+ const int32_t* values_;
+
+ // The number of entries in a packed switch before we use a jump table or specified
+ // compare/jump series.
+ static constexpr uint16_t kSmallSwitchThreshold = 3;
+
+ DISALLOW_COPY_AND_ASSIGN(DexSwitchTable);
+};
+
+class DexSwitchTableIterator {
+ public:
+ explicit DexSwitchTableIterator(const DexSwitchTable& table)
+ : table_(table),
+ num_entries_(static_cast<size_t>(table_.GetNumEntries())),
+ first_target_offset_(table_.GetFirstValueIndex()),
+ index_(0u) {}
+
+ bool Done() const { return index_ >= num_entries_; }
+ bool IsLast() const { return index_ == num_entries_ - 1; }
+
+ void Advance() {
+ DCHECK(!Done());
+ index_++;
+ }
+
+ int32_t CurrentKey() const {
+ return table_.IsSparse() ? table_.GetEntryAt(index_) : table_.GetEntryAt(0) + index_;
+ }
+
+ int32_t CurrentTargetOffset() const {
+ return table_.GetEntryAt(index_ + first_target_offset_);
+ }
+
+ uint32_t GetDexPcForCurrentIndex() const { return table_.GetDexPcForIndex(index_); }
+
+ private:
+ const DexSwitchTable& table_;
+ const size_t num_entries_;
+ const size_t first_target_offset_;
+
+ size_t index_;
+};
+
+inline const Instruction& GetDexInstructionAt(const DexFile::CodeItem& code_item, uint32_t dex_pc) {
+ return CodeItemIterator(code_item, dex_pc).CurrentInstruction();
+}
+
+inline bool IsThrowingDexInstruction(const Instruction& instruction) {
+ // Special-case MONITOR_EXIT which is a throwing instruction but the verifier
+ // guarantees that it will never throw. This is necessary to avoid rejecting
+ // 'synchronized' blocks/methods.
+ return instruction.IsThrow() && instruction.Opcode() != Instruction::MONITOR_EXIT;
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_BYTECODE_UTILS_H_
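CodeItemIterator walks a method's 16-bit code units, and DexSwitchTable/DexSwitchTableIterator decode packed- and sparse-switch payloads into key/target pairs. A hedged usage sketch; CountSwitchTargets is hypothetical and the code item is assumed to come from a loaded, verified dex file:

    #include "bytecode_utils.h"

    // Hypothetical walk over a method's instructions, counting switch targets.
    static size_t CountSwitchTargets(const art::DexFile::CodeItem& code_item) {
      size_t targets = 0;
      for (art::CodeItemIterator it(code_item); !it.Done(); it.Advance()) {
        const art::Instruction& insn = it.CurrentInstruction();
        if (insn.Opcode() == art::Instruction::PACKED_SWITCH ||
            insn.Opcode() == art::Instruction::SPARSE_SWITCH) {
          art::DexSwitchTable table(insn, it.CurrentDexPc());
          for (art::DexSwitchTableIterator s(table); !s.Done(); s.Advance()) {
            ++targets;
            static_cast<void>(s.CurrentTargetOffset());  // branch target, unused here
          }
        }
      }
      return targets;
    }
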
diff --git a/runtime/cha.cc b/runtime/cha.cc
index eaba01b2ce..7948c29e5d 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -210,7 +210,7 @@ void ClassHierarchyAnalysis::VerifyNonSingleImplementation(mirror::Class* verify
}
}
-void ClassHierarchyAnalysis::CheckSingleImplementationInfo(
+void ClassHierarchyAnalysis::CheckVirtualMethodSingleImplementationInfo(
Handle<mirror::Class> klass,
ArtMethod* virtual_method,
ArtMethod* method_in_super,
@@ -290,8 +290,9 @@ void ClassHierarchyAnalysis::CheckSingleImplementationInfo(
// A non-abstract method overrides an abstract method.
if (method_in_super->GetSingleImplementation(pointer_size) == nullptr) {
// Abstract method_in_super has no implementation yet.
- // We need to grab cha_lock_ for further checking/updating due to possible
- // races.
+ // We need to grab cha_lock_ since there may be multiple class-linking
+ // operations in progress that can check/modify the single-implementation
+ // flag/method of method_in_super.
MutexLock cha_mu(Thread::Current(), *Locks::cha_lock_);
if (!method_in_super->HasSingleImplementation()) {
return;
@@ -362,6 +363,55 @@ void ClassHierarchyAnalysis::CheckSingleImplementationInfo(
}
}
+void ClassHierarchyAnalysis::CheckInterfaceMethodSingleImplementationInfo(
+ Handle<mirror::Class> klass,
+ ArtMethod* interface_method,
+ ArtMethod* implementation_method,
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods,
+ PointerSize pointer_size) {
+ DCHECK(klass->IsInstantiable());
+ DCHECK(interface_method->IsAbstract() || interface_method->IsDefault());
+
+ if (!interface_method->HasSingleImplementation()) {
+ return;
+ }
+
+ if (implementation_method->IsAbstract()) {
+ // An instantiable class doesn't supply an implementation for
+ // interface_method. Invoking the interface method on the class will throw
+ // AbstractMethodError. This is an uncommon case, so we simply treat
+ // interface_method as not having single-implementation.
+ invalidated_single_impl_methods.insert(interface_method);
+ return;
+ }
+
+ // We need to grab cha_lock_ since there may be multiple class-linking
+ // operations in progress that can check/modify the single-implementation
+ // flag/method of interface_method.
+ MutexLock cha_mu(Thread::Current(), *Locks::cha_lock_);
+ // Do this check again after we grab cha_lock_.
+ if (!interface_method->HasSingleImplementation()) {
+ return;
+ }
+
+ ArtMethod* single_impl = interface_method->GetSingleImplementation(pointer_size);
+ if (single_impl == nullptr) {
+ // implementation_method becomes the first implementation for
+ // interface_method.
+ interface_method->SetSingleImplementation(implementation_method, pointer_size);
+ // Keep interface_method's single-implementation status.
+ return;
+ }
+ DCHECK(!single_impl->IsAbstract());
+ if (single_impl->GetDeclaringClass() == implementation_method->GetDeclaringClass()) {
+ // Same implementation. Since implementation_method may be a copy of a default
+ // method, we need to check the declaring class for equality.
+ return;
+ }
+ // Another implementation for interface_method.
+ invalidated_single_impl_methods.insert(interface_method);
+}
+
void ClassHierarchyAnalysis::InitSingleImplementationFlag(Handle<mirror::Class> klass,
ArtMethod* method,
PointerSize pointer_size) {
@@ -382,6 +432,7 @@ void ClassHierarchyAnalysis::InitSingleImplementationFlag(Handle<mirror::Class>
// Rare case, but we do accept it (such as 800-smali/smali/b_26143249.smali).
// Do not attempt to devirtualize it.
method->SetHasSingleImplementation(false);
+ DCHECK(method->GetSingleImplementation(pointer_size) == nullptr);
} else {
// Abstract method starts with single-implementation flag set and null
// implementation method.
@@ -396,9 +447,15 @@ void ClassHierarchyAnalysis::InitSingleImplementationFlag(Handle<mirror::Class>
}
void ClassHierarchyAnalysis::UpdateAfterLoadingOf(Handle<mirror::Class> klass) {
+ PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
if (klass->IsInterface()) {
+ for (ArtMethod& method : klass->GetDeclaredVirtualMethods(image_pointer_size)) {
+ DCHECK(method.IsAbstract() || method.IsDefault());
+ InitSingleImplementationFlag(klass, &method, image_pointer_size);
+ }
return;
}
+
mirror::Class* super_class = klass->GetSuperClass();
if (super_class == nullptr) {
return;
@@ -408,7 +465,6 @@ void ClassHierarchyAnalysis::UpdateAfterLoadingOf(Handle<mirror::Class> klass) {
// is invalidated by linking `klass`.
std::unordered_set<ArtMethod*> invalidated_single_impl_methods;
- PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
// Do an entry-by-entry comparison of vtable contents with super's vtable.
for (int32_t i = 0; i < super_class->GetVTableLength(); ++i) {
ArtMethod* method = klass->GetVTableEntry(i, image_pointer_size);
@@ -418,33 +474,59 @@ void ClassHierarchyAnalysis::UpdateAfterLoadingOf(Handle<mirror::Class> klass) {
if (method->IsAbstract() && klass->IsInstantiable()) {
// An instantiable class that inherits an abstract method is treated as
// supplying an implementation that throws AbstractMethodError.
- CheckSingleImplementationInfo(klass,
- method,
- method_in_super,
- invalidated_single_impl_methods,
- image_pointer_size);
+ CheckVirtualMethodSingleImplementationInfo(klass,
+ method,
+ method_in_super,
+ invalidated_single_impl_methods,
+ image_pointer_size);
}
continue;
}
InitSingleImplementationFlag(klass, method, image_pointer_size);
- CheckSingleImplementationInfo(klass,
- method,
- method_in_super,
- invalidated_single_impl_methods,
- image_pointer_size);
+ CheckVirtualMethodSingleImplementationInfo(klass,
+ method,
+ method_in_super,
+ invalidated_single_impl_methods,
+ image_pointer_size);
}
-
// For new virtual methods that don't override.
for (int32_t i = super_class->GetVTableLength(); i < klass->GetVTableLength(); ++i) {
ArtMethod* method = klass->GetVTableEntry(i, image_pointer_size);
InitSingleImplementationFlag(klass, method, image_pointer_size);
}
- Runtime* const runtime = Runtime::Current();
+ if (klass->IsInstantiable()) {
+ auto* iftable = klass->GetIfTable();
+ const size_t ifcount = klass->GetIfTableCount();
+ for (size_t i = 0; i < ifcount; ++i) {
+ mirror::Class* interface = iftable->GetInterface(i);
+ for (size_t j = 0, count = iftable->GetMethodArrayCount(i); j < count; ++j) {
+ ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size);
+ mirror::PointerArray* method_array = iftable->GetMethodArray(i);
+ ArtMethod* implementation_method =
+ method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size);
+ DCHECK(implementation_method != nullptr) << klass->PrettyClass();
+ CheckInterfaceMethodSingleImplementationInfo(klass,
+ interface_method,
+ implementation_method,
+ invalidated_single_impl_methods,
+ image_pointer_size);
+ }
+ }
+ }
+
+ InvalidateSingleImplementationMethods(invalidated_single_impl_methods);
+}
+
+void ClassHierarchyAnalysis::InvalidateSingleImplementationMethods(
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods) {
if (!invalidated_single_impl_methods.empty()) {
+ Runtime* const runtime = Runtime::Current();
Thread *self = Thread::Current();
// Method headers for compiled code to be invalidated.
std::unordered_set<OatQuickMethodHeader*> dependent_method_headers;
+ PointerSize image_pointer_size =
+ Runtime::Current()->GetClassLinker()->GetImagePointerSize();
{
// We do this under cha_lock_. Committing code also grabs this lock to
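With this change an interface method's single-implementation slot is filled in the first time an instantiable class supplies a concrete implementation and cleared (via the invalidation set) as soon as a second, different implementation appears. A sketch of how a consumer might query that state for speculative devirtualization; the real logic lives in the optimizing compiler's inliner, so this is illustrative only:

    // Hypothetical consumer-side check: devirtualize only while the CHA
    // assumption holds, and expect the caller to record a CHA dependency so
    // the compiled code can be invalidated if the assumption stops holding.
    ArtMethod* TryDevirtualizeInterfaceCall(ArtMethod* interface_method,
                                            PointerSize pointer_size)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      if (!interface_method->HasSingleImplementation()) {
        return nullptr;  // Multiple (or no) implementations: keep the interface dispatch.
      }
      ArtMethod* target = interface_method->GetSingleImplementation(pointer_size);
      if (target == nullptr || target->IsAbstract()) {
        return nullptr;  // No concrete implementation registered yet.
      }
      return target;     // Caller must also register a CHA dependency on `target`.
    }
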
diff --git a/runtime/cha.h b/runtime/cha.h
index a56a752d8c..99c49d2bca 100644
--- a/runtime/cha.h
+++ b/runtime/cha.h
@@ -117,11 +117,13 @@ class ClassHierarchyAnalysis {
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Check/update single-implementation info when one virtual method
+ // overrides another.
// `virtual_method` in `klass` overrides `method_in_super`.
- // This will invalidate some assumptions on single-implementation.
+ // This may invalidate some assumptions on single-implementation.
// Append methods that should have their single-implementation flag invalidated
// to `invalidated_single_impl_methods`.
- void CheckSingleImplementationInfo(
+ void CheckVirtualMethodSingleImplementationInfo(
Handle<mirror::Class> klass,
ArtMethod* virtual_method,
ArtMethod* method_in_super,
@@ -129,6 +131,23 @@ class ClassHierarchyAnalysis {
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Check/update single-implementation info when one method
+ // implements an interface method.
+ // `implementation_method` in `klass` implements `interface_method`.
+ // Append `interface_method` to `invalidated_single_impl_methods`
+ // if `interface_method` gets a new implementation.
+ void CheckInterfaceMethodSingleImplementationInfo(
+ Handle<mirror::Class> klass,
+ ArtMethod* interface_method,
+ ArtMethod* implementation_method,
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods,
+ PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void InvalidateSingleImplementationMethods(
+ std::unordered_set<ArtMethod*>& invalidated_single_impl_methods)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// For all methods in vtable slot at `verify_index` of `verify_class` and its
// superclasses, single-implementation status should be false, except if the
// method is `excluded_method`.
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index bd510ca0e1..9ddc6cf0ae 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -161,9 +161,15 @@ inline ArtMethod* ClassLinker::ResolveMethod(Thread* self,
return resolved_method;
}
-inline ArtField* ClassLinker::GetResolvedField(uint32_t field_idx,
- ObjPtr<mirror::DexCache> dex_cache) {
- return dex_cache->GetResolvedField(field_idx, image_pointer_size_);
+inline ArtField* ClassLinker::LookupResolvedField(uint32_t field_idx,
+ ArtMethod* referrer,
+ bool is_static) {
+ ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
+ ArtField* field = dex_cache->GetResolvedField(field_idx, image_pointer_size_);
+ if (field == nullptr) {
+ field = LookupResolvedField(field_idx, dex_cache, referrer->GetClassLoader(), is_static);
+ }
+ return field;
}
inline ArtField* ClassLinker::ResolveField(uint32_t field_idx,
@@ -171,7 +177,8 @@ inline ArtField* ClassLinker::ResolveField(uint32_t field_idx,
bool is_static) {
Thread::PoisonObjectPointersIfDebug();
ObjPtr<mirror::Class> declaring_class = referrer->GetDeclaringClass();
- ArtField* resolved_field = GetResolvedField(field_idx, referrer->GetDexCache());
+ ArtField* resolved_field =
+ referrer->GetDexCache()->GetResolvedField(field_idx, image_pointer_size_);
if (UNLIKELY(resolved_field == nullptr)) {
StackHandleScope<2> hs(Thread::Current());
Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
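The new LookupResolvedField inline above is the fast path: read the dex cache and only fall back to a lookup that never triggers resolution. A hedged sketch of that cache-then-search shape with placeholder types (none of these names are ART's):

    #include <cstdint>
    #include <unordered_map>

    struct Field {};  // Placeholder.
    using FieldCache = std::unordered_map<uint32_t, Field*>;

    // Cache hit, or a slow lookup over already-resolved classes; nothing new is
    // loaded or resolved, so a nullptr result is a normal outcome.
    Field* LookupField(FieldCache& cache,
                       uint32_t field_idx,
                       Field* (*slow_lookup)(uint32_t)) {
      auto it = cache.find(field_idx);
      if (it != cache.end()) {
        return it->second;
      }
      Field* field = slow_lookup(field_idx);
      if (field != nullptr) {
        cache.emplace(field_idx, field);  // Remember it for the next lookup.
      }
      return field;
    }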
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index eaa35fe12d..b8ff2c22f3 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -906,7 +906,6 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) {
runtime->GetOatFileManager().RegisterImageOatFiles(spaces);
DCHECK(!oat_files.empty());
const OatHeader& default_oat_header = oat_files[0]->GetOatHeader();
- CHECK_EQ(default_oat_header.GetImageFileLocationOatChecksum(), 0U);
CHECK_EQ(default_oat_header.GetImageFileLocationOatDataBegin(), 0U);
const char* image_file_location = oat_files[0]->GetOatHeader().
GetStoreValueByKey(OatHeader::kImageLocationKey);
@@ -1025,7 +1024,8 @@ bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
class_loader->GetClass();
}
-static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element)
+static bool GetDexPathListElementName(ObjPtr<mirror::Object> element,
+ ObjPtr<mirror::String>* out_name)
REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dex_file_field =
jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
@@ -1037,17 +1037,20 @@ static mirror::String* GetDexPathListElementName(ObjPtr<mirror::Object> element)
CHECK_EQ(dex_file_field->GetDeclaringClass(), element->GetClass()) << element->PrettyTypeOf();
ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
if (dex_file == nullptr) {
- return nullptr;
+ // Null dex file means it was probably a jar with no dex files; return a null string.
+ *out_name = nullptr;
+ return true;
}
ObjPtr<mirror::Object> name_object = dex_file_name_field->GetObject(dex_file);
if (name_object != nullptr) {
- return name_object->AsString();
+ *out_name = name_object->AsString();
+ return true;
}
- return nullptr;
+ return false;
}
static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
- std::list<mirror::String*>* out_dex_file_names,
+ std::list<ObjPtr<mirror::String>>* out_dex_file_names,
std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(out_dex_file_names != nullptr);
@@ -1083,12 +1086,14 @@ static bool FlattenPathClassLoader(ObjPtr<mirror::ClassLoader> class_loader,
*error_msg = StringPrintf("Null dex element at index %d", i);
return false;
}
- ObjPtr<mirror::String> const name = GetDexPathListElementName(element);
- if (name == nullptr) {
- *error_msg = StringPrintf("Null name for dex element at index %d", i);
+ ObjPtr<mirror::String> name;
+ if (!GetDexPathListElementName(element, &name)) {
+ *error_msg = StringPrintf("Invalid dex path list element at index %d", i);
return false;
}
- out_dex_file_names->push_front(name.Ptr());
+ if (name != nullptr) {
+ out_dex_file_names->push_front(name.Ptr());
+ }
}
}
}
@@ -1157,9 +1162,7 @@ class VerifyDirectInterfacesInTableClassVisitor {
for (ObjPtr<mirror::Class> klass : classes_) {
for (uint32_t i = 0, num = klass->NumDirectInterfaces(); i != num; ++i) {
CHECK(klass->GetDirectInterface(self_, klass, i) != nullptr)
- << klass->PrettyDescriptor() << " iface #" << i
- << klass->GetDexFile().StringByTypeIdx(klass->GetDirectInterfaceTypeIdx(i))
- << " Bug: 34839984";
+ << klass->PrettyDescriptor() << " iface #" << i;
}
}
}
@@ -1276,7 +1279,10 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
num_types = dex_file->NumTypeIds();
}
const size_t num_methods = dex_file->NumMethodIds();
- const size_t num_fields = dex_file->NumFieldIds();
+ size_t num_fields = mirror::DexCache::kDexCacheFieldCacheSize;
+ if (dex_file->NumFieldIds() < num_fields) {
+ num_fields = dex_file->NumFieldIds();
+ }
size_t num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
if (dex_file->NumProtoIds() < num_method_types) {
num_method_types = dex_file->NumProtoIds();
@@ -1320,17 +1326,22 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
dex_cache->SetResolvedMethods(methods);
}
if (num_fields != 0u) {
- ArtField** const fields =
- reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset());
- for (size_t j = 0; kIsDebugBuild && j < num_fields; ++j) {
- DCHECK(fields[j] == nullptr);
+ mirror::FieldDexCacheType* const image_resolved_fields = dex_cache->GetResolvedFields();
+ mirror::FieldDexCacheType* const fields =
+ reinterpret_cast<mirror::FieldDexCacheType*>(raw_arrays + layout.FieldsOffset());
+ for (size_t j = 0; j < num_fields; ++j) {
+ DCHECK_EQ(mirror::DexCache::GetNativePairPtrSize(fields, j, image_pointer_size_).index,
+ 0u);
+ DCHECK(mirror::DexCache::GetNativePairPtrSize(fields, j, image_pointer_size_).object ==
+ nullptr);
+ mirror::DexCache::SetNativePairPtrSize(
+ fields,
+ j,
+ mirror::DexCache::GetNativePairPtrSize(image_resolved_fields,
+ j,
+ image_pointer_size_),
+ image_pointer_size_);
}
- CopyNonNull(dex_cache->GetResolvedFields(),
- num_fields,
- fields,
- [] (const ArtField* field) {
- return field == nullptr;
- });
dex_cache->SetResolvedFields(fields);
}
if (num_method_types != 0u) {
@@ -1769,14 +1780,14 @@ bool ClassLinker::AddImageSpace(
*error_msg = "Unexpected BootClassLoader in app image";
return false;
}
- std::list<mirror::String*> image_dex_file_names;
+ std::list<ObjPtr<mirror::String>> image_dex_file_names;
std::string temp_error_msg;
if (!FlattenPathClassLoader(image_class_loader.Get(), &image_dex_file_names, &temp_error_msg)) {
*error_msg = StringPrintf("Failed to flatten image class loader hierarchy '%s'",
temp_error_msg.c_str());
return false;
}
- std::list<mirror::String*> loader_dex_file_names;
+ std::list<ObjPtr<mirror::String>> loader_dex_file_names;
if (!FlattenPathClassLoader(class_loader.Get(), &loader_dex_file_names, &temp_error_msg)) {
*error_msg = StringPrintf("Failed to flatten class loader hierarchy '%s'",
temp_error_msg.c_str());
@@ -1788,7 +1799,10 @@ bool ClassLinker::AddImageSpace(
ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i);
if (element != nullptr) {
// If we are somewhere in the middle of the array, there may be nulls at the end.
- loader_dex_file_names.push_back(GetDexPathListElementName(element));
+ ObjPtr<mirror::String> name;
+ if (GetDexPathListElementName(element, &name) && name != nullptr) {
+ loader_dex_file_names.push_back(name);
+ }
}
}
// Ignore the number of image dex files since we are adding those to the class loader anyways.
@@ -1904,12 +1918,22 @@ bool ClassLinker::AddImageSpace(
// Since it ensures classes are in the class table.
VerifyClassInTableArtMethodVisitor visitor2(class_table);
header.VisitPackedArtMethods(&visitor2, space->Begin(), kRuntimePointerSize);
- }
- if (app_image) {
- // TODO: Restrict this check to debug builds. Bug: 34839984
+ // Verify that all direct interfaces of classes in the class table are also resolved.
VerifyDirectInterfacesInTableClassVisitor visitor(class_loader.Get());
class_table->Visit(visitor);
visitor.Check();
+ // Check that all non-primitive classes in dex caches are also in the class table.
+ for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
+ ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
+ mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes();
+ for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
+ ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
+ if (klass != nullptr && !klass->IsPrimitive()) {
+ CHECK(class_table->Contains(klass)) << klass->PrettyDescriptor()
+ << " " << dex_cache->GetDexFile()->GetLocation();
+ }
+ }
+ }
}
VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time);
return true;
@@ -3481,13 +3505,12 @@ ObjPtr<mirror::DexCache> ClassLinker::FindDexCache(Thread* self, const DexFile&
return dex_cache;
}
// Failure, dump diagnostic and abort.
- std::string location(dex_file.GetLocation());
for (const DexCacheData& data : dex_caches_) {
if (DecodeDexCache(self, data) != nullptr) {
- LOG(ERROR) << "Registered dex file " << data.dex_file->GetLocation();
+ LOG(FATAL_WITHOUT_ABORT) << "Registered dex file " << data.dex_file->GetLocation();
}
}
- LOG(FATAL) << "Failed to find DexCache for DexFile " << location;
+ LOG(FATAL) << "Failed to find DexCache for DexFile " << dex_file.GetLocation();
UNREACHABLE();
}
@@ -3891,8 +3914,10 @@ bool ClassLinker::AttemptSupertypeVerification(Thread* self,
if (!supertype->IsVerified() && !supertype->IsErroneous()) {
VerifyClass(self, supertype);
}
- if (supertype->IsCompileTimeVerified()) {
- // Either we are verified or we soft failed and need to retry at runtime.
+
+ if (supertype->IsVerified() || supertype->ShouldVerifyAtRuntime()) {
+ // The supertype is either verified, or we soft failed at AOT time.
+ DCHECK(supertype->IsVerified() || Runtime::Current()->IsAotCompiler());
return true;
}
// If we got this far then we have a hard failure.
@@ -3958,13 +3983,16 @@ verifier::MethodVerifier::FailureKind ClassLinker::VerifyClass(
return verifier::MethodVerifier::kHardFailure;
}
- // Don't attempt to re-verify if already sufficiently verified.
+ // Don't attempt to re-verify if already verified.
if (klass->IsVerified()) {
EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
return verifier::MethodVerifier::kNoFailure;
}
- if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
- return verifier::MethodVerifier::kNoFailure;
+
+ // For AOT, don't attempt to re-verify if we have already found we should
+ // verify at runtime.
+ if (Runtime::Current()->IsAotCompiler() && klass->ShouldVerifyAtRuntime()) {
+ return verifier::MethodVerifier::kSoftFailure;
}
if (klass->GetStatus() == mirror::Class::kStatusResolved) {
@@ -4380,9 +4408,9 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable&
decoded_name->ToModifiedUtf8().c_str()));
CHECK_EQ(ArtField::PrettyField(klass->GetStaticField(1)), throws_field_name);
- CHECK_EQ(klass.Get()->GetInterfaces(),
+ CHECK_EQ(klass.Get()->GetProxyInterfaces(),
soa.Decode<mirror::ObjectArray<mirror::Class>>(interfaces));
- CHECK_EQ(klass.Get()->GetThrows(),
+ CHECK_EQ(klass.Get()->GetProxyThrows(),
soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>>(throws));
}
return klass.Get();
@@ -4524,108 +4552,6 @@ bool ClassLinker::CanWeInitializeClass(ObjPtr<mirror::Class> klass, bool can_ini
return CanWeInitializeClass(super_class, can_init_statics, can_init_parents);
}
-std::string DescribeSpace(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
- std::ostringstream oss;
- gc::Heap* heap = Runtime::Current()->GetHeap();
- gc::space::ContinuousSpace* cs = heap->FindContinuousSpaceFromAddress(klass.Ptr());
- if (cs != nullptr) {
- if (cs->IsImageSpace()) {
- oss << "image/" << cs->GetName() << "/" << cs->AsImageSpace()->GetImageFilename();
- } else {
- oss << "continuous/" << cs->GetName();
- }
- } else {
- gc::space::DiscontinuousSpace* ds =
- heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok */ true);
- if (ds != nullptr) {
- oss << "discontinuous/" << ds->GetName();
- } else {
- oss << "invalid";
- }
- }
- return oss.str();
-}
-
-std::string DescribeLoaders(ObjPtr<mirror::Class> klass, const char* iface_descriptor)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- std::ostringstream oss;
- uint32_t hash = ComputeModifiedUtf8Hash(iface_descriptor);
- ScopedObjectAccessUnchecked soa(Thread::Current());
- ObjPtr<mirror::Class> path_class_loader =
- soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader);
- ObjPtr<mirror::Class> dex_class_loader =
- soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DexClassLoader);
-
- // Print the class loader chain.
- bool found_iface;
- const char* loader_separator = "";
- for (ObjPtr<mirror::ClassLoader> loader = klass->GetClassLoader();
- loader != nullptr;
- loader = loader->GetParent()) {
- oss << loader_separator << loader->GetClass()->PrettyDescriptor();
- loader_separator = ";";
- // If we didn't find the interface yet, try to find it in the current class loader.
- if (!found_iface) {
- ClassTable* table = Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(loader);
- ObjPtr<mirror::Class> iface =
- (table != nullptr) ? table->Lookup(iface_descriptor, hash) : nullptr;
- if (iface != nullptr) {
- found_iface = true;
- oss << "[hit:" << DescribeSpace(iface) << "]";
- }
- }
-
- // For PathClassLoader or DexClassLoader also dump the dex file locations.
- if (loader->GetClass() == path_class_loader || loader->GetClass() == dex_class_loader) {
- ArtField* const cookie_field =
- jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
- ArtField* const dex_file_field =
- jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
- ObjPtr<mirror::Object> dex_path_list =
- jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)->
- GetObject(loader);
- if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) {
- ObjPtr<mirror::Object> dex_elements_obj =
- jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)->
- GetObject(dex_path_list);
- if (dex_elements_obj != nullptr) {
- ObjPtr<mirror::ObjectArray<mirror::Object>> dex_elements =
- dex_elements_obj->AsObjectArray<mirror::Object>();
- oss << "(";
- const char* path_separator = "";
- for (int32_t i = 0; i != dex_elements->GetLength(); ++i) {
- ObjPtr<mirror::Object> element = dex_elements->GetWithoutChecks(i);
- ObjPtr<mirror::Object> dex_file =
- (element != nullptr) ? dex_file_field->GetObject(element) : nullptr;
- ObjPtr<mirror::LongArray> long_array =
- (dex_file != nullptr) ? cookie_field->GetObject(dex_file)->AsLongArray() : nullptr;
- if (long_array != nullptr) {
- int32_t long_array_size = long_array->GetLength();
- // First element is the oat file.
- for (int32_t j = kDexFileIndexStart; j < long_array_size; ++j) {
- const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>(
- static_cast<uintptr_t>(long_array->GetWithoutChecks(j)));
- oss << path_separator << cp_dex_file->GetLocation();
- path_separator = ":";
- }
- }
- }
- oss << ")";
- }
- }
- }
- }
-
- // Do a paranoid check that the `klass` itself is in the class table.
- ClassTable* table =
- Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(klass->GetClassLoader());
- ObjPtr<mirror::Class> k = (table != nullptr) ? table->LookupByDescriptor(klass) : nullptr;
- if (k != klass) {
- oss << "{FAIL:" << k.Ptr() << "!=" << klass.Ptr() << "}";
- }
- return oss.str();
-}
-
bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
bool can_init_statics, bool can_init_parents) {
// see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol
@@ -4773,15 +4699,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
MutableHandle<mirror::Class> handle_scope_iface(hs_iface.NewHandle<mirror::Class>(nullptr));
for (size_t i = 0; i < num_direct_interfaces; i++) {
handle_scope_iface.Assign(mirror::Class::GetDirectInterface(self, klass.Get(), i));
- if (UNLIKELY(handle_scope_iface == nullptr)) {
- const char* iface_descriptor =
- klass->GetDexFile().StringByTypeIdx(klass->GetDirectInterfaceTypeIdx(i));
- LOG(FATAL) << "Check failed: handle_scope_iface != nullptr "
- << "Debug data for bug 34839984: "
- << klass->PrettyDescriptor() << " iface #" << i << " " << iface_descriptor
- << " space: " << DescribeSpace(klass.Get())
- << " loaders: " << DescribeLoaders(klass.Get(), iface_descriptor);
- }
+ CHECK(handle_scope_iface != nullptr) << klass->PrettyDescriptor() << " iface #" << i;
CHECK(handle_scope_iface->IsInterface());
if (handle_scope_iface->HasBeenRecursivelyInitialized()) {
// We have already done this for this interface. Skip it.
@@ -4917,7 +4835,7 @@ bool ClassLinker::InitializeDefaultInterfaceRecursive(Thread* self,
// First we initialize all of iface's super-interfaces recursively.
for (size_t i = 0; i < num_direct_ifaces; i++) {
ObjPtr<mirror::Class> super_iface = mirror::Class::GetDirectInterface(self, iface.Get(), i);
- DCHECK(super_iface != nullptr);
+ CHECK(super_iface != nullptr) << iface->PrettyDescriptor() << " iface #" << i;
if (!super_iface->HasBeenRecursivelyInitialized()) {
// Recursive step
handle_super_iface.Assign(super_iface);
@@ -6805,18 +6723,20 @@ static void CheckClassOwnsVTableEntries(Thread* self,
ArtMethod* m = check_vtable->GetElementPtrSize<ArtMethod*>(i, pointer_size);
CHECK(m != nullptr);
- CHECK_EQ(m->GetMethodIndexDuringLinking(), i)
- << m->PrettyMethod()
- << " has an unexpected method index for its spot in the vtable for class"
- << klass->PrettyClass();
+ if (m->GetMethodIndexDuringLinking() != i) {
+ LOG(WARNING) << m->PrettyMethod()
+ << " has an unexpected method index for its spot in the vtable for class"
+ << klass->PrettyClass();
+ }
ArraySlice<ArtMethod> virtuals = klass->GetVirtualMethodsSliceUnchecked(pointer_size);
auto is_same_method = [m] (const ArtMethod& meth) {
return &meth == m;
};
- CHECK((super_vtable_length > i && superclass->GetVTableEntry(i, pointer_size) == m) ||
- std::find_if(virtuals.begin(), virtuals.end(), is_same_method) != virtuals.end())
- << m->PrettyMethod() << " does not seem to be owned by current class "
- << klass->PrettyClass() << " or any of its superclasses!";
+ if (!((super_vtable_length > i && superclass->GetVTableEntry(i, pointer_size) == m) ||
+ std::find_if(virtuals.begin(), virtuals.end(), is_same_method) != virtuals.end())) {
+ LOG(WARNING) << m->PrettyMethod() << " does not seem to be owned by current class "
+ << klass->PrettyClass() << " or any of its superclasses!";
+ }
}
}
@@ -6844,14 +6764,15 @@ static void CheckVTableHasNoDuplicates(Thread* self,
other_entry->GetAccessFlags())) {
continue;
}
- CHECK(vtable_entry != other_entry &&
- !name_comparator.HasSameNameAndSignature(
- other_entry->GetInterfaceMethodIfProxy(pointer_size)))
- << "vtable entries " << i << " and " << j << " are identical for "
- << klass->PrettyClass() << " in method " << vtable_entry->PrettyMethod() << " (0x"
- << std::hex << reinterpret_cast<uintptr_t>(vtable_entry) << ") and "
- << other_entry->PrettyMethod() << " (0x" << std::hex
- << reinterpret_cast<uintptr_t>(other_entry) << ")";
+ if (vtable_entry == other_entry ||
+ name_comparator.HasSameNameAndSignature(
+ other_entry->GetInterfaceMethodIfProxy(pointer_size))) {
+ LOG(WARNING) << "vtable entries " << i << " and " << j << " are identical for "
+ << klass->PrettyClass() << " in method " << vtable_entry->PrettyMethod()
+ << " (0x" << std::hex << reinterpret_cast<uintptr_t>(vtable_entry) << ") and "
+ << other_entry->PrettyMethod() << " (0x" << std::hex
+ << reinterpret_cast<uintptr_t>(other_entry) << ")";
+ }
}
}
}
@@ -7900,7 +7821,7 @@ mirror::String* ClassLinker::ResolveString(const DexFile& dex_file,
mirror::String* ClassLinker::LookupString(const DexFile& dex_file,
dex::StringIndex string_idx,
- Handle<mirror::DexCache> dex_cache) {
+ ObjPtr<mirror::DexCache> dex_cache) {
DCHECK(dex_cache != nullptr);
ObjPtr<mirror::String> resolved = dex_cache->GetResolvedString(string_idx);
if (resolved != nullptr) {
@@ -8238,6 +8159,43 @@ ArtMethod* ClassLinker::ResolveMethodWithoutInvokeType(const DexFile& dex_file,
return resolved;
}
+ArtField* ClassLinker::LookupResolvedField(uint32_t field_idx,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader,
+ bool is_static) {
+ const DexFile& dex_file = *dex_cache->GetDexFile();
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field_idx);
+ ObjPtr<mirror::Class> klass = dex_cache->GetResolvedType(field_id.class_idx_);
+ if (klass == nullptr) {
+ klass = LookupResolvedType(dex_file, field_id.class_idx_, dex_cache, class_loader);
+ }
+ if (klass == nullptr) {
+ // The class has not been resolved yet, so the field is also unresolved.
+ return nullptr;
+ }
+ DCHECK(klass->IsResolved());
+ Thread* self = is_static ? Thread::Current() : nullptr;
+
+ // First try to find a field declared directly by `klass` by the field index.
+ ArtField* resolved_field = is_static
+ ? mirror::Class::FindStaticField(self, klass, dex_cache, field_idx)
+ : klass->FindInstanceField(dex_cache, field_idx);
+
+ if (resolved_field == nullptr) {
+ // If not found in `klass` by field index, search the class hierarchy using the name and type.
+ const char* name = dex_file.GetFieldName(field_id);
+ const char* type = dex_file.GetFieldTypeDescriptor(field_id);
+ resolved_field = is_static
+ ? mirror::Class::FindStaticField(self, klass, name, type)
+ : klass->FindInstanceField(name, type);
+ }
+
+ if (resolved_field != nullptr) {
+ dex_cache->SetResolvedField(field_idx, resolved_field, image_pointer_size_);
+ }
+ return resolved_field;
+}
+
ArtField* ClassLinker::ResolveField(const DexFile& dex_file,
uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
@@ -8298,9 +8256,8 @@ ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
return nullptr;
}
- StringPiece name(dex_file.StringDataByIdx(field_id.name_idx_));
- StringPiece type(dex_file.StringDataByIdx(
- dex_file.GetTypeId(field_id.type_idx_).descriptor_idx_));
+ StringPiece name(dex_file.GetFieldName(field_id));
+ StringPiece type(dex_file.GetFieldTypeDescriptor(field_id));
resolved = mirror::Class::FindField(self, klass, name, type);
if (resolved != nullptr) {
dex_cache->SetResolvedField(field_idx, resolved, image_pointer_size_);
@@ -8550,6 +8507,15 @@ void ClassLinker::SetEntryPointsToInterpreter(ArtMethod* method) const {
}
}
+void ClassLinker::SetEntryPointsForObsoleteMethod(ArtMethod* method) const {
+ DCHECK(method->IsObsolete());
+ // We cannot mess with the entrypoints of native methods because they are used to determine how
+ // large the method's quick stack frame is. Without this information we cannot walk the stacks.
+ if (!method->IsNative()) {
+ method->SetEntryPointFromQuickCompiledCode(GetInvokeObsoleteMethodStub());
+ }
+}
+
void ClassLinker::DumpForSigQuit(std::ostream& os) {
ScopedObjectAccess soa(Thread::Current());
ReaderMutexLock mu(soa.Self(), *Locks::classlinker_classes_lock_);
@@ -8924,7 +8890,7 @@ std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_bo
return ret;
}
-std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
+std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForResolvedClasses(
const std::set<DexCacheResolvedClasses>& classes) {
ScopedTrace trace(__PRETTY_FUNCTION__);
std::unordered_set<std::string> ret;
@@ -8939,14 +8905,13 @@ std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
if (dex_cache != nullptr) {
const DexFile* dex_file = dex_cache->GetDexFile();
// There could be duplicates if two dex files with the same location are mapped.
- location_to_dex_file.emplace(
- ProfileCompilationInfo::GetProfileDexFileKey(dex_file->GetLocation()), dex_file);
+ location_to_dex_file.emplace(dex_file->GetLocation(), dex_file);
}
}
}
for (const DexCacheResolvedClasses& info : classes) {
- const std::string& profile_key = info.GetDexLocation();
- auto found = location_to_dex_file.find(profile_key);
+ const std::string& location = info.GetDexLocation();
+ auto found = location_to_dex_file.find(location);
if (found != location_to_dex_file.end()) {
const DexFile* dex_file = found->second;
VLOG(profiler) << "Found opened dex file for " << dex_file->GetLocation() << " with "
@@ -8958,7 +8923,7 @@ std::unordered_set<std::string> ClassLinker::GetClassDescriptorsForProfileKeys(
ret.insert(descriptor);
}
} else {
- VLOG(class_linker) << "Failed to find opened dex file for profile key " << profile_key;
+ VLOG(class_linker) << "Failed to find opened dex file for location " << location;
}
}
return ret;
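One structural change in this file worth calling out: the UpdateAppImageClassLoadersAndDexCaches hunk switches the resolved-fields array to index-tagged pairs and clamps its length to kDexCacheFieldCacheSize, so the field dex cache becomes a small hashed cache rather than one slot per field. A standalone illustration of that kind of cache; the modulo slot selection is an assumption for illustration, not taken from this diff:

    #include <array>
    #include <cstdint>

    struct FieldPair {
      void* object = nullptr;  // Resolved field, or nullptr.
      uint32_t index = 0u;     // Field index the slot currently holds.
    };

    template <size_t kSize>
    struct FieldPairCache {
      std::array<FieldPair, kSize> slots;

      void* Get(uint32_t field_idx) const {
        const FieldPair& pair = slots[field_idx % kSize];
        // Only a hit when the slot is tagged with the same field index;
        // otherwise another field currently occupies it.
        return (pair.index == field_idx) ? pair.object : nullptr;
      }

      void Set(uint32_t field_idx, void* field) {
        slots[field_idx % kSize] = FieldPair{field, field_idx};
      }
    };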
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 33eed3c8e3..a26e63b49e 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -247,7 +247,7 @@ class ClassLinker {
// result in the DexCache if found. Return null if not found.
mirror::String* LookupString(const DexFile& dex_file,
dex::StringIndex string_idx,
- Handle<mirror::DexCache> dex_cache)
+ ObjPtr<mirror::DexCache> dex_cache)
REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
@@ -333,7 +333,7 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
- ArtField* GetResolvedField(uint32_t field_idx, ObjPtr<mirror::DexCache> dex_cache)
+ ArtField* LookupResolvedField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
REQUIRES_SHARED(Locks::mutator_lock_)
@@ -544,6 +544,10 @@ class ClassLinker {
void SetEntryPointsToInterpreter(ArtMethod* method) const
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Set the entrypoints up for an obsolete method.
+ void SetEntryPointsForObsoleteMethod(ArtMethod* method) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Attempts to insert a class into a class table. Returns null if
// the class was inserted, otherwise returns an existing class with
// the same descriptor and ClassLoader.
@@ -617,7 +621,8 @@ class ClassLinker {
std::set<DexCacheResolvedClasses> GetResolvedClasses(bool ignore_boot_classes)
REQUIRES(!Locks::dex_lock_);
- std::unordered_set<std::string> GetClassDescriptorsForProfileKeys(
+ // Returns the class descriptors for loaded dex files.
+ std::unordered_set<std::string> GetClassDescriptorsForResolvedClasses(
const std::set<DexCacheResolvedClasses>& classes)
REQUIRES(!Locks::dex_lock_);
@@ -841,6 +846,13 @@ class ClassLinker {
REQUIRES(!Locks::classlinker_classes_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Find a field by its field index.
+ ArtField* LookupResolvedField(uint32_t field_idx,
+ ObjPtr<mirror::DexCache> dex_cache,
+ ObjPtr<mirror::ClassLoader> class_loader,
+ bool is_static)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
void RegisterDexFileLocked(const DexFile& dex_file,
ObjPtr<mirror::DexCache> dex_cache,
ObjPtr<mirror::ClassLoader> class_loader)
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index e5722a13a7..b421810113 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -618,7 +618,7 @@ struct ClassExtOffsets : public CheckOffsets<mirror::ClassExt> {
ClassExtOffsets() : CheckOffsets<mirror::ClassExt>(false, "Ldalvik/system/ClassExt;") {
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_dex_caches_), "obsoleteDexCaches");
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_methods_), "obsoleteMethods");
- addOffset(OFFSETOF_MEMBER(mirror::ClassExt, original_dex_file_bytes_), "originalDexFile");
+ addOffset(OFFSETOF_MEMBER(mirror::ClassExt, original_dex_file_), "originalDexFile");
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, verify_error_), "verifyError");
}
};
@@ -668,7 +668,6 @@ struct ProxyOffsets : public CheckOffsets<mirror::Proxy> {
struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> {
DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") {
- addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location");
addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_call_sites_), "numResolvedCallSites");
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index af4f998fdf..374b711aa8 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -55,6 +55,12 @@ mirror::Class* ClassTable::LookupByDescriptor(ObjPtr<mirror::Class> klass) {
return nullptr;
}
+// To take into account http://b/35845221
+#pragma clang diagnostic push
+#if __clang_major__ < 4
+#pragma clang diagnostic ignored "-Wunreachable-code"
+#endif
+
mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) {
WriterMutexLock mu(Thread::Current(), lock_);
// Should only be updating latest table.
@@ -80,6 +86,8 @@ mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* kl
return existing;
}
+#pragma clang diagnostic pop
+
size_t ClassTable::CountDefiningLoaderClasses(ObjPtr<mirror::ClassLoader> defining_loader,
const ClassSet& set) const {
size_t count = 0;
@@ -105,6 +113,20 @@ size_t ClassTable::NumNonZygoteClasses(ObjPtr<mirror::ClassLoader> defining_load
return CountDefiningLoaderClasses(defining_loader, classes_.back());
}
+size_t ClassTable::NumReferencedZygoteClasses() const {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ size_t sum = 0;
+ for (size_t i = 0; i < classes_.size() - 1; ++i) {
+ sum += classes_[i].Size();
+ }
+ return sum;
+}
+
+size_t ClassTable::NumReferencedNonZygoteClasses() const {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ return classes_.back().Size();
+}
+
mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
DescriptorHashPair pair(descriptor, hash);
ReaderMutexLock mu(Thread::Current(), lock_);
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 711eae45b8..430edbba4e 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -144,16 +144,26 @@ class ClassTable {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Returns the number of classes in previous snapshots.
+ // Returns the number of classes in previous snapshots defined by `defining_loader`.
size_t NumZygoteClasses(ObjPtr<mirror::ClassLoader> defining_loader) const
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Returns all off the classes in the lastest snapshot.
+ // Returns the number of classes in the latest snapshot defined by `defining_loader`.
size_t NumNonZygoteClasses(ObjPtr<mirror::ClassLoader> defining_loader) const
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns the number of classes in previous snapshots regardless of the defining loader.
+ size_t NumReferencedZygoteClasses() const
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Returns the number of classes in the latest snapshot regardless of the defining loader.
+ size_t NumReferencedNonZygoteClasses() const
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Update a class in the table with the new class. Returns the existing class which was replaced.
mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
REQUIRES(!lock_)
@@ -246,6 +256,7 @@ class ClassTable {
}
private:
+ // Only copies classes.
void CopyWithoutLocks(const ClassTable& source_table) NO_THREAD_SAFETY_ANALYSIS;
void InsertWithoutLocks(ObjPtr<mirror::Class> klass) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 78ba6e7510..15724a1027 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -68,6 +68,74 @@ namespace art {
using android::base::StringPrintf;
+static const uint8_t kBase64Map[256] = {
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255,
+ 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT
+ 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT
+ 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT
+ 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255
+};
+
+uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+ CHECK(dst_size != nullptr);
+ std::vector<uint8_t> tmp;
+ uint32_t t = 0, y = 0;
+ int g = 3;
+ for (size_t i = 0; src[i] != '\0'; ++i) {
+ uint8_t c = kBase64Map[src[i] & 0xFF];
+ if (c == 255) continue;
+ // the final = symbols are read and used to trim the remaining bytes
+ if (c == 254) {
+ c = 0;
+ // prevent g < 0 which would potentially allow an overflow later
+ if (--g < 0) {
+ *dst_size = 0;
+ return nullptr;
+ }
+ } else if (g != 3) {
+ // we only allow = to be at the end
+ *dst_size = 0;
+ return nullptr;
+ }
+ t = (t << 6) | c;
+ if (++y == 4) {
+ tmp.push_back((t >> 16) & 255);
+ if (g > 1) {
+ tmp.push_back((t >> 8) & 255);
+ }
+ if (g > 2) {
+ tmp.push_back(t & 255);
+ }
+ y = t = 0;
+ }
+ }
+ if (y != 0) {
+ *dst_size = 0;
+ return nullptr;
+ }
+ std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
+ *dst_size = tmp.size();
+ std::copy(tmp.begin(), tmp.end(), dst.get());
+ return dst.release();
+}
+
ScratchFile::ScratchFile() {
// ANDROID_DATA needs to be set
CHECK_NE(static_cast<char*>(nullptr), getenv("ANDROID_DATA")) <<
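A quick note on the DecodeBase64 helper added above: the caller owns the returned buffer, and malformed input yields nullptr with *dst_size set to 0. A small usage sketch (the declaration repeats the one added to common_runtime_test.h below; the input string is just an example):

    #include <cstddef>
    #include <cstdint>
    #include <memory>

    uint8_t* DecodeBase64(const char* src, size_t* dst_size);

    void DecodeExample() {
      size_t size = 0u;
      // "aGVsbG8=" decodes to the five bytes of "hello".
      std::unique_ptr<uint8_t[]> bytes(DecodeBase64("aGVsbG8=", &size));
      // On success: size == 5 and bytes owns the decoded data.
      // On malformed input: bytes == nullptr and size == 0.
    }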
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index d7abe2a8f4..bfa273d42b 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -44,6 +44,8 @@ class JavaVMExt;
class Runtime;
typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
+uint8_t* DecodeBase64(const char* src, size_t* dst_size);
+
class ScratchFile {
public:
ScratchFile();
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 4f4bed0169..6758d75e47 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -313,6 +313,14 @@ void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method) {
ArtMethod::PrettyMethod(method).c_str()).c_str());
}
+// InternalError
+
+void ThrowInternalError(const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+ ThrowException("Ljava/lang/InternalError;", nullptr, fmt, &args);
+ va_end(args);
+}
// IOException
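ThrowInternalError above follows the same printf-style, va_list-based shape as the other throw helpers in this file. A standalone sketch of that varargs formatting pattern (returning the message instead of throwing, purely for illustration):

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    std::string FormatMessage(const char* fmt, ...) {
      va_list args;
      va_start(args, fmt);
      char buf[1024];
      vsnprintf(buf, sizeof(buf), fmt, args);
      va_end(args);
      return std::string(buf);
    }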
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 55a89388ea..4afef7993d 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -151,6 +151,12 @@ void ThrowIncompatibleClassChangeError(ObjPtr<mirror::Class> referrer, const cha
void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+// InternalError
+
+void ThrowInternalError(const char* fmt, ...)
+ __attribute__((__format__(__printf__, 1, 2)))
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+
// IOException
void ThrowIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index b6a2e09719..85100ae49d 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -23,6 +23,7 @@
#include <string.h>
#include <sys/file.h>
#include <sys/stat.h>
+#include <zlib.h>
#include <memory>
#include <sstream>
@@ -67,6 +68,12 @@ const uint8_t DexFile::kDexMagicVersions[DexFile::kNumDexVersions][DexFile::kDex
{'0', '3', '8', '\0'}
};
+uint32_t DexFile::CalculateChecksum() const {
+ const uint32_t non_sum = OFFSETOF_MEMBER(DexFile::Header, signature_);
+ const uint8_t* non_sum_ptr = Begin() + non_sum;
+ return adler32(adler32(0L, Z_NULL, 0), non_sum_ptr, Size() - non_sum);
+}
+
struct DexFile::AnnotationValue {
JValue value_;
uint8_t type_;
@@ -179,6 +186,14 @@ std::unique_ptr<const DexFile> DexFile::Open(const std::string& location,
std::string* error_msg) {
ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
CHECK(map.get() != nullptr);
+
+ if (map->Size() < sizeof(DexFile::Header)) {
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
+ return nullptr;
+ }
+
std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
map->Size(),
location,
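On the CalculateChecksum() addition above: it recomputes the dex checksum with adler32 over everything from the signature_ offset onward, so the magic and the stored checksum_ do not feed into the sum. A standalone sketch of that recomputation, with the skip offset passed in rather than derived from the real header layout:

    #include <zlib.h>
    #include <cstddef>
    #include <cstdint>

    // adler32 over the file contents after `skip` bytes of header prefix.
    uint32_t RecomputeChecksum(const uint8_t* begin, size_t size, size_t skip) {
      return adler32(adler32(0L, Z_NULL, 0), begin + skip, size - skip);
    }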
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 58b8e792ee..1b18d21cb1 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1088,6 +1088,9 @@ class DexFile {
static int64_t ReadSignedLong(const uint8_t* ptr, int zwidth);
static uint64_t ReadUnsignedLong(const uint8_t* ptr, int zwidth, bool fill_on_right);
+ // Recalculates the checksum of the dex file. Does not use the current value in the header.
+ uint32_t CalculateChecksum() const;
+
// Returns a human-readable form of the method at an index.
std::string PrettyMethod(uint32_t method_idx, bool with_signature = true) const;
// Returns a human-readable form of the field at an index.
@@ -1320,6 +1323,9 @@ class ClassDataItemIterator {
uint32_t NumVirtualMethods() const {
return header_.virtual_methods_size_;
}
+ bool IsAtMethod() const {
+ return pos_ >= EndOfInstanceFieldsPos();
+ }
bool HasNextStaticField() const {
return pos_ < EndOfStaticFieldsPos();
}
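The IsAtMethod() accessor added above distinguishes the field portion of a class_data_item from the method portion while iterating. A hedged sketch of how a caller could use it, assuming the iterator's existing HasNext()/Next() protocol (not part of this change):

    // Counts class_data_item members by kind; `it` is positioned at the start.
    void CountMembers(ClassDataItemIterator& it, size_t* fields, size_t* methods) {
      for (; it.HasNext(); it.Next()) {
        if (it.IsAtMethod()) {
          ++*methods;  // Direct and virtual methods come after all fields.
        } else {
          ++*fields;   // Static fields first, then instance fields.
        }
      }
    }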
diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc
index d39ea35a90..6b9654dc49 100644
--- a/runtime/dex_file_annotations.cc
+++ b/runtime/dex_file_annotations.cc
@@ -41,7 +41,80 @@ struct DexFile::AnnotationValue {
};
namespace {
-mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass,
+
+// A helper class that contains all the data needed to do annotation lookup.
+class ClassData {
+ public:
+ explicit ClassData(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
+ : ClassData(ScopedNullHandle<mirror::Class>(), // klass
+ method,
+ *method->GetDexFile(),
+ &method->GetClassDef()) {}
+
+ // Requires Scope to be able to create at least 1 handle.
+ template <typename Scope>
+ ClassData(Scope& hs, ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_)
+ : ClassData(hs.NewHandle(field->GetDeclaringClass())) { }
+
+ explicit ClassData(Handle<mirror::Class> klass) REQUIRES_SHARED(art::Locks::mutator_lock_)
+ : ClassData(klass, // klass
+ nullptr, // method
+ klass->GetDexFile(),
+ klass->GetClassDef()) {}
+
+ const DexFile& GetDexFile() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return dex_file_;
+ }
+
+ const DexFile::ClassDef* GetClassDef() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ return class_def_;
+ }
+
+ ObjPtr<mirror::DexCache> GetDexCache() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (method_ != nullptr) {
+ return method_->GetDexCache();
+ } else {
+ return real_klass_->GetDexCache();
+ }
+ }
+
+ ObjPtr<mirror::ClassLoader> GetClassLoader() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (method_ != nullptr) {
+ return method_->GetDeclaringClass()->GetClassLoader();
+ } else {
+ return real_klass_->GetClassLoader();
+ }
+ }
+
+ ObjPtr<mirror::Class> GetRealClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (method_ != nullptr) {
+ return method_->GetDeclaringClass();
+ } else {
+ return real_klass_.Get();
+ }
+ }
+
+ private:
+ ClassData(Handle<mirror::Class> klass,
+ ArtMethod* method,
+ const DexFile& dex_file,
+ const DexFile::ClassDef* class_def) REQUIRES_SHARED(Locks::mutator_lock_)
+ : real_klass_(klass),
+ method_(method),
+ dex_file_(dex_file),
+ class_def_(class_def) {
+ DCHECK((method_ == nullptr) || real_klass_.IsNull());
+ }
+
+ Handle<mirror::Class> real_klass_;
+ ArtMethod* method_;
+ const DexFile& dex_file_;
+ const DexFile::ClassDef* class_def_;
+
+ DISALLOW_COPY_AND_ASSIGN(ClassData);
+};
+
+mirror::Object* CreateAnnotationMember(const ClassData& klass,
Handle<mirror::Class> annotation_class,
const uint8_t** annotation)
REQUIRES_SHARED(Locks::mutator_lock_);
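ClassData above is essentially one read-only view that answers dex-file, dex-cache and class-loader questions for either a method or a class, which is what lets the helpers below drop their Handle<mirror::Class> parameters. A standalone sketch of that facade pattern with hypothetical types (not ART's):

    struct Class { int dex_cache_id = 0; };
    struct Method { Class* declaring_class = nullptr; };

    class MemberView {
     public:
      explicit MemberView(Method* method) : method_(method), klass_(nullptr) {}
      explicit MemberView(Class* klass) : method_(nullptr), klass_(klass) {}

      int GetDexCacheId() const {
        // Prefer the method's declaring class when a method was supplied.
        return (method_ != nullptr) ? method_->declaring_class->dex_cache_id
                                    : klass_->dex_cache_id;
      }

     private:
      Method* method_;
      Class* klass_;
    };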
@@ -185,9 +258,8 @@ const uint8_t* SearchEncodedAnnotation(const DexFile& dex_file,
const DexFile::AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = method->GetDexFile();
- mirror::Class* klass = method->GetDeclaringClass();
const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file->GetAnnotationsDirectory(*klass->GetClassDef());
+ dex_file->GetAnnotationsDirectory(method->GetClassDef());
if (annotations_dir == nullptr) {
return nullptr;
}
@@ -209,9 +281,8 @@ const DexFile::AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method)
const DexFile::ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = method->GetDexFile();
- mirror::Class* klass = method->GetDeclaringClass();
const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file->GetAnnotationsDirectory(*klass->GetClassDef());
+ dex_file->GetAnnotationsDirectory(method->GetClassDef());
if (annotations_dir == nullptr) {
return nullptr;
}
@@ -230,30 +301,34 @@ const DexFile::ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod*
return nullptr;
}
-const DexFile::AnnotationSetItem* FindAnnotationSetForClass(Handle<mirror::Class> klass)
+const DexFile::AnnotationSetItem* FindAnnotationSetForClass(const ClassData& klass)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile& dex_file = klass->GetDexFile();
+ const DexFile& dex_file = klass.GetDexFile();
const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file.GetAnnotationsDirectory(*klass->GetClassDef());
+ dex_file.GetAnnotationsDirectory(*klass.GetClassDef());
if (annotations_dir == nullptr) {
return nullptr;
}
return dex_file.GetClassAnnotationSet(annotations_dir);
}
-mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass, const uint8_t** annotation)
+mirror::Object* ProcessEncodedAnnotation(const ClassData& klass, const uint8_t** annotation)
REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t type_index = DecodeUnsignedLeb128(annotation);
uint32_t size = DecodeUnsignedLeb128(annotation);
Thread* self = Thread::Current();
ScopedObjectAccessUnchecked soa(self);
- StackHandleScope<2> hs(self);
+ StackHandleScope<4> hs(self);
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
Handle<mirror::Class> annotation_class(hs.NewHandle(
- class_linker->ResolveType(klass->GetDexFile(), dex::TypeIndex(type_index), klass.Get())));
+ class_linker->ResolveType(klass.GetDexFile(),
+ dex::TypeIndex(type_index),
+ hs.NewHandle(klass.GetDexCache()),
+ hs.NewHandle(klass.GetClassLoader()))));
if (annotation_class == nullptr) {
- LOG(INFO) << "Unable to resolve " << klass->PrettyClass() << " annotation class " << type_index;
+ LOG(INFO) << "Unable to resolve " << klass.GetRealClass()->PrettyClass()
+ << " annotation class " << type_index;
DCHECK(Thread::Current()->IsExceptionPending());
Thread::Current()->ClearException();
return nullptr;
@@ -300,13 +375,13 @@ mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass, const uint
}
template <bool kTransactionActive>
-bool ProcessAnnotationValue(Handle<mirror::Class> klass,
+bool ProcessAnnotationValue(const ClassData& klass,
const uint8_t** annotation_ptr,
DexFile::AnnotationValue* annotation_value,
Handle<mirror::Class> array_class,
DexFile::AnnotationResultStyle result_style)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile& dex_file = klass->GetDexFile();
+ const DexFile& dex_file = klass.GetDexFile();
Thread* self = Thread::Current();
ObjPtr<mirror::Object> element_object = nullptr;
bool set_object = false;
@@ -361,9 +436,8 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass,
annotation_value->value_.SetI(index);
} else {
StackHandleScope<1> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
element_object = Runtime::Current()->GetClassLinker()->ResolveString(
- klass->GetDexFile(), dex::StringIndex(index), dex_cache);
+ klass.GetDexFile(), dex::StringIndex(index), hs.NewHandle(klass.GetDexCache()));
set_object = true;
if (element_object == nullptr) {
return false;
@@ -377,8 +451,12 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass,
annotation_value->value_.SetI(index);
} else {
dex::TypeIndex type_index(index);
+ StackHandleScope<2> hs(self);
element_object = Runtime::Current()->GetClassLinker()->ResolveType(
- klass->GetDexFile(), type_index, klass.Get());
+ klass.GetDexFile(),
+ type_index,
+ hs.NewHandle(klass.GetDexCache()),
+ hs.NewHandle(klass.GetClassLoader()));
set_object = true;
if (element_object == nullptr) {
CHECK(self->IsExceptionPending());
@@ -399,12 +477,13 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass,
if (result_style == DexFile::kAllRaw) {
annotation_value->value_.SetI(index);
} else {
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ StackHandleScope<2> hs(self);
ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType(
- klass->GetDexFile(), index, dex_cache, class_loader);
+ klass.GetDexFile(),
+ index,
+ hs.NewHandle(klass.GetDexCache()),
+ hs.NewHandle(klass.GetClassLoader()));
if (method == nullptr) {
return false;
}
@@ -439,10 +518,11 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass,
annotation_value->value_.SetI(index);
} else {
StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
ArtField* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(
- klass->GetDexFile(), index, dex_cache, class_loader);
+ klass.GetDexFile(),
+ index,
+ hs.NewHandle(klass.GetDexCache()),
+ hs.NewHandle(klass.GetClassLoader()));
if (field == nullptr) {
return false;
}
@@ -467,10 +547,12 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass,
annotation_value->value_.SetI(index);
} else {
StackHandleScope<3> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
ArtField* enum_field = Runtime::Current()->GetClassLinker()->ResolveField(
- klass->GetDexFile(), index, dex_cache, class_loader, true);
+ klass.GetDexFile(),
+ index,
+ hs.NewHandle(klass.GetDexCache()),
+ hs.NewHandle(klass.GetClassLoader()),
+ true);
if (enum_field == nullptr) {
return false;
} else {
@@ -595,10 +677,10 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass,
return true;
}
-mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass,
+mirror::Object* CreateAnnotationMember(const ClassData& klass,
Handle<mirror::Class> annotation_class,
const uint8_t** annotation) {
- const DexFile& dex_file = klass->GetDexFile();
+ const DexFile& dex_file = klass.GetDexFile();
Thread* self = Thread::Current();
ScopedObjectAccessUnchecked soa(self);
StackHandleScope<5> hs(self);
@@ -666,12 +748,12 @@ mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass,
}
const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet(
- Handle<mirror::Class> klass,
+ const ClassData& klass,
const DexFile::AnnotationSetItem* annotation_set,
uint32_t visibility,
Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile& dex_file = klass->GetDexFile();
+ const DexFile& dex_file = klass.GetDexFile();
for (uint32_t i = 0; i < annotation_set->size_; ++i) {
const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i);
if (!IsVisibilityCompatible(annotation_item->visibility_, visibility)) {
@@ -679,12 +761,16 @@ const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet(
}
const uint8_t* annotation = annotation_item->annotation_;
uint32_t type_index = DecodeUnsignedLeb128(&annotation);
+ StackHandleScope<2> hs(Thread::Current());
mirror::Class* resolved_class = Runtime::Current()->GetClassLinker()->ResolveType(
- klass->GetDexFile(), dex::TypeIndex(type_index), klass.Get());
+ klass.GetDexFile(),
+ dex::TypeIndex(type_index),
+ hs.NewHandle(klass.GetDexCache()),
+ hs.NewHandle(klass.GetClassLoader()));
if (resolved_class == nullptr) {
std::string temp;
LOG(WARNING) << StringPrintf("Unable to resolve %s annotation class %d",
- klass->GetDescriptor(&temp), type_index);
+ klass.GetRealClass()->GetDescriptor(&temp), type_index);
CHECK(Thread::Current()->IsExceptionPending());
Thread::Current()->ClearException();
continue;
@@ -698,7 +784,7 @@ const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet(
}
mirror::Object* GetAnnotationObjectFromAnnotationSet(
- Handle<mirror::Class> klass,
+ const ClassData& klass,
const DexFile::AnnotationSetItem* annotation_set,
uint32_t visibility,
Handle<mirror::Class> annotation_class)
@@ -712,13 +798,13 @@ mirror::Object* GetAnnotationObjectFromAnnotationSet(
return ProcessEncodedAnnotation(klass, &annotation);
}
-mirror::Object* GetAnnotationValue(Handle<mirror::Class> klass,
+mirror::Object* GetAnnotationValue(const ClassData& klass,
const DexFile::AnnotationItem* annotation_item,
const char* annotation_name,
Handle<mirror::Class> array_class,
uint32_t expected_type)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile& dex_file = klass->GetDexFile();
+ const DexFile& dex_file = klass.GetDexFile();
const uint8_t* annotation =
SearchEncodedAnnotation(dex_file, annotation_item->annotation_, annotation_name);
if (annotation == nullptr) {
@@ -745,10 +831,10 @@ mirror::Object* GetAnnotationValue(Handle<mirror::Class> klass,
return annotation_value.value_.GetL();
}
-mirror::ObjectArray<mirror::String>* GetSignatureValue(Handle<mirror::Class> klass,
+mirror::ObjectArray<mirror::String>* GetSignatureValue(const ClassData& klass,
const DexFile::AnnotationSetItem* annotation_set)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile& dex_file = klass->GetDexFile();
+ const DexFile& dex_file = klass.GetDexFile();
StackHandleScope<1> hs(Thread::Current());
const DexFile::AnnotationItem* annotation_item =
SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Signature;",
@@ -771,10 +857,10 @@ mirror::ObjectArray<mirror::String>* GetSignatureValue(Handle<mirror::Class> kla
return obj->AsObjectArray<mirror::String>();
}
-mirror::ObjectArray<mirror::Class>* GetThrowsValue(Handle<mirror::Class> klass,
+mirror::ObjectArray<mirror::Class>* GetThrowsValue(const ClassData& klass,
const DexFile::AnnotationSetItem* annotation_set)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile& dex_file = klass->GetDexFile();
+ const DexFile& dex_file = klass.GetDexFile();
StackHandleScope<1> hs(Thread::Current());
const DexFile::AnnotationItem* annotation_item =
SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Throws;",
@@ -798,11 +884,11 @@ mirror::ObjectArray<mirror::Class>* GetThrowsValue(Handle<mirror::Class> klass,
}
mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(
- Handle<mirror::Class> klass,
+ const ClassData& klass,
const DexFile::AnnotationSetItem* annotation_set,
uint32_t visibility)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile& dex_file = klass->GetDexFile();
+ const DexFile& dex_file = klass.GetDexFile();
Thread* self = Thread::Current();
ScopedObjectAccessUnchecked soa(self);
StackHandleScope<2> hs(self);
@@ -856,11 +942,11 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(
}
mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(
- Handle<mirror::Class> klass,
+ const ClassData& klass,
const DexFile::AnnotationSetRefList* set_ref_list,
uint32_t size)
REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile& dex_file = klass->GetDexFile();
+ const DexFile& dex_file = klass.GetDexFile();
Thread* self = Thread::Current();
ScopedObjectAccessUnchecked soa(self);
StackHandleScope<1> hs(self);
@@ -899,15 +985,17 @@ mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> ann
return nullptr;
}
StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass()));
- return GetAnnotationObjectFromAnnotationSet(field_class, annotation_set,
- DexFile::kDexVisibilityRuntime, annotation_class);
+ const ClassData field_class(hs, field);
+ return GetAnnotationObjectFromAnnotationSet(field_class,
+ annotation_set,
+ DexFile::kDexVisibilityRuntime,
+ annotation_class);
}
mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field);
StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass()));
+ const ClassData field_class(hs, field);
return ProcessAnnotationSet(field_class, annotation_set, DexFile::kDexVisibilityRuntime);
}
@@ -917,7 +1005,7 @@ mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* fi
return nullptr;
}
StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass()));
+ const ClassData field_class(hs, field);
return GetSignatureValue(field_class, annotation_set);
}
@@ -927,17 +1015,17 @@ bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_
return false;
}
StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass()));
+ const ClassData field_class(hs, field);
const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
field_class, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class);
return annotation_item != nullptr;
}
mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) {
- const DexFile* dex_file = method->GetDexFile();
- mirror::Class* klass = method->GetDeclaringClass();
+ const ClassData klass(method);
+ const DexFile* dex_file = &klass.GetDexFile();
const DexFile::AnnotationsDirectoryItem* annotations_dir =
- dex_file->GetAnnotationsDirectory(*klass->GetClassDef());
+ dex_file->GetAnnotationsDirectory(*klass.GetClassDef());
if (annotations_dir == nullptr) {
return nullptr;
}
@@ -965,10 +1053,9 @@ mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) {
return nullptr;
}
DexFile::AnnotationValue annotation_value;
- StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::Class> h_klass(hs.NewHandle(klass));
+ StackHandleScope<1> hs(Thread::Current());
Handle<mirror::Class> return_type(hs.NewHandle(method->GetReturnType(true /* resolve */)));
- if (!ProcessAnnotationValue<false>(h_klass,
+ if (!ProcessAnnotationValue<false>(klass,
&annotation,
&annotation_value,
return_type,
@@ -983,17 +1070,15 @@ mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class>
if (annotation_set == nullptr) {
return nullptr;
}
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
- return GetAnnotationObjectFromAnnotationSet(method_class, annotation_set,
+ return GetAnnotationObjectFromAnnotationSet(ClassData(method), annotation_set,
DexFile::kDexVisibilityRuntime, annotation_class);
}
mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) {
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method);
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
- return ProcessAnnotationSet(method_class, annotation_set, DexFile::kDexVisibilityRuntime);
+ return ProcessAnnotationSet(ClassData(method),
+ annotation_set,
+ DexFile::kDexVisibilityRuntime);
}
mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) {
@@ -1001,9 +1086,7 @@ mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method
if (annotation_set == nullptr) {
return nullptr;
}
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
- return GetThrowsValue(method_class, annotation_set);
+ return GetThrowsValue(ClassData(method), annotation_set);
}
mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) {
@@ -1019,9 +1102,7 @@ mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method)
return nullptr;
}
uint32_t size = set_ref_list->size_;
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
- return ProcessAnnotationSetRefList(method_class, set_ref_list, size);
+ return ProcessAnnotationSetRefList(ClassData(method), set_ref_list, size);
}
mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
@@ -1045,9 +1126,7 @@ mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method,
const DexFile::AnnotationSetItem* annotation_set =
dex_file->GetSetRefItemItem(annotation_set_ref);
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
- return GetAnnotationObjectFromAnnotationSet(method_class,
+ return GetAnnotationObjectFromAnnotationSet(ClassData(method),
annotation_set,
DexFile::kDexVisibilityRuntime,
annotation_class);
@@ -1072,7 +1151,7 @@ bool GetParametersMetadataForMethod(ArtMethod* method,
return false;
}
- StackHandleScope<5> hs(Thread::Current());
+ StackHandleScope<4> hs(Thread::Current());
// Extract the parameters' names String[].
ObjPtr<mirror::Class> string_class = mirror::String::GetJavaLangString();
@@ -1082,9 +1161,9 @@ bool GetParametersMetadataForMethod(ArtMethod* method,
return false;
}
- Handle<mirror::Class> klass = hs.NewHandle(method->GetDeclaringClass());
+ ClassData data(method);
Handle<mirror::Object> names_obj =
- hs.NewHandle(GetAnnotationValue(klass,
+ hs.NewHandle(GetAnnotationValue(data,
annotation_item,
"names",
string_array_class,
@@ -1099,7 +1178,7 @@ bool GetParametersMetadataForMethod(ArtMethod* method,
return false;
}
Handle<mirror::Object> access_flags_obj =
- hs.NewHandle(GetAnnotationValue(klass,
+ hs.NewHandle(GetAnnotationValue(data,
annotation_item,
"accessFlags",
int_array_class,
@@ -1118,9 +1197,7 @@ mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod*
if (annotation_set == nullptr) {
return nullptr;
}
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
- return GetSignatureValue(method_class, annotation_set);
+ return GetSignatureValue(ClassData(method), annotation_set);
}
bool IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotation_class,
@@ -1129,37 +1206,39 @@ bool IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotati
if (annotation_set == nullptr) {
return false;
}
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass()));
const DexFile::AnnotationItem* annotation_item =
- GetAnnotationItemFromAnnotationSet(method_class, annotation_set, visibility,
- annotation_class);
+ GetAnnotationItemFromAnnotationSet(ClassData(method),
+ annotation_set, visibility, annotation_class);
return annotation_item != nullptr;
}
mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
- return GetAnnotationObjectFromAnnotationSet(klass, annotation_set, DexFile::kDexVisibilityRuntime,
+ return GetAnnotationObjectFromAnnotationSet(data,
+ annotation_set,
+ DexFile::kDexVisibilityRuntime,
annotation_class);
}
mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
- return ProcessAnnotationSet(klass, annotation_set, DexFile::kDexVisibilityRuntime);
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ return ProcessAnnotationSet(data, annotation_set, DexFile::kDexVisibilityRuntime);
}
mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) {
- const DexFile& dex_file = klass->GetDexFile();
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
const DexFile::AnnotationItem* annotation_item =
- SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/MemberClasses;",
+ SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/MemberClasses;",
DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
return nullptr;
@@ -1172,7 +1251,7 @@ mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> kla
return nullptr;
}
mirror::Object* obj =
- GetAnnotationValue(klass, annotation_item, "value", class_array_class,
+ GetAnnotationValue(data, annotation_item, "value", class_array_class,
DexFile::kDexAnnotationArray);
if (obj == nullptr) {
return nullptr;
@@ -1181,18 +1260,18 @@ mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> kla
}
mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) {
- const DexFile& dex_file = klass->GetDexFile();
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
const DexFile::AnnotationItem* annotation_item =
- SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/EnclosingClass;",
+ SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/EnclosingClass;",
DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
return nullptr;
}
- mirror::Object* obj = GetAnnotationValue(klass, annotation_item, "value",
+ mirror::Object* obj = GetAnnotationValue(data, annotation_item, "value",
ScopedNullHandle<mirror::Class>(),
DexFile::kDexAnnotationType);
if (obj == nullptr) {
@@ -1202,28 +1281,30 @@ mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) {
}
mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) {
- const DexFile& dex_file = klass->GetDexFile();
mirror::Class* declaring_class = GetDeclaringClass(klass);
if (declaring_class != nullptr) {
return declaring_class;
}
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
const DexFile::AnnotationItem* annotation_item =
- SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/EnclosingMethod;",
+ SearchAnnotationSet(data.GetDexFile(),
+ annotation_set,
+ "Ldalvik/annotation/EnclosingMethod;",
DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
return nullptr;
}
const uint8_t* annotation =
- SearchEncodedAnnotation(dex_file, annotation_item->annotation_, "value");
+ SearchEncodedAnnotation(data.GetDexFile(), annotation_item->annotation_, "value");
if (annotation == nullptr) {
return nullptr;
}
DexFile::AnnotationValue annotation_value;
- if (!ProcessAnnotationValue<false>(klass,
+ if (!ProcessAnnotationValue<false>(data,
&annotation,
&annotation_value,
ScopedNullHandle<mirror::Class>(),
@@ -1234,10 +1315,11 @@ mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) {
return nullptr;
}
StackHandleScope<2> hs(Thread::Current());
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
ArtMethod* method = Runtime::Current()->GetClassLinker()->ResolveMethodWithoutInvokeType(
- klass->GetDexFile(), annotation_value.value_.GetI(), dex_cache, class_loader);
+ data.GetDexFile(),
+ annotation_value.value_.GetI(),
+ hs.NewHandle(data.GetDexCache()),
+ hs.NewHandle(data.GetClassLoader()));
if (method == nullptr) {
return nullptr;
}
@@ -1245,39 +1327,44 @@ mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) {
}
mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) {
- const DexFile& dex_file = klass->GetDexFile();
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
const DexFile::AnnotationItem* annotation_item =
- SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/EnclosingMethod;",
+ SearchAnnotationSet(data.GetDexFile(),
+ annotation_set,
+ "Ldalvik/annotation/EnclosingMethod;",
DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
return nullptr;
}
- return GetAnnotationValue(klass, annotation_item, "value", ScopedNullHandle<mirror::Class>(),
+ return GetAnnotationValue(data, annotation_item, "value", ScopedNullHandle<mirror::Class>(),
DexFile::kDexAnnotationMethod);
}
bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) {
- const DexFile& dex_file = klass->GetDexFile();
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return false;
}
const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(
- dex_file, annotation_set, "Ldalvik/annotation/InnerClass;", DexFile::kDexVisibilitySystem);
+ data.GetDexFile(),
+ annotation_set,
+ "Ldalvik/annotation/InnerClass;",
+ DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
return false;
}
const uint8_t* annotation =
- SearchEncodedAnnotation(dex_file, annotation_item->annotation_, "name");
+ SearchEncodedAnnotation(data.GetDexFile(), annotation_item->annotation_, "name");
if (annotation == nullptr) {
return false;
}
DexFile::AnnotationValue annotation_value;
- if (!ProcessAnnotationValue<false>(klass,
+ if (!ProcessAnnotationValue<false>(data,
&annotation,
&annotation_value,
ScopedNullHandle<mirror::Class>(),
@@ -1293,24 +1380,24 @@ bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) {
}
bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) {
- const DexFile& dex_file = klass->GetDexFile();
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return false;
}
const DexFile::AnnotationItem* annotation_item =
- SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/InnerClass;",
+ SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/InnerClass;",
DexFile::kDexVisibilitySystem);
if (annotation_item == nullptr) {
return false;
}
const uint8_t* annotation =
- SearchEncodedAnnotation(dex_file, annotation_item->annotation_, "accessFlags");
+ SearchEncodedAnnotation(data.GetDexFile(), annotation_item->annotation_, "accessFlags");
if (annotation == nullptr) {
return false;
}
DexFile::AnnotationValue annotation_value;
- if (!ProcessAnnotationValue<false>(klass,
+ if (!ProcessAnnotationValue<false>(data,
&annotation,
&annotation_value,
ScopedNullHandle<mirror::Class>(),
@@ -1325,20 +1412,22 @@ bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) {
}
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return nullptr;
}
- return GetSignatureValue(klass, annotation_set);
+ return GetSignatureValue(data, annotation_set);
}
bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class) {
- const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass);
+ ClassData data(klass);
+ const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
if (annotation_set == nullptr) {
return false;
}
const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet(
- klass, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class);
+ data, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class);
return annotation_item != nullptr;
}
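The hunks above all replace an ad-hoc StackHandleScope plus Handle<mirror::Class> with a ClassData value, presumably introduced earlier in this patch. A rough sketch of the helper's shape, reconstructed only from the call sites shown here; the real definition in dex_file_annotations.cc may differ, for example in how it keeps the class alive across GC and how it handles obsolete methods:

    // Sketch only: shape inferred from call sites such as ClassData(method),
    // ClassData(hs, field) and ClassData(klass) above. Not the actual ART code.
    class ClassData {
     public:
      explicit ClassData(ArtMethod* method)  // declaring class of a method
          : klass_(method->GetDeclaringClass()),
            dex_file_(*method->GetDexFile()) {}
      ClassData(StackHandleScope<1>& hs, ArtField* field)  // declaring class of a field
          : klass_(hs.NewHandle(field->GetDeclaringClass()).Get()),
            dex_file_(klass_->GetDexFile()) {}
      explicit ClassData(Handle<mirror::Class> klass)  // an explicit class
          : klass_(klass.Get()),
            dex_file_(klass_->GetDexFile()) {}

      const DexFile& GetDexFile() const { return dex_file_; }
      const DexFile::ClassDef* GetClassDef() const { return klass_->GetClassDef(); }
      ObjPtr<mirror::DexCache> GetDexCache() const { return klass_->GetDexCache(); }
      ObjPtr<mirror::ClassLoader> GetClassLoader() const { return klass_->GetClassLoader(); }
      ObjPtr<mirror::Class> GetRealClass() const { return klass_; }

     private:
      ObjPtr<mirror::Class> klass_;  // the real helper presumably uses a Handle where needed
      const DexFile& dex_file_;
    };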
diff --git a/runtime/dex_file_verifier.cc b/runtime/dex_file_verifier.cc
index 0b3f16a3cb..11b3cd025a 100644
--- a/runtime/dex_file_verifier.cc
+++ b/runtime/dex_file_verifier.cc
@@ -17,7 +17,6 @@
#include "dex_file_verifier.h"
#include <inttypes.h>
-#include <zlib.h>
#include <limits>
#include <memory>
@@ -368,11 +367,8 @@ bool DexFileVerifier::CheckHeader() {
return false;
}
+ uint32_t adler_checksum = dex_file_->CalculateChecksum();
// Compute and verify the checksum in the header.
- uint32_t adler_checksum = adler32(0L, Z_NULL, 0);
- const uint32_t non_sum = sizeof(header_->magic_) + sizeof(header_->checksum_);
- const uint8_t* non_sum_ptr = reinterpret_cast<const uint8_t*>(header_) + non_sum;
- adler_checksum = adler32(adler_checksum, non_sum_ptr, expected_size - non_sum);
if (adler_checksum != header_->checksum_) {
if (verify_checksum_) {
ErrorStringPrintf("Bad checksum (%08x, expected %08x)", adler_checksum, header_->checksum_);
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index 51678699b4..db65e40da5 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -111,7 +111,7 @@ void DexoptTest::GenerateOatForTest(const std::string& dex_location,
&error_msg));
ASSERT_TRUE(image_header != nullptr) << error_msg;
const OatHeader& oat_header = odex_file->GetOatHeader();
- uint32_t combined_checksum = OatFileAssistant::CalculateCombinedImageChecksum();
+ uint32_t combined_checksum = image_header->GetOatChecksum();
if (CompilerFilter::DependsOnImageChecksum(filter)) {
if (with_alternate_image) {
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 3bc49b8506..ba8cec3a52 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -43,6 +43,7 @@
namespace art {
inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
+ const MethodInfo& method_info,
const InlineInfo& inline_info,
const InlineInfoEncoding& encoding,
uint8_t inlining_depth)
@@ -56,7 +57,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
return inline_info.GetArtMethodAtDepth(encoding, inlining_depth);
}
- uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, inlining_depth);
+ uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, inlining_depth);
if (inline_info.GetDexPcAtDepth(encoding, inlining_depth) == static_cast<uint32_t>(-1)) {
// "charAt" special case. It is the only non-leaf method we inline across dex files.
ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt);
@@ -68,6 +69,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
ArtMethod* caller = outer_method;
if (inlining_depth != 0) {
caller = GetResolvedMethod(outer_method,
+ method_info,
inline_info,
encoding,
inlining_depth - 1);
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 6301362e09..b5130d7999 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -138,7 +138,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
reinterpret_cast<uintptr_t>(&virtual_methods.At(0))) / method_size;
CHECK_LT(throws_index, static_cast<int>(num_virtuals));
mirror::ObjectArray<mirror::Class>* declared_exceptions =
- proxy_class->GetThrows()->Get(throws_index);
+ proxy_class->GetProxyThrows()->Get(throws_index);
mirror::Class* exception_class = exception->GetClass();
for (int32_t i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) {
mirror::Class* declared_exception = declared_exceptions->Get(i);
@@ -201,12 +201,14 @@ static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method,
DCHECK(current_code->IsOptimized());
uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
CodeInfo code_info = current_code->GetOptimizedCodeInfo();
+ MethodInfo method_info = current_code->GetOptimizedMethodInfo();
CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
caller = GetResolvedMethod(outer_method,
+ method_info,
inline_info,
encoding.inline_info.encoding,
inline_info.GetDepth(encoding.inline_info.encoding) - 1);
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index c8ee99a5d9..565b4edcc3 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -29,6 +29,15 @@ extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
self->QuickDeliverException();
}
+extern "C" NO_RETURN uint64_t artInvokeObsoleteMethod(ArtMethod* method, Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(method->IsObsolete());
+ ScopedQuickEntrypointChecks sqec(self);
+ ThrowInternalError("Attempting to invoke obsolete version of '%s'.",
+ method->PrettyMethod().c_str());
+ self->QuickDeliverException();
+}
+
// Called by generated code to throw an exception.
extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -62,9 +71,7 @@ extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- self->NoteSignalBeingHandled();
ThrowNullPointerExceptionFromDexPC(/* check_address */ true, addr);
- self->NoteSignalHandlerDone();
self->QuickDeliverException();
}
@@ -95,9 +102,7 @@ extern "C" NO_RETURN void artThrowStringBoundsFromCode(int index, int length, Th
extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- self->NoteSignalBeingHandled();
ThrowStackOverflowError(self);
- self->NoteSignalHandlerDone();
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 4c3990aad6..25073a8b79 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -372,10 +372,11 @@ class QuickArgumentVisitor {
uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
CodeInfo code_info = current_code->GetOptimizedCodeInfo();
CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ MethodInfo method_info = current_code->GetOptimizedMethodInfo();
InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding));
if (invoke.IsValid()) {
*invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding));
- *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding);
+ *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info);
return true;
}
return false;
@@ -2323,48 +2324,26 @@ extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
}
-// Determine target of interface dispatch. This object is known non-null. First argument
-// is there for consistency but should not be used, as some architectures overwrite it
-// in the assembly trampoline.
-extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUTE_UNUSED,
+// Determine target of interface dispatch. The interface method and this object are known non-null.
+// The interface method is the method returned by the dex cache in the conflict trampoline.
+extern "C" TwoWordReturn artInvokeInterfaceTrampoline(ArtMethod* interface_method,
mirror::Object* raw_this_object,
Thread* self,
ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ CHECK(interface_method != nullptr);
ObjPtr<mirror::Object> this_object(raw_this_object);
ScopedQuickEntrypointChecks sqec(self);
StackHandleScope<1> hs(self);
Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));
ArtMethod* caller_method = QuickArgumentVisitor::GetCallingMethod(sp);
-
- // Fetch the dex_method_idx of the target interface method from the caller.
- uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
-
- const DexFile::CodeItem* code_item = caller_method->GetCodeItem();
- CHECK_LT(dex_pc, code_item->insns_size_in_code_units_);
- const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]);
- Instruction::Code instr_code = instr->Opcode();
- CHECK(instr_code == Instruction::INVOKE_INTERFACE ||
- instr_code == Instruction::INVOKE_INTERFACE_RANGE)
- << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
- uint32_t dex_method_idx;
- if (instr_code == Instruction::INVOKE_INTERFACE) {
- dex_method_idx = instr->VRegB_35c();
- } else {
- CHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
- dex_method_idx = instr->VRegB_3rc();
- }
-
- ArtMethod* interface_method = caller_method->GetDexCacheResolvedMethod(
- dex_method_idx, kRuntimePointerSize);
- DCHECK(interface_method != nullptr) << dex_method_idx << " " << caller_method->PrettyMethod();
ArtMethod* method = nullptr;
ImTable* imt = cls->GetImt(kRuntimePointerSize);
if (LIKELY(interface_method->GetDexMethodIndex() != DexFile::kDexNoIndex)) {
- // If the dex cache already resolved the interface method, look whether we have
- // a match in the ImtConflictTable.
+ // If the interface method is already resolved, look whether we have a match in the
+ // ImtConflictTable.
ArtMethod* conflict_method = imt->Get(ImTable::GetImtIndex(interface_method),
kRuntimePointerSize);
if (LIKELY(conflict_method->IsRuntimeMethod())) {
@@ -2389,9 +2368,26 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT
return GetTwoWordFailureValue(); // Failure.
}
} else {
- // The dex cache did not resolve the method, look it up in the dex file
- // of the caller,
+ // The interface method is unresolved, so look it up in the dex file of the caller.
DCHECK_EQ(interface_method, Runtime::Current()->GetResolutionMethod());
+
+ // Fetch the dex_method_idx of the target interface method from the caller.
+ uint32_t dex_method_idx;
+ uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
+ const DexFile::CodeItem* code_item = caller_method->GetCodeItem();
+ DCHECK_LT(dex_pc, code_item->insns_size_in_code_units_);
+ const Instruction* instr = Instruction::At(&code_item->insns_[dex_pc]);
+ Instruction::Code instr_code = instr->Opcode();
+ DCHECK(instr_code == Instruction::INVOKE_INTERFACE ||
+ instr_code == Instruction::INVOKE_INTERFACE_RANGE)
+ << "Unexpected call into interface trampoline: " << instr->DumpString(nullptr);
+ if (instr_code == Instruction::INVOKE_INTERFACE) {
+ dex_method_idx = instr->VRegB_35c();
+ } else {
+ DCHECK_EQ(instr_code, Instruction::INVOKE_INTERFACE_RANGE);
+ dex_method_idx = instr->VRegB_3rc();
+ }
+
const DexFile* dex_file = caller_method->GetDeclaringClass()->GetDexCache()
->GetDexFile();
uint32_t shorty_len;
diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h
index 2842c5a5a6..4ca52de2a2 100644
--- a/runtime/entrypoints/runtime_asm_entrypoints.h
+++ b/runtime/entrypoints/runtime_asm_entrypoints.h
@@ -40,6 +40,12 @@ static inline const void* GetQuickToInterpreterBridge() {
return reinterpret_cast<const void*>(art_quick_to_interpreter_bridge);
}
+// Return the address of stub code for attempting to invoke an obsolete method.
+extern "C" void art_invoke_obsolete_method_stub(ArtMethod*);
+static inline const void* GetInvokeObsoleteMethodStub() {
+ return reinterpret_cast<const void*>(art_invoke_obsolete_method_stub);
+}
+
// Return the address of quick stub code for handling JNI calls.
extern "C" void art_quick_generic_jni_trampoline(ArtMethod*);
static inline const void* GetQuickGenericJniStub() {
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index d0687ce7b0..55a4625c39 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -133,9 +133,8 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_top, thread_local_alloc_stack_end,
sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_alloc_stack_end, held_mutexes, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, held_mutexes, nested_signal_state,
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, held_mutexes, flip_function,
sizeof(void*) * kLockLevelCount);
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, nested_signal_state, flip_function, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, flip_function, method_verifier, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, method_verifier, thread_local_mark_stack, sizeof(void*));
EXPECT_OFFSET_DIFF(Thread, tlsPtr_.thread_local_mark_stack, Thread, wait_mutex_, sizeof(void*),
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index f9345b64a8..4220250c38 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -28,47 +28,6 @@
#include "thread-inl.h"
#include "verify_object-inl.h"
-// Note on nested signal support
-// -----------------------------
-//
-// Typically a signal handler should not need to deal with signals that occur within it.
-// However, when a SIGSEGV occurs that is in generated code and is not one of the
-// handled signals (implicit checks), we call a function to try to dump the stack
-// to the log. This enhances the debugging experience but may have the side effect
-// that it may not work. If the cause of the original SIGSEGV is a corrupted stack or other
-// memory region, the stack backtrace code may run into trouble and may either crash
-// or fail with an abort (SIGABRT). In either case we don't want that (new) signal to
-// mask the original signal and thus prevent useful debug output from being presented.
-//
-// In order to handle this situation, before we call the stack tracer we do the following:
-//
-// 1. shutdown the fault manager so that we are talking to the real signal management
-// functions rather than those in sigchain.
-// 2. use pthread_sigmask to allow SIGSEGV and SIGABRT signals to be delivered to the
-// thread running the signal handler.
-// 3. set the handler for SIGSEGV and SIGABRT to a secondary signal handler.
-// 4. save the thread's state to the TLS of the current thread using 'setjmp'
-//
-// We then call the stack tracer and one of two things may happen:
-// a. it completes successfully
-// b. it crashes and a signal is raised.
-//
-// In the former case, we fall through and everything is fine. In the latter case
-// our secondary signal handler gets called in a signal context. This results in
-// a call to FaultManager::HandledNestedSignal(), an archirecture specific function
-// whose purpose is to call 'longjmp' on the jmp_buf saved in the TLS of the current
-// thread. This results in a return with a non-zero value from 'setjmp'. We detect this
-// and write something to the log to tell the user that it happened.
-//
-// Regardless of how we got there, we reach the code after the stack tracer and we
-// restore the signal states to their original values, reinstate the fault manager (thus
-// reestablishing the signal chain) and continue.
-
-// This is difficult to test with a runtime test. To invoke the nested signal code
-// on any signal, uncomment the following line and run something that throws a
-// NullPointerException.
-// #define TEST_NESTED_SIGNAL
-
namespace art {
// Static fault manger object accessed by signal handler.
FaultManager fault_manager;
@@ -79,13 +38,8 @@ extern "C" __attribute__((visibility("default"))) void art_sigsegv_fault() {
}
// Signal handler called on SIGSEGV.
-static void art_fault_handler(int sig, siginfo_t* info, void* context) {
- fault_manager.HandleFault(sig, info, context);
-}
-
-// Signal handler for dealing with a nested signal.
-static void art_nested_signal_handler(int sig, siginfo_t* info, void* context) {
- fault_manager.HandleNestedSignal(sig, info, context);
+static bool art_fault_handler(int sig, siginfo_t* info, void* context) {
+ return fault_manager.HandleFault(sig, info, context);
}
FaultManager::FaultManager() : initialized_(false) {
@@ -95,43 +49,15 @@ FaultManager::FaultManager() : initialized_(false) {
FaultManager::~FaultManager() {
}
-static void SetUpArtAction(struct sigaction* action) {
- action->sa_sigaction = art_fault_handler;
- sigemptyset(&action->sa_mask);
- action->sa_flags = SA_SIGINFO | SA_ONSTACK;
-#if !defined(__APPLE__) && !defined(__mips__)
- action->sa_restorer = nullptr;
-#endif
-}
-
-void FaultManager::EnsureArtActionInFrontOfSignalChain() {
- if (initialized_) {
- struct sigaction action;
- SetUpArtAction(&action);
- EnsureFrontOfChain(SIGSEGV, &action);
- } else {
- LOG(WARNING) << "Can't call " << __FUNCTION__ << " due to unitialized fault manager";
- }
-}
-
void FaultManager::Init() {
CHECK(!initialized_);
- struct sigaction action;
- SetUpArtAction(&action);
-
- // Set our signal handler now.
- int e = sigaction(SIGSEGV, &action, &oldaction_);
- if (e != 0) {
- VLOG(signals) << "Failed to claim SEGV: " << strerror(errno);
- }
- // Make sure our signal handler is called before any user handlers.
- ClaimSignalChain(SIGSEGV, &oldaction_);
+ AddSpecialSignalHandlerFn(SIGSEGV, art_fault_handler);
initialized_ = true;
}
void FaultManager::Release() {
if (initialized_) {
- UnclaimSignalChain(SIGSEGV);
+ RemoveSpecialSignalHandlerFn(SIGSEGV, art_fault_handler);
initialized_ = false;
}
}
@@ -156,130 +82,44 @@ bool FaultManager::HandleFaultByOtherHandlers(int sig, siginfo_t* info, void* co
DCHECK(self != nullptr);
DCHECK(Runtime::Current() != nullptr);
DCHECK(Runtime::Current()->IsStarted());
-
- // Now set up the nested signal handler.
-
- // TODO: add SIGSEGV back to the nested signals when we can handle running out stack gracefully.
- static const int handled_nested_signals[] = {SIGABRT};
- constexpr size_t num_handled_nested_signals = arraysize(handled_nested_signals);
-
- // Release the fault manager so that it will remove the signal chain for
- // SIGSEGV and we call the real sigaction.
- fault_manager.Release();
-
- // The action for SIGSEGV should be the default handler now.
-
- // Unblock the signals we allow so that they can be delivered in the signal handler.
- sigset_t sigset;
- sigemptyset(&sigset);
- for (int signal : handled_nested_signals) {
- sigaddset(&sigset, signal);
- }
- pthread_sigmask(SIG_UNBLOCK, &sigset, nullptr);
-
- // If we get a signal in this code we want to invoke our nested signal
- // handler.
- struct sigaction action;
- struct sigaction oldactions[num_handled_nested_signals];
- action.sa_sigaction = art_nested_signal_handler;
-
- // Explicitly mask out SIGSEGV and SIGABRT from the nested signal handler. This
- // should be the default but we definitely don't want these happening in our
- // nested signal handler.
- sigemptyset(&action.sa_mask);
- for (int signal : handled_nested_signals) {
- sigaddset(&action.sa_mask, signal);
- }
-
- action.sa_flags = SA_SIGINFO | SA_ONSTACK;
-#if !defined(__APPLE__) && !defined(__mips__)
- action.sa_restorer = nullptr;
-#endif
-
- // Catch handled signals to invoke our nested handler.
- bool success = true;
- for (size_t i = 0; i < num_handled_nested_signals; ++i) {
- success = sigaction(handled_nested_signals[i], &action, &oldactions[i]) == 0;
- if (!success) {
- PLOG(ERROR) << "Unable to set up nested signal handler";
- break;
+ for (const auto& handler : other_handlers_) {
+ if (handler->Action(sig, info, context)) {
+ return true;
}
}
-
- if (success) {
- // Save the current state and call the handlers. If anything causes a signal
- // our nested signal handler will be invoked and this will longjmp to the saved
- // state.
- if (setjmp(*self->GetNestedSignalState()) == 0) {
- for (const auto& handler : other_handlers_) {
- if (handler->Action(sig, info, context)) {
- // Restore the signal handlers, reinit the fault manager and return. Signal was
- // handled.
- for (size_t i = 0; i < num_handled_nested_signals; ++i) {
- success = sigaction(handled_nested_signals[i], &oldactions[i], nullptr) == 0;
- if (!success) {
- PLOG(ERROR) << "Unable to restore signal handler";
- }
- }
- fault_manager.Init();
- return true;
- }
- }
- } else {
- LOG(ERROR) << "Nested signal detected - original signal being reported";
- }
-
- // Restore the signal handlers.
- for (size_t i = 0; i < num_handled_nested_signals; ++i) {
- success = sigaction(handled_nested_signals[i], &oldactions[i], nullptr) == 0;
- if (!success) {
- PLOG(ERROR) << "Unable to restore signal handler";
- }
- }
- }
-
- // Now put the fault manager back in place.
- fault_manager.Init();
return false;
}
-void FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
- // BE CAREFUL ALLOCATING HERE INCLUDING USING LOG(...)
- //
- // If malloc calls abort, it will be holding its lock.
- // If the handler tries to call malloc, it will deadlock.
+bool FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
VLOG(signals) << "Handling fault";
+
+#ifdef TEST_NESTED_SIGNAL
+ // Simulate a crash in a handler.
+ raise(SIGSEGV);
+#endif
+
if (IsInGeneratedCode(info, context, true)) {
VLOG(signals) << "in generated code, looking for handler";
for (const auto& handler : generated_code_handlers_) {
VLOG(signals) << "invoking Action on handler " << handler;
if (handler->Action(sig, info, context)) {
-#ifdef TEST_NESTED_SIGNAL
- // In test mode we want to fall through to stack trace handler
- // on every signal (in reality this will cause a crash on the first
- // signal).
- break;
-#else
// We have handled a signal so it's time to return from the
// signal handler to the appropriate place.
- return;
-#endif
+ return true;
}
}
// We hit a signal we didn't handle. This might be something for which
- // we can give more information about so call all registered handlers to see
- // if it is.
+ // we can give more information about so call all registered handlers to
+ // see if it is.
if (HandleFaultByOtherHandlers(sig, info, context)) {
- return;
+ return true;
}
}
// Set a breakpoint in this function to catch unhandled signals.
art_sigsegv_fault();
-
- // Pass this on to the next handler in the chain, or the default if none.
- InvokeUserSignalHandler(sig, info, context);
+ return false;
}
void FaultManager::AddHandler(FaultHandler* handler, bool generated_code) {
@@ -417,11 +257,7 @@ JavaStackTraceHandler::JavaStackTraceHandler(FaultManager* manager) : FaultHandl
bool JavaStackTraceHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
-#ifdef TEST_NESTED_SIGNAL
- bool in_generated_code = true;
-#else
bool in_generated_code = manager_->IsInGeneratedCode(siginfo, context, false);
-#endif
if (in_generated_code) {
LOG(ERROR) << "Dumping java stack trace for crash in generated code";
ArtMethod* method = nullptr;
@@ -432,12 +268,6 @@ bool JavaStackTraceHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* siginfo,
manager_->GetMethodAndReturnPcAndSp(siginfo, context, &method, &return_pc, &sp);
// Inside of generated code, sp[0] is the method, so sp is the frame.
self->SetTopOfStack(reinterpret_cast<ArtMethod**>(sp));
-#ifdef TEST_NESTED_SIGNAL
- // To test the nested signal handler we raise a signal here. This will cause the
- // nested signal handler to be called and perform a longjmp back to the setjmp
- // above.
- abort();
-#endif
self->DumpJavaStack(LOG_STREAM(ERROR));
}
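With the nested-signal machinery and the explicit sigaction/ClaimSignalChain plumbing gone, ART registers a single "special" handler with sigchain and reports success through its return value: true means the fault was handled, false lets sigchain fall through to the next handler or the default action. A hedged usage sketch, assuming only the AddSpecialSignalHandlerFn signature implied by Init() above:

    #include <signal.h>

    // Assumed declaration, matching the call in FaultManager::Init(); the real
    // prototype lives in sigchain.
    extern "C" void AddSpecialSignalHandlerFn(int signal,
                                              bool (*fn)(int, siginfo_t*, void*));

    static bool MySegvHandler(int sig, siginfo_t* info, void* context) {
      // Inspect info/context; return true only if the fault was fully handled.
      (void)sig; (void)info; (void)context;
      return false;  // not ours: let the chain and eventually the default action run
    }

    void RegisterHandler() {
      AddSpecialSignalHandlerFn(SIGSEGV, MySegvHandler);
    }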
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index 56e0fb78c1..d56cf17861 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -42,10 +42,9 @@ class FaultManager {
// Unclaim signals and delete registered handlers.
void Shutdown();
- void EnsureArtActionInFrontOfSignalChain();
- void HandleFault(int sig, siginfo_t* info, void* context);
- void HandleNestedSignal(int sig, siginfo_t* info, void* context);
+ // Try to handle a fault, returns true if successful.
+ bool HandleFault(int sig, siginfo_t* info, void* context);
// Added handlers are owned by the fault handler and will be freed on Shutdown().
void AddHandler(FaultHandler* handler, bool generated_code);
diff --git a/runtime/gc/accounting/read_barrier_table.h b/runtime/gc/accounting/read_barrier_table.h
index 86266e2500..e77a5b8e39 100644
--- a/runtime/gc/accounting/read_barrier_table.h
+++ b/runtime/gc/accounting/read_barrier_table.h
@@ -80,7 +80,7 @@ class ReadBarrierTable {
}
// This should match RegionSpace::kRegionSize. static_assert'ed in concurrent_copying.h.
- static constexpr size_t kRegionSize = 1 * MB;
+ static constexpr size_t kRegionSize = 256 * KB;
private:
static constexpr uint64_t kHeapCapacity = 4ULL * GB; // low 4gb.
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 1fa2d1ac8a..562fc750ed 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -141,7 +141,7 @@ class RosAlloc {
template<bool kUseTail = true>
class SlotFreeList {
public:
- SlotFreeList() : head_(0U), tail_(0), size_(0) {}
+ SlotFreeList() : head_(0U), tail_(0), size_(0), padding_(0) {}
Slot* Head() const {
return reinterpret_cast<Slot*>(head_);
}
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 8f9c187e1d..24ba52f0c5 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -72,12 +72,19 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
rb_mark_bit_stack_full_(false),
mark_stack_lock_("concurrent copying mark stack lock", kMarkSweepMarkStackLock),
thread_running_gc_(nullptr),
- is_marking_(false), is_active_(false), is_asserting_to_space_invariant_(false),
+ is_marking_(false),
+ is_active_(false),
+ is_asserting_to_space_invariant_(false),
region_space_bitmap_(nullptr),
- heap_mark_bitmap_(nullptr), live_stack_freeze_size_(0), mark_stack_mode_(kMarkStackModeOff),
+ heap_mark_bitmap_(nullptr),
+ live_stack_freeze_size_(0),
+ from_space_num_objects_at_first_pause_(0),
+ from_space_num_bytes_at_first_pause_(0),
+ mark_stack_mode_(kMarkStackModeOff),
weak_ref_access_enabled_(true),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
+ mark_from_read_barrier_measurements_(false),
rb_slow_path_ns_(0),
rb_slow_path_count_(0),
rb_slow_path_count_gc_(0),
@@ -87,6 +94,7 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
rb_slow_path_count_gc_total_(0),
rb_table_(heap_->GetReadBarrierTable()),
force_evacuate_all_(false),
+ gc_grays_immune_objects_(false),
immune_gray_stack_lock_("concurrent copying immune gray stack lock",
kMarkSweepMarkStackLock) {
static_assert(space::RegionSpace::kRegionSize == accounting::ReadBarrierTable::kRegionSize,
@@ -1644,10 +1652,10 @@ void ConcurrentCopying::ReclaimPhase() {
// Record freed objects.
TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
// Don't include thread-locals that are in the to-space.
- uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
- uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
- uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
- uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
+ const uint64_t from_bytes = region_space_->GetBytesAllocatedInFromSpace();
+ const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace();
+ const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace();
+ const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace();
uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent();
cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
@@ -1658,8 +1666,18 @@ void ConcurrentCopying::ReclaimPhase() {
}
CHECK_LE(to_objects, from_objects);
CHECK_LE(to_bytes, from_bytes);
- int64_t freed_bytes = from_bytes - to_bytes;
- int64_t freed_objects = from_objects - to_objects;
+ // cleared_bytes and cleared_objects may be greater than the from space equivalents since
+ // ClearFromSpace may clear empty unevac regions.
+ uint64_t cleared_bytes;
+ uint64_t cleared_objects;
+ {
+ TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
+ region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
+ CHECK_GE(cleared_bytes, from_bytes);
+ CHECK_GE(cleared_objects, from_objects);
+ }
+ int64_t freed_bytes = cleared_bytes - to_bytes;
+ int64_t freed_objects = cleared_objects - to_objects;
if (kVerboseMode) {
LOG(INFO) << "RecordFree:"
<< " from_bytes=" << from_bytes << " from_objects=" << from_objects
@@ -1678,11 +1696,6 @@ void ConcurrentCopying::ReclaimPhase() {
}
{
- TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
- region_space_->ClearFromSpace();
- }
-
- {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
Sweep(false);
SwapBitmaps();
@@ -2166,7 +2179,12 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
fall_back_to_non_moving = true;
to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
&non_moving_space_bytes_allocated, nullptr, &dummy);
- CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
+ if (UNLIKELY(to_ref == nullptr)) {
+ LOG(FATAL_WITHOUT_ABORT) << "Fall-back non-moving space allocation failed for a "
+ << obj_size << " byte object in region type "
+ << region_space_->GetRegionType(from_ref);
+ LOG(FATAL) << "Object address=" << from_ref << " type=" << from_ref->PrettyTypeOf();
+ }
bytes_allocated = non_moving_space_bytes_allocated;
// Mark it in the mark bitmap.
accounting::ContinuousSpaceBitmap* mark_bitmap =
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 00393881e9..c61f69dad3 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -52,8 +52,12 @@ void MarkCompact::BindBitmaps() {
MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
: GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
+ mark_stack_(nullptr),
space_(nullptr),
+ mark_bitmap_(nullptr),
collector_name_(name_),
+ bump_pointer_(nullptr),
+ live_objects_in_space_(0),
updating_references_(false) {}
void MarkCompact::RunPhases() {
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index eef4fba20d..f0e1029f85 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -59,6 +59,8 @@ enum CollectorType {
kCollectorTypeHprof,
// Fake collector for installing/removing a system-weak holder.
kCollectorTypeAddRemoveSystemWeakHolder,
+ // Fake collector type for GetObjectsAllocated
+ kCollectorTypeGetObjectsAllocated,
};
std::ostream& operator<<(std::ostream& os, const CollectorType& collector_type);
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 9e34346686..c1c1cad861 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -40,6 +40,7 @@ const char* PrettyCause(GcCause cause) {
case kGcCauseJitCodeCache: return "JitCodeCache";
case kGcCauseAddRemoveSystemWeakHolder: return "SystemWeakHolder";
case kGcCauseHprof: return "Hprof";
+ case kGcCauseGetObjectsAllocated: return "ObjectsAllocated";
}
LOG(FATAL) << "Unreachable";
UNREACHABLE();
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 9b285b12a4..eb27547768 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -53,8 +53,10 @@ enum GcCause {
kGcCauseJitCodeCache,
// Not a real GC cause, used to add or remove system-weak holders.
kGcCauseAddRemoveSystemWeakHolder,
- // Not a real GC cause, used to hprof running in the middle of GC.
+ // Not a real GC cause, used to prevent hprof running in the middle of GC.
kGcCauseHprof,
+ // Not a real GC cause, used to prevent GetObjectsAllocated running in the middle of GC.
+ kGcCauseGetObjectsAllocated,
};
const char* PrettyCause(GcCause cause);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 12b9701845..f04bc896f1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -18,13 +18,13 @@
#include <limits>
#include <memory>
-#include <unwind.h> // For GC verification.
#include <vector>
#include "android-base/stringprintf.h"
#include "allocation_listener.h"
#include "art_field-inl.h"
+#include "backtrace_helper.h"
#include "base/allocator.h"
#include "base/arena_allocator.h"
#include "base/dumpable.h"
@@ -1835,6 +1835,11 @@ void Heap::SetTargetHeapUtilization(float target) {
size_t Heap::GetObjectsAllocated() const {
Thread* const self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingForGetObjectsAllocated);
+ // Prevent GC running during GetObjectsAllocated since we may get a checkpoint request that tells
+ // us to suspend while we are doing SuspendAll. b/35232978
+ gc::ScopedGCCriticalSection gcs(Thread::Current(),
+ gc::kGcCauseGetObjectsAllocated,
+ gc::kCollectorTypeGetObjectsAllocated);
// Need SuspendAll here to prevent lock violation if RosAlloc does it during InspectAll.
ScopedSuspendAll ssa(__FUNCTION__);
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -3559,11 +3564,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
collector::GcType gc_type = collector_ran->GetGcType();
const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
// foreground.
- // Ensure at least 2.5 MB to temporarily fix excessive GC caused by TLAB ergonomics.
- const uint64_t adjusted_min_free = std::max(static_cast<uint64_t>(min_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
- const uint64_t adjusted_max_free = std::max(static_cast<uint64_t>(max_free_ * multiplier),
- static_cast<uint64_t>(5 * MB / 2));
+ const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
+ const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
@@ -4063,42 +4065,6 @@ void Heap::BroadcastForNewAllocationRecords() const {
}
}
-// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
-class StackCrawlState {
- public:
- StackCrawlState(uintptr_t* frames, size_t max_depth, size_t skip_count)
- : frames_(frames), frame_count_(0), max_depth_(max_depth), skip_count_(skip_count) {
- }
- size_t GetFrameCount() const {
- return frame_count_;
- }
- static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
- auto* const state = reinterpret_cast<StackCrawlState*>(arg);
- const uintptr_t ip = _Unwind_GetIP(context);
- // The first stack frame is get_backtrace itself. Skip it.
- if (ip != 0 && state->skip_count_ > 0) {
- --state->skip_count_;
- return _URC_NO_REASON;
- }
- // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
- state->frames_[state->frame_count_] = ip;
- state->frame_count_++;
- return state->frame_count_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
- }
-
- private:
- uintptr_t* const frames_;
- size_t frame_count_;
- const size_t max_depth_;
- size_t skip_count_;
-};
-
-static size_t get_backtrace(uintptr_t* frames, size_t max_depth) {
- StackCrawlState state(frames, max_depth, 0u);
- _Unwind_Backtrace(&StackCrawlState::Callback, &state);
- return state.GetFrameCount();
-}
-
void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
auto* const runtime = Runtime::Current();
if (gc_stress_mode_ && runtime->GetClassLinker()->IsInitialized() &&
@@ -4107,13 +4073,9 @@ void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
bool new_backtrace = false;
{
static constexpr size_t kMaxFrames = 16u;
- uintptr_t backtrace[kMaxFrames];
- const size_t frames = get_backtrace(backtrace, kMaxFrames);
- uint64_t hash = 0;
- for (size_t i = 0; i < frames; ++i) {
- hash = hash * 2654435761 + backtrace[i];
- hash += (hash >> 13) ^ (hash << 6);
- }
+ FixedSizeBacktrace<kMaxFrames> backtrace;
+ backtrace.Collect(/* skip_frames */ 2);
+ uint64_t hash = backtrace.Hash();
MutexLock mu(self, *backtrace_lock_);
new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
if (new_backtrace) {
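The unwind walker and hash mixing deleted above move behind the new FixedSizeBacktrace helper (note the backtrace_helper.h include added near the top of heap.cc). A sketch of what such a helper plausibly looks like, assembled from the removed pieces; the actual backtrace_helper.h may differ:

    #include <unwind.h>
    #include <stddef.h>
    #include <stdint.h>

    // Sketch only: fixed-capacity backtrace with Collect()/Hash(), matching the
    // call site backtrace.Collect(/* skip_frames */ 2); hash = backtrace.Hash();
    template <size_t kMaxFrames>
    class FixedSizeBacktrace {
     public:
      void Collect(size_t skip_frames) {
        skip_frames_ = skip_frames;
        num_frames_ = 0;
        _Unwind_Backtrace(&Callback, this);
      }

      uint64_t Hash() const {
        uint64_t hash = 0;
        for (size_t i = 0; i < num_frames_; ++i) {
          // Same mixing as the code removed from Heap::CheckGcStressMode.
          hash = hash * 2654435761u + frames_[i];
          hash += (hash >> 13) ^ (hash << 6);
        }
        return hash;
      }

     private:
      static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
        auto* self = reinterpret_cast<FixedSizeBacktrace*>(arg);
        const uintptr_t ip = _Unwind_GetIP(context);
        if (ip != 0 && self->skip_frames_ > 0) {
          --self->skip_frames_;  // Drop the collection frames themselves.
          return _URC_NO_REASON;
        }
        self->frames_[self->num_frames_++] = ip;
        return self->num_frames_ >= kMaxFrames ? _URC_END_OF_STACK : _URC_NO_REASON;
      }

      uintptr_t frames_[kMaxFrames];
      size_t num_frames_ = 0;
      size_t skip_frames_ = 0;
    };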
diff --git a/runtime/gc/scoped_gc_critical_section.cc b/runtime/gc/scoped_gc_critical_section.cc
index b5eb9795de..f937d2c778 100644
--- a/runtime/gc/scoped_gc_critical_section.cc
+++ b/runtime/gc/scoped_gc_critical_section.cc
@@ -29,10 +29,14 @@ ScopedGCCriticalSection::ScopedGCCriticalSection(Thread* self,
CollectorType collector_type)
: self_(self) {
Runtime::Current()->GetHeap()->StartGC(self, cause, collector_type);
- old_cause_ = self->StartAssertNoThreadSuspension("ScopedGCCriticalSection");
+ if (self != nullptr) {
+ old_cause_ = self->StartAssertNoThreadSuspension("ScopedGCCriticalSection");
+ }
}
ScopedGCCriticalSection::~ScopedGCCriticalSection() {
- self_->EndAssertNoThreadSuspension(old_cause_);
+ if (self_ != nullptr) {
+ self_->EndAssertNoThreadSuspension(old_cause_);
+ }
Runtime::Current()->GetHeap()->FinishGC(self_, collector::kGcTypeNone);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 010ef1156a..662efe2c8d 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -886,7 +886,7 @@ class ImageSpaceLoader {
explicit FixupObjectAdapter(Args... args) : FixupVisitor(args...) {}
template <typename T>
- T* operator()(T* obj) const {
+ T* operator()(T* obj, void** dest_addr ATTRIBUTE_UNUSED = nullptr) const {
return ForwardObject(obj);
}
};
@@ -976,7 +976,8 @@ class ImageSpaceLoader {
ForwardObject(obj));
}
- void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(mirror::Object* obj) const
+ NO_THREAD_SAFETY_ANALYSIS {
if (visited_->Test(obj)) {
// Already visited.
return;
@@ -1259,17 +1260,18 @@ class ImageSpaceLoader {
}
}
}
- ArtField** fields = dex_cache->GetResolvedFields();
+ mirror::FieldDexCacheType* fields = dex_cache->GetResolvedFields();
if (fields != nullptr) {
- ArtField** new_fields = fixup_adapter.ForwardObject(fields);
+ mirror::FieldDexCacheType* new_fields = fixup_adapter.ForwardObject(fields);
if (fields != new_fields) {
dex_cache->SetResolvedFields(new_fields);
}
for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) {
- ArtField* orig = mirror::DexCache::GetElementPtrSize(new_fields, j, pointer_size);
- ArtField* copy = fixup_adapter.ForwardObject(orig);
- if (orig != copy) {
- mirror::DexCache::SetElementPtrSize(new_fields, j, copy, pointer_size);
+ mirror::FieldDexCachePair orig =
+ mirror::DexCache::GetNativePairPtrSize(new_fields, j, pointer_size);
+ mirror::FieldDexCachePair copy(fixup_adapter.ForwardObject(orig.object), orig.index);
+ if (orig.object != copy.object) {
+ mirror::DexCache::SetNativePairPtrSize(new_fields, j, copy, pointer_size);
}
}
}
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 4c6b5bfadd..3988073de8 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -16,13 +16,12 @@
#include "large_object_space.h"
-#include <valgrind.h>
#include <memory>
-#include <memcheck/memcheck.h>
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
+#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "image.h"
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 5d282f1ae9..5809027235 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -78,7 +78,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
r->SetNewlyAllocated();
++num_non_free_regions_;
obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
@@ -91,7 +91,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
++num_non_free_regions_;
obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
CHECK(obj != nullptr);
@@ -233,14 +233,12 @@ void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
continue;
}
if (r->IsLarge()) {
- if (r->LiveBytes() > 0) {
- // Avoid visiting dead large objects since they may contain dangling pointers to the
- // from-space.
- DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
- DCHECK(obj->GetClass() != nullptr);
- callback(obj, arg);
- }
+ // Avoid visiting dead large objects since they may contain dangling pointers to the
+ // from-space.
+ DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
+ DCHECK(obj->GetClass() != nullptr);
+ callback(obj, arg);
} else if (r->IsLargeTail()) {
// Do nothing.
} else {
@@ -314,13 +312,13 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
DCHECK_EQ(left + num_regs, right);
Region* first_reg = &regions_[left];
DCHECK(first_reg->IsFree());
- first_reg->UnfreeLarge(time_);
+ first_reg->UnfreeLarge(this, time_);
++num_non_free_regions_;
first_reg->SetTop(first_reg->Begin() + num_bytes);
for (size_t p = left + 1; p < right; ++p) {
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
- regions_[p].UnfreeLargeTail(time_);
+ regions_[p].UnfreeLargeTail(this, time_);
++num_non_free_regions_;
}
*bytes_allocated = num_bytes;
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 321524cbbd..1ad48438ba 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -86,6 +86,7 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
num_regions_ = mem_map_size / kRegionSize;
num_non_free_regions_ = 0U;
DCHECK_GT(num_regions_, 0U);
+ non_free_region_index_limit_ = 0U;
regions_.reset(new Region[num_regions_]);
uint8_t* region_addr = mem_map->Begin();
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
@@ -192,7 +193,11 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
MutexLock mu(Thread::Current(), region_lock_);
size_t num_expected_large_tails = 0;
bool prev_large_evacuated = false;
- for (size_t i = 0; i < num_regions_; ++i) {
+ VerifyNonFreeRegionLimit();
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_
+ : std::min(num_regions_, non_free_region_index_limit_);
+ for (size_t i = 0; i < iter_limit; ++i) {
Region* r = &regions_[i];
RegionState state = r->State();
RegionType type = r->Type();
@@ -236,18 +241,50 @@ void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool forc
}
}
}
+ DCHECK_EQ(num_expected_large_tails, 0U);
current_region_ = &full_region_;
evac_region_ = &full_region_;
}
-void RegionSpace::ClearFromSpace() {
+void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
+ DCHECK(cleared_bytes != nullptr);
+ DCHECK(cleared_objects != nullptr);
+ *cleared_bytes = 0;
+ *cleared_objects = 0;
MutexLock mu(Thread::Current(), region_lock_);
- for (size_t i = 0; i < num_regions_; ++i) {
+ VerifyNonFreeRegionLimit();
+ size_t new_non_free_region_index_limit = 0;
+ for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
Region* r = &regions_[i];
if (r->IsInFromSpace()) {
- r->Clear();
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
--num_non_free_regions_;
+ r->Clear();
} else if (r->IsInUnevacFromSpace()) {
+ if (r->LiveBytes() == 0) {
+ // Special case for 0 live bytes, this means all of the objects in the region are dead and
+ // we can clear it. This is important for large objects since we must not visit dead ones in
+ // RegionSpace::Walk because they may contain dangling references to invalid objects.
+ // It is also better to clear these regions now instead of at the end of the next GC to
+ // save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
+ // live percent evacuation logic.
+ size_t free_regions = 1;
+ // Also release RAM for large tails.
+ while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
+ DCHECK(r->IsLarge());
+ regions_[i + free_regions].Clear();
+ ++free_regions;
+ }
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
+ num_non_free_regions_ -= free_regions;
+ r->Clear();
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ continue;
+ }
size_t full_count = 0;
while (r->IsInUnevacFromSpace()) {
Region* const cur = &regions_[i + full_count];
@@ -255,6 +292,7 @@ void RegionSpace::ClearFromSpace() {
cur->LiveBytes() != static_cast<size_t>(cur->Top() - cur->Begin())) {
break;
}
+ DCHECK(cur->IsInUnevacFromSpace());
if (full_count != 0) {
cur->SetUnevacFromSpaceAsToSpace();
}
@@ -271,7 +309,15 @@ void RegionSpace::ClearFromSpace() {
i += full_count - 1;
}
}
+ // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
+ Region* last_checked_region = &regions_[i];
+ if (!last_checked_region->IsFree()) {
+ new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
+ last_checked_region->Idx() + 1);
+ }
}
+ // Update non_free_region_index_limit_.
+ SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
}
@@ -324,6 +370,7 @@ void RegionSpace::Clear() {
}
r->Clear();
}
+ SetNonFreeRegionLimit(0);
current_region_ = &full_region_;
evac_region_ = &full_region_;
}
@@ -390,7 +437,7 @@ bool RegionSpace::AllocNewTlab(Thread* self) {
for (size_t i = 0; i < num_regions_; ++i) {
Region* r = &regions_[i];
if (r->IsFree()) {
- r->Unfree(time_);
+ r->Unfree(this, time_);
++num_non_free_regions_;
r->SetNewlyAllocated();
r->SetTop(r->End());
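
The ClearFromSpace() hunk above adds a fast path: an unevacuated from-space region with zero live bytes is released immediately, together with any large tails behind it, and the freed bytes and objects are reported back through the new out-parameters. A condensed standalone sketch of that loop, with the Region type and its fields invented for the example:

#include <cstddef>
#include <cstdint>
#include <vector>

struct RegionSketch {
  bool is_unevac_from_space = false;
  bool is_large_tail = false;
  size_t live_bytes = 0;
  size_t bytes_allocated = 0;
  size_t objects_allocated = 0;
  void Clear() { *this = RegionSketch(); }
};

void ClearDeadUnevacRegions(std::vector<RegionSketch>& regions,
                            uint64_t* cleared_bytes,
                            uint64_t* cleared_objects) {
  for (size_t i = 0; i < regions.size(); ++i) {
    RegionSketch& r = regions[i];
    if (!r.is_unevac_from_space || r.live_bytes != 0) {
      continue;  // Only fully-dead unevacuated regions are released early.
    }
    size_t free_regions = 1;
    while (i + free_regions < regions.size() && regions[i + free_regions].is_large_tail) {
      regions[i + free_regions].Clear();  // Release RAM for the large tails too.
      ++free_regions;
    }
    *cleared_bytes += r.bytes_allocated;
    *cleared_objects += r.objects_allocated;
    r.Clear();
    i += free_regions - 1;
  }
}
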
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index da36f5c55d..253792993b 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -167,7 +167,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
// Object alignment within the space.
static constexpr size_t kAlignment = kObjectAlignment;
// The region size.
- static constexpr size_t kRegionSize = 1 * MB;
+ static constexpr size_t kRegionSize = 256 * KB;
bool IsInFromSpace(mirror::Object* ref) {
if (HasAddress(ref)) {
@@ -215,7 +215,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t FromSpaceSize() REQUIRES(!region_lock_);
size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
size_t ToSpaceSize() REQUIRES(!region_lock_);
- void ClearFromSpace() REQUIRES(!region_lock_);
+ void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
Region* reg = RefToRegionUnlocked(ref);
@@ -308,25 +308,31 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
}
// Given a free region, declare it non-free (allocated).
- void Unfree(uint32_t alloc_time) {
+ void Unfree(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateAllocated;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
- void UnfreeLarge(uint32_t alloc_time) {
+ void UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateLarge;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
- void UnfreeLargeTail(uint32_t alloc_time) {
+ void UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time)
+ REQUIRES(region_space->region_lock_) {
DCHECK(IsFree());
state_ = RegionState::kRegionStateLargeTail;
type_ = RegionType::kRegionTypeToSpace;
alloc_time_ = alloc_time;
+ region_space->AdjustNonFreeRegionLimit(idx_);
}
void SetNewlyAllocated() {
@@ -342,7 +348,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
bool IsLarge() const {
bool is_large = state_ == RegionState::kRegionStateLarge;
if (is_large) {
- DCHECK_LT(begin_ + 1 * MB, Top());
+ DCHECK_LT(begin_ + kRegionSize, Top());
}
return is_large;
}
@@ -429,7 +435,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t ObjectsAllocated() const {
if (IsLarge()) {
- DCHECK_LT(begin_ + 1 * MB, Top());
+ DCHECK_LT(begin_ + kRegionSize, Top());
DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
return 1;
} else if (IsLargeTail()) {
@@ -520,6 +526,27 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
mirror::Object* GetNextObject(mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void AdjustNonFreeRegionLimit(size_t new_non_free_region_index) REQUIRES(region_lock_) {
+ DCHECK_LT(new_non_free_region_index, num_regions_);
+ non_free_region_index_limit_ = std::max(non_free_region_index_limit_,
+ new_non_free_region_index + 1);
+ VerifyNonFreeRegionLimit();
+ }
+
+ void SetNonFreeRegionLimit(size_t new_non_free_region_index_limit) REQUIRES(region_lock_) {
+ DCHECK_LE(new_non_free_region_index_limit, num_regions_);
+ non_free_region_index_limit_ = new_non_free_region_index_limit;
+ VerifyNonFreeRegionLimit();
+ }
+
+ void VerifyNonFreeRegionLimit() REQUIRES(region_lock_) {
+ if (kIsDebugBuild && non_free_region_index_limit_ < num_regions_) {
+ for (size_t i = non_free_region_index_limit_; i < num_regions_; ++i) {
+ CHECK(regions_[i].IsFree());
+ }
+ }
+ }
+
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
uint32_t time_; // The time as the number of collections since the startup.
@@ -527,6 +554,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
size_t num_non_free_regions_; // The number of non-free regions in this space.
std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
// The pointer to the region array.
+ // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
+ // SetFromSpace(). Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is
+ // true.
+ size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);
Region* current_region_; // The region that's being allocated currently.
Region* evac_region_; // The region that's being evacuated to currently.
Region full_region_; // The dummy/sentinel region that looks full.
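
The new non_free_region_index_limit_ field caches an upper bound on the indices of non-free regions so that SetFromSpace() and ClearFromSpace() can stop scanning early. A minimal sketch of the invariant it maintains, with simplified standalone types rather than ART code:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

struct SpaceSketch {
  std::vector<bool> free;   // free[i] == true iff regions_[i] is free.
  size_t limit = 0;         // Plays the role of non_free_region_index_limit_.

  void OnUnfree(size_t idx) {      // Mirrors AdjustNonFreeRegionLimit().
    free[idx] = false;
    limit = std::max(limit, idx + 1);
  }
  void OnClearAll() {              // Mirrors SetNonFreeRegionLimit(0) in Clear().
    std::fill(free.begin(), free.end(), true);
    limit = 0;
  }
  void Verify() const {            // Mirrors VerifyNonFreeRegionLimit().
    for (size_t i = limit; i < free.size(); ++i) {
      assert(free[i]);             // Every region at or beyond the limit is free.
    }
  }
};
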
diff --git a/runtime/image.cc b/runtime/image.cc
index 243051e3bd..b153ea0e02 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -25,7 +25,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '3', '9', '\0' }; // Enable string compression.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '3', '\0' }; // hash-based DexCache fields
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/imt_conflict_table.h b/runtime/imt_conflict_table.h
index fdd10fefc4..35868642e1 100644
--- a/runtime/imt_conflict_table.h
+++ b/runtime/imt_conflict_table.h
@@ -81,6 +81,14 @@ class ImtConflictTable {
return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size);
}
+ void** AddressOfInterfaceMethod(size_t index, PointerSize pointer_size) {
+ return AddressOfMethod(index * kMethodCount + kMethodInterface, pointer_size);
+ }
+
+ void** AddressOfImplementationMethod(size_t index, PointerSize pointer_size) {
+ return AddressOfMethod(index * kMethodCount + kMethodImplementation, pointer_size);
+ }
+
// Return true if two conflict tables are the same.
bool Equals(ImtConflictTable* other, PointerSize pointer_size) const {
size_t num = NumEntries(pointer_size);
@@ -169,6 +177,14 @@ class ImtConflictTable {
}
private:
+ void** AddressOfMethod(size_t index, PointerSize pointer_size) {
+ if (pointer_size == PointerSize::k64) {
+ return reinterpret_cast<void**>(&data64_[index]);
+ } else {
+ return reinterpret_cast<void**>(&data32_[index]);
+ }
+ }
+
ArtMethod* GetMethod(size_t index, PointerSize pointer_size) const {
if (pointer_size == PointerSize::k64) {
return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
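
The new AddressOf*Method() accessors expose the raw slot address so that callers can patch a conflict-table entry in place instead of rebuilding the table; which backing array the address points into depends on the pointer size. A rough sketch of that pattern with a simplified, invented layout:

#include <cstddef>
#include <cstdint>

struct ConflictTableSketch {
  uint32_t data32[8];  // Entries when method pointers are 32-bit.
  uint64_t data64[8];  // Entries when method pointers are 64-bit.

  void** AddressOfMethod(size_t index, bool is_64_bit) {
    return is_64_bit ? reinterpret_cast<void**>(&data64[index])
                     : reinterpret_cast<void**>(&data32[index]);
  }
};
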
diff --git a/runtime/imtable.h b/runtime/imtable.h
index b7066bd521..aa0a5043b5 100644
--- a/runtime/imtable.h
+++ b/runtime/imtable.h
@@ -37,9 +37,13 @@ class ImTable {
// (non-marker) interfaces.
static constexpr size_t kSize = IMT_SIZE;
+ uint8_t* AddressOfElement(size_t index, PointerSize pointer_size) {
+ return reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
+ }
+
ArtMethod* Get(size_t index, PointerSize pointer_size) {
DCHECK_LT(index, kSize);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
+ uint8_t* ptr = AddressOfElement(index, pointer_size);
if (pointer_size == PointerSize::k32) {
uint32_t value = *reinterpret_cast<uint32_t*>(ptr);
return reinterpret_cast<ArtMethod*>(value);
@@ -51,7 +55,7 @@ class ImTable {
void Set(size_t index, ArtMethod* method, PointerSize pointer_size) {
DCHECK_LT(index, kSize);
- uint8_t* ptr = reinterpret_cast<uint8_t*>(this) + OffsetOfElement(index, pointer_size);
+ uint8_t* ptr = AddressOfElement(index, pointer_size);
if (pointer_size == PointerSize::k32) {
uintptr_t value = reinterpret_cast<uintptr_t>(method);
DCHECK_EQ(static_cast<uint32_t>(value), value); // Check that we dont lose any non 0 bits.
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 1b3d339f36..bf49e84760 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -232,12 +232,6 @@ enum InterpreterImplKind {
kSwitchImplKind, // Switch-based interpreter implementation.
kMterpImplKind // Assembly interpreter
};
-static std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) {
- os << ((rhs == kSwitchImplKind)
- ? "Switch-based interpreter"
- : "Asm interpreter");
- return os;
-}
static constexpr InterpreterImplKind kInterpreterImplKind = kMterpImplKind;
@@ -366,6 +360,14 @@ void EnterInterpreterFromInvoke(Thread* self,
return;
}
+ // This can happen if we are in forced interpreter mode and an obsolete method is called using
+ // reflection.
+ if (UNLIKELY(method->IsObsolete())) {
+ ThrowInternalError("Attempting to invoke obsolete version of '%s'.",
+ method->PrettyMethod().c_str());
+ return;
+ }
+
const char* old_cause = self->StartAssertNoThreadSuspension("EnterInterpreterFromInvoke");
const DexFile::CodeItem* code_item = method->GetCodeItem();
uint16_t num_regs;
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 6b22af9829..2589ad046b 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
#include "interpreter.h"
+#include "interpreter_intrinsics.h"
#include <math.h>
@@ -104,13 +105,58 @@ void AbortTransactionV(Thread* self, const char* fmt, va_list args)
void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
-// Invokes the given method. This is part of the invocation support and is used by DoInvoke and
-// DoInvokeVirtualQuick functions.
+// Invokes the given method. This is part of the invocation support and is used by DoInvoke,
+// DoFastInvoke and DoInvokeVirtualQuick functions.
// Returns true on success, otherwise throws an exception and returns false.
template<bool is_range, bool do_assignability_check>
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result);
+// Handles streamlined non-range invoke-static, -direct and -virtual instructions originating in
+// mterp. Access checks and instrumentation other than JIT profiling are not supported; interpreter
+// intrinsics are handled where applicable.
+// Returns true on success, otherwise throws an exception and returns false.
+template<InvokeType type>
+static inline bool DoFastInvoke(Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result) {
+ const uint32_t method_idx = inst->VRegB_35c();
+ const uint32_t vregC = inst->VRegC_35c();
+ ObjPtr<mirror::Object> receiver = (type == kStatic)
+ ? nullptr
+ : shadow_frame.GetVRegReference(vregC);
+ ArtMethod* sf_method = shadow_frame.GetMethod();
+ ArtMethod* const called_method = FindMethodFromCode<type, false>(
+ method_idx, &receiver, sf_method, self);
+ // The shadow frame should already be pushed, so we don't need to update it.
+ if (UNLIKELY(called_method == nullptr)) {
+ CHECK(self->IsExceptionPending());
+ result->SetJ(0);
+ return false;
+ } else if (UNLIKELY(!called_method->IsInvokable())) {
+ called_method->ThrowInvocationTimeError();
+ result->SetJ(0);
+ return false;
+ } else {
+ if (called_method->IsIntrinsic()) {
+ if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
+ shadow_frame.GetResultRegister())) {
+ return !self->IsExceptionPending();
+ }
+ }
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ if (type == kVirtual) {
+ jit->InvokeVirtualOrInterface(receiver, sf_method, shadow_frame.GetDexPC(), called_method);
+ }
+ jit->AddSamples(self, sf_method, 1, /*with_backedges*/false);
+ }
+ return DoCall<false, false>(called_method, self, shadow_frame, inst, inst_data, result);
+ }
+}
+
// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
template<InvokeType type, bool is_range, bool do_access_check>
@@ -495,8 +541,9 @@ void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
// Explicitly instantiate all DoInvoke functions.
#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \
- template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
+ bool DoInvoke<_type, _is_range, _do_check>(Thread* self, \
+ ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
@@ -514,6 +561,19 @@ EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kInterface) // invoke-interface/range.
#undef EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL
#undef EXPLICIT_DO_INVOKE_TEMPLATE_DECL
+// Explicitly instantiate all DoFastInvoke functions.
+#define EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(_type) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
+ bool DoFastInvoke<_type>(Thread* self, \
+ ShadowFrame& shadow_frame, \
+ const Instruction* inst, uint16_t inst_data, \
+ JValue* result)
+
+EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kStatic); // invoke-static
+EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kDirect); // invoke-direct
+EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL(kVirtual); // invoke-virtual
+#undef EXPLICIT_DO_FAST_INVOKE_TEMPLATE_DECL
+
// Explicitly instantiate all DoInvokeVirtualQuick functions.
#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
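
DoFastInvoke, added above, keeps the mterp invoke path lean: it resolves the callee without access checks, tries an interpreter intrinsic first, notifies the JIT, and only then falls back to the generic DoCall. A condensed, standalone outline of that ordering; the helpers here are stand-ins invented for the example, not the real resolution, intrinsic, JIT or DoCall entry points:

#include <cstdint>

struct MethodSketch { bool invokable = true; bool intrinsic = false; };

static MethodSketch* Resolve(MethodSketch* m) { return m; }   // FindMethodFromCode stand-in.
static bool HandleIntrinsic(MethodSketch*) { return true; }   // MterpHandleIntrinsic stand-in.
static void NotifyJit(MethodSketch*) {}                       // JIT profiling stand-in.
static bool GenericCall(MethodSketch*) { return true; }       // DoCall stand-in.

static bool FastInvokeSketch(MethodSketch* candidate, int64_t* result) {
  MethodSketch* m = Resolve(candidate);
  if (m == nullptr || !m->invokable) {
    *result = 0;                // Mirrors result->SetJ(0) on resolution failure.
    return false;
  }
  if (m->intrinsic && HandleIntrinsic(m)) {
    return true;                // The interpreter intrinsic handled the call.
  }
  NotifyJit(m);                 // Profiling happens before the generic invoke.
  return GenericCall(m);
}
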
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
new file mode 100644
index 0000000000..869d43061b
--- /dev/null
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -0,0 +1,481 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "interpreter/interpreter_intrinsics.h"
+
+#include "compiler/intrinsics_enum.h"
+#include "dex_instruction.h"
+#include "interpreter/interpreter_common.h"
+
+namespace art {
+namespace interpreter {
+
+
+#define BINARY_INTRINSIC(name, op, get1, get2, set) \
+static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result_register) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
+ inst->GetVarArgs(arg, inst_data); \
+ result_register->set(op(shadow_frame->get1, shadow_frame->get2)); \
+ return true; \
+}
+
+#define BINARY_II_INTRINSIC(name, op, set) \
+ BINARY_INTRINSIC(name, op, GetVReg(arg[0]), GetVReg(arg[1]), set)
+
+#define BINARY_JJ_INTRINSIC(name, op, set) \
+ BINARY_INTRINSIC(name, op, GetVRegLong(arg[0]), GetVRegLong(arg[2]), set)
+
+#define BINARY_JI_INTRINSIC(name, op, set) \
+ BINARY_INTRINSIC(name, op, GetVRegLong(arg[0]), GetVReg(arg[2]), set)
+
+#define UNARY_INTRINSIC(name, op, get, set) \
+static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result_register) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
+ inst->GetVarArgs(arg, inst_data); \
+ result_register->set(op(shadow_frame->get(arg[0]))); \
+ return true; \
+}
+
+
+// java.lang.Integer.reverse(I)I
+UNARY_INTRINSIC(MterpIntegerReverse, ReverseBits32, GetVReg, SetI);
+
+// java.lang.Integer.reverseBytes(I)I
+UNARY_INTRINSIC(MterpIntegerReverseBytes, BSWAP, GetVReg, SetI);
+
+// java.lang.Integer.bitCount(I)I
+UNARY_INTRINSIC(MterpIntegerBitCount, POPCOUNT, GetVReg, SetI);
+
+// java.lang.Integer.compare(II)I
+BINARY_II_INTRINSIC(MterpIntegerCompare, Compare, SetI);
+
+// java.lang.Integer.highestOneBit(I)I
+UNARY_INTRINSIC(MterpIntegerHighestOneBit, HighestOneBitValue, GetVReg, SetI);
+
+// java.lang.Integer.lowestOneBit(I)I
+UNARY_INTRINSIC(MterpIntegerLowestOneBit, LowestOneBitValue, GetVReg, SetI);
+
+// java.lang.Integer.numberOfLeadingZeros(I)I
+UNARY_INTRINSIC(MterpIntegerNumberOfLeadingZeros, JAVASTYLE_CLZ, GetVReg, SetI);
+
+// java.lang.Integer.numberOfTrailingZeros(I)I
+UNARY_INTRINSIC(MterpIntegerNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVReg, SetI);
+
+// java.lang.Integer.rotateRight(II)I
+BINARY_II_INTRINSIC(MterpIntegerRotateRight, (Rot<int32_t, false>), SetI);
+
+// java.lang.Integer.rotateLeft(II)I
+BINARY_II_INTRINSIC(MterpIntegerRotateLeft, (Rot<int32_t, true>), SetI);
+
+// java.lang.Integer.signum(I)I
+UNARY_INTRINSIC(MterpIntegerSignum, Signum, GetVReg, SetI);
+
+// java.lang.Long.reverse(J)J
+UNARY_INTRINSIC(MterpLongReverse, ReverseBits64, GetVRegLong, SetJ);
+
+// java.lang.Long.reverseBytes(J)J
+UNARY_INTRINSIC(MterpLongReverseBytes, BSWAP, GetVRegLong, SetJ);
+
+// java.lang.Long.bitCount(J)I
+UNARY_INTRINSIC(MterpLongBitCount, POPCOUNT, GetVRegLong, SetI);
+
+// java.lang.Long.compare(JJ)I
+BINARY_JJ_INTRINSIC(MterpLongCompare, Compare, SetI);
+
+// java.lang.Long.highestOneBit(J)J
+UNARY_INTRINSIC(MterpLongHighestOneBit, HighestOneBitValue, GetVRegLong, SetJ);
+
+// java.lang.Long.lowestOneBit(J)J
+UNARY_INTRINSIC(MterpLongLowestOneBit, LowestOneBitValue, GetVRegLong, SetJ);
+
+// java.lang.Long.numberOfLeadingZeros(J)I
+UNARY_INTRINSIC(MterpLongNumberOfLeadingZeros, JAVASTYLE_CLZ, GetVRegLong, SetJ);
+
+// java.lang.Long.numberOfTrailingZeros(J)I
+UNARY_INTRINSIC(MterpLongNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVRegLong, SetJ);
+
+// java.lang.Long.rotateRight(JI)J
+BINARY_JJ_INTRINSIC(MterpLongRotateRight, (Rot<int64_t, false>), SetJ);
+
+// java.lang.Long.rotateLeft(JI)J
+BINARY_JJ_INTRINSIC(MterpLongRotateLeft, (Rot<int64_t, true>), SetJ);
+
+// java.lang.Long.signum(J)I
+UNARY_INTRINSIC(MterpLongSignum, Signum, GetVRegLong, SetI);
+
+// java.lang.Short.reverseBytes(S)S
+UNARY_INTRINSIC(MterpShortReverseBytes, BSWAP, GetVRegShort, SetS);
+
+// java.lang.Math.min(II)I
+BINARY_II_INTRINSIC(MterpMathMinIntInt, std::min, SetI);
+
+// java.lang.Math.min(JJ)J
+BINARY_JJ_INTRINSIC(MterpMathMinLongLong, std::min, SetJ);
+
+// java.lang.Math.max(II)I
+BINARY_II_INTRINSIC(MterpMathMaxIntInt, std::max, SetI);
+
+// java.lang.Math.max(JJ)J
+BINARY_JJ_INTRINSIC(MterpMathMaxLongLong, std::max, SetJ);
+
+// java.lang.Math.abs(I)I
+UNARY_INTRINSIC(MterpMathAbsInt, std::abs, GetVReg, SetI);
+
+// java.lang.Math.abs(J)J
+UNARY_INTRINSIC(MterpMathAbsLong, std::abs, GetVRegLong, SetJ);
+
+// java.lang.Math.abs(F)F
+UNARY_INTRINSIC(MterpMathAbsFloat, 0x7fffffff&, GetVReg, SetI);
+
+// java.lang.Math.abs(D)D
+UNARY_INTRINSIC(MterpMathAbsDouble, INT64_C(0x7fffffffffffffff)&, GetVRegLong, SetJ);
+
+// java.lang.Math.sqrt(D)D
+UNARY_INTRINSIC(MterpMathSqrt, std::sqrt, GetVRegDouble, SetD);
+
+// java.lang.Math.ceil(D)D
+UNARY_INTRINSIC(MterpMathCeil, std::ceil, GetVRegDouble, SetD);
+
+// java.lang.Math.floor(D)D
+UNARY_INTRINSIC(MterpMathFloor, std::floor, GetVRegDouble, SetD);
+
+// java.lang.Math.sin(D)D
+UNARY_INTRINSIC(MterpMathSin, std::sin, GetVRegDouble, SetD);
+
+// java.lang.Math.cos(D)D
+UNARY_INTRINSIC(MterpMathCos, std::cos, GetVRegDouble, SetD);
+
+// java.lang.Math.tan(D)D
+UNARY_INTRINSIC(MterpMathTan, std::tan, GetVRegDouble, SetD);
+
+// java.lang.Math.asin(D)D
+UNARY_INTRINSIC(MterpMathAsin, std::asin, GetVRegDouble, SetD);
+
+// java.lang.Math.acos(D)D
+UNARY_INTRINSIC(MterpMathAcos, std::acos, GetVRegDouble, SetD);
+
+// java.lang.Math.atan(D)D
+UNARY_INTRINSIC(MterpMathAtan, std::atan, GetVRegDouble, SetD);
+
+// java.lang.String.charAt(I)C
+static ALWAYS_INLINE bool MterpStringCharAt(ShadowFrame* shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result_register)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {};
+ inst->GetVarArgs(arg, inst_data);
+ mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString();
+ int length = str->GetLength();
+ int index = shadow_frame->GetVReg(arg[1]);
+ uint16_t res;
+ if (UNLIKELY(index < 0) || (index >= length)) {
+ return false; // Punt and let non-intrinsic version deal with the throw.
+ }
+ if (str->IsCompressed()) {
+ res = str->GetValueCompressed()[index];
+ } else {
+ res = str->GetValue()[index];
+ }
+ result_register->SetC(res);
+ return true;
+}
+
+// java.lang.String.compareTo(Ljava/lang/String;)I
+static ALWAYS_INLINE bool MterpStringCompareTo(ShadowFrame* shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result_register)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {};
+ inst->GetVarArgs(arg, inst_data);
+ mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString();
+ mirror::Object* arg1 = shadow_frame->GetVRegReference(arg[1]);
+ if (arg1 == nullptr) {
+ return false;
+ }
+ result_register->SetI(str->CompareTo(arg1->AsString()));
+ return true;
+}
+
+#define STRING_INDEXOF_INTRINSIC(name, starting_pos) \
+static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result_register) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
+ inst->GetVarArgs(arg, inst_data); \
+ mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString(); \
+ int ch = shadow_frame->GetVReg(arg[1]); \
+ if (ch >= 0x10000) { \
+ /* Punt if supplementary char. */ \
+ return false; \
+ } \
+ result_register->SetI(str->FastIndexOf(ch, starting_pos)); \
+ return true; \
+}
+
+// java.lang.String.indexOf(I)I
+STRING_INDEXOF_INTRINSIC(StringIndexOf, 0);
+
+// java.lang.String.indexOf(II)I
+STRING_INDEXOF_INTRINSIC(StringIndexOfAfter, shadow_frame->GetVReg(arg[2]));
+
+#define SIMPLE_STRING_INTRINSIC(name, operation) \
+static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ JValue* result_register) \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
+ inst->GetVarArgs(arg, inst_data); \
+ mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString(); \
+ result_register->operation; \
+ return true; \
+}
+
+// java.lang.String.isEmpty()Z
+SIMPLE_STRING_INTRINSIC(StringIsEmpty, SetZ(str->GetLength() == 0))
+
+// java.lang.String.length()I
+SIMPLE_STRING_INTRINSIC(StringLength, SetI(str->GetLength()))
+
+// java.lang.String.getCharsNoCheck(II[CI)V
+static ALWAYS_INLINE bool MterpStringGetCharsNoCheck(ShadowFrame* shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result_register ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Start, end & index already checked by caller - won't throw. Destination is uncompressed.
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {};
+ inst->GetVarArgs(arg, inst_data);
+ mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString();
+ int32_t start = shadow_frame->GetVReg(arg[1]);
+ int32_t end = shadow_frame->GetVReg(arg[2]);
+ int32_t index = shadow_frame->GetVReg(arg[4]);
+ mirror::CharArray* array = shadow_frame->GetVRegReference(arg[3])->AsCharArray();
+ uint16_t* dst = array->GetData() + index;
+ int32_t len = (end - start);
+ if (str->IsCompressed()) {
+ const uint8_t* src_8 = str->GetValueCompressed() + start;
+ for (int i = 0; i < len; i++) {
+ dst[i] = src_8[i];
+ }
+ } else {
+ uint16_t* src_16 = str->GetValue() + start;
+ memcpy(dst, src_16, len * sizeof(uint16_t));
+ }
+ return true;
+}
+
+// java.lang.String.equals(Ljava/lang/Object;)Z
+static ALWAYS_INLINE bool MterpStringEquals(ShadowFrame* shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result_register)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {};
+ inst->GetVarArgs(arg, inst_data);
+ mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString();
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg[1]);
+ bool res = false; // Assume not equal.
+ if ((obj != nullptr) && obj->IsString()) {
+ mirror::String* str2 = obj->AsString();
+ if (str->GetCount() == str2->GetCount()) {
+ // Length & compression status are same. Can use block compare.
+ void* bytes1;
+ void* bytes2;
+ int len = str->GetLength();
+ if (str->IsCompressed()) {
+ bytes1 = str->GetValueCompressed();
+ bytes2 = str2->GetValueCompressed();
+ } else {
+ len *= sizeof(uint16_t);
+ bytes1 = str->GetValue();
+ bytes2 = str2->GetValue();
+ }
+ res = (memcmp(bytes1, bytes2, len) == 0);
+ }
+ }
+ result_register->SetZ(res);
+ return true;
+}
+
+// Macro to help keep track of what's left to implement.
+#define UNIMPLEMENTED_CASE(name) \
+ case Intrinsics::k##name: \
+ res = false; \
+ break;
+
+#define INTRINSIC_CASE(name) \
+ case Intrinsics::k##name: \
+ res = Mterp##name(shadow_frame, inst, inst_data, result_register); \
+ break;
+
+bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
+ ArtMethod* const called_method,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result_register)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Intrinsics intrinsic = static_cast<Intrinsics>(called_method->GetIntrinsic());
+ bool res = false; // Assume failure
+ switch (intrinsic) {
+ UNIMPLEMENTED_CASE(DoubleDoubleToRawLongBits /* (D)J */)
+ UNIMPLEMENTED_CASE(DoubleDoubleToLongBits /* (D)J */)
+ UNIMPLEMENTED_CASE(DoubleIsInfinite /* (D)Z */)
+ UNIMPLEMENTED_CASE(DoubleIsNaN /* (D)Z */)
+ UNIMPLEMENTED_CASE(DoubleLongBitsToDouble /* (J)D */)
+ UNIMPLEMENTED_CASE(FloatFloatToRawIntBits /* (F)I */)
+ UNIMPLEMENTED_CASE(FloatFloatToIntBits /* (F)I */)
+ UNIMPLEMENTED_CASE(FloatIsInfinite /* (F)Z */)
+ UNIMPLEMENTED_CASE(FloatIsNaN /* (F)Z */)
+ UNIMPLEMENTED_CASE(FloatIntBitsToFloat /* (I)F */)
+ INTRINSIC_CASE(IntegerReverse)
+ INTRINSIC_CASE(IntegerReverseBytes)
+ INTRINSIC_CASE(IntegerBitCount)
+ INTRINSIC_CASE(IntegerCompare)
+ INTRINSIC_CASE(IntegerHighestOneBit)
+ INTRINSIC_CASE(IntegerLowestOneBit)
+ INTRINSIC_CASE(IntegerNumberOfLeadingZeros)
+ INTRINSIC_CASE(IntegerNumberOfTrailingZeros)
+ INTRINSIC_CASE(IntegerRotateRight)
+ INTRINSIC_CASE(IntegerRotateLeft)
+ INTRINSIC_CASE(IntegerSignum)
+ INTRINSIC_CASE(LongReverse)
+ INTRINSIC_CASE(LongReverseBytes)
+ INTRINSIC_CASE(LongBitCount)
+ INTRINSIC_CASE(LongCompare)
+ INTRINSIC_CASE(LongHighestOneBit)
+ INTRINSIC_CASE(LongLowestOneBit)
+ INTRINSIC_CASE(LongNumberOfLeadingZeros)
+ INTRINSIC_CASE(LongNumberOfTrailingZeros)
+ INTRINSIC_CASE(LongRotateRight)
+ INTRINSIC_CASE(LongRotateLeft)
+ INTRINSIC_CASE(LongSignum)
+ INTRINSIC_CASE(ShortReverseBytes)
+ INTRINSIC_CASE(MathAbsDouble)
+ INTRINSIC_CASE(MathAbsFloat)
+ INTRINSIC_CASE(MathAbsLong)
+ INTRINSIC_CASE(MathAbsInt)
+ UNIMPLEMENTED_CASE(MathMinDoubleDouble /* (DD)D */)
+ UNIMPLEMENTED_CASE(MathMinFloatFloat /* (FF)F */)
+ INTRINSIC_CASE(MathMinLongLong)
+ INTRINSIC_CASE(MathMinIntInt)
+ UNIMPLEMENTED_CASE(MathMaxDoubleDouble /* (DD)D */)
+ UNIMPLEMENTED_CASE(MathMaxFloatFloat /* (FF)F */)
+ INTRINSIC_CASE(MathMaxLongLong)
+ INTRINSIC_CASE(MathMaxIntInt)
+ INTRINSIC_CASE(MathCos)
+ INTRINSIC_CASE(MathSin)
+ INTRINSIC_CASE(MathAcos)
+ INTRINSIC_CASE(MathAsin)
+ INTRINSIC_CASE(MathAtan)
+ UNIMPLEMENTED_CASE(MathAtan2 /* (DD)D */)
+ UNIMPLEMENTED_CASE(MathCbrt /* (D)D */)
+ UNIMPLEMENTED_CASE(MathCosh /* (D)D */)
+ UNIMPLEMENTED_CASE(MathExp /* (D)D */)
+ UNIMPLEMENTED_CASE(MathExpm1 /* (D)D */)
+ UNIMPLEMENTED_CASE(MathHypot /* (DD)D */)
+ UNIMPLEMENTED_CASE(MathLog /* (D)D */)
+ UNIMPLEMENTED_CASE(MathLog10 /* (D)D */)
+ UNIMPLEMENTED_CASE(MathNextAfter /* (DD)D */)
+ UNIMPLEMENTED_CASE(MathSinh /* (D)D */)
+ INTRINSIC_CASE(MathTan)
+ UNIMPLEMENTED_CASE(MathTanh /* (D)D */)
+ INTRINSIC_CASE(MathSqrt)
+ INTRINSIC_CASE(MathCeil)
+ INTRINSIC_CASE(MathFloor)
+ UNIMPLEMENTED_CASE(MathRint /* (D)D */)
+ UNIMPLEMENTED_CASE(MathRoundDouble /* (D)J */)
+ UNIMPLEMENTED_CASE(MathRoundFloat /* (F)I */)
+ UNIMPLEMENTED_CASE(SystemArrayCopyChar /* ([CI[CII)V */)
+ UNIMPLEMENTED_CASE(SystemArrayCopy /* (Ljava/lang/Object;ILjava/lang/Object;II)V */)
+ UNIMPLEMENTED_CASE(ThreadCurrentThread /* ()Ljava/lang/Thread; */)
+ UNIMPLEMENTED_CASE(MemoryPeekByte /* (J)B */)
+ UNIMPLEMENTED_CASE(MemoryPeekIntNative /* (J)I */)
+ UNIMPLEMENTED_CASE(MemoryPeekLongNative /* (J)J */)
+ UNIMPLEMENTED_CASE(MemoryPeekShortNative /* (J)S */)
+ UNIMPLEMENTED_CASE(MemoryPokeByte /* (JB)V */)
+ UNIMPLEMENTED_CASE(MemoryPokeIntNative /* (JI)V */)
+ UNIMPLEMENTED_CASE(MemoryPokeLongNative /* (JJ)V */)
+ UNIMPLEMENTED_CASE(MemoryPokeShortNative /* (JS)V */)
+ INTRINSIC_CASE(StringCharAt)
+ INTRINSIC_CASE(StringCompareTo)
+ INTRINSIC_CASE(StringEquals)
+ INTRINSIC_CASE(StringGetCharsNoCheck)
+ INTRINSIC_CASE(StringIndexOf)
+ INTRINSIC_CASE(StringIndexOfAfter)
+ UNIMPLEMENTED_CASE(StringStringIndexOf /* (Ljava/lang/String;)I */)
+ UNIMPLEMENTED_CASE(StringStringIndexOfAfter /* (Ljava/lang/String;I)I */)
+ INTRINSIC_CASE(StringIsEmpty)
+ INTRINSIC_CASE(StringLength)
+ UNIMPLEMENTED_CASE(StringNewStringFromBytes /* ([BIII)Ljava/lang/String; */)
+ UNIMPLEMENTED_CASE(StringNewStringFromChars /* (II[C)Ljava/lang/String; */)
+ UNIMPLEMENTED_CASE(StringNewStringFromString /* (Ljava/lang/String;)Ljava/lang/String; */)
+ UNIMPLEMENTED_CASE(StringBufferAppend /* (Ljava/lang/String;)Ljava/lang/StringBuffer; */)
+ UNIMPLEMENTED_CASE(StringBufferLength /* ()I */)
+ UNIMPLEMENTED_CASE(StringBufferToString /* ()Ljava/lang/String; */)
+ UNIMPLEMENTED_CASE(StringBuilderAppend /* (Ljava/lang/String;)Ljava/lang/StringBuilder; */)
+ UNIMPLEMENTED_CASE(StringBuilderLength /* ()I */)
+ UNIMPLEMENTED_CASE(StringBuilderToString /* ()Ljava/lang/String; */)
+ UNIMPLEMENTED_CASE(UnsafeCASInt /* (Ljava/lang/Object;JII)Z */)
+ UNIMPLEMENTED_CASE(UnsafeCASLong /* (Ljava/lang/Object;JJJ)Z */)
+ UNIMPLEMENTED_CASE(UnsafeCASObject /* (Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z */)
+ UNIMPLEMENTED_CASE(UnsafeGet /* (Ljava/lang/Object;J)I */)
+ UNIMPLEMENTED_CASE(UnsafeGetVolatile /* (Ljava/lang/Object;J)I */)
+ UNIMPLEMENTED_CASE(UnsafeGetObject /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
+ UNIMPLEMENTED_CASE(UnsafeGetObjectVolatile /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
+ UNIMPLEMENTED_CASE(UnsafeGetLong /* (Ljava/lang/Object;J)J */)
+ UNIMPLEMENTED_CASE(UnsafeGetLongVolatile /* (Ljava/lang/Object;J)J */)
+ UNIMPLEMENTED_CASE(UnsafePut /* (Ljava/lang/Object;JI)V */)
+ UNIMPLEMENTED_CASE(UnsafePutOrdered /* (Ljava/lang/Object;JI)V */)
+ UNIMPLEMENTED_CASE(UnsafePutVolatile /* (Ljava/lang/Object;JI)V */)
+ UNIMPLEMENTED_CASE(UnsafePutObject /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
+ UNIMPLEMENTED_CASE(UnsafePutObjectOrdered /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
+ UNIMPLEMENTED_CASE(UnsafePutObjectVolatile /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
+ UNIMPLEMENTED_CASE(UnsafePutLong /* (Ljava/lang/Object;JJ)V */)
+ UNIMPLEMENTED_CASE(UnsafePutLongOrdered /* (Ljava/lang/Object;JJ)V */)
+ UNIMPLEMENTED_CASE(UnsafePutLongVolatile /* (Ljava/lang/Object;JJ)V */)
+ UNIMPLEMENTED_CASE(UnsafeGetAndAddInt /* (Ljava/lang/Object;JI)I */)
+ UNIMPLEMENTED_CASE(UnsafeGetAndAddLong /* (Ljava/lang/Object;JJ)J */)
+ UNIMPLEMENTED_CASE(UnsafeGetAndSetInt /* (Ljava/lang/Object;JI)I */)
+ UNIMPLEMENTED_CASE(UnsafeGetAndSetLong /* (Ljava/lang/Object;JJ)J */)
+ UNIMPLEMENTED_CASE(UnsafeGetAndSetObject /* (Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object; */)
+ UNIMPLEMENTED_CASE(UnsafeLoadFence /* ()V */)
+ UNIMPLEMENTED_CASE(UnsafeStoreFence /* ()V */)
+ UNIMPLEMENTED_CASE(UnsafeFullFence /* ()V */)
+ UNIMPLEMENTED_CASE(ReferenceGetReferent /* ()Ljava/lang/Object; */)
+ UNIMPLEMENTED_CASE(IntegerValueOf /* (I)Ljava/lang/Integer; */)
+ case Intrinsics::kNone:
+ res = false;
+ break;
+ // Note: no default case to ensure we catch any newly added intrinsics.
+ }
+ return res;
+}
+
+} // namespace interpreter
+} // namespace art
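
As a reading aid, the UNARY_INTRINSIC macro in the new file above stamps out one handler per intrinsic. For example, UNARY_INTRINSIC(MterpIntegerBitCount, POPCOUNT, GetVReg, SetI) expands to roughly the following (line continuations removed):

static ALWAYS_INLINE bool MterpIntegerBitCount(ShadowFrame* shadow_frame,
                                               const Instruction* inst,
                                               uint16_t inst_data,
                                               JValue* result_register)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  uint32_t arg[Instruction::kMaxVarArgRegs] = {};
  inst->GetVarArgs(arg, inst_data);
  result_register->SetI(POPCOUNT(shadow_frame->GetVReg(arg[0])));
  return true;
}
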
diff --git a/runtime/interpreter/interpreter_intrinsics.h b/runtime/interpreter/interpreter_intrinsics.h
new file mode 100644
index 0000000000..2a23002d05
--- /dev/null
+++ b/runtime/interpreter/interpreter_intrinsics.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
+#define ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
+
+#include "jvalue.h"
+
+namespace art {
+
+class ArtMethod;
+class Instruction;
+class ShadowFrame;
+
+namespace interpreter {
+
+// Invocations of methods identified as intrinsics are routed here. If there is
+// no interpreter implementation, this returns false and a normal invoke proceeds.
+bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
+ ArtMethod* const called_method,
+ const Instruction* inst,
+ uint16_t inst_data,
+ JValue* result_register);
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 8bf094e1b8..a53040c2df 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -18,6 +18,7 @@
* Mterp entry point and support functions.
*/
#include "interpreter/interpreter_common.h"
+#include "interpreter/interpreter_intrinsics.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "mterp.h"
#include "debugger.h"
@@ -157,7 +158,7 @@ extern "C" size_t MterpInvokeVirtual(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kVirtual, false, false>(
+ return DoFastInvoke<kVirtual>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -190,7 +191,7 @@ extern "C" size_t MterpInvokeDirect(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kDirect, false, false>(
+ return DoFastInvoke<kDirect>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -201,7 +202,7 @@ extern "C" size_t MterpInvokeStatic(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
- return DoInvoke<kStatic, false, false>(
+ return DoFastInvoke<kStatic>(
self, *shadow_frame, inst, inst_data, result_register);
}
@@ -267,6 +268,18 @@ extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
+ const uint32_t vregC = inst->VRegC_35c();
+ const uint32_t vtable_idx = inst->VRegB_35c();
+ ObjPtr<mirror::Object> const receiver = shadow_frame->GetVRegReference(vregC);
+ if (receiver != nullptr) {
+ ArtMethod* const called_method = receiver->GetClass()->GetEmbeddedVTableEntry(
+ vtable_idx, kRuntimePointerSize);
+ if ((called_method != nullptr) && called_method->IsIntrinsic()) {
+ if (MterpHandleIntrinsic(shadow_frame, called_method, inst, inst_data, result_register)) {
+ return !self->IsExceptionPending();
+ }
+ }
+ }
return DoInvokeVirtualQuick<false>(
self, *shadow_frame, inst, inst_data, result_register);
}
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 80554c240d..70be30c22c 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1131,53 +1131,6 @@ void UnstartedRuntime::UnstartedDoubleDoubleToRawLongBits(
result->SetJ(bit_cast<int64_t, double>(in));
}
-static ObjPtr<mirror::Object> GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const DexFile* dex_file = dex_cache->GetDexFile();
- if (dex_file == nullptr) {
- return nullptr;
- }
-
- // Create the direct byte buffer.
- JNIEnv* env = self->GetJniEnv();
- DCHECK(env != nullptr);
- void* address = const_cast<void*>(reinterpret_cast<const void*>(dex_file->Begin()));
- ScopedLocalRef<jobject> byte_buffer(env, env->NewDirectByteBuffer(address, dex_file->Size()));
- if (byte_buffer.get() == nullptr) {
- DCHECK(self->IsExceptionPending());
- return nullptr;
- }
-
- jvalue args[1];
- args[0].l = byte_buffer.get();
-
- ScopedLocalRef<jobject> dex(env, env->CallStaticObjectMethodA(
- WellKnownClasses::com_android_dex_Dex,
- WellKnownClasses::com_android_dex_Dex_create,
- args));
-
- return self->DecodeJObject(dex.get());
-}
-
-void UnstartedRuntime::UnstartedDexCacheGetDexNative(
- Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
- // We will create the Dex object, but the image writer will release it before creating the
- // art file.
- mirror::Object* src = shadow_frame->GetVRegReference(arg_offset);
- bool have_dex = false;
- if (src != nullptr) {
- ObjPtr<mirror::Object> dex = GetDexFromDexCache(self, src->AsDexCache());
- if (dex != nullptr) {
- have_dex = true;
- result->SetL(dex);
- }
- }
- if (!have_dex) {
- self->ClearException();
- Runtime::Current()->AbortTransactionAndThrowAbortError(self, "Could not create Dex object");
- }
-}
-
static void UnstartedMemoryPeek(
Primitive::Type type, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
int64_t address = shadow_frame->GetVRegLong(arg_offset);
@@ -1336,12 +1289,14 @@ void UnstartedRuntime::UnstartedStringDoReplace(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
jchar old_c = shadow_frame->GetVReg(arg_offset + 1);
jchar new_c = shadow_frame->GetVReg(arg_offset + 2);
- ObjPtr<mirror::String> string = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::String> string =
+ hs.NewHandle(shadow_frame->GetVRegReference(arg_offset)->AsString());
if (string == nullptr) {
AbortTransactionOrFail(self, "String.replaceWithMatch with null object");
return;
}
- result->SetL(string->DoReplace(self, old_c, new_c));
+ result->SetL(mirror::String::DoReplace(self, string, old_c, new_c));
}
// This allows creating the new style of String objects during compilation.
@@ -1672,6 +1627,12 @@ void UnstartedRuntime::UnstartedMethodInvoke(
}
}
+void UnstartedRuntime::UnstartedSystemIdentityHashCode(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
+ result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
+}
void UnstartedRuntime::UnstartedJNIVMRuntimeNewUnpaddedArray(
Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
@@ -1836,13 +1797,6 @@ void UnstartedRuntime::UnstartedJNIThrowableNativeFillInStackTrace(
}
}
-void UnstartedRuntime::UnstartedJNISystemIdentityHashCode(
- Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
- mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result) {
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
- result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
-}
-
void UnstartedRuntime::UnstartedJNIByteOrderIsLittleEndian(
Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index e9435e466b..47910357d5 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -52,7 +52,6 @@
V(MathPow, "double java.lang.Math.pow(double, double)") \
V(ObjectHashCode, "int java.lang.Object.hashCode()") \
V(DoubleDoubleToRawLongBits, "long java.lang.Double.doubleToRawLongBits(double)") \
- V(DexCacheGetDexNative, "com.android.dex.Dex java.lang.DexCache.getDexNative()") \
V(MemoryPeekByte, "byte libcore.io.Memory.peekByte(long)") \
V(MemoryPeekShort, "short libcore.io.Memory.peekShortNative(long)") \
V(MemoryPeekInt, "int libcore.io.Memory.peekIntNative(long)") \
@@ -76,7 +75,8 @@
V(UnsafePutObjectVolatile, "void sun.misc.Unsafe.putObjectVolatile(java.lang.Object, long, java.lang.Object)") \
V(UnsafePutOrderedObject, "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)") \
V(IntegerParseInt, "int java.lang.Integer.parseInt(java.lang.String)") \
- V(LongParseLong, "long java.lang.Long.parseLong(java.lang.String)")
+ V(LongParseLong, "long java.lang.Long.parseLong(java.lang.String)") \
+ V(SystemIdentityHashCode, "int java.lang.System.identityHashCode(java.lang.Object)")
// Methods that are native.
#define UNSTARTED_RUNTIME_JNI_LIST(V) \
@@ -98,7 +98,6 @@
V(ArrayCreateMultiArray, "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])") \
V(ArrayCreateObjectArray, "java.lang.Object java.lang.reflect.Array.createObjectArray(java.lang.Class, int)") \
V(ThrowableNativeFillInStackTrace, "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()") \
- V(SystemIdentityHashCode, "int java.lang.System.identityHashCode(java.lang.Object)") \
V(ByteOrderIsLittleEndian, "boolean java.nio.ByteOrder.isLittleEndian()") \
V(UnsafeCompareAndSwapInt, "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") \
V(UnsafeGetIntVolatile, "int sun.misc.Unsafe.getIntVolatile(java.lang.Object, long)") \
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index db222fad6d..56e261cfc8 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -1367,5 +1367,26 @@ TEST_F(UnstartedRuntimeTest, ConstructorNewInstance0) {
ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
+TEST_F(UnstartedRuntimeTest, IdentityHashCode) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ JValue result;
+ UnstartedSystemIdentityHashCode(self, tmp, &result, 0);
+
+ EXPECT_EQ(0, result.GetI());
+ ASSERT_FALSE(self->IsExceptionPending());
+
+ ObjPtr<mirror::String> str = mirror::String::AllocFromModifiedUtf8(self, "abd");
+ tmp->SetVRegReference(0, str.Ptr());
+ UnstartedSystemIdentityHashCode(self, tmp, &result, 0);
+ EXPECT_NE(0, result.GetI());
+ EXPECT_EQ(str->IdentityHashCode(), result.GetI());
+ ASSERT_FALSE(self->IsExceptionPending());
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index a341cdb89f..b93b8f2a97 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -39,6 +39,7 @@
#include "runtime_options.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change-inl.h"
+#include "sigchain.h"
#include "thread-inl.h"
#include "thread_list.h"
@@ -900,7 +901,8 @@ bool JavaVMExt::LoadNativeLibrary(JNIEnv* env,
int version = (*jni_on_load)(this, nullptr);
if (runtime_->GetTargetSdkVersion() != 0 && runtime_->GetTargetSdkVersion() <= 21) {
- fault_manager.EnsureArtActionInFrontOfSignalChain();
+ // Make sure that sigchain owns SIGSEGV.
+ EnsureFrontOfChain(SIGSEGV);
}
self->SetClassLoaderOverride(old_class_loader.get());
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index af29468062..86af6d44db 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -203,8 +203,7 @@ struct JdwpState {
*/
void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags,
const JValue* returnValue)
- REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* A field of interest has been accessed or modified. This is used for field access and field
@@ -215,8 +214,7 @@ struct JdwpState {
*/
void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr,
const JValue* fieldValue, bool is_modification)
- REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* An exception has been thrown.
@@ -225,22 +223,19 @@ struct JdwpState {
*/
void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
const EventLocation* pCatchLoc, mirror::Object* thisPtr)
- REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* A thread has started or stopped.
*/
void PostThreadChange(Thread* thread, bool start)
- REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Class has been prepared.
*/
void PostClassPrepare(mirror::Class* klass)
- REQUIRES(!Locks::jdwp_event_list_lock_, !jdwp_token_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* The VM is about to stop.
@@ -264,7 +259,7 @@ struct JdwpState {
void SendRequest(ExpandBuf* pReq);
void ResetState()
- REQUIRES(!Locks::jdwp_event_list_lock_)
+ REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
/* atomic ops to get next serial number */
@@ -273,7 +268,7 @@ struct JdwpState {
void Run()
REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_, !thread_start_lock_,
- !attach_lock_, !Locks::jdwp_event_list_lock_);
+ !attach_lock_, !event_list_lock_);
/*
* Register an event by adding it to the event list.
@@ -282,25 +277,25 @@ struct JdwpState {
* may discard its pointer after calling this.
*/
JdwpError RegisterEvent(JdwpEvent* pEvent)
- REQUIRES(!Locks::jdwp_event_list_lock_)
+ REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Unregister an event, given the requestId.
*/
void UnregisterEventById(uint32_t requestId)
- REQUIRES(!Locks::jdwp_event_list_lock_)
+ REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
void UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass)
- REQUIRES(!Locks::jdwp_event_list_lock_)
+ REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Unregister all events.
*/
void UnregisterAll()
- REQUIRES(!Locks::jdwp_event_list_lock_)
+ REQUIRES(!event_list_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -315,16 +310,16 @@ struct JdwpState {
ObjectId threadId)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
void CleanupMatchList(const std::vector<JdwpEvent*>& match_list)
- REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void EventFinish(ExpandBuf* pReq);
bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- REQUIRES(!Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void UnregisterEvent(JdwpEvent* pEvent)
- REQUIRES(Locks::jdwp_event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov);
/*
@@ -392,8 +387,9 @@ struct JdwpState {
AtomicInteger event_serial_;
// Linked list of events requested by the debugger (breakpoints, class prep, etc).
- JdwpEvent* event_list_ GUARDED_BY(Locks::jdwp_event_list_lock_);
- size_t event_list_size_ GUARDED_BY(Locks::jdwp_event_list_lock_); // Number of elements in event_list_.
+ Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_BEFORE(Locks::breakpoint_lock_);
+ JdwpEvent* event_list_ GUARDED_BY(event_list_lock_);
+ size_t event_list_size_ GUARDED_BY(event_list_lock_); // Number of elements in event_list_.
// Used to synchronize JDWP command handler thread and event threads so only one
// thread does JDWP stuff at a time. This prevent from interleaving command handling
@@ -414,7 +410,7 @@ struct JdwpState {
// When the runtime shuts down, it needs to stop JDWP command handler thread by closing the
// JDWP connection. However, if the JDWP thread is processing a command, it needs to wait
// for the command to finish so we can send its reply before closing the connection.
- Mutex shutdown_lock_ ACQUIRED_AFTER(Locks::jdwp_event_list_lock_);
+ Mutex shutdown_lock_ ACQUIRED_AFTER(event_list_lock_);
ConditionVariable shutdown_cond_ GUARDED_BY(shutdown_lock_);
bool processing_request_ GUARDED_BY(shutdown_lock_);
};
diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc
index b13d565ec2..0aa04c10ca 100644
--- a/runtime/jdwp/jdwp_adb.cc
+++ b/runtime/jdwp/jdwp_adb.cc
@@ -227,7 +227,7 @@ bool JdwpAdbState::Accept() {
const int sleep_max_ms = 2*1000;
char buff[5];
- int sock = socket(PF_UNIX, SOCK_STREAM, 0);
+ int sock = socket(AF_UNIX, SOCK_SEQPACKET, 0);
if (sock < 0) {
PLOG(ERROR) << "Could not create ADB control socket";
return false;
@@ -264,7 +264,7 @@ bool JdwpAdbState::Accept() {
* up after a few minutes in case somebody ships an app with
* the debuggable flag set.
*/
- int ret = connect(ControlSock(), &control_addr_.controlAddrPlain, control_addr_len_);
+ int ret = connect(ControlSock(), &control_addr_.controlAddrPlain, control_addr_len_);
if (!ret) {
int control_sock = ControlSock();
#ifdef ART_TARGET_ANDROID
@@ -278,7 +278,7 @@ bool JdwpAdbState::Accept() {
/* now try to send our pid to the ADB daemon */
ret = TEMP_FAILURE_RETRY(send(control_sock, buff, 4, 0));
- if (ret >= 0) {
+ if (ret == 4) {
VLOG(jdwp) << StringPrintf("PID sent as '%.*s' to ADB", 4, buff);
break;
}
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 36d733ea08..96249f9b58 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -237,7 +237,7 @@ JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
/*
* Add to list.
*/
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
if (event_list_ != nullptr) {
pEvent->next = event_list_;
event_list_->prev = pEvent;
@@ -256,7 +256,7 @@ void JdwpState::UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass) {
StackHandleScope<1> hs(Thread::Current());
Handle<mirror::Class> h_klass(hs.NewHandle(klass));
std::vector<JdwpEvent*> to_remove;
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
for (JdwpEvent* cur_event = event_list_; cur_event != nullptr; cur_event = cur_event->next) {
// Fill in the to_remove list
bool found_event = false;
@@ -356,7 +356,7 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
void JdwpState::UnregisterEventById(uint32_t requestId) {
bool found = false;
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
for (JdwpEvent* pEvent = event_list_; pEvent != nullptr; pEvent = pEvent->next) {
if (pEvent->requestId == requestId) {
@@ -383,7 +383,7 @@ void JdwpState::UnregisterEventById(uint32_t requestId) {
* Remove all entries from the event list.
*/
void JdwpState::UnregisterAll() {
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
JdwpEvent* pEvent = event_list_;
while (pEvent != nullptr) {
@@ -593,7 +593,7 @@ void JdwpState::FindMatchingEventsLocked(JdwpEventKind event_kind, const ModBask
*/
bool JdwpState::FindMatchingEvents(JdwpEventKind event_kind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list) {
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
match_list->reserve(event_list_size_);
FindMatchingEventsLocked(event_kind, basket, match_list);
return !match_list->empty();
@@ -908,7 +908,7 @@ void JdwpState::PostLocationEvent(const EventLocation* pLoc, mirror::Object* thi
std::vector<JdwpEvent*> match_list;
{
// We use the locked version because we have multiple possible match events.
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
match_list.reserve(event_list_size_);
if ((eventFlags & Dbg::kBreakpoint) != 0) {
FindMatchingEventsLocked(EK_BREAKPOINT, basket, &match_list);
@@ -955,7 +955,7 @@ void JdwpState::PostLocationEvent(const EventLocation* pLoc, mirror::Object* thi
}
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1041,7 +1041,7 @@ void JdwpState::PostFieldEvent(const EventLocation* pLoc, ArtField* field,
}
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1103,7 +1103,7 @@ void JdwpState::PostThreadChange(Thread* thread, bool start) {
}
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1213,7 +1213,7 @@ void JdwpState::PostException(const EventLocation* pThrowLoc, mirror::Throwable*
}
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CleanupMatchList(match_list);
}
@@ -1295,7 +1295,7 @@ void JdwpState::PostClassPrepare(mirror::Class* klass) {
}
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CleanupMatchList(match_list);
}
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 64ed724afc..e6c60685cc 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -227,6 +227,7 @@ JdwpState::JdwpState(const JdwpOptions* options)
last_activity_time_ms_(0),
request_serial_(0x10000000),
event_serial_(0x20000000),
+ event_list_lock_("JDWP event list lock", kJdwpEventListLock),
event_list_(nullptr),
event_list_size_(0),
jdwp_token_lock_("JDWP token lock"),
@@ -238,6 +239,7 @@ JdwpState::JdwpState(const JdwpOptions* options)
shutdown_lock_("JDWP shutdown lock", kJdwpShutdownLock),
shutdown_cond_("JDWP shutdown condition variable", shutdown_lock_),
processing_request_(false) {
+ Locks::AddToExpectedMutexesOnWeakRefAccess(&event_list_lock_);
}
/*
@@ -330,7 +332,7 @@ void JdwpState::ResetState() {
UnregisterAll();
{
- MutexLock mu(Thread::Current(), *Locks::jdwp_event_list_lock_);
+ MutexLock mu(Thread::Current(), event_list_lock_);
CHECK(event_list_ == nullptr);
}
@@ -380,6 +382,8 @@ JdwpState::~JdwpState() {
CHECK(netState == nullptr);
ResetState();
+
+ Locks::RemoveFromExpectedMutexesOnWeakRefAccess(&event_list_lock_);
}
/*
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index bd7251baeb..510f5f00a6 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -35,6 +35,11 @@ std::ostream& operator<<(std::ostream& os, const ObjectRegistryEntry& rhs) {
ObjectRegistry::ObjectRegistry()
: lock_("ObjectRegistry lock", kJdwpObjectRegistryLock), next_id_(1) {
+ Locks::AddToExpectedMutexesOnWeakRefAccess(&lock_);
+}
+
+ObjectRegistry::~ObjectRegistry() {
+ Locks::RemoveFromExpectedMutexesOnWeakRefAccess(&lock_);
}
JDWP::RefTypeId ObjectRegistry::AddRefType(ObjPtr<mirror::Class> c) {
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 9cacc66c32..8754631e1b 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -62,6 +62,7 @@ std::ostream& operator<<(std::ostream& os, const ObjectRegistryEntry& rhs);
class ObjectRegistry {
public:
ObjectRegistry();
+ ~ObjectRegistry();
JDWP::ObjectId Add(ObjPtr<mirror::Object> o)
REQUIRES_SHARED(Locks::mutator_lock_)
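Both JdwpState and ObjectRegistry now pair Locks::AddToExpectedMutexesOnWeakRefAccess in their constructors with Locks::RemoveFromExpectedMutexesOnWeakRefAccess in their destructors. A minimal sketch of that pairing written as a scoped guard; ScopedExpectedMutexRegistration is a hypothetical helper for illustration, not part of this change, which makes the two calls directly as shown above:

// Hypothetical RAII helper illustrating the register/unregister pairing.
class ScopedExpectedMutexRegistration {
 public:
  explicit ScopedExpectedMutexRegistration(Mutex* mu) : mu_(mu) {
    Locks::AddToExpectedMutexesOnWeakRefAccess(mu_);
  }
  ~ScopedExpectedMutexRegistration() {
    Locks::RemoveFromExpectedMutexesOnWeakRefAccess(mu_);
  }
 private:
  Mutex* const mu_;
};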
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 1ec4749146..3631a9d467 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -325,16 +325,12 @@ void Jit::DeleteThreadPool() {
}
void Jit::StartProfileSaver(const std::string& filename,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir) {
+ const std::vector<std::string>& code_paths) {
if (profile_saver_options_.IsEnabled()) {
ProfileSaver::Start(profile_saver_options_,
filename,
code_cache_.get(),
- code_paths,
- foreign_dex_profile_path,
- app_dir);
+ code_paths);
}
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index d566799340..4f5bebfbf9 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -136,14 +136,8 @@ class Jit {
// Starts the profile saver if the config options allow profile recording.
// The profile will be stored in the specified `filename` and will contain
// information collected from the given `code_paths` (a set of dex locations).
- // The `foreign_dex_profile_path` is the path where the saver will put the
- // profile markers for loaded dex files which are not owned by the application.
- // The `app_dir` is the application directory and is used to decide which
- // dex files belong to the application.
void StartProfileSaver(const std::string& filename,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir);
+ const std::vector<std::string>& code_paths);
void StopProfileSaver();
void DumpForSigQuit(std::ostream& os) REQUIRES(!lock_);
@@ -285,6 +279,10 @@ class JitOptions {
code_cache_initial_capacity_(0),
code_cache_max_capacity_(0),
compile_threshold_(0),
+ warmup_threshold_(0),
+ osr_threshold_(0),
+ priority_thread_weight_(0),
+ invoke_transition_weight_(0),
dump_info_on_shutdown_(false) {}
DISALLOW_COPY_AND_ASSIGN(JitOptions);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 62acedfb1b..e9a5ae5fa9 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -211,6 +211,7 @@ class ScopedCodeCacheWrite : ScopedTrace {
uint8_t* JitCodeCache::CommitCode(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
+ uint8_t* method_info,
uint8_t* roots_data,
size_t frame_size_in_bytes,
size_t core_spill_mask,
@@ -225,6 +226,7 @@ uint8_t* JitCodeCache::CommitCode(Thread* self,
uint8_t* result = CommitCodeInternal(self,
method,
stack_map,
+ method_info,
roots_data,
frame_size_in_bytes,
core_spill_mask,
@@ -242,6 +244,7 @@ uint8_t* JitCodeCache::CommitCode(Thread* self,
result = CommitCodeInternal(self,
method,
stack_map,
+ method_info,
roots_data,
frame_size_in_bytes,
core_spill_mask,
@@ -510,6 +513,7 @@ void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
+ uint8_t* method_info,
uint8_t* roots_data,
size_t frame_size_in_bytes,
size_t core_spill_mask,
@@ -547,6 +551,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
new (method_header) OatQuickMethodHeader(
code_ptr - stack_map,
+ code_ptr - method_info,
frame_size_in_bytes,
core_spill_mask,
fp_spill_mask,
@@ -739,12 +744,14 @@ void JitCodeCache::ClearData(Thread* self,
size_t JitCodeCache::ReserveData(Thread* self,
size_t stack_map_size,
+ size_t method_info_size,
size_t number_of_roots,
ArtMethod* method,
uint8_t** stack_map_data,
+ uint8_t** method_info_data,
uint8_t** roots_data) {
size_t table_size = ComputeRootTableSize(number_of_roots);
- size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
+ size_t size = RoundUp(stack_map_size + method_info_size + table_size, sizeof(void*));
uint8_t* result = nullptr;
{
@@ -774,11 +781,13 @@ size_t JitCodeCache::ReserveData(Thread* self,
if (result != nullptr) {
*roots_data = result;
*stack_map_data = result + table_size;
+ *method_info_data = *stack_map_data + stack_map_size;
FillRootTableLength(*roots_data, number_of_roots);
return size;
} else {
*roots_data = nullptr;
*stack_map_data = nullptr;
+ *method_info_data = nullptr;
return 0;
}
}
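After this change the single region returned by ReserveData is laid out as root table, then stack maps, then method info. A rough sketch of the resulting pointers, assuming the allocation above succeeded (illustrative, not runtime code):

// Layout implied by ReserveData above (illustrative only):
//   |<-- table_size -->|<-- stack_map_size -->|<-- method_info_size -->|
//   result             stack_map_data          method_info_data
uint8_t* roots_data = result;
uint8_t* stack_map_data = result + table_size;
uint8_t* method_info_data = stack_map_data + stack_map_size;
// Total reservation, rounded up to pointer alignment:
size_t size = RoundUp(stack_map_size + method_info_size + table_size, sizeof(void*));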
@@ -1262,12 +1271,23 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
std::vector<ProfileMethodInfo::ProfileClassReference> profile_classes;
const InlineCache& cache = info->cache_[i];
+ ArtMethod* caller = info->GetMethod();
+ bool is_missing_types = false;
for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
mirror::Class* cls = cache.classes_[k].Read();
if (cls == nullptr) {
break;
}
+ // Check if the receiver is in the boot class path or if it's in the
+ // same class loader as the caller. If not, skip it, as there is not
+ // much we can do during AOT.
+ if (!cls->IsBootStrapClassLoaded() &&
+ caller->GetClassLoader() != cls->GetClassLoader()) {
+ is_missing_types = true;
+ continue;
+ }
+
const DexFile* class_dex_file = nullptr;
dex::TypeIndex type_index;
@@ -1284,17 +1304,20 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
}
if (!type_index.IsValid()) {
// Could be a proxy class or an array for which we couldn't find the type index.
+ is_missing_types = true;
continue;
}
if (ContainsElement(dex_base_locations, class_dex_file->GetBaseLocation())) {
// Only consider classes from the same apk (including multidex).
profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
class_dex_file, type_index);
+ } else {
+ is_missing_types = true;
}
}
if (!profile_classes.empty()) {
inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
- cache.dex_pc_, profile_classes);
+ cache.dex_pc_, is_missing_types, profile_classes);
}
}
methods.emplace_back(/*ProfileMethodInfo*/
@@ -1343,7 +1366,10 @@ ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self)
MutexLock mu(self, lock_);
ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
if (info != nullptr) {
- info->IncrementInlineUse();
+ if (!info->IncrementInlineUse()) {
+ // Overflow of inlining uses, just bail.
+ return nullptr;
+ }
}
return info;
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index c970979eaa..db214e7983 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -105,6 +105,7 @@ class JitCodeCache {
uint8_t* CommitCode(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
+ uint8_t* method_info,
uint8_t* roots_data,
size_t frame_size_in_bytes,
size_t core_spill_mask,
@@ -129,10 +130,12 @@ class JitCodeCache {
// for storing `number_of_roots` roots. Returns null if there is no more room.
// Return the number of bytes allocated.
size_t ReserveData(Thread* self,
- size_t size,
+ size_t stack_map_size,
+ size_t method_info_size,
size_t number_of_roots,
ArtMethod* method,
uint8_t** stack_map_data,
+ uint8_t** method_info_data,
uint8_t** roots_data)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
@@ -249,6 +252,7 @@ class JitCodeCache {
uint8_t* CommitCodeInternal(Thread* self,
ArtMethod* method,
uint8_t* stack_map,
+ uint8_t* method_info,
uint8_t* roots_data,
size_t frame_size_in_bytes,
size_t core_spill_mask,
diff --git a/runtime/jit/profile_compilation_info.cc b/runtime/jit/profile_compilation_info.cc
index 627cc93f38..24ea27529a 100644
--- a/runtime/jit/profile_compilation_info.cc
+++ b/runtime/jit/profile_compilation_info.cc
@@ -37,7 +37,9 @@
namespace art {
const uint8_t ProfileCompilationInfo::kProfileMagic[] = { 'p', 'r', 'o', '\0' };
-const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '3', '\0' }; // inline caches
+// Last profile version: fix profman merges. Update profile version to force
+// regeneration of possibly faulty profiles.
+const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '0', '5', '\0' };
static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
@@ -46,16 +48,27 @@ static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
// using the same test profile.
static constexpr bool kDebugIgnoreChecksum = false;
-static constexpr uint8_t kMegamorphicEncoding = 7;
+static constexpr uint8_t kIsMissingTypesEncoding = 6;
+static constexpr uint8_t kIsMegamorphicEncoding = 7;
static_assert(sizeof(InlineCache::kIndividualCacheSize) == sizeof(uint8_t),
"InlineCache::kIndividualCacheSize does not have the expect type size");
-static_assert(InlineCache::kIndividualCacheSize < kMegamorphicEncoding,
+static_assert(InlineCache::kIndividualCacheSize < kIsMegamorphicEncoding,
"InlineCache::kIndividualCacheSize is larger than expected");
+static_assert(InlineCache::kIndividualCacheSize < kIsMissingTypesEncoding,
+ "InlineCache::kIndividualCacheSize is larger than expected");
+
+ProfileCompilationInfo::ProfileCompilationInfo(const ProfileCompilationInfo& pci) {
+ MergeWith(pci);
+}
+
+ProfileCompilationInfo::~ProfileCompilationInfo() {
+ ClearProfile();
+}
void ProfileCompilationInfo::DexPcData::AddClass(uint16_t dex_profile_idx,
const dex::TypeIndex& type_idx) {
- if (is_megamorphic) {
+ if (is_megamorphic || is_missing_types) {
return;
}
classes.emplace(dex_profile_idx, type_idx);
@@ -206,7 +219,8 @@ static constexpr size_t kLineHeaderSize =
* Classes are grouped per their dex files and the line
* `dex_profile_index,class_id1,class_id2...,dex_profile_index2,...` encodes the
* mapping from `dex_profile_index` to the set of classes `class_id1,class_id2...`
- * M stands for megamorphic and it's encoded as the byte kMegamorphicEncoding.
+ * M stands for megamorphic or missing types and it's encoded as either
+ * the byte kIsMegamorphicEncoding or kIsMissingTypesEncoding.
* When present, there will be no class ids following.
**/
bool ProfileCompilationInfo::Save(int fd) {
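A sketch of how a single dex_pc entry is serialized under the scheme documented above; EncodeDexPcEntry is an illustrative helper, not a function in this file, but the buffer helper and the two encoding constants are the ones used by the save path:

// Illustrative only: writes one inline-cache entry in the documented format.
void EncodeDexPcEntry(std::vector<uint8_t>* buffer,
                      uint16_t dex_pc,
                      const ProfileCompilationInfo::DexPcData& data) {
  AddUintToBuffer(buffer, dex_pc);
  if (data.is_missing_types) {
    AddUintToBuffer(buffer, kIsMissingTypesEncoding);  // No class ids follow.
  } else if (data.is_megamorphic) {
    AddUintToBuffer(buffer, kIsMegamorphicEncoding);   // No class ids follow.
  } else {
    // Otherwise the dex_profile_index,class_id1,class_id2,... groups follow.
  }
}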
@@ -222,20 +236,21 @@ bool ProfileCompilationInfo::Save(int fd) {
DCHECK_LE(info_.size(), std::numeric_limits<uint8_t>::max());
AddUintToBuffer(&buffer, static_cast<uint8_t>(info_.size()));
- for (const auto& it : info_) {
+ // Dex files must be written in the order of their profile index. This
+ // avoids writing the index in the output file and simplifies the parsing logic.
+ for (const DexFileData* dex_data_ptr : info_) {
+ const DexFileData& dex_data = *dex_data_ptr;
if (buffer.size() > kMaxSizeToKeepBeforeWriting) {
if (!WriteBuffer(fd, buffer.data(), buffer.size())) {
return false;
}
buffer.clear();
}
- const std::string& dex_location = it.first;
- const DexFileData& dex_data = it.second;
// Note that we allow dex files without any methods or classes, so that
// inline caches can refer to valid dex files.
- if (dex_location.size() >= kMaxDexFileKeyLength) {
+ if (dex_data.profile_key.size() >= kMaxDexFileKeyLength) {
LOG(WARNING) << "DexFileKey exceeds allocated limit";
return false;
}
@@ -245,19 +260,19 @@ bool ProfileCompilationInfo::Save(int fd) {
uint32_t methods_region_size = GetMethodsRegionSize(dex_data);
size_t required_capacity = buffer.size() +
kLineHeaderSize +
- dex_location.size() +
+ dex_data.profile_key.size() +
sizeof(uint16_t) * dex_data.class_set.size() +
methods_region_size;
buffer.reserve(required_capacity);
- DCHECK_LE(dex_location.size(), std::numeric_limits<uint16_t>::max());
+ DCHECK_LE(dex_data.profile_key.size(), std::numeric_limits<uint16_t>::max());
DCHECK_LE(dex_data.class_set.size(), std::numeric_limits<uint16_t>::max());
- AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_location.size()));
+ AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_data.profile_key.size()));
AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_data.class_set.size()));
AddUintToBuffer(&buffer, methods_region_size); // uint32_t
AddUintToBuffer(&buffer, dex_data.checksum); // uint32_t
- AddStringToBuffer(&buffer, dex_location);
+ AddStringToBuffer(&buffer, dex_data.profile_key);
for (const auto& method_it : dex_data.method_map) {
AddUintToBuffer(&buffer, method_it.first);
@@ -289,10 +304,19 @@ void ProfileCompilationInfo::AddInlineCacheToBuffer(std::vector<uint8_t>* buffer
// Add the dex pc.
AddUintToBuffer(buffer, dex_pc);
- if (dex_pc_data.is_megamorphic) {
- // Add the megamorphic encoding if needed and continue.
- // If megamorphic, we don't add the rest of the classes.
- AddUintToBuffer(buffer, kMegamorphicEncoding);
+ // Add the megamorphic/missing_types encoding if needed and continue.
+ // In either case we don't add any classes to the profile, so there's
+ // no point in continuing.
+ // TODO(calin): in case we miss types there is still value in adding the
+ // rest of the classes. They can be added without bumping the profile version.
+ if (dex_pc_data.is_missing_types) {
+ DCHECK(!dex_pc_data.is_megamorphic); // at this point the megamorphic flag should not be set.
+ DCHECK_EQ(classes.size(), 0u);
+ AddUintToBuffer(buffer, kIsMissingTypesEncoding);
+ continue;
+ } else if (dex_pc_data.is_megamorphic) {
+ DCHECK_EQ(classes.size(), 0u);
+ AddUintToBuffer(buffer, kIsMegamorphicEncoding);
continue;
}
@@ -353,23 +377,52 @@ void ProfileCompilationInfo::GroupClassesByDex(
}
ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::GetOrAddDexFileData(
- const std::string& dex_location,
+ const std::string& profile_key,
uint32_t checksum) {
- auto info_it = info_.FindOrAdd(dex_location, DexFileData(checksum, info_.size()));
- if (info_.size() > std::numeric_limits<uint8_t>::max()) {
+ const auto& profile_index_it = profile_key_map_.FindOrAdd(profile_key, profile_key_map_.size());
+ if (profile_key_map_.size() > std::numeric_limits<uint8_t>::max()) {
// Allow only 255 dex files to be profiled. This allows us to save bytes
// when encoding. The number is well above what we expect for normal applications.
if (kIsDebugBuild) {
- LOG(WARNING) << "Exceeded the maximum number of dex files (255). Something went wrong";
+ LOG(ERROR) << "Exceeded the maximum number of dex files (255). Something went wrong";
}
- info_.erase(dex_location);
+ profile_key_map_.erase(profile_key);
+ return nullptr;
+ }
+
+ uint8_t profile_index = profile_index_it->second;
+ if (info_.size() <= profile_index) {
+ // This is a new addition. Add it to the info_ array.
+ info_.emplace_back(new DexFileData(profile_key, checksum, profile_index));
+ }
+ DexFileData* result = info_[profile_index];
+ // DCHECK that profile info map key is consistent with the one stored in the dex file data.
+ // This should always be the case since the cache map is managed by ProfileCompilationInfo.
+ DCHECK_EQ(profile_key, result->profile_key);
+ DCHECK_EQ(profile_index, result->profile_index);
+
+ // Check that the checksum matches.
+ // This may differ if, for example, the dex file was updated and
+ // we had a record of the old one.
+ if (result->checksum != checksum) {
+ LOG(WARNING) << "Checksum mismatch for dex " << profile_key;
return nullptr;
}
- if (info_it->second.checksum != checksum) {
- LOG(WARNING) << "Checksum mismatch for dex " << dex_location;
+ return result;
+}
+
+const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexData(
+ const std::string& profile_key) const {
+ const auto& profile_index_it = profile_key_map_.find(profile_key);
+ if (profile_index_it == profile_key_map_.end()) {
return nullptr;
}
- return &info_it->second;
+
+ uint8_t profile_index = profile_index_it->second;
+ const DexFileData* result = info_[profile_index];
+ DCHECK_EQ(profile_key, result->profile_key);
+ DCHECK_EQ(profile_index, result->profile_index);
+ return result;
}
bool ProfileCompilationInfo::AddResolvedClasses(const DexCacheResolvedClasses& classes) {
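The rewritten storage keeps two views that must stay in sync: profile_key_map_ maps a key to a profile index, and info_[index] holds the DexFileData carrying the same key and index back. A minimal sketch of that invariant, written as if it were a member of ProfileCompilationInfo; CheckInvariantSketch is illustrative and not part of this change:

// Illustrative only: the consistency the DCHECKs above rely on.
void ProfileCompilationInfo::CheckInvariantSketch() const {
  for (const auto& entry : profile_key_map_) {
    const std::string& key = entry.first;
    uint8_t index = entry.second;
    const DexFileData* data = info_[index];
    DCHECK_EQ(key, data->profile_key);      // Same key stored back in the data.
    DCHECK_EQ(index, data->profile_index);  // Vector slot matches profile index.
  }
}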
@@ -393,9 +446,7 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t method_index,
const OfflineProfileMethodInfo& pmi) {
- DexFileData* const data = GetOrAddDexFileData(
- GetProfileDexFileKey(dex_location),
- dex_checksum);
+ DexFileData* const data = GetOrAddDexFileData(GetProfileDexFileKey(dex_location), dex_checksum);
if (data == nullptr) { // checksum mismatch
return false;
}
@@ -403,11 +454,21 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
for (const auto& pmi_inline_cache_it : pmi.inline_caches) {
uint16_t pmi_ic_dex_pc = pmi_inline_cache_it.first;
const DexPcData& pmi_ic_dex_pc_data = pmi_inline_cache_it.second;
- auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(pmi_ic_dex_pc);
+ DexPcData& dex_pc_data = inline_cache_it->second.FindOrAdd(pmi_ic_dex_pc)->second;
+ if (dex_pc_data.is_missing_types || dex_pc_data.is_megamorphic) {
+ // We are already megamorphic or we are missing types; no point in going forward.
+ continue;
+ }
+
+ if (pmi_ic_dex_pc_data.is_missing_types) {
+ dex_pc_data.SetIsMissingTypes();
+ continue;
+ }
if (pmi_ic_dex_pc_data.is_megamorphic) {
- dex_pc_data_it->second.SetMegamorphic();
+ dex_pc_data.SetIsMegamorphic();
continue;
}
+
for (const ClassReference& class_ref : pmi_ic_dex_pc_data.classes) {
const DexReference& dex_ref = pmi.dex_references[class_ref.dex_profile_index];
DexFileData* class_dex_data = GetOrAddDexFileData(
@@ -416,7 +477,7 @@ bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
if (class_dex_data == nullptr) { // checksum mismatch
return false;
}
- dex_pc_data_it->second.AddClass(class_dex_data->profile_index, class_ref.type_index);
+ dex_pc_data.AddClass(class_dex_data->profile_index, class_ref.type_index);
}
}
return true;
@@ -432,6 +493,11 @@ bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
auto inline_cache_it = data->method_map.FindOrAdd(pmi.dex_method_index);
for (const ProfileMethodInfo::ProfileInlineCache& cache : pmi.inline_caches) {
+ if (cache.is_missing_types) {
+ auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(cache.dex_pc);
+ dex_pc_data_it->second.SetIsMissingTypes();
+ continue;
+ }
for (const ProfileMethodInfo::ProfileClassReference& class_ref : cache.classes) {
DexFileData* class_dex_data = GetOrAddDexFileData(
GetProfileDexFileKey(class_ref.dex_file->GetLocation()),
@@ -440,6 +506,10 @@ bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi) {
return false;
}
auto dex_pc_data_it = inline_cache_it->second.FindOrAdd(cache.dex_pc);
+ if (dex_pc_data_it->second.is_missing_types) {
+ // Don't bother adding classes if we are missing types.
+ break;
+ }
dex_pc_data_it->second.AddClass(class_dex_data->profile_index, class_ref.type_index);
}
}
@@ -457,13 +527,13 @@ bool ProfileCompilationInfo::AddClassIndex(const std::string& dex_location,
return true;
}
-#define READ_UINT(type, buffer, dest, error) \
- do { \
- if (!buffer.ReadUintAndAdvance<type>(&dest)) { \
- *error = "Could not read "#dest; \
- return false; \
- } \
- } \
+#define READ_UINT(type, buffer, dest, error) \
+ do { \
+ if (!(buffer).ReadUintAndAdvance<type>(&(dest))) { \
+ *(error) = "Could not read "#dest; \
+ return false; \
+ } \
+ } \
while (false)
bool ProfileCompilationInfo::ReadInlineCache(SafeBuffer& buffer,
@@ -478,8 +548,12 @@ bool ProfileCompilationInfo::ReadInlineCache(SafeBuffer& buffer,
READ_UINT(uint16_t, buffer, dex_pc, error);
READ_UINT(uint8_t, buffer, dex_to_classes_map_size, error);
auto dex_pc_data_it = inline_cache->FindOrAdd(dex_pc);
- if (dex_to_classes_map_size == kMegamorphicEncoding) {
- dex_pc_data_it->second.SetMegamorphic();
+ if (dex_to_classes_map_size == kIsMissingTypesEncoding) {
+ dex_pc_data_it->second.SetIsMissingTypes();
+ continue;
+ }
+ if (dex_to_classes_map_size == kIsMegamorphicEncoding) {
+ dex_pc_data_it->second.SetIsMegamorphic();
continue;
}
for (; dex_to_classes_map_size > 0; dex_to_classes_map_size--) {
@@ -708,6 +782,8 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::ReadProfileLine
return kProfileLoadSuccess;
}
+// TODO(calin): Fix this API. ProfileCompilationInfo::Load should be static and
+// return a unique pointer to a ProfileCompilationInfo upon success.
bool ProfileCompilationInfo::Load(int fd) {
std::string error;
ProfileLoadSatus status = LoadInternal(fd, &error);
@@ -725,6 +801,10 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::LoadInternal(
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK_GE(fd, 0);
+ if (!IsEmpty()) {
+ return kProfileLoadWouldOverwiteData;
+ }
+
struct stat stat_buffer;
if (fstat(fd, &stat_buffer) != 0) {
return kProfileLoadIOError;
@@ -775,10 +855,10 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
// the current profile info.
// Note that the number of elements should be very small, so this should not
// be a performance issue.
- for (const auto& other_it : other.info_) {
- auto info_it = info_.find(other_it.first);
- if ((info_it != info_.end()) && (info_it->second.checksum != other_it.second.checksum)) {
- LOG(WARNING) << "Checksum mismatch for dex " << other_it.first;
+ for (const DexFileData* other_dex_data : other.info_) {
+ const DexFileData* dex_data = FindDexData(other_dex_data->profile_key);
+ if ((dex_data != nullptr) && (dex_data->checksum != other_dex_data->checksum)) {
+ LOG(WARNING) << "Checksum mismatch for dex " << other_dex_data->profile_key;
return false;
}
}
@@ -795,36 +875,37 @@ bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other) {
// First, build a mapping from other_dex_profile_index to this_dex_profile_index.
// This will make sure that the ClassReferences will point to the correct dex file.
SafeMap<uint8_t, uint8_t> dex_profile_index_remap;
- for (const auto& other_it : other.info_) {
- const std::string& other_dex_location = other_it.first;
- const DexFileData& other_dex_data = other_it.second;
- auto info_it = info_.FindOrAdd(other_dex_location, DexFileData(other_dex_data.checksum, 0));
- const DexFileData& dex_data = info_it->second;
- dex_profile_index_remap.Put(other_dex_data.profile_index, dex_data.profile_index);
+ for (const DexFileData* other_dex_data : other.info_) {
+ const DexFileData* dex_data = GetOrAddDexFileData(other_dex_data->profile_key,
+ other_dex_data->checksum);
+ if (dex_data == nullptr) {
+ return false; // Could happen if we exceed the number of allowed dex files.
+ }
+ dex_profile_index_remap.Put(other_dex_data->profile_index, dex_data->profile_index);
}
// Merge the actual profile data.
- for (const auto& other_it : other.info_) {
- const std::string& other_dex_location = other_it.first;
- const DexFileData& other_dex_data = other_it.second;
- auto info_it = info_.find(other_dex_location);
- DCHECK(info_it != info_.end());
+ for (const DexFileData* other_dex_data : other.info_) {
+ DexFileData* dex_data = const_cast<DexFileData*>(FindDexData(other_dex_data->profile_key));
+ DCHECK(dex_data != nullptr);
// Merge the classes.
- info_it->second.class_set.insert(other_dex_data.class_set.begin(),
- other_dex_data.class_set.end());
+ dex_data->class_set.insert(other_dex_data->class_set.begin(),
+ other_dex_data->class_set.end());
// Merge the methods and the inline caches.
- for (const auto& other_method_it : other_dex_data.method_map) {
+ for (const auto& other_method_it : other_dex_data->method_map) {
uint16_t other_method_index = other_method_it.first;
- auto method_it = info_it->second.method_map.FindOrAdd(other_method_index);
+ auto method_it = dex_data->method_map.FindOrAdd(other_method_index);
const auto& other_inline_cache = other_method_it.second;
for (const auto& other_ic_it : other_inline_cache) {
uint16_t other_dex_pc = other_ic_it.first;
const ClassSet& other_class_set = other_ic_it.second.classes;
auto class_set = method_it->second.FindOrAdd(other_dex_pc);
- if (other_ic_it.second.is_megamorphic) {
- class_set->second.SetMegamorphic();
+ if (other_ic_it.second.is_missing_types) {
+ class_set->second.SetIsMissingTypes();
+ } else if (other_ic_it.second.is_megamorphic) {
+ class_set->second.SetIsMegamorphic();
} else {
for (const auto& class_it : other_class_set) {
class_set->second.AddClass(dex_profile_index_remap.Get(
@@ -855,28 +936,18 @@ const ProfileCompilationInfo::InlineCacheMap*
ProfileCompilationInfo::FindMethod(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t dex_method_index) const {
- auto info_it = info_.find(GetProfileDexFileKey(dex_location));
- if (info_it != info_.end()) {
- if (!ChecksumMatch(dex_checksum, info_it->second.checksum)) {
+ const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_location));
+ if (dex_data != nullptr) {
+ if (!ChecksumMatch(dex_checksum, dex_data->checksum)) {
return nullptr;
}
- const MethodMap& methods = info_it->second.method_map;
+ const MethodMap& methods = dex_data->method_map;
const auto method_it = methods.find(dex_method_index);
return method_it == methods.end() ? nullptr : &(method_it->second);
}
return nullptr;
}
-void ProfileCompilationInfo::DexFileToProfileIndex(
- /*out*/std::vector<DexReference>* dex_references) const {
- dex_references->resize(info_.size());
- for (const auto& info_it : info_) {
- DexReference& dex_ref = (*dex_references)[info_it.second.profile_index];
- dex_ref.dex_location = info_it.first;
- dex_ref.dex_checksum = info_it.second.checksum;
- }
-}
-
bool ProfileCompilationInfo::GetMethod(const std::string& dex_location,
uint32_t dex_checksum,
uint16_t dex_method_index,
@@ -886,7 +957,12 @@ bool ProfileCompilationInfo::GetMethod(const std::string& dex_location,
return false;
}
- DexFileToProfileIndex(&pmi->dex_references);
+ pmi->dex_references.resize(info_.size());
+ for (const DexFileData* dex_data : info_) {
+ pmi->dex_references[dex_data->profile_index].dex_location = dex_data->profile_key;
+ pmi->dex_references[dex_data->profile_index].dex_checksum = dex_data->checksum;
+ }
+
// TODO(calin): maybe expose a direct pointer to avoid copying
pmi->inline_caches = *inline_caches;
return true;
@@ -894,12 +970,12 @@ bool ProfileCompilationInfo::GetMethod(const std::string& dex_location,
bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const {
- auto info_it = info_.find(GetProfileDexFileKey(dex_file.GetLocation()));
- if (info_it != info_.end()) {
- if (!ChecksumMatch(dex_file, info_it->second.checksum)) {
+ const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_file.GetLocation()));
+ if (dex_data != nullptr) {
+ if (!ChecksumMatch(dex_file, dex_data->checksum)) {
return false;
}
- const std::set<dex::TypeIndex>& classes = info_it->second.class_set;
+ const std::set<dex::TypeIndex>& classes = dex_data->class_set;
return classes.find(type_idx) != classes.end();
}
return false;
@@ -907,16 +983,16 @@ bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, dex::TypeInd
uint32_t ProfileCompilationInfo::GetNumberOfMethods() const {
uint32_t total = 0;
- for (const auto& it : info_) {
- total += it.second.method_map.size();
+ for (const DexFileData* dex_data : info_) {
+ total += dex_data->method_map.size();
}
return total;
}
uint32_t ProfileCompilationInfo::GetNumberOfResolvedClasses() const {
uint32_t total = 0;
- for (const auto& it : info_) {
- total += it.second.class_set.size();
+ for (const DexFileData* dex_data : info_) {
+ total += dex_data->class_set.size();
}
return total;
}
@@ -949,27 +1025,27 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
os << "ProfileInfo:";
const std::string kFirstDexFileKeySubstitute = ":classes.dex";
- for (const auto& it : info_) {
+
+ for (const DexFileData* dex_data : info_) {
os << "\n";
- const std::string& location = it.first;
- const DexFileData& dex_data = it.second;
if (print_full_dex_location) {
- os << location;
+ os << dex_data->profile_key;
} else {
// Replace the (empty) multidex suffix of the first key with a substitute for easier reading.
- std::string multidex_suffix = DexFile::GetMultiDexSuffix(location);
+ std::string multidex_suffix = DexFile::GetMultiDexSuffix(dex_data->profile_key);
os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix);
}
+ os << " [index=" << static_cast<uint32_t>(dex_data->profile_index) << "]";
const DexFile* dex_file = nullptr;
if (dex_files != nullptr) {
for (size_t i = 0; i < dex_files->size(); i++) {
- if (location == (*dex_files)[i]->GetLocation()) {
+ if (dex_data->profile_key == (*dex_files)[i]->GetLocation()) {
dex_file = (*dex_files)[i];
}
}
}
os << "\n\tmethods: ";
- for (const auto method_it : dex_data.method_map) {
+ for (const auto& method_it : dex_data->method_map) {
if (dex_file != nullptr) {
os << "\n\t\t" << dex_file->PrettyMethod(method_it.first, true);
} else {
@@ -979,8 +1055,10 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
os << "[";
for (const auto& inline_cache_it : method_it.second) {
os << "{" << std::hex << inline_cache_it.first << std::dec << ":";
- if (inline_cache_it.second.is_megamorphic) {
- os << "M";
+ if (inline_cache_it.second.is_missing_types) {
+ os << "MT";
+ } else if (inline_cache_it.second.is_megamorphic) {
+ os << "MM";
} else {
for (const ClassReference& class_ref : inline_cache_it.second.classes) {
os << "(" << static_cast<uint32_t>(class_ref.dex_profile_index)
@@ -992,7 +1070,7 @@ std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>*
os << "], ";
}
os << "\n\tclasses: ";
- for (const auto class_it : dex_data.class_set) {
+ for (const auto class_it : dex_data->class_set) {
if (dex_file != nullptr) {
os << "\n\t\t" << dex_file->PrettyType(class_it);
} else {
@@ -1016,18 +1094,17 @@ void ProfileCompilationInfo::GetClassNames(const std::vector<const DexFile*>* de
if (info_.empty()) {
return;
}
- for (const auto& it : info_) {
- const std::string& location = it.first;
- const DexFileData& dex_data = it.second;
+ for (const DexFileData* dex_data : info_) {
const DexFile* dex_file = nullptr;
if (dex_files != nullptr) {
for (size_t i = 0; i < dex_files->size(); i++) {
- if (location == (*dex_files)[i]->GetLocation()) {
+ if (dex_data->profile_key == GetProfileDexFileKey((*dex_files)[i]->GetLocation()) &&
+ dex_data->checksum == (*dex_files)[i]->GetLocationChecksum()) {
dex_file = (*dex_files)[i];
}
}
}
- for (const auto class_it : dex_data.class_set) {
+ for (const auto class_it : dex_data->class_set) {
if (dex_file != nullptr) {
class_names->insert(std::string(dex_file->PrettyType(class_it)));
}
@@ -1036,25 +1113,42 @@ void ProfileCompilationInfo::GetClassNames(const std::vector<const DexFile*>* de
}
bool ProfileCompilationInfo::Equals(const ProfileCompilationInfo& other) {
- return info_.Equals(other.info_);
+ // No need to compare profile_key_map_. That's only a cache for fast search.
+ // All the information is already in the info_ vector.
+ if (info_.size() != other.info_.size()) {
+ return false;
+ }
+ for (size_t i = 0; i < info_.size(); i++) {
+ const DexFileData& dex_data = *info_[i];
+ const DexFileData& other_dex_data = *other.info_[i];
+ if (!(dex_data == other_dex_data)) {
+ return false;
+ }
+ }
+ return true;
}
-std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses() const {
+std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses(
+ const std::unordered_set<std::string>& dex_files_locations) const {
+ std::unordered_map<std::string, std::string> key_to_location_map;
+ for (const std::string& location : dex_files_locations) {
+ key_to_location_map.emplace(GetProfileDexFileKey(location), location);
+ }
std::set<DexCacheResolvedClasses> ret;
- for (auto&& pair : info_) {
- const std::string& profile_key = pair.first;
- const DexFileData& data = pair.second;
- // TODO: Is it OK to use the same location for both base and dex location here?
- DexCacheResolvedClasses classes(profile_key, profile_key, data.checksum);
- classes.AddClasses(data.class_set.begin(), data.class_set.end());
- ret.insert(classes);
+ for (const DexFileData* dex_data : info_) {
+ const auto& it = key_to_location_map.find(dex_data->profile_key);
+ if (it != key_to_location_map.end()) {
+ DexCacheResolvedClasses classes(it->second, it->second, dex_data->checksum);
+ classes.AddClasses(dex_data->class_set.begin(), dex_data->class_set.end());
+ ret.insert(classes);
+ }
}
return ret;
}
void ProfileCompilationInfo::ClearResolvedClasses() {
- for (auto& pair : info_) {
- pair.second.class_set.clear();
+ for (DexFileData* dex_data : info_) {
+ dex_data->class_set.clear();
}
}
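GetResolvedClasses no longer exposes raw profile keys; callers pass the dex locations they are interested in and only matching entries are returned, keyed back to those locations. A hedged usage sketch; the paths and the info object are made-up examples, not values from this change:

// Illustrative caller; the locations below are example paths.
std::unordered_set<std::string> locations = {
    "/data/app/com.example/base.apk",
    "/data/app/com.example/split_config.apk",
};
std::set<DexCacheResolvedClasses> resolved = info.GetResolvedClasses(locations);
for (const DexCacheResolvedClasses& classes : resolved) {
  // classes.GetDexLocation() is one of the requested locations, with its
  // class set filtered from the profile.
}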
@@ -1062,7 +1156,8 @@ void ProfileCompilationInfo::ClearResolvedClasses() {
bool ProfileCompilationInfo::GenerateTestProfile(int fd,
uint16_t number_of_dex_files,
uint16_t method_ratio,
- uint16_t class_ratio) {
+ uint16_t class_ratio,
+ uint32_t random_seed) {
const std::string base_dex_location = "base.apk";
ProfileCompilationInfo info;
// The limits are defined by the dex specification.
@@ -1071,7 +1166,7 @@ bool ProfileCompilationInfo::GenerateTestProfile(int fd,
uint16_t number_of_methods = max_method * method_ratio / 100;
uint16_t number_of_classes = max_classes * class_ratio / 100;
- srand(MicroTime());
+ std::srand(random_seed);
// Make sure we generate more samples with a low index value.
// This makes it more likely to hit valid method/class indices in small apps.
@@ -1101,6 +1196,32 @@ bool ProfileCompilationInfo::GenerateTestProfile(int fd,
return info.Save(fd);
}
+// Naive implementation to generate a random profile file suitable for testing.
+bool ProfileCompilationInfo::GenerateTestProfile(
+ int fd,
+ std::vector<std::unique_ptr<const DexFile>>& dex_files,
+ uint32_t random_seed) {
+ std::srand(random_seed);
+ ProfileCompilationInfo info;
+ for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ const std::string& location = dex_file->GetLocation();
+ uint32_t checksum = dex_file->GetLocationChecksum();
+ for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ // Randomly add a class from the dex file (with 50% chance).
+ if (std::rand() % 2 != 0) {
+ info.AddClassIndex(location, checksum, dex::TypeIndex(dex_file->GetClassDef(i).class_idx_));
+ }
+ }
+ for (uint32_t i = 0; i < dex_file->NumMethodIds(); ++i) {
+ // Randomly add a method from the dex file (with 50% chance).
+ if (std::rand() % 2 != 0) {
+ info.AddMethodIndex(location, checksum, i);
+ }
+ }
+ }
+ return info.Save(fd);
+}
+
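Because the new overload takes an explicit random_seed, test profiles can be regenerated deterministically. A short usage sketch; the fd, the dex file list, and the OpenTestDexFiles helper are assumed to be provided by the test harness:

// Illustrative only: a fixed seed makes the generated profile reproducible.
std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
bool ok = ProfileCompilationInfo::GenerateTestProfile(fd, dex_files, /* random_seed */ 42u);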
bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
const OfflineProfileMethodInfo& other) const {
if (inline_caches.size() != other.inline_caches.size()) {
@@ -1108,7 +1229,7 @@ bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
}
// We can't use a simple equality test because we need to match the dex files
- // of the inline caches which might have different profile indices.
+ // of the inline caches which might have different profile indexes.
for (const auto& inline_cache_it : inline_caches) {
uint16_t dex_pc = inline_cache_it.first;
const DexPcData dex_pc_data = inline_cache_it.second;
@@ -1117,7 +1238,8 @@ bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
return false;
}
const DexPcData& other_dex_pc_data = other_it->second;
- if (dex_pc_data.is_megamorphic != other_dex_pc_data.is_megamorphic) {
+ if (dex_pc_data.is_megamorphic != other_dex_pc_data.is_megamorphic ||
+ dex_pc_data.is_missing_types != other_dex_pc_data.is_missing_types) {
return false;
}
for (const ClassReference& class_ref : dex_pc_data.classes) {
@@ -1141,4 +1263,17 @@ bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
return true;
}
+void ProfileCompilationInfo::ClearProfile() {
+ for (DexFileData* dex_data : info_) {
+ delete dex_data;
+ }
+ info_.clear();
+ profile_key_map_.clear();
+}
+
+bool ProfileCompilationInfo::IsEmpty() const {
+ DCHECK_EQ(info_.empty(), profile_key_map_.empty());
+ return info_.empty();
+}
+
} // namespace art
diff --git a/runtime/jit/profile_compilation_info.h b/runtime/jit/profile_compilation_info.h
index 4bfbfcd287..87f763686e 100644
--- a/runtime/jit/profile_compilation_info.h
+++ b/runtime/jit/profile_compilation_info.h
@@ -36,18 +36,22 @@ namespace art {
*/
struct ProfileMethodInfo {
struct ProfileClassReference {
+ ProfileClassReference() : dex_file(nullptr) {}
ProfileClassReference(const DexFile* dex, const dex::TypeIndex& index)
: dex_file(dex), type_index(index) {}
const DexFile* dex_file;
- const dex::TypeIndex type_index;
+ dex::TypeIndex type_index;
};
struct ProfileInlineCache {
- ProfileInlineCache(uint32_t pc, const std::vector<ProfileClassReference>& profile_classes)
- : dex_pc(pc), classes(profile_classes) {}
+ ProfileInlineCache(uint32_t pc,
+ bool missing_types,
+ const std::vector<ProfileClassReference>& profile_classes)
+ : dex_pc(pc), is_missing_types(missing_types), classes(profile_classes) {}
const uint32_t dex_pc;
+ const bool is_missing_types;
const std::vector<ProfileClassReference> classes;
};
@@ -82,7 +86,7 @@ class ProfileCompilationInfo {
// A dex location together with its checksum.
struct DexReference {
- DexReference() {}
+ DexReference() : dex_checksum(0) {}
DexReference(const std::string& location, uint32_t checksum)
: dex_location(location), dex_checksum(checksum) {}
@@ -91,6 +95,11 @@ class ProfileCompilationInfo {
return dex_checksum == other.dex_checksum && dex_location == other.dex_location;
}
+ bool MatchesDex(const DexFile* dex_file) const {
+ return dex_checksum == dex_file->GetLocationChecksum() &&
+ dex_location == GetProfileDexFileKey(dex_file->GetLocation());
+ }
+
std::string dex_location;
uint32_t dex_checksum;
};
@@ -128,18 +137,30 @@ class ProfileCompilationInfo {
// Encodes the actual inline cache for a given dex pc (whether or not the receiver is
// megamorphic and its possible types).
- // If the receiver is megamorphic the set of classes will be empty.
+ // If the receiver is megamorphic or is missing types the set of classes will be empty.
struct DexPcData {
- DexPcData() : is_megamorphic(false) {}
+ DexPcData() : is_missing_types(false), is_megamorphic(false) {}
void AddClass(uint16_t dex_profile_idx, const dex::TypeIndex& type_idx);
- void SetMegamorphic() {
+ void SetIsMegamorphic() {
+ if (is_missing_types) return;
is_megamorphic = true;
classes.clear();
}
+ void SetIsMissingTypes() {
+ is_megamorphic = false;
+ is_missing_types = true;
+ classes.clear();
+ }
bool operator==(const DexPcData& other) const {
- return is_megamorphic == other.is_megamorphic && classes == other.classes;
+ return is_megamorphic == other.is_megamorphic &&
+ is_missing_types == other.is_missing_types &&
+ classes == other.classes;
}
+ // Not all runtime types can be encoded in the profile. For example, if the receiver
+ // type is in a dex file which is not tracked for profiling, its type cannot be
+ // encoded. When types are missing, this field will be set to true.
+ bool is_missing_types;
bool is_megamorphic;
ClassSet classes;
};
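Note the precedence encoded by the two setters: SetIsMissingTypes clears the megamorphic bit, and SetIsMegamorphic is a no-op once types are missing. A short sketch of the resulting transitions, using the struct as declared above:

// Illustrative only: flag precedence of the setters declared above.
ProfileCompilationInfo::DexPcData data;
data.SetIsMegamorphic();    // is_megamorphic == true, classes cleared.
data.SetIsMissingTypes();   // is_missing_types == true, is_megamorphic reset to false.
data.SetIsMegamorphic();    // Ignored: missing types already recorded.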
@@ -160,11 +181,16 @@ class ProfileCompilationInfo {
// Public methods to create, extend or query the profile.
+ ProfileCompilationInfo() {}
+ ProfileCompilationInfo(const ProfileCompilationInfo& pci);
+ ~ProfileCompilationInfo();
+
// Add the given methods and classes to the current profile object.
bool AddMethodsAndClasses(const std::vector<ProfileMethodInfo>& methods,
const std::set<DexCacheResolvedClasses>& resolved_classes);
// Load profile information from the given file descriptor.
+ // If the current profile is non-empty the load will fail.
bool Load(int fd);
// Merge the data from another ProfileCompilationInfo into the current object.
@@ -218,9 +244,8 @@ class ProfileCompilationInfo {
bool Equals(const ProfileCompilationInfo& other);
// Return the class descriptors for all of the classes in the profiles' class sets.
- // Note the dex location is actually the profile key, the caller needs to call back in to the
- // profile info stuff to generate a map back to the dex location.
- std::set<DexCacheResolvedClasses> GetResolvedClasses() const;
+ std::set<DexCacheResolvedClasses> GetResolvedClasses(
+ const std::unordered_set<std::string>& dex_files_locations) const;
// Clear the resolved classes from the current object.
void ClearResolvedClasses();
@@ -233,7 +258,14 @@ class ProfileCompilationInfo {
static bool GenerateTestProfile(int fd,
uint16_t number_of_dex_files,
uint16_t method_ratio,
- uint16_t class_ratio);
+ uint16_t class_ratio,
+ uint32_t random_seed);
+
+ // Generate a test profile which will randomly contain classes and methods from
+ // the provided list of dex files.
+ static bool GenerateTestProfile(int fd,
+ std::vector<std::unique_ptr<const DexFile>>& dex_files,
+ uint32_t random_seed);
// Check that the given profile method info contain the same data.
static bool Equals(const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi1,
@@ -241,6 +273,7 @@ class ProfileCompilationInfo {
private:
enum ProfileLoadSatus {
+ kProfileLoadWouldOverwiteData,
kProfileLoadIOError,
kProfileLoadVersionMismatch,
kProfileLoadBadData,
@@ -251,14 +284,21 @@ class ProfileCompilationInfo {
using MethodMap = SafeMap<uint16_t, InlineCacheMap>;
// Internal representation of the profile information belonging to a dex file.
+ // Note that we could do without profile_key (the key used to encode the dex
+ // file in the profile) and profile_index (the index of the dex file in the
+ // profile) fields in this struct because we can infer them from
+ // profile_key_map_ and info_. However, it makes the profile logic much
+ // simpler if we have references here as well.
struct DexFileData {
- DexFileData(uint32_t location_checksum, uint16_t index)
- : profile_index(index), checksum(location_checksum) {}
- // The profile index of this dex file (matches ClassReference#dex_profile_index)
+ DexFileData(const std::string& key, uint32_t location_checksum, uint16_t index)
+ : profile_key(key), profile_index(index), checksum(location_checksum) {}
+ // The profile key this data belongs to.
+ std::string profile_key;
+ // The profile index of this dex file (matches ClassReference#dex_profile_index).
uint8_t profile_index;
- // The dex checksum
+ // The dex checksum.
uint32_t checksum;
- // The methonds' profile information
+ // The methods' profile information.
MethodMap method_map;
// The classes which have been profiled. Note that these don't necessarily include
// all the classes that can be found in the inline caches reference.
@@ -269,12 +309,9 @@ class ProfileCompilationInfo {
}
};
- // Maps dex file to their profile information.
- using DexFileToProfileInfoMap = SafeMap<const std::string, DexFileData>;
-
- // Return the profile data for the given dex location or null if the dex location
+ // Return the profile data for the given profile key, or null if the key is
+ // already present but recorded with a different checksum.
- DexFileData* GetOrAddDexFileData(const std::string& dex_location, uint32_t checksum);
+ DexFileData* GetOrAddDexFileData(const std::string& profile_key, uint32_t checksum);
// Add a method index to the profile (without inline caches).
bool AddMethodIndex(const std::string& dex_location, uint32_t checksum, uint16_t method_idx);
@@ -305,6 +342,16 @@ class ProfileCompilationInfo {
// be the same as the profile index of the dex file (used to encode the ClassReferences).
void DexFileToProfileIndex(/*out*/std::vector<DexReference>* dex_references) const;
+ // Return the dex data associated with the given profile key or null if the profile
+ // doesn't contain the key.
+ const DexFileData* FindDexData(const std::string& profile_key) const;
+
+ // Clear all the profile data.
+ void ClearProfile();
+
+ // Checks if the profile is empty.
+ bool IsEmpty() const;
+
// Parsing functionality.
// The information present in the header of each profile line.
@@ -411,7 +458,15 @@ class ProfileCompilationInfo {
friend class ProfileAssistantTest;
friend class Dex2oatLayoutTest;
- DexFileToProfileInfoMap info_;
+ // Vector containing the actual profile info.
+ // The vector index is the profile index of the dex data and
+ // matched DexFileData::profile_index.
+ std::vector<DexFileData*> info_;
+
+ // Cache mapping profile keys to profile index.
+ // This is used to speed up searches since it avoids iterating
+ // over the info_ vector when searching by profile key.
+ SafeMap<const std::string, uint8_t> profile_key_map_;
};
} // namespace art
diff --git a/runtime/jit/profile_compilation_info_test.cc b/runtime/jit/profile_compilation_info_test.cc
index 332280a0e3..c9f2d0e153 100644
--- a/runtime/jit/profile_compilation_info_test.cc
+++ b/runtime/jit/profile_compilation_info_test.cc
@@ -108,26 +108,31 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
for (ArtMethod* method : methods) {
std::vector<ProfileMethodInfo::ProfileInlineCache> caches;
// Monomorphic
- for (uint16_t dex_pc = 0; dex_pc < 1; dex_pc++) {
+ for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
std::vector<ProfileMethodInfo::ProfileClassReference> classes;
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(0));
- caches.emplace_back(dex_pc, classes);
+ caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
}
// Polymorphic
- for (uint16_t dex_pc = 1; dex_pc < 2; dex_pc++) {
+ for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
std::vector<ProfileMethodInfo::ProfileClassReference> classes;
for (uint16_t k = 0; k < InlineCache::kIndividualCacheSize / 2; k++) {
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k));
}
- caches.emplace_back(dex_pc, classes);
+ caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
}
// Megamorphic
- for (uint16_t dex_pc = 2; dex_pc < 3; dex_pc++) {
+ for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
std::vector<ProfileMethodInfo::ProfileClassReference> classes;
for (uint16_t k = 0; k < 2 * InlineCache::kIndividualCacheSize; k++) {
classes.emplace_back(method->GetDexFile(), dex::TypeIndex(k));
}
- caches.emplace_back(dex_pc, classes);
+ caches.emplace_back(dex_pc, /*is_missing_types*/false, classes);
+ }
+ // Missing types
+ for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
+ std::vector<ProfileMethodInfo::ProfileClassReference> classes;
+ caches.emplace_back(dex_pc, /*is_missing_types*/true, classes);
}
ProfileMethodInfo pmi(method->GetDexFile(), method->GetDexMethodIndex(), caches);
profile_methods.push_back(pmi);
@@ -148,12 +153,15 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
ProfileCompilationInfo::OfflineProfileMethodInfo offline_pmi;
SafeMap<DexFile*, uint8_t> dex_map; // dex files to profile index
for (const auto& inline_cache : pmi.inline_caches) {
+ ProfileCompilationInfo::DexPcData& dex_pc_data =
+ offline_pmi.inline_caches.FindOrAdd(inline_cache.dex_pc)->second;
+ if (inline_cache.is_missing_types) {
+ dex_pc_data.SetIsMissingTypes();
+ }
for (const auto& class_ref : inline_cache.classes) {
uint8_t dex_profile_index = dex_map.FindOrAdd(const_cast<DexFile*>(class_ref.dex_file),
static_cast<uint8_t>(dex_map.size()))->second;
- offline_pmi.inline_caches
- .FindOrAdd(inline_cache.dex_pc)->second
- .AddClass(dex_profile_index, class_ref.type_index);
+ dex_pc_data.AddClass(dex_profile_index, class_ref.type_index);
if (dex_profile_index >= offline_pmi.dex_references.size()) {
// This is a new dex.
const std::string& dex_key = ProfileCompilationInfo::GetProfileDexFileKey(
@@ -170,29 +178,35 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo() {
ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
- pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
- pmi.dex_references.emplace_back("dex_location2", /* checksum */ 2);
- pmi.dex_references.emplace_back("dex_location3", /* checksum */ 3);
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */1);
+ pmi.dex_references.emplace_back("dex_location2", /* checksum */2);
+ pmi.dex_references.emplace_back("dex_location3", /* checksum */3);
// Monomorphic
- for (uint16_t dex_pc = 0; dex_pc < 10; dex_pc++) {
+ for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data;
dex_pc_data.AddClass(0, dex::TypeIndex(0));
pmi.inline_caches.Put(dex_pc, dex_pc_data);
}
// Polymorphic
- for (uint16_t dex_pc = 10; dex_pc < 20; dex_pc++) {
+ for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data;
dex_pc_data.AddClass(0, dex::TypeIndex(0));
dex_pc_data.AddClass(1, dex::TypeIndex(1));
dex_pc_data.AddClass(2, dex::TypeIndex(2));
- pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ pmi.inline_caches.Put(dex_pc, dex_pc_data);
}
// Megamorphic
- for (uint16_t dex_pc = 20; dex_pc < 30; dex_pc++) {
+ for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
+ ProfileCompilationInfo::DexPcData dex_pc_data;
+ dex_pc_data.SetIsMegamorphic();
+ pmi.inline_caches.Put(dex_pc, dex_pc_data);
+ }
+ // Missing types
+ for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
ProfileCompilationInfo::DexPcData dex_pc_data;
- dex_pc_data.is_megamorphic = true;
+ dex_pc_data.SetIsMissingTypes();
pmi.inline_caches.Put(dex_pc, dex_pc_data);
}
@@ -207,7 +221,13 @@ class ProfileCompilationInfoTest : public CommonRuntimeTest {
}
}
- // Cannot sizeof the actual arrays so hardcode the values here.
+ void SetIsMissingTypes(/*out*/ProfileCompilationInfo::OfflineProfileMethodInfo* pmi) {
+ for (auto it : pmi->inline_caches) {
+ it.second.SetIsMissingTypes();
+ }
+ }
+
+ // Cannot sizeof the actual arrays so hard code the values here.
// They should not change anyway.
static constexpr int kProfileMagicSize = 4;
static constexpr int kProfileVersionSize = 4;
@@ -530,6 +550,58 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCaches) {
ASSERT_TRUE(loaded_pmi1 == pmi_extra);
}
+TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) {
+ ScratchFile profile;
+
+ ProfileCompilationInfo saved_info;
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+
+ // Add methods with inline caches.
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info));
+ }
+
+ ASSERT_TRUE(saved_info.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Make some inline caches megamorphic and add them to the profile again.
+ ProfileCompilationInfo saved_info_extra;
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
+ MakeMegamorphic(&pmi_extra);
+ for (uint16_t method_idx = 5; method_idx < 10; method_idx++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+ }
+
+ // Mark all inline caches with missing types and add them to the profile again.
+ // This verifies that all inline caches (megamorphic or not) end up marked as missing types.
+ ProfileCompilationInfo::OfflineProfileMethodInfo missing_types = GetOfflineProfileMethodInfo();
+ SetIsMissingTypes(&missing_types);
+ for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, method_idx, pmi, &saved_info_extra));
+ }
+
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(saved_info_extra.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+
+ // Merge the profiles so that we have the same view as the file.
+ ASSERT_TRUE(saved_info.MergeWith(saved_info_extra));
+
+ // Check that we get back what we saved.
+ ProfileCompilationInfo loaded_info;
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+
+ ASSERT_TRUE(loaded_info.Equals(saved_info));
+
+ ProfileCompilationInfo::OfflineProfileMethodInfo loaded_pmi1;
+ ASSERT_TRUE(loaded_info.GetMethod("dex_location1",
+ /* checksum */ 1,
+ /* method_idx */ 3,
+ &loaded_pmi1));
+ ASSERT_TRUE(loaded_pmi1 == pmi_extra);
+}
+
TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
ScratchFile profile;
@@ -570,7 +642,7 @@ TEST_F(ProfileCompilationInfoTest, SaveArtMethodsWithInlineCaches) {
}
}
-TEST_F(ProfileCompilationInfoTest, InvalidChecksumInInlineCahce) {
+TEST_F(ProfileCompilationInfoTest, InvalidChecksumInInlineCache) {
ScratchFile profile;
ProfileCompilationInfo info;
@@ -662,7 +734,7 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
ProfileCompilationInfo::DexPcData dex_pc_data;
- dex_pc_data.is_megamorphic = true;
+ dex_pc_data.SetIsMegamorphic();
pmi.inline_caches.Put(/*dex_pc*/ 0, dex_pc_data);
ProfileCompilationInfo info_megamorphic;
@@ -686,4 +758,55 @@ TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
ASSERT_TRUE(info_no_inline_cache.Save(GetFd(profile)));
}
+TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCachesMerge) {
+ // Create an inline cache with missing types
+ ProfileCompilationInfo::OfflineProfileMethodInfo pmi;
+ pmi.dex_references.emplace_back("dex_location1", /* checksum */ 1);
+ ProfileCompilationInfo::DexPcData dex_pc_data;
+ dex_pc_data.SetIsMissingTypes();
+ pmi.inline_caches.Put(/*dex_pc*/ 0, dex_pc_data);
+
+ ProfileCompilationInfo info_megamorphic;
+ ASSERT_TRUE(AddMethod("dex_location1",
+ /*checksum*/ 1,
+ /*method_idx*/ 0,
+ pmi,
+ &info_megamorphic));
+
+ // Create a profile with no inline caches (for the same method).
+ ProfileCompilationInfo info_no_inline_cache;
+ ASSERT_TRUE(AddMethod("dex_location1",
+ /*checksum*/ 1,
+ /*method_idx*/ 0,
+ &info_no_inline_cache));
+
+ // Merge the missing type cache into the empty one.
+ // Everything should be saved without errors.
+ ASSERT_TRUE(info_no_inline_cache.MergeWith(info_megamorphic));
+ ScratchFile profile;
+ ASSERT_TRUE(info_no_inline_cache.Save(GetFd(profile)));
+}
+
+TEST_F(ProfileCompilationInfoTest, LoadShouldClearExistingDataFromProfiles) {
+ ScratchFile profile;
+
+ ProfileCompilationInfo saved_info;
+ // Save a few methods.
+ for (uint16_t i = 0; i < 10; i++) {
+ ASSERT_TRUE(AddMethod("dex_location1", /* checksum */ 1, /* method_idx */ i, &saved_info));
+ }
+ ASSERT_TRUE(saved_info.Save(GetFd(profile)));
+ ASSERT_EQ(0, profile.GetFile()->Flush());
+ ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+ // Add a bunch of methods to test_info.
+ ProfileCompilationInfo test_info;
+ for (uint16_t i = 0; i < 10; i++) {
+ ASSERT_TRUE(AddMethod("dex_location2", /* checksum */ 2, /* method_idx */ i, &test_info));
+ }
+
+ // Attempt to load the saved profile into test_info.
+ // This should fail since the test_info already contains data and the load would overwrite it.
+ ASSERT_FALSE(test_info.Load(GetFd(profile)));
+}
} // namespace art
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 61e6c4126a..9d6cd95649 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -39,14 +39,9 @@ pthread_t ProfileSaver::profiler_pthread_ = 0U;
ProfileSaver::ProfileSaver(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir)
+ const std::vector<std::string>& code_paths)
: jit_code_cache_(jit_code_cache),
- foreign_dex_profile_path_(foreign_dex_profile_path),
shutting_down_(false),
- last_save_number_of_methods_(0),
- last_save_number_of_classes_(0),
last_time_ns_saver_woke_up_(0),
jit_activity_notifications_(0),
wait_lock_("ProfileSaver wait lock"),
@@ -58,13 +53,12 @@ ProfileSaver::ProfileSaver(const ProfileSaverOptions& options,
total_number_of_failed_writes_(0),
total_ms_of_sleep_(0),
total_ns_of_work_(0),
- total_number_of_foreign_dex_marks_(0),
max_number_of_profile_entries_cached_(0),
total_number_of_hot_spikes_(0),
total_number_of_wake_ups_(0),
options_(options) {
DCHECK(options_.IsEnabled());
- AddTrackedLocations(output_filename, app_data_dir, code_paths);
+ AddTrackedLocations(output_filename, code_paths);
}
void ProfileSaver::Run() {
@@ -127,15 +121,16 @@ void ProfileSaver::Run() {
break;
}
- uint16_t new_methods = 0;
+ uint16_t number_of_new_methods = 0;
uint64_t start_work = NanoTime();
- bool profile_saved_to_disk = ProcessProfilingInfo(&new_methods);
+ bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save*/false, &number_of_new_methods);
// Update the notification counter based on result. Note that there might be contention on this
 // but we don't need to be 100% precise.
if (!profile_saved_to_disk) {
// If we didn't save to disk it may be because we didn't have enough new methods.
- // Set the jit activity notifications to new_methods so we can wake up earlier if needed.
- jit_activity_notifications_ = new_methods;
+ // Set the jit activity notifications to number_of_new_methods so we can wake up earlier
+ // if needed.
+ jit_activity_notifications_ = number_of_new_methods;
}
total_ns_of_work_ += NanoTime() - start_work;
}
@@ -175,10 +170,10 @@ void ProfileSaver::NotifyJitActivityInternal() {
}
}
-ProfileCompilationInfo* ProfileSaver::GetCachedProfiledInfo(const std::string& filename) {
+ProfileSaver::ProfileInfoCache* ProfileSaver::GetCachedProfiledInfo(const std::string& filename) {
auto info_it = profile_cache_.find(filename);
if (info_it == profile_cache_.end()) {
- info_it = profile_cache_.Put(filename, ProfileCompilationInfo());
+ info_it = profile_cache_.Put(filename, ProfileInfoCache());
}
return &info_it->second;
}
@@ -252,8 +247,9 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() {
<< " (" << classes.GetDexLocation() << ")";
}
}
- ProfileCompilationInfo* info = GetCachedProfiledInfo(filename);
- info->AddMethodsAndClasses(profile_methods_for_location, resolved_classes_for_location);
+ ProfileInfoCache* cached_info = GetCachedProfiledInfo(filename);
+ cached_info->profile.AddMethodsAndClasses(profile_methods_for_location,
+ resolved_classes_for_location);
total_number_of_profile_entries_cached += resolved_classes_for_location.size();
}
max_number_of_profile_entries_cached_ = std::max(
@@ -261,7 +257,7 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() {
total_number_of_profile_entries_cached);
}
-bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) {
+bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number_of_new_methods) {
ScopedTrace trace(__PRETTY_FUNCTION__);
SafeMap<std::string, std::set<std::string>> tracked_locations;
{
@@ -272,10 +268,16 @@ bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) {
bool profile_file_saved = false;
uint64_t total_number_of_profile_entries_cached = 0;
- *new_methods = 0;
+ if (number_of_new_methods != nullptr) {
+ *number_of_new_methods = 0;
+ }
for (const auto& it : tracked_locations) {
- if (ShuttingDown(Thread::Current())) {
+ if (!force_save && ShuttingDown(Thread::Current())) {
+ // The ProfileSaver is in shutdown mode, meaning a stop request was made and
+ // we need to exit cleanly (by waiting for the saver thread to finish). Unless
+ // we have a request for a forced save, do not do any processing so that we
+ // speed up the exit.
return true;
}
const std::string& filename = it.first;
@@ -287,16 +289,18 @@ bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) {
total_number_of_code_cache_queries_++;
}
- ProfileCompilationInfo* cached_info = GetCachedProfiledInfo(filename);
- cached_info->AddMethodsAndClasses(profile_methods, std::set<DexCacheResolvedClasses>());
+ ProfileInfoCache* cached_info = GetCachedProfiledInfo(filename);
+ ProfileCompilationInfo* cached_profile = &cached_info->profile;
+ cached_profile->AddMethodsAndClasses(profile_methods, std::set<DexCacheResolvedClasses>());
int64_t delta_number_of_methods =
- cached_info->GetNumberOfMethods() -
- static_cast<int64_t>(last_save_number_of_methods_);
+ cached_profile->GetNumberOfMethods() -
+ static_cast<int64_t>(cached_info->last_save_number_of_methods);
int64_t delta_number_of_classes =
- cached_info->GetNumberOfResolvedClasses() -
- static_cast<int64_t>(last_save_number_of_classes_);
+ cached_profile->GetNumberOfResolvedClasses() -
+ static_cast<int64_t>(cached_info->last_save_number_of_classes);
- if (delta_number_of_methods < options_.GetMinMethodsToSave() &&
+ if (!force_save &&
+ delta_number_of_methods < options_.GetMinMethodsToSave() &&
delta_number_of_classes < options_.GetMinClassesToSave()) {
VLOG(profiler) << "Not enough information to save to: " << filename
<< " Number of methods: " << delta_number_of_methods
@@ -304,16 +308,19 @@ bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) {
total_number_of_skipped_writes_++;
continue;
}
- *new_methods = std::max(static_cast<uint16_t>(delta_number_of_methods), *new_methods);
+ if (number_of_new_methods != nullptr) {
+ *number_of_new_methods = std::max(static_cast<uint16_t>(delta_number_of_methods),
+ *number_of_new_methods);
+ }
uint64_t bytes_written;
 // Force the save. In case the profile data is corrupted or the profile
// has the wrong version this will "fix" the file to the correct format.
- if (cached_info->MergeAndSave(filename, &bytes_written, /*force*/ true)) {
- last_save_number_of_methods_ = cached_info->GetNumberOfMethods();
- last_save_number_of_classes_ = cached_info->GetNumberOfResolvedClasses();
+ if (cached_profile->MergeAndSave(filename, &bytes_written, /*force*/ true)) {
+ cached_info->last_save_number_of_methods = cached_profile->GetNumberOfMethods();
+ cached_info->last_save_number_of_classes = cached_profile->GetNumberOfResolvedClasses();
// Clear resolved classes. No need to store them around as
// they don't change after the first write.
- cached_info->ClearResolvedClasses();
+ cached_profile->ClearResolvedClasses();
if (bytes_written > 0) {
total_number_of_writes_++;
total_bytes_written_ += bytes_written;
@@ -330,8 +337,8 @@ bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) {
total_number_of_failed_writes_++;
}
total_number_of_profile_entries_cached +=
- cached_info->GetNumberOfMethods() +
- cached_info->GetNumberOfResolvedClasses();
+ cached_profile->GetNumberOfMethods() +
+ cached_profile->GetNumberOfResolvedClasses();
}
max_number_of_profile_entries_cached_ = std::max(
max_number_of_profile_entries_cached_,
@@ -382,9 +389,7 @@ static bool ShouldProfileLocation(const std::string& location) {
void ProfileSaver::Start(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir) {
+ const std::vector<std::string>& code_paths) {
DCHECK(options.IsEnabled());
DCHECK(Runtime::Current()->GetJit() != nullptr);
DCHECK(!output_filename.empty());
@@ -409,7 +414,7 @@ void ProfileSaver::Start(const ProfileSaverOptions& options,
// apps which share the same runtime).
DCHECK_EQ(instance_->jit_code_cache_, jit_code_cache);
// Add the code_paths to the tracked locations.
- instance_->AddTrackedLocations(output_filename, app_data_dir, code_paths_to_profile);
+ instance_->AddTrackedLocations(output_filename, code_paths_to_profile);
return;
}
@@ -419,9 +424,7 @@ void ProfileSaver::Start(const ProfileSaverOptions& options,
instance_ = new ProfileSaver(options,
output_filename,
jit_code_cache,
- code_paths_to_profile,
- foreign_dex_profile_path,
- app_data_dir);
+ code_paths_to_profile);
// Create a new thread which does the saving.
CHECK_PTHREAD_CALL(
@@ -448,9 +451,6 @@ void ProfileSaver::Stop(bool dump_info) {
return;
}
instance_->shutting_down_ = true;
- if (dump_info) {
- instance_->DumpInfo(LOG_STREAM(INFO));
- }
}
{
@@ -462,8 +462,14 @@ void ProfileSaver::Stop(bool dump_info) {
// Wait for the saver thread to stop.
CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown");
+ // Force save everything before destroying the instance.
+ instance_->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
+
{
MutexLock profiler_mutex(Thread::Current(), *Locks::profiler_lock_);
+ if (dump_info) {
+ instance_->DumpInfo(LOG_STREAM(INFO));
+ }
instance_ = nullptr;
profiler_pthread_ = 0U;
}
@@ -481,154 +487,16 @@ bool ProfileSaver::IsStarted() {
}
void ProfileSaver::AddTrackedLocations(const std::string& output_filename,
- const std::string& app_data_dir,
const std::vector<std::string>& code_paths) {
auto it = tracked_dex_base_locations_.find(output_filename);
if (it == tracked_dex_base_locations_.end()) {
tracked_dex_base_locations_.Put(output_filename,
std::set<std::string>(code_paths.begin(), code_paths.end()));
- if (!app_data_dir.empty()) {
- app_data_dirs_.insert(app_data_dir);
- }
} else {
it->second.insert(code_paths.begin(), code_paths.end());
}
}
-// TODO(calin): This may lead to several calls to realpath.
-// Consider moving the logic to the saver thread (i.e. when notified,
-// only cache the location, and then wake up the saver thread to do the
-// comparisons with the real file paths and to create the markers).
-void ProfileSaver::NotifyDexUse(const std::string& dex_location) {
- if (!ShouldProfileLocation(dex_location)) {
- return;
- }
- std::set<std::string> app_code_paths;
- std::string foreign_dex_profile_path;
- std::set<std::string> app_data_dirs;
- {
- MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
- if (instance_ == nullptr) {
- return;
- }
- // Make a copy so that we don't hold the lock while doing I/O.
- for (const auto& it : instance_->tracked_dex_base_locations_) {
- app_code_paths.insert(it.second.begin(), it.second.end());
- }
- foreign_dex_profile_path = instance_->foreign_dex_profile_path_;
- app_data_dirs.insert(instance_->app_data_dirs_.begin(), instance_->app_data_dirs_.end());
- }
-
- bool mark_created = MaybeRecordDexUseInternal(dex_location,
- app_code_paths,
- foreign_dex_profile_path,
- app_data_dirs);
- if (mark_created) {
- MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
- if (instance_ != nullptr) {
- instance_->total_number_of_foreign_dex_marks_++;
- }
- }
-}
-
-static bool CheckContainsWithRealPath(const std::set<std::string>& paths_set,
- const std::string& path_to_check) {
- for (const auto& path : paths_set) {
- UniqueCPtr<const char[]> real_path(realpath(path.c_str(), nullptr));
- if (real_path == nullptr) {
- PLOG(WARNING) << "Could not get realpath for " << path;
- continue;
- }
- std::string real_path_str(real_path.get());
- if (real_path_str == path_to_check) {
- return true;
- }
- }
- return false;
-}
-
-// After the call, dex_location_real_path will contain the marker's name.
-static bool CreateForeignDexMarker(const std::string& foreign_dex_profile_path,
- /*in-out*/ std::string* dex_location_real_path) {
- // For foreign dex files we record a flag on disk. PackageManager will (potentially) take this
- // into account when deciding how to optimize the loaded dex file.
- // The expected flag name is the canonical path of the apk where '/' is substituted to '@'.
- // (it needs to be kept in sync with
- // frameworks/base/services/core/java/com/android/server/pm/PackageDexOptimizer.java)
- std::replace(dex_location_real_path->begin(), dex_location_real_path->end(), '/', '@');
- std::string flag_path = foreign_dex_profile_path + "/" + *dex_location_real_path;
- // We use O_RDONLY as the access mode because we must supply some access
- // mode, and there is no access mode that means 'create but do not read' the
- // file. We will not not actually read from the file.
- int fd = TEMP_FAILURE_RETRY(open(flag_path.c_str(),
- O_CREAT | O_RDONLY | O_EXCL | O_CLOEXEC | O_NOFOLLOW, 0));
- if (fd != -1) {
- if (close(fd) != 0) {
- PLOG(WARNING) << "Could not close file after flagging foreign dex use " << flag_path;
- }
- return true;
- } else {
- if (errno != EEXIST && errno != EACCES) {
- // Another app could have already created the file, and selinux may not
- // allow the read access to the file implied by the call to open.
- PLOG(WARNING) << "Could not create foreign dex use mark " << flag_path;
- return false;
- }
- return true;
- }
-}
-
-bool ProfileSaver::MaybeRecordDexUseInternal(
- const std::string& dex_location,
- const std::set<std::string>& app_code_paths,
- const std::string& foreign_dex_profile_path,
- const std::set<std::string>& app_data_dirs) {
- if (dex_location.empty()) {
- LOG(WARNING) << "Asked to record foreign dex use with an empty dex location.";
- return false;
- }
- if (foreign_dex_profile_path.empty()) {
- LOG(WARNING) << "Asked to record foreign dex use without a valid profile path ";
- return false;
- }
-
- if (app_code_paths.find(dex_location) != app_code_paths.end()) {
- // The dex location belongs to the application code paths. Nothing to record.
- return false;
- }
-
- if (app_data_dirs.find(dex_location) != app_data_dirs.end()) {
- // The dex location is under the application folder. Nothing to record.
- return false;
- }
-
- // Do another round of checks with the real paths.
- // Application directory could be a symlink (e.g. /data/data instead of /data/user/0), and we
- // don't have control over how the dex files are actually loaded (symlink or canonical path),
-
- // Note that we could cache all the real locations in the saver (since it's an expensive
- // operation). However we expect that app_code_paths is small (usually 1 element), and
- // NotifyDexUse is called just a few times in the app lifetime. So we make the compromise
- // to save some bytes of memory usage.
-
- UniqueCPtr<const char[]> dex_location_real_path(realpath(dex_location.c_str(), nullptr));
- if (dex_location_real_path == nullptr) {
- PLOG(WARNING) << "Could not get realpath for " << dex_location;
- return false;
- }
- std::string dex_location_real_path_str(dex_location_real_path.get());
-
- if (CheckContainsWithRealPath(app_code_paths, dex_location_real_path_str)) {
- return false;
- }
-
- if (CheckContainsWithRealPath(app_data_dirs, dex_location_real_path_str)) {
- return false;
- }
-
- return CreateForeignDexMarker(foreign_dex_profile_path, &dex_location_real_path_str);
-}
-
void ProfileSaver::DumpInstanceInfo(std::ostream& os) {
MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
if (instance_ != nullptr) {
@@ -645,8 +513,6 @@ void ProfileSaver::DumpInfo(std::ostream& os) {
<< "ProfileSaver total_number_of_failed_writes=" << total_number_of_failed_writes_ << '\n'
<< "ProfileSaver total_ms_of_sleep=" << total_ms_of_sleep_ << '\n'
<< "ProfileSaver total_ms_of_work=" << NsToMs(total_ns_of_work_) << '\n'
- << "ProfileSaver total_number_of_foreign_dex_marks="
- << total_number_of_foreign_dex_marks_ << '\n'
<< "ProfileSaver max_number_profile_entries_cached="
<< max_number_of_profile_entries_cached_ << '\n'
<< "ProfileSaver total_number_of_hot_spikes=" << total_number_of_hot_spikes_ << '\n'
@@ -664,8 +530,7 @@ void ProfileSaver::ForceProcessProfiles() {
 // but we only use this in testing when we know this won't happen.
// Refactor the way we handle the instance so that we don't end up in this situation.
if (saver != nullptr) {
- uint16_t new_methods;
- saver->ProcessProfilingInfo(&new_methods);
+ saver->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr);
}
}
@@ -674,10 +539,8 @@ bool ProfileSaver::HasSeenMethod(const std::string& profile,
uint16_t method_idx) {
MutexLock mu(Thread::Current(), *Locks::profiler_lock_);
if (instance_ != nullptr) {
- ProfileCompilationInfo* info = instance_->GetCachedProfiledInfo(profile);
- if (info != nullptr) {
- return info->ContainsMethod(MethodReference(dex_file, method_idx));
- }
+ const ProfileCompilationInfo& info = instance_->GetCachedProfiledInfo(profile)->profile;
+ return info.ContainsMethod(MethodReference(dex_file, method_idx));
}
return false;
}
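For illustration only (not part of the patch): a minimal sketch of the per-profile bookkeeping introduced above, using simplified stand-in types in place of SafeMap and ProfileCompilationInfo.

    #include <cstdint>

    struct InfoCacheSketch {
      uint32_t current_methods = 0;               // stands in for profile.GetNumberOfMethods()
      uint32_t last_save_number_of_methods = 0;   // updated only after a successful MergeAndSave
    };

    // Mirrors the decision in ProcessProfilingInfo: write when forced, or when enough
    // new methods have accumulated since the last save for this particular file.
    bool ShouldWrite(const InfoCacheSketch& cache, uint32_t min_methods_to_save, bool force_save) {
      int64_t delta = static_cast<int64_t>(cache.current_methods) -
                      static_cast<int64_t>(cache.last_save_number_of_methods);
      return force_save || delta >= static_cast<int64_t>(min_methods_to_save);
    }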
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 9c5e41fd13..4dd8e60ee4 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -32,9 +32,7 @@ class ProfileSaver {
static void Start(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir)
+ const std::vector<std::string>& code_paths)
REQUIRES(!Locks::profiler_lock_, !wait_lock_);
// Stops the profile saver thread.
@@ -46,8 +44,6 @@ class ProfileSaver {
// Returns true if the profile saver is started.
static bool IsStarted() REQUIRES(!Locks::profiler_lock_);
- static void NotifyDexUse(const std::string& dex_location);
-
// If the profile saver is running, dumps statistics to the `os`. Otherwise it does nothing.
static void DumpInstanceInfo(std::ostream& os);
@@ -63,12 +59,18 @@ class ProfileSaver {
uint16_t method_idx);
private:
+ // A cache structure that keeps track of the data already saved to disk.
+ // It is used to reduce the number of disk reads and writes.
+ struct ProfileInfoCache {
+ ProfileCompilationInfo profile;
+ uint32_t last_save_number_of_methods = 0;
+ uint32_t last_save_number_of_classes = 0;
+ };
+
ProfileSaver(const ProfileSaverOptions& options,
const std::string& output_filename,
jit::JitCodeCache* jit_code_cache,
- const std::vector<std::string>& code_paths,
- const std::string& foreign_dex_profile_path,
- const std::string& app_data_dir);
+ const std::vector<std::string>& code_paths);
// NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
static void* RunProfileSaverThread(void* arg)
@@ -77,9 +79,14 @@ class ProfileSaver {
// The run loop for the saver.
void Run() REQUIRES(!Locks::profiler_lock_, !wait_lock_);
+
// Processes the existing profiling info from the jit code cache and returns
// true if it needed to be saved to disk.
- bool ProcessProfilingInfo(uint16_t* new_methods)
+ // If number_of_new_methods is not null, after the call it will contain the number of new methods
+ // written to disk.
+ // If force_save is true, the saver will ignore any constraints which limit IO (e.g. it will
+ // write the profile to disk even if it contains just one new method).
+ bool ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number_of_new_methods)
REQUIRES(!Locks::profiler_lock_)
REQUIRES(!Locks::mutator_lock_);
@@ -90,24 +97,17 @@ class ProfileSaver {
bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_);
void AddTrackedLocations(const std::string& output_filename,
- const std::string& app_data_dir,
const std::vector<std::string>& code_paths)
REQUIRES(Locks::profiler_lock_);
// Retrieves the cached profile compilation info for the given profile file.
// If no entry exists, a new empty one will be created, added to the cache and
// then returned.
- ProfileCompilationInfo* GetCachedProfiledInfo(const std::string& filename);
+ ProfileInfoCache* GetCachedProfiledInfo(const std::string& filename);
// Fetches the current resolved classes and methods from the ClassLinker and stores them in the
// profile_cache_ for later save.
void FetchAndCacheResolvedClassesAndMethods();
- static bool MaybeRecordDexUseInternal(
- const std::string& dex_location,
- const std::set<std::string>& tracked_locations,
- const std::string& foreign_dex_profile_path,
- const std::set<std::string>& app_data_dirs);
-
void DumpInfo(std::ostream& os);
// The only instance of the saver.
@@ -121,17 +121,8 @@ class ProfileSaver {
// It maps profile locations to code paths (dex base locations).
SafeMap<std::string, std::set<std::string>> tracked_dex_base_locations_
GUARDED_BY(Locks::profiler_lock_);
- // The directory were the we should store the code paths.
- std::string foreign_dex_profile_path_;
-
- // A list of application directories, used to infer if a loaded dex belongs
- // to the application or not. Multiple application data directories are possible when
- // different apps share the same runtime.
- std::set<std::string> app_data_dirs_ GUARDED_BY(Locks::profiler_lock_);
bool shutting_down_ GUARDED_BY(Locks::profiler_lock_);
- uint32_t last_save_number_of_methods_;
- uint32_t last_save_number_of_classes_;
uint64_t last_time_ns_saver_woke_up_ GUARDED_BY(wait_lock_);
uint32_t jit_activity_notifications_;
@@ -139,7 +130,7 @@ class ProfileSaver {
// profile information. The size of this cache is usually very small and tops
 // out at just a few hundred entries in the ProfileCompilationInfo objects.
 // It helps avoid unnecessary writes to disk.
- SafeMap<std::string, ProfileCompilationInfo> profile_cache_;
+ SafeMap<std::string, ProfileInfoCache> profile_cache_;
// Save period condition support.
Mutex wait_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -152,7 +143,6 @@ class ProfileSaver {
uint64_t total_number_of_failed_writes_;
uint64_t total_ms_of_sleep_;
uint64_t total_ns_of_work_;
- uint64_t total_number_of_foreign_dex_marks_;
// TODO(calin): replace with an actual size.
uint64_t max_number_of_profile_entries_cached_;
uint64_t total_number_of_hot_spikes_;
diff --git a/runtime/jit/profile_saver_options.h b/runtime/jit/profile_saver_options.h
index a6385d7469..c8d256fec0 100644
--- a/runtime/jit/profile_saver_options.h
+++ b/runtime/jit/profile_saver_options.h
@@ -21,7 +21,7 @@ namespace art {
struct ProfileSaverOptions {
public:
static constexpr uint32_t kMinSavePeriodMs = 20 * 1000; // 20 seconds
- static constexpr uint32_t kSaveResolvedClassesDelayMs = 2 * 1000; // 2 seconds
+ static constexpr uint32_t kSaveResolvedClassesDelayMs = 5 * 1000; // 5 seconds
// Minimum number of JIT samples during launch to include a method into the profile.
static constexpr uint32_t kStartupMethodSamples = 1;
static constexpr uint32_t kMinMethodsToSave = 10;
@@ -37,7 +37,8 @@ struct ProfileSaverOptions {
min_methods_to_save_(kMinMethodsToSave),
min_classes_to_save_(kMinClassesToSave),
min_notification_before_wake_(kMinNotificationBeforeWake),
- max_notification_before_wake_(kMaxNotificationBeforeWake) {}
+ max_notification_before_wake_(kMaxNotificationBeforeWake),
+ profile_path_("") {}
ProfileSaverOptions(
bool enabled,
@@ -47,7 +48,8 @@ struct ProfileSaverOptions {
uint32_t min_methods_to_save,
uint32_t min_classes_to_save,
uint32_t min_notification_before_wake,
- uint32_t max_notification_before_wake):
+ uint32_t max_notification_before_wake,
+ const std::string& profile_path):
enabled_(enabled),
min_save_period_ms_(min_save_period_ms),
save_resolved_classes_delay_ms_(save_resolved_classes_delay_ms),
@@ -55,7 +57,8 @@ struct ProfileSaverOptions {
min_methods_to_save_(min_methods_to_save),
min_classes_to_save_(min_classes_to_save),
min_notification_before_wake_(min_notification_before_wake),
- max_notification_before_wake_(max_notification_before_wake) {}
+ max_notification_before_wake_(max_notification_before_wake),
+ profile_path_(profile_path) {}
bool IsEnabled() const {
return enabled_;
@@ -85,6 +88,9 @@ struct ProfileSaverOptions {
uint32_t GetMaxNotificationBeforeWake() const {
return max_notification_before_wake_;
}
+ std::string GetProfilePath() const {
+ return profile_path_;
+ }
friend std::ostream & operator<<(std::ostream &os, const ProfileSaverOptions& pso) {
os << "enabled_" << pso.enabled_
@@ -106,6 +112,7 @@ struct ProfileSaverOptions {
uint32_t min_classes_to_save_;
uint32_t min_notification_before_wake_;
uint32_t max_notification_before_wake_;
+ std::string profile_path_;
};
} // namespace art
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index f42a8da8fa..d6881aa3f4 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -108,9 +108,15 @@ class ProfilingInfo {
}
}
- void IncrementInlineUse() {
- DCHECK_NE(current_inline_uses_, std::numeric_limits<uint16_t>::max());
+ // Increments the number of times this method is currently being inlined.
+ // Returns true on success, i.e. when the count could be incremented without
+ // overflowing.
+ bool IncrementInlineUse() {
+ if (current_inline_uses_ == std::numeric_limits<uint16_t>::max()) {
+ return false;
+ }
current_inline_uses_++;
+ return true;
}
void DecrementInlineUse() {
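For illustration only (not part of the patch): how a caller such as the JIT inliner might use the new bool-returning IncrementInlineUse(). The Counter type is a stand-in for ProfilingInfo, only the overflow-checked increment/decrement pair is modeled, and TryInline is a hypothetical caller.

    #include <cstdint>
    #include <limits>

    class Counter {
     public:
      // Refuses the increment instead of DCHECK-ing once the counter saturates.
      bool IncrementInlineUse() {
        if (uses_ == std::numeric_limits<uint16_t>::max()) {
          return false;
        }
        ++uses_;
        return true;
      }
      void DecrementInlineUse() { --uses_; }

     private:
      uint16_t uses_ = 0;
    };

    // Hypothetical caller: bail out of inlining when the counter would overflow.
    bool TryInline(Counter& info) {
      if (!info.IncrementInlineUse()) {
        return false;  // Fall back to a regular call.
      }
      // ... perform the inlining ...
      info.DecrementInlineUse();
      return true;
    }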
diff --git a/runtime/jvalue.h b/runtime/jvalue.h
index 398bfbc27a..f61a07c0c0 100644
--- a/runtime/jvalue.h
+++ b/runtime/jvalue.h
@@ -39,7 +39,9 @@ union PACKED(alignof(mirror::Object*)) JValue {
}
uint16_t GetC() const { return c; }
- void SetC(uint16_t new_c) { c = new_c; }
+ void SetC(uint16_t new_c) {
+ j = static_cast<int64_t>(new_c); // Zero-extend to 64 bits.
+ }
double GetD() const { return d; }
void SetD(double new_d) { d = new_d; }
@@ -66,7 +68,9 @@ union PACKED(alignof(mirror::Object*)) JValue {
}
uint8_t GetZ() const { return z; }
- void SetZ(uint8_t new_z) { z = new_z; }
+ void SetZ(uint8_t new_z) {
+ j = static_cast<int64_t>(new_z); // Zero-extend to 64 bits.
+ }
mirror::Object** GetGCRoot() { return &l; }
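For illustration only (not part of the patch): why SetC() and SetZ() now store through the widest union member. Writing only the narrow member leaves the upper bytes of the 64-bit view holding whatever was stored before; writing the zero-extended value through `j` makes a subsequent 64-bit read well defined. (Reading a union member other than the last one written is implementation-defined in standard C++, though the GCC/Clang toolchains ART builds with support it; the sketch below only reads back `j`.)

    #include <cassert>
    #include <cstdint>

    union ValueSketch {
      uint16_t c;
      int64_t j;
    };

    int main() {
      ValueSketch v;
      v.j = -1;   // All 64 bits set.
      v.c = 7;    // Old-style narrow store: the upper 48 bits would still hold 0xFFFF...
      v.j = static_cast<int64_t>(uint16_t{7});  // New behavior: zero-extend to 64 bits.
      assert(v.j == 7);
      return 0;
    }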
diff --git a/runtime/linear_alloc.cc b/runtime/linear_alloc.cc
index f91b0ed9ea..e9db9b8b4c 100644
--- a/runtime/linear_alloc.cc
+++ b/runtime/linear_alloc.cc
@@ -33,6 +33,11 @@ void* LinearAlloc::Alloc(Thread* self, size_t size) {
return allocator_.Alloc(size);
}
+void* LinearAlloc::AllocAlign16(Thread* self, size_t size) {
+ MutexLock mu(self, lock_);
+ return allocator_.AllocAlign16(size);
+}
+
size_t LinearAlloc::GetUsedMemory() const {
MutexLock mu(Thread::Current(), lock_);
return allocator_.BytesUsed();
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index df7f17dd7a..384b2e3243 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -29,6 +29,7 @@ class LinearAlloc {
explicit LinearAlloc(ArenaPool* pool);
void* Alloc(Thread* self, size_t size) REQUIRES(!lock_);
+ void* AllocAlign16(Thread* self, size_t size) REQUIRES(!lock_);
// Realloc never frees the input pointer, it is the caller's job to do this if necessary.
void* Realloc(Thread* self, void* ptr, size_t old_size, size_t new_size) REQUIRES(!lock_);
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 2f2565b9d0..edc64f35a1 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -57,7 +57,8 @@ class Monitor;
* |10|9|87654321098765432109876543210|
* |11|0| ForwardingAddress |
*
- * The rb bits store the read barrier state.
+ * The `r` bit stores the read barrier state.
+ * The `m` bit stores the mark state.
*/
class LockWord {
public:
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 58c5d17d1c..bd7c4ad53c 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -49,10 +49,19 @@ namespace {
bool GetUnboxedPrimitiveType(ObjPtr<mirror::Class> klass, Primitive::Type* type)
REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
-#define LOOKUP_PRIMITIVE(primitive, _, __, ___) \
- if (klass->DescriptorEquals(Primitive::BoxedDescriptor(primitive))) { \
- *type = primitive; \
- return true; \
+ std::string storage;
+ const char* descriptor = klass->GetDescriptor(&storage);
+ static const char kJavaLangPrefix[] = "Ljava/lang/";
+ static const size_t kJavaLangPrefixSize = sizeof(kJavaLangPrefix) - 1;
+ if (strncmp(descriptor, kJavaLangPrefix, kJavaLangPrefixSize) != 0) {
+ return false;
+ }
+
+ descriptor += kJavaLangPrefixSize;
+#define LOOKUP_PRIMITIVE(primitive, _, java_name, ___) \
+ if (strcmp(descriptor, #java_name ";") == 0) { \
+ *type = primitive; \
+ return true; \
}
PRIMITIVES_LIST(LOOKUP_PRIMITIVE);
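For illustration only (not part of the patch): the lookup above, written out without the PRIMITIVES_LIST macro. The prefix check rejects anything outside java.lang with a single strncmp; only the remainder of the descriptor is then compared per boxed type. The Prim enum and the two listed types are stand-ins for the full generated list.

    #include <cstddef>
    #include <cstring>

    enum class Prim { kBoolean, kInt };  // Hypothetical stand-in for Primitive::Type.

    bool LookupUnboxedType(const char* descriptor, Prim* out) {
      static const char kPrefix[] = "Ljava/lang/";
      static const size_t kPrefixSize = sizeof(kPrefix) - 1;
      if (strncmp(descriptor, kPrefix, kPrefixSize) != 0) {
        return false;  // e.g. "Lcom/example/Foo;" is rejected without further compares.
      }
      descriptor += kPrefixSize;
      if (strcmp(descriptor, "Boolean;") == 0) { *out = Prim::kBoolean; return true; }
      if (strcmp(descriptor, "Integer;") == 0) { *out = Prim::kInt; return true; }
      // ... the remaining boxed types are generated by PRIMITIVES_LIST in the real code ...
      return false;
    }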
@@ -141,21 +150,23 @@ bool IsParameterTypeConvertible(ObjPtr<mirror::Class> from, ObjPtr<mirror::Class
if (from->DescriptorEquals("Ljava/lang/Object;")) {
// Object might be converted into a primitive during unboxing.
return true;
- } else if (Primitive::IsNumericType(to_primitive) &&
- from->DescriptorEquals("Ljava/lang/Number;")) {
+ }
+
+ if (Primitive::IsNumericType(to_primitive) && from->DescriptorEquals("Ljava/lang/Number;")) {
// Number might be unboxed into any of the number primitive types.
return true;
}
+
Primitive::Type unboxed_type;
if (GetUnboxedPrimitiveType(from, &unboxed_type)) {
if (unboxed_type == to_primitive) {
// Straightforward unboxing conversion such as Boolean => boolean.
return true;
- } else {
- // Check if widening operations for numeric primitives would work,
- // such as Byte => byte => long.
- return Primitive::IsWidenable(unboxed_type, to_primitive);
}
+
+ // Check if widening operations for numeric primitives would work,
+ // such as Byte => byte => long.
+ return Primitive::IsWidenable(unboxed_type, to_primitive);
}
}
@@ -372,25 +383,18 @@ inline bool IsFieldAccess(mirror::MethodHandle::Kind handle_kind) {
static inline size_t GetInsForProxyOrNativeMethod(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method->IsNative() || method->IsProxyMethod());
-
method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
- size_t num_ins = 0;
- // Separate accounting for the receiver, which isn't a part of the
- // shorty.
- if (!method->IsStatic()) {
- ++num_ins;
- }
-
- uint32_t shorty_len = 0;
- const char* shorty = method->GetShorty(&shorty_len);
- for (size_t i = 1; i < shorty_len; ++i) {
- const char c = shorty[i];
- ++num_ins;
- if (c == 'J' || c == 'D') {
+ uint32_t shorty_length = 0;
+ const char* shorty = method->GetShorty(&shorty_length);
+
+ // The shorty counts the return type but not the receiver. Drop the return-type slot
+ // for static methods; for instance methods that slot accounts for the receiver instead.
+ size_t num_ins = method->IsStatic() ? shorty_length - 1 : shorty_length;
+ for (const char* c = shorty + 1; *c != '\0'; ++c) {
+ if (*c == 'J' || *c == 'D') {
++num_ins;
}
}
-
return num_ins;
}
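For illustration only (not part of the patch): the shorty-based counting above as a standalone function, plus a worked example. A shorty's first character is the return type and each later character is one argument, with 'J' (long) and 'D' (double) occupying two vreg slots. For the shorty "VLJ" this gives 4 ins on an instance method (receiver + reference + two-slot long) and 3 on a static one.

    #include <cstddef>
    #include <cstring>

    size_t CountIns(const char* shorty, bool is_static) {
      size_t shorty_length = strlen(shorty);
      // Drop the return-type slot for static methods; reuse it to count the receiver otherwise.
      size_t num_ins = is_static ? shorty_length - 1 : shorty_length;
      for (const char* c = shorty + 1; *c != '\0'; ++c) {
        if (*c == 'J' || *c == 'D') {
          ++num_ins;  // Wide primitives take a second register.
        }
      }
      return num_ins;
    }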
@@ -402,7 +406,10 @@ static inline bool IsCallerTransformer(Handle<mirror::MethodType> callsite_type)
ObjPtr<mirror::ObjectArray<mirror::Class>> param_types(callsite_type->GetPTypes());
if (param_types->GetLength() == 1) {
ObjPtr<mirror::Class> param(param_types->GetWithoutChecks(0));
- return param == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_EmulatedStackFrame);
+ // NB: Comparing the descriptor here as it appears faster in cycle simulation than using:
+ // param == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_EmulatedStackFrame)
+ // Costs are 98 vs 173 cycles per invocation.
+ return param->DescriptorEquals("Ldalvik/system/EmulatedStackFrame;");
}
return false;
@@ -416,35 +423,8 @@ static inline bool DoCallPolymorphic(ArtMethod* called_method,
ShadowFrame& shadow_frame,
const uint32_t (&args)[Instruction::kMaxVarArgRegs],
uint32_t first_arg,
- JValue* result,
- const mirror::MethodHandle::Kind handle_kind)
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // For virtual and interface methods ensure called_method points to
- // the actual method to invoke.
- if (handle_kind == mirror::MethodHandle::Kind::kInvokeVirtual ||
- handle_kind == mirror::MethodHandle::Kind::kInvokeInterface) {
- uint32_t receiver_reg = is_range ? first_arg : args[0];
- ObjPtr<mirror::Object> receiver(shadow_frame.GetVRegReference(receiver_reg));
- if (IsCallerTransformer(callsite_type)) {
- // The current receiver is an emulated stack frame, the method's
- // receiver needs to be fetched from there as the emulated frame
- // will be unpacked into a new frame.
- receiver = ObjPtr<mirror::EmulatedStackFrame>::DownCast(receiver)->GetReceiver();
- }
-
- ObjPtr<mirror::Class> declaring_class(called_method->GetDeclaringClass());
- if (receiver == nullptr || receiver->GetClass() != declaring_class) {
- // Verify that _vRegC is an object reference and of the type expected by
- // the receiver.
- if (!VerifyObjectIsClass(receiver, declaring_class)) {
- DCHECK(self->IsExceptionPending());
- return false;
- }
- called_method = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(
- called_method, kRuntimePointerSize);
- }
- }
-
// Compute method information.
const DexFile::CodeItem* code_item = called_method->GetCodeItem();
@@ -513,17 +493,23 @@ static inline bool DoCallPolymorphic(ArtMethod* called_method,
result->SetL(0);
return false;
}
- } else if (!ConvertAndCopyArgumentsFromCallerFrame<is_range>(self,
- callsite_type,
- target_type,
- shadow_frame,
- args,
- first_arg,
- first_dest_reg,
- new_shadow_frame)) {
- DCHECK(self->IsExceptionPending());
- result->SetL(0);
- return false;
+ } else {
+ if (!callsite_type->IsConvertible(target_type.Get())) {
+ ThrowWrongMethodTypeException(target_type.Get(), callsite_type.Get());
+ return false;
+ }
+ if (!ConvertAndCopyArgumentsFromCallerFrame<is_range>(self,
+ callsite_type,
+ target_type,
+ shadow_frame,
+ args,
+ first_arg,
+ first_dest_reg,
+ new_shadow_frame)) {
+ DCHECK(self->IsExceptionPending());
+ result->SetL(0);
+ return false;
+ }
}
}
}
@@ -548,13 +534,13 @@ static inline bool DoCallPolymorphic(ArtMethod* called_method,
if (ConvertReturnValue(emulated_stack_type, target_type, &local_result)) {
emulated_stack_frame->SetReturnValue(self, local_result);
return true;
- } else {
- DCHECK(self->IsExceptionPending());
- return false;
}
- } else {
- return ConvertReturnValue(callsite_type, target_type, result);
+
+ DCHECK(self->IsExceptionPending());
+ return false;
}
+
+ return ConvertReturnValue(callsite_type, target_type, result);
}
template <bool is_range>
@@ -650,98 +636,130 @@ inline static ObjPtr<mirror::Class> GetAndInitializeDeclaringClass(Thread* self,
return klass;
}
+ArtMethod* RefineTargetMethod(Thread* self,
+ ShadowFrame& shadow_frame,
+ const mirror::MethodHandle::Kind& handle_kind,
+ Handle<mirror::MethodType> handle_type,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t receiver_reg,
+ ArtMethod* target_method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (handle_kind == mirror::MethodHandle::Kind::kInvokeVirtual ||
+ handle_kind == mirror::MethodHandle::Kind::kInvokeInterface) {
+ // For virtual and interface methods ensure target_method points to
+ // the actual method to invoke.
+ ObjPtr<mirror::Object> receiver(shadow_frame.GetVRegReference(receiver_reg));
+ if (IsCallerTransformer(callsite_type)) {
+ // The current receiver is an emulated stack frame, the method's
+ // receiver needs to be fetched from there as the emulated frame
+ // will be unpacked into a new frame.
+ receiver = ObjPtr<mirror::EmulatedStackFrame>::DownCast(receiver)->GetReceiver();
+ }
+
+ ObjPtr<mirror::Class> declaring_class(target_method->GetDeclaringClass());
+ if (receiver == nullptr || receiver->GetClass() != declaring_class) {
+ // Verify that _vRegC is an object reference and of the type expected by
+ // the receiver.
+ if (!VerifyObjectIsClass(receiver, declaring_class)) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+ return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(
+ target_method, kRuntimePointerSize);
+ }
+ } else if (handle_kind == mirror::MethodHandle::Kind::kInvokeDirect) {
+ // String constructors are a special case; they are replaced with
+ // StringFactory methods.
+ if (target_method->IsConstructor() && target_method->GetDeclaringClass()->IsStringClass()) {
+ DCHECK(handle_type->GetRType()->IsStringClass());
+ return WellKnownClasses::StringInitToStringFactory(target_method);
+ }
+ } else if (handle_kind == mirror::MethodHandle::Kind::kInvokeSuper) {
+ ObjPtr<mirror::Class> declaring_class = target_method->GetDeclaringClass();
+
+ // Note that we're not dynamically dispatching on the type of the receiver
+ // here. We use the static type of the "receiver" object that we've
+ // recorded in the method handle's type, which will be the same as the
+ // special caller that was specified at the point of lookup.
+ ObjPtr<mirror::Class> referrer_class = handle_type->GetPTypes()->Get(0);
+ if (!declaring_class->IsInterface()) {
+ ObjPtr<mirror::Class> super_class = referrer_class->GetSuperClass();
+ uint16_t vtable_index = target_method->GetMethodIndex();
+ DCHECK(super_class != nullptr);
+ DCHECK(super_class->HasVTable());
+ // Note that super_class is a super of referrer_class and target_method
+ // will always be declared by super_class (or one of its super classes).
+ DCHECK_LT(vtable_index, super_class->GetVTableLength());
+ return super_class->GetVTableEntry(vtable_index, kRuntimePointerSize);
+ } else {
+ return referrer_class->FindVirtualMethodForInterfaceSuper(target_method, kRuntimePointerSize);
+ }
+ }
+ return target_method;
+}
+
template <bool is_range>
-bool DoInvokePolymorphicUnchecked(Thread* self,
- ShadowFrame& shadow_frame,
- Handle<mirror::MethodHandle> method_handle,
- Handle<mirror::MethodType> callsite_type,
- const uint32_t (&args)[Instruction::kMaxVarArgRegs],
- uint32_t first_arg,
- JValue* result)
+bool DoInvokePolymorphicMethod(Thread* self,
+ ShadowFrame& shadow_frame,
+ Handle<mirror::MethodHandle> method_handle,
+ Handle<mirror::MethodType> callsite_type,
+ const uint32_t (&args)[Instruction::kMaxVarArgRegs],
+ uint32_t first_arg,
+ JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::MethodType> handle_type(hs.NewHandle(method_handle->GetMethodType()));
const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
- if (IsInvoke(handle_kind)) {
- // Get the method we're actually invoking along with the kind of
- // invoke that is desired. We don't need to perform access checks at this
- // point because they would have been performed on our behalf at the point
- // of creation of the method handle.
- ArtMethod* called_method = method_handle->GetTargetMethod();
- CHECK(called_method != nullptr);
-
- if (handle_kind == mirror::MethodHandle::Kind::kInvokeVirtual ||
- handle_kind == mirror::MethodHandle::Kind::kInvokeInterface) {
- // TODO: Unfortunately, we have to postpone dynamic receiver based checks
- // because the receiver might be cast or might come from an emulated stack
- // frame, which means that it is unknown at this point. We perform these
- // checks inside DoCallPolymorphic right before we do the actual invoke.
- } else if (handle_kind == mirror::MethodHandle::Kind::kInvokeDirect) {
- // String constructors are a special case, they are replaced with StringFactory
- // methods.
- if (called_method->IsConstructor() && called_method->GetDeclaringClass()->IsStringClass()) {
- DCHECK(handle_type->GetRType()->IsStringClass());
- called_method = WellKnownClasses::StringInitToStringFactory(called_method);
- }
- } else if (handle_kind == mirror::MethodHandle::Kind::kInvokeSuper) {
- ObjPtr<mirror::Class> declaring_class = called_method->GetDeclaringClass();
-
- // Note that we're not dynamically dispatching on the type of the receiver
- // here. We use the static type of the "receiver" object that we've
- // recorded in the method handle's type, which will be the same as the
- // special caller that was specified at the point of lookup.
- ObjPtr<mirror::Class> referrer_class = handle_type->GetPTypes()->Get(0);
- if (!declaring_class->IsInterface()) {
- ObjPtr<mirror::Class> super_class = referrer_class->GetSuperClass();
- uint16_t vtable_index = called_method->GetMethodIndex();
- DCHECK(super_class != nullptr);
- DCHECK(super_class->HasVTable());
- // Note that super_class is a super of referrer_class and called_method
- // will always be declared by super_class (or one of its super classes).
- DCHECK_LT(vtable_index, super_class->GetVTableLength());
- called_method = super_class->GetVTableEntry(vtable_index, kRuntimePointerSize);
- } else {
- called_method = referrer_class->FindVirtualMethodForInterfaceSuper(
- called_method, kRuntimePointerSize);
- }
- CHECK(called_method != nullptr);
- }
- if (IsInvokeTransform(handle_kind)) {
- // There are two cases here - method handles representing regular
- // transforms and those representing call site transforms. Method
- // handles for call site transforms adapt their MethodType to match
- // the call site. For these, the |callee_type| is the same as the
- // |callsite_type|. The VarargsCollector is such a tranform, its
- // method type depends on the call site, ie. x(a) or x(a, b), or
- // x(a, b, c). The VarargsCollector invokes a variable arity method
- // with the arity arguments in an array.
- Handle<mirror::MethodType> callee_type =
- (handle_kind == mirror::MethodHandle::Kind::kInvokeCallSiteTransform) ? callsite_type
- : handle_type;
- return DoCallTransform<is_range>(called_method,
+ DCHECK(IsInvoke(handle_kind));
+
+ // Get the method we're actually invoking along with the kind of
+ // invoke that is desired. We don't need to perform access checks at this
+ // point because they would have been performed on our behalf at the point
+ // of creation of the method handle.
+ ArtMethod* target_method = method_handle->GetTargetMethod();
+ uint32_t receiver_reg = is_range ? first_arg : args[0];
+ ArtMethod* called_method = RefineTargetMethod(self,
+ shadow_frame,
+ handle_kind,
+ handle_type,
+ callsite_type,
+ receiver_reg,
+ target_method);
+ if (called_method == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
+
+ if (IsInvokeTransform(handle_kind)) {
+ // There are two cases here - method handles representing regular
+ // transforms and those representing call site transforms. Method
+ // handles for call site transforms adapt their MethodType to match
+ // the call site. For these, the |callee_type| is the same as the
+ // |callsite_type|. The VarargsCollector is such a transform: its
+ // method type depends on the call site, i.e. x(a), x(a, b), or
+ // x(a, b, c). The VarargsCollector invokes a variable arity method
+ // with the trailing arguments collected into an array.
+ Handle<mirror::MethodType> callee_type =
+ (handle_kind == mirror::MethodHandle::Kind::kInvokeCallSiteTransform) ? callsite_type
+ : handle_type;
+ return DoCallTransform<is_range>(called_method,
+ callsite_type,
+ callee_type,
+ self,
+ shadow_frame,
+ method_handle /* receiver */,
+ args,
+ first_arg,
+ result);
+ } else {
+ return DoCallPolymorphic<is_range>(called_method,
callsite_type,
- callee_type,
+ handle_type,
self,
shadow_frame,
- method_handle /* receiver */,
args,
first_arg,
result);
-
- } else {
- return DoCallPolymorphic<is_range>(called_method,
- callsite_type,
- handle_type,
- self,
- shadow_frame,
- args,
- first_arg,
- result,
- handle_kind);
- }
- } else {
- LOG(FATAL) << "Unreachable: " << handle_kind;
- UNREACHABLE();
}
}
@@ -948,55 +966,30 @@ static inline bool DoInvokePolymorphicNonExact(Thread* self,
ObjPtr<mirror::MethodType> handle_type(method_handle->GetMethodType());
CHECK(handle_type != nullptr);
- if (!IsInvokeTransform(handle_kind)) {
- if (UNLIKELY(!IsCallerTransformer(callsite_type) &&
- !callsite_type->IsConvertible(handle_type.Ptr()))) {
+ if (IsFieldAccess(handle_kind)) {
+ DCHECK(!callsite_type->IsExactMatch(handle_type.Ptr()));
+ if (!callsite_type->IsConvertible(handle_type.Ptr())) {
ThrowWrongMethodTypeException(handle_type.Ptr(), callsite_type.Get());
return false;
}
+ const bool do_convert = true;
+ return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
+ self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
}
- if (IsFieldAccess(handle_kind)) {
- if (UNLIKELY(callsite_type->IsExactMatch(handle_type.Ptr()))) {
- const bool do_convert = false;
- return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
- self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
- } else {
- const bool do_convert = true;
- return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
- self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
- }
- }
-
- if (UNLIKELY(callsite_type->IsExactMatch(handle_type.Ptr()))) {
- return DoInvokePolymorphicUnchecked<is_range>(self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
- } else {
- return DoInvokePolymorphicUnchecked<is_range>(self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
- }
+ return DoInvokePolymorphicMethod<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
}
template <bool is_range>
@@ -1008,32 +1001,9 @@ bool DoInvokePolymorphicExact(Thread* self,
uint32_t first_arg,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // We need to check the nominal type of the handle in addition to the
- // real type. The "nominal" type is present when MethodHandle.asType is
- // called any handle, and results in the declared type of the handle
- // changing.
- ObjPtr<mirror::MethodType> nominal_type(method_handle->GetNominalType());
- if (UNLIKELY(nominal_type != nullptr)) {
- if (UNLIKELY(!callsite_type->IsExactMatch(nominal_type.Ptr()))) {
- ThrowWrongMethodTypeException(nominal_type.Ptr(), callsite_type.Get());
- return false;
- }
- return DoInvokePolymorphicNonExact<is_range>(self,
- shadow_frame,
- method_handle,
- callsite_type,
- args,
- first_arg,
- result);
- }
-
- ObjPtr<mirror::MethodType> handle_type(method_handle->GetMethodType());
- if (UNLIKELY(!callsite_type->IsExactMatch(handle_type.Ptr()))) {
- ThrowWrongMethodTypeException(handle_type.Ptr(), callsite_type.Get());
- return false;
- }
-
+ StackHandleScope<1> hs(self);
const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
+ Handle<mirror::MethodType> method_handle_type(hs.NewHandle(method_handle->GetMethodType()));
if (IsFieldAccess(handle_kind)) {
const bool do_convert = false;
return DoInvokePolymorphicFieldAccess<is_range, do_convert>(
@@ -1046,13 +1016,68 @@ bool DoInvokePolymorphicExact(Thread* self,
result);
}
- return DoInvokePolymorphicUnchecked<is_range>(self,
+ // Slow-path check.
+ if (IsInvokeTransform(handle_kind) || IsCallerTransformer(callsite_type)) {
+ return DoInvokePolymorphicMethod<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
+ }
+
+ // This is the fast path, equivalent to DoCallPolymorphic without the conversion paths.
+ ArtMethod* target_method = method_handle->GetTargetMethod();
+ uint32_t receiver_reg = is_range ? first_arg : args[0];
+ ArtMethod* called_method = RefineTargetMethod(self,
shadow_frame,
- method_handle,
+ handle_kind,
+ method_handle_type,
callsite_type,
- args,
- first_arg,
- result);
+ receiver_reg,
+ target_method);
+ if (called_method == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return false;
+ }
+
+ // Compute method information.
+ const DexFile::CodeItem* code_item = called_method->GetCodeItem();
+ uint16_t num_regs;
+ size_t num_input_regs;
+ size_t first_dest_reg;
+ if (LIKELY(code_item != nullptr)) {
+ num_regs = code_item->registers_size_;
+ first_dest_reg = num_regs - code_item->ins_size_;
+ num_input_regs = code_item->ins_size_;
+ // Parameter registers go at the end of the shadow frame.
+ DCHECK_NE(first_dest_reg, (size_t)-1);
+ } else {
+ // No local regs for proxy and native methods.
+ DCHECK(called_method->IsNative() || called_method->IsProxyMethod());
+ num_regs = num_input_regs = GetInsForProxyOrNativeMethod(called_method);
+ first_dest_reg = 0;
+ }
+
+ // Allocate shadow frame on the stack.
+ const char* old_cause = self->StartAssertNoThreadSuspension("DoCallCommon");
+ ShadowFrameAllocaUniquePtr shadow_frame_unique_ptr =
+ CREATE_SHADOW_FRAME(num_regs, &shadow_frame, called_method, /* dex pc */ 0);
+ ShadowFrame* new_shadow_frame = shadow_frame_unique_ptr.get();
+ CopyArgumentsFromCallerFrame<is_range>(shadow_frame,
+ new_shadow_frame,
+ args,
+ first_arg,
+ first_dest_reg,
+ num_input_regs);
+ self->EndAssertNoThreadSuspension(old_cause);
+
+ PerformCall(self, code_item, shadow_frame.GetMethod(), first_dest_reg, new_shadow_frame, result);
+ if (self->IsExceptionPending()) {
+ return false;
+ }
+ return true;
}
} // namespace
@@ -1067,7 +1092,35 @@ bool DoInvokePolymorphic(Thread* self,
uint32_t first_arg,
JValue* result)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::MethodType> method_handle_type = method_handle->GetMethodType();
if (IsMethodHandleInvokeExact(invoke_method)) {
+ // We need to check the nominal type of the handle in addition to the
+ // real type. The "nominal" type is present when MethodHandle.asType is
+ // called on a handle, and results in the declared type of the handle
+ // changing.
+ ObjPtr<mirror::MethodType> nominal_type(method_handle->GetNominalType());
+ if (UNLIKELY(nominal_type != nullptr)) {
+ if (UNLIKELY(!callsite_type->IsExactMatch(nominal_type.Ptr()))) {
+ ThrowWrongMethodTypeException(nominal_type.Ptr(), callsite_type.Get());
+ return false;
+ }
+
+ if (LIKELY(!nominal_type->IsExactMatch(method_handle_type.Ptr()))) {
+ // Different nominal type means we have to treat as non-exact.
+ return DoInvokePolymorphicNonExact<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
+ }
+ }
+
+ if (!callsite_type->IsExactMatch(method_handle_type.Ptr())) {
+ ThrowWrongMethodTypeException(method_handle_type.Ptr(), callsite_type.Get());
+ return false;
+ }
return DoInvokePolymorphicExact<is_range>(self,
shadow_frame,
method_handle,
@@ -1076,6 +1129,16 @@ bool DoInvokePolymorphic(Thread* self,
first_arg,
result);
} else {
+ if (UNLIKELY(callsite_type->IsExactMatch(method_handle_type.Ptr()))) {
+ // A non-exact invoke that can be invoked exactly.
+ return DoInvokePolymorphicExact<is_range>(self,
+ shadow_frame,
+ method_handle,
+ callsite_type,
+ args,
+ first_arg,
+ result);
+ }
return DoInvokePolymorphicNonExact<is_range>(self,
shadow_frame,
method_handle,
diff --git a/runtime/method_info.h b/runtime/method_info.h
new file mode 100644
index 0000000000..5a72125be4
--- /dev/null
+++ b/runtime/method_info.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_METHOD_INFO_H_
+#define ART_RUNTIME_METHOD_INFO_H_
+
+#include "base/logging.h"
+#include "leb128.h"
+#include "memory_region.h"
+
+namespace art {
+
+// MethodInfo holds per-method data that is not dedupe friendly. Currently it only holds method
+// indices. Putting this data in MethodInfo instead of the code infos saves ~5% of oat size.
+class MethodInfo {
+ using MethodIndexType = uint16_t;
+
+ public:
+ // Reading mode
+ explicit MethodInfo(const uint8_t* ptr) {
+ if (ptr != nullptr) {
+ num_method_indices_ = DecodeUnsignedLeb128(&ptr);
+ region_ = MemoryRegion(const_cast<uint8_t*>(ptr),
+ num_method_indices_ * sizeof(MethodIndexType));
+ }
+ }
+
+ // Writing mode
+ MethodInfo(uint8_t* ptr, size_t num_method_indices) : num_method_indices_(num_method_indices) {
+ DCHECK(ptr != nullptr);
+ ptr = EncodeUnsignedLeb128(ptr, num_method_indices_);
+ region_ = MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType));
+ }
+
+ static size_t ComputeSize(size_t num_method_indices) {
+ uint8_t temp[8];
+ uint8_t* ptr = temp;
+ ptr = EncodeUnsignedLeb128(ptr, num_method_indices);
+ return (ptr - temp) + num_method_indices * sizeof(MethodIndexType);
+ }
+
+ ALWAYS_INLINE MethodIndexType GetMethodIndex(size_t index) const {
+ // Use bit functions to avoid pesky alignment requirements.
+ return region_.LoadBits(index * BitSizeOf<MethodIndexType>(), BitSizeOf<MethodIndexType>());
+ }
+
+ void SetMethodIndex(size_t index, MethodIndexType method_index) {
+ region_.StoreBits(index * BitSizeOf<MethodIndexType>(),
+ method_index,
+ BitSizeOf<MethodIndexType>());
+ }
+
+ size_t NumMethodIndices() const {
+ return num_method_indices_;
+ }
+
+ private:
+ size_t num_method_indices_ = 0u;
+ MemoryRegion region_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_METHOD_INFO_H_
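For illustration only (not part of the new file above): the byte layout MethodInfo encodes, i.e. a LEB128 count followed by densely packed 16-bit method indices. MemoryRegion and EncodeUnsignedLeb128 are replaced here by hand-rolled equivalents so the sketch stands alone.

    #include <cstdint>
    #include <vector>

    static void PutLeb128(std::vector<uint8_t>* out, uint32_t value) {
      while (value >= 0x80) {
        out->push_back(static_cast<uint8_t>(value) | 0x80);
        value >>= 7;
      }
      out->push_back(static_cast<uint8_t>(value));
    }

    // Writes a MethodInfo-style table: the count, then each index as two little-endian bytes.
    std::vector<uint8_t> WriteMethodInfoTable(const std::vector<uint16_t>& method_indices) {
      std::vector<uint8_t> bytes;
      PutLeb128(&bytes, static_cast<uint32_t>(method_indices.size()));
      for (uint16_t idx : method_indices) {
        bytes.push_back(static_cast<uint8_t>(idx & 0xff));
        bytes.push_back(static_cast<uint8_t>(idx >> 8));
      }
      return bytes;
    }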
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index f56226bd98..04c80c5cc0 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -402,8 +402,8 @@ inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) {
return (T)static_cast<uintptr_t>(
AsLongArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx));
}
- return (T)static_cast<uintptr_t>(
- AsIntArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx));
+ return (T)static_cast<uintptr_t>(static_cast<uint32_t>(
+ AsIntArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx)));
}
template<bool kTransactionActive, bool kUnchecked>
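For illustration only (not part of the patch): why the extra static_cast<uint32_t> matters in the IntArray branch above. Converting a negative int32_t straight to uintptr_t sign-extends when uintptr_t is 64 bits wide, producing a bogus high half; widening through uint32_t first keeps exactly the stored 32 bits.

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t stored = -2;  // A pointer's low 32 bits as read back from an IntArray.
      uintptr_t sign_extended = static_cast<uintptr_t>(stored);
      uintptr_t zero_extended = static_cast<uintptr_t>(static_cast<uint32_t>(stored));
      if (sizeof(uintptr_t) == 8) {
        assert(sign_extended == 0xFFFFFFFFFFFFFFFEull);
        assert(zero_extended == 0x00000000FFFFFFFEull);
      }
      return 0;
    }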
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 16cf30f1e2..51d9d24619 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -198,6 +198,13 @@ class PointerArray : public Array {
T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ void** ElementAddress(size_t index, PointerSize ptr_size) REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK_LT(index, static_cast<size_t>(GetLength()));
+ return reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(this) +
+ Array::DataOffset(static_cast<size_t>(ptr_size)).Uint32Value() +
+ static_cast<size_t>(ptr_size) * index);
+ }
+
template<bool kTransactionActive = false, bool kUnchecked = false>
void SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 2cff47e8b4..be3b937f3e 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -29,6 +29,7 @@
#include "dex_file.h"
#include "gc/heap-inl.h"
#include "iftable.h"
+#include "class_ext-inl.h"
#include "object_array-inl.h"
#include "read_barrier-inl.h"
#include "reference-inl.h"
@@ -83,6 +84,12 @@ inline ClassLoader* Class::GetClassLoader() {
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline ClassExt* Class::GetExtData() {
+ return GetFieldObject<ClassExt, kVerifyFlags, kReadBarrierOption>(
+ OFFSET_OF_OBJECT_MEMBER(Class, ext_data_));
+}
+
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline DexCache* Class::GetDexCache() {
return GetFieldObject<DexCache, kVerifyFlags, kReadBarrierOption>(
OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_));
@@ -841,7 +848,7 @@ inline void Class::AssertInitializedOrInitializingInThread(Thread* self) {
}
}
-inline ObjectArray<Class>* Class::GetInterfaces() {
+inline ObjectArray<Class>* Class::GetProxyInterfaces() {
CHECK(IsProxyClass());
// First static field.
auto* field = GetStaticField(0);
@@ -850,7 +857,7 @@ inline ObjectArray<Class>* Class::GetInterfaces() {
return GetFieldObject<ObjectArray<Class>>(field_offset);
}
-inline ObjectArray<ObjectArray<Class>>* Class::GetThrows() {
+inline ObjectArray<ObjectArray<Class>>* Class::GetProxyThrows() {
CHECK(IsProxyClass());
// Second static field.
auto* field = GetStaticField(1);
@@ -920,7 +927,7 @@ inline uint32_t Class::NumDirectInterfaces() {
} else if (IsArrayClass()) {
return 2;
} else if (IsProxyClass()) {
- ObjectArray<Class>* interfaces = GetInterfaces();
+ ObjectArray<Class>* interfaces = GetProxyInterfaces();
return interfaces != nullptr ? interfaces->GetLength() : 0;
} else {
const DexFile::TypeList* interfaces = GetInterfaceTypeList();
@@ -951,6 +958,10 @@ void Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
for (ArtMethod& method : GetMethods(pointer_size)) {
method.VisitRoots<kReadBarrierOption>(visitor, pointer_size);
}
+ ObjPtr<ClassExt> ext(GetExtData<kDefaultVerifyFlags, kReadBarrierOption>());
+ if (!ext.IsNull()) {
+ ext->VisitNativeRoots<kReadBarrierOption, Visitor>(visitor, pointer_size);
+ }
}
inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(PointerSize pointer_size) {
@@ -1086,7 +1097,9 @@ inline void Class::FixupNativePointers(Class* dest,
if (!IsTemp() && ShouldHaveEmbeddedVTable<kVerifyNone, kReadBarrierOption>()) {
for (int32_t i = 0, count = GetEmbeddedVTableLength(); i < count; ++i) {
ArtMethod* method = GetEmbeddedVTableEntry(i, pointer_size);
- ArtMethod* new_method = visitor(method);
+ void** dest_addr = reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(dest) +
+ EmbeddedVTableEntryOffset(i, pointer_size).Uint32Value());
+ ArtMethod* new_method = visitor(method, dest_addr);
if (method != new_method) {
dest->SetEmbeddedVTableEntryUnchecked(i, new_method, pointer_size);
}
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index eb2ec9b3c8..26af488bd2 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -64,10 +64,6 @@ void Class::VisitRoots(RootVisitor* visitor) {
java_lang_Class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass));
}
-ClassExt* Class::GetExtData() {
- return GetFieldObject<ClassExt>(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_));
-}
-
ClassExt* Class::EnsureExtDataPresent(Thread* self) {
ObjPtr<ClassExt> existing(GetExtData());
if (!existing.IsNull()) {
@@ -946,7 +942,7 @@ ObjPtr<Class> Class::GetDirectInterface(Thread* self, ObjPtr<Class> klass, uint3
DCHECK(interface != nullptr);
return interface;
} else if (klass->IsProxyClass()) {
- ObjPtr<ObjectArray<Class>> interfaces = klass->GetInterfaces();
+ ObjPtr<ObjectArray<Class>> interfaces = klass->GetProxyInterfaces();
DCHECK(interfaces != nullptr);
return interfaces->Get(idx);
} else {
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index d34f09c721..27aecd5150 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -206,10 +206,10 @@ class MANAGED Class FINAL : public Object {
return status >= kStatusResolved || status == kStatusErrorResolved;
}
- // Returns true if the class was compile-time verified.
+ // Returns true if the class should be verified at runtime.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsCompileTimeVerified() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime;
+ bool ShouldVerifyAtRuntime() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetStatus<kVerifyFlags>() == kStatusRetryVerificationAtRuntime;
}
// Returns true if the class has been verified.
@@ -595,7 +595,7 @@ class MANAGED Class FINAL : public Object {
// The size of java.lang.Class.class.
static uint32_t ClassClassSize(PointerSize pointer_size) {
// The number of vtable entries in java.lang.Class.
- uint32_t vtable_entries = Object::kVTableLength + 73;
+ uint32_t vtable_entries = Object::kVTableLength + 67;
return ComputeClassSize(true, vtable_entries, 0, 0, 4, 1, 0, pointer_size);
}
@@ -1162,6 +1162,8 @@ class MANAGED Class FINAL : public Object {
void SetClinitThreadId(pid_t new_clinit_thread_id) REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ClassExt* GetExtData() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the ExtData for this class, allocating one if necessary. This should be the only way
@@ -1262,10 +1264,10 @@ class MANAGED Class FINAL : public Object {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// For proxy class only.
- ObjectArray<Class>* GetInterfaces() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjectArray<Class>* GetProxyInterfaces() REQUIRES_SHARED(Locks::mutator_lock_);
// For proxy class only.
- ObjectArray<ObjectArray<Class>>* GetThrows() REQUIRES_SHARED(Locks::mutator_lock_);
+ ObjectArray<ObjectArray<Class>>* GetProxyThrows() REQUIRES_SHARED(Locks::mutator_lock_);
// For reference class only.
MemberOffset GetDisableIntrinsicFlagOffset() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/class_ext-inl.h b/runtime/mirror/class_ext-inl.h
new file mode 100644
index 0000000000..feaac8580a
--- /dev/null
+++ b/runtime/mirror/class_ext-inl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_CLASS_EXT_INL_H_
+#define ART_RUNTIME_MIRROR_CLASS_EXT_INL_H_
+
+#include "class_ext.h"
+
+#include "art_method-inl.h"
+
+namespace art {
+namespace mirror {
+
+template<ReadBarrierOption kReadBarrierOption, class Visitor>
+void ClassExt::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
+ ObjPtr<PointerArray> arr(GetObsoleteMethods<kDefaultVerifyFlags, kReadBarrierOption>());
+ if (arr.IsNull()) {
+ return;
+ }
+ int32_t len = arr->GetLength();
+ for (int32_t i = 0; i < len; i++) {
+ ArtMethod* method = arr->GetElementPtrSize<ArtMethod*,
+ kDefaultVerifyFlags,
+ kReadBarrierOption>(i, pointer_size);
+ if (method != nullptr) {
+ method->VisitRoots<kReadBarrierOption>(visitor, pointer_size);
+ }
+ }
+}
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_CLASS_EXT_INL_H_
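The new class_ext-inl.h keeps the visiting template out of the plain header. The pattern itself is simple: if the class ever acquired obsolete methods (e.g. through class redefinition), walk that table and forward every non-null entry to the visitor. A stand-alone sketch with stand-in types:

    #include <vector>

    struct Method {};  // Stand-in for ArtMethod.

    template <typename Visitor>
    void VisitObsoleteMethods(const std::vector<Method*>* obsolete, Visitor&& visitor) {
      if (obsolete == nullptr) {
        return;  // Most classes never grow an obsolete-method table.
      }
      for (Method* m : *obsolete) {
        if (m != nullptr) {
          visitor(*m);  // In ART this forwards to ArtMethod::VisitRoots.
        }
      }
    }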
diff --git a/runtime/mirror/class_ext.cc b/runtime/mirror/class_ext.cc
index 7270079a8f..94e4b88f6c 100644
--- a/runtime/mirror/class_ext.cc
+++ b/runtime/mirror/class_ext.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "class_ext.h"
+#include "class_ext-inl.h"
#include "art_method-inl.h"
#include "base/casts.h"
@@ -24,7 +24,6 @@
#include "gc/accounting/card_table-inl.h"
#include "object-inl.h"
#include "object_array.h"
-#include "object_array-inl.h"
#include "stack_trace_element.h"
#include "utils.h"
#include "well_known_classes.h"
@@ -34,6 +33,11 @@ namespace mirror {
GcRoot<Class> ClassExt::dalvik_system_ClassExt_;
+uint32_t ClassExt::ClassSize(PointerSize pointer_size) {
+ uint32_t vtable_entries = Object::kVTableLength;
+ return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
+}
+
void ClassExt::SetObsoleteArrays(ObjPtr<PointerArray> methods,
ObjPtr<ObjectArray<DexCache>> dex_caches) {
DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId())
@@ -113,9 +117,9 @@ void ClassExt::SetVerifyError(ObjPtr<Object> err) {
}
}
-void ClassExt::SetOriginalDexFileBytes(ObjPtr<ByteArray> bytes) {
+void ClassExt::SetOriginalDexFile(ObjPtr<Object> bytes) {
DCHECK(!Runtime::Current()->IsActiveTransaction());
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_bytes_), bytes);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_), bytes);
}
void ClassExt::SetClass(ObjPtr<Class> dalvik_system_ClassExt) {
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
index ad8a61b676..708665d46b 100644
--- a/runtime/mirror/class_ext.h
+++ b/runtime/mirror/class_ext.h
@@ -17,9 +17,8 @@
#ifndef ART_RUNTIME_MIRROR_CLASS_EXT_H_
#define ART_RUNTIME_MIRROR_CLASS_EXT_H_
-#include "class-inl.h"
-
#include "array.h"
+#include "class.h"
#include "dex_cache.h"
#include "gc_root.h"
#include "object.h"
@@ -36,10 +35,7 @@ namespace mirror {
// C++ mirror of dalvik.system.ClassExt
class MANAGED ClassExt : public Object {
public:
- static uint32_t ClassSize(PointerSize pointer_size) {
- uint32_t vtable_entries = Object::kVTableLength;
- return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
- }
+ static uint32_t ClassSize(PointerSize pointer_size);
// Size of an instance of dalvik.system.ClassExt.
static constexpr uint32_t InstanceSize() {
@@ -57,15 +53,18 @@ class MANAGED ClassExt : public Object {
OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_dex_caches_));
}
- PointerArray* GetObsoleteMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldObject<PointerArray>(OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_methods_));
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ inline PointerArray* GetObsoleteMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldObject<PointerArray, kVerifyFlags, kReadBarrierOption>(
+ OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_methods_));
}
- ByteArray* GetOriginalDexFileBytes() REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldObject<ByteArray>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_bytes_));
+ Object* GetOriginalDexFile() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldObject<Object>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_));
}
- void SetOriginalDexFileBytes(ObjPtr<ByteArray> bytes) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetOriginalDexFile(ObjPtr<Object> bytes) REQUIRES_SHARED(Locks::mutator_lock_);
void SetObsoleteArrays(ObjPtr<PointerArray> methods, ObjPtr<ObjectArray<DexCache>> dex_caches)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -78,6 +77,10 @@ class MANAGED ClassExt : public Object {
static void ResetClass();
static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
+ inline void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
static ClassExt* Alloc(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
private:
@@ -86,7 +89,7 @@ class MANAGED ClassExt : public Object {
HeapReference<PointerArray> obsolete_methods_;
- HeapReference<ByteArray> original_dex_file_bytes_;
+ HeapReference<Object> original_dex_file_;
// The saved verification error of this class.
HeapReference<Object> verify_error_;
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 29bf6a0240..5d3af5071a 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -24,6 +24,7 @@
#include "base/casts.h"
#include "base/enums.h"
#include "base/logging.h"
+#include "dex_file.h"
#include "gc_root.h"
#include "mirror/class.h"
#include "mirror/call_site.h"
@@ -36,8 +37,17 @@
namespace art {
namespace mirror {
+template <typename T>
+inline void NativeDexCachePair<T>::Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache,
+ PointerSize pointer_size) {
+ NativeDexCachePair<T> first_elem;
+ first_elem.object = nullptr;
+ first_elem.index = InvalidIndexForSlot(0);
+ DexCache::SetNativePairPtrSize(dex_cache, 0, first_elem, pointer_size);
+}
+
inline uint32_t DexCache::ClassSize(PointerSize pointer_size) {
- uint32_t vtable_entries = Object::kVTableLength + 5;
+ const uint32_t vtable_entries = Object::kVTableLength;
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
@@ -164,20 +174,36 @@ inline CallSite* DexCache::SetResolvedCallSite(uint32_t call_site_idx, CallSite*
}
}
+inline uint32_t DexCache::FieldSlotIndex(uint32_t field_idx) {
+ DCHECK_LT(field_idx, GetDexFile()->NumFieldIds());
+ const uint32_t slot_idx = field_idx % kDexCacheFieldCacheSize;
+ DCHECK_LT(slot_idx, NumResolvedFields());
+ return slot_idx;
+}
+
inline ArtField* DexCache::GetResolvedField(uint32_t field_idx, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
- DCHECK_LT(field_idx, NumResolvedFields()); // NOTE: Unchecked, i.e. not throwing AIOOB.
- ArtField* field = GetElementPtrSize(GetResolvedFields(), field_idx, ptr_size);
- if (field == nullptr || field->GetDeclaringClass()->IsErroneous()) {
- return nullptr;
- }
- return field;
+ auto pair = GetNativePairPtrSize(GetResolvedFields(), FieldSlotIndex(field_idx), ptr_size);
+ return pair.GetObjectForIndex(field_idx);
}
inline void DexCache::SetResolvedField(uint32_t field_idx, ArtField* field, PointerSize ptr_size) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
- DCHECK_LT(field_idx, NumResolvedFields()); // NOTE: Unchecked, i.e. not throwing AIOOB.
- SetElementPtrSize(GetResolvedFields(), field_idx, field, ptr_size);
+ DCHECK(field != nullptr);
+ FieldDexCachePair pair(field, field_idx);
+ SetNativePairPtrSize(GetResolvedFields(), FieldSlotIndex(field_idx), pair, ptr_size);
+}
+
+inline void DexCache::ClearResolvedField(uint32_t field_idx, PointerSize ptr_size) {
+ DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), ptr_size);
+ uint32_t slot_idx = FieldSlotIndex(field_idx);
+ auto* resolved_fields = GetResolvedFields();
+ // This is racy but should only be called from the single-threaded ImageWriter.
+ DCHECK(Runtime::Current()->IsAotCompiler());
+ if (GetNativePairPtrSize(resolved_fields, slot_idx, ptr_size).index == field_idx) {
+ FieldDexCachePair cleared(nullptr, FieldDexCachePair::InvalidIndexForSlot(slot_idx));
+ SetNativePairPtrSize(resolved_fields, slot_idx, cleared, ptr_size);
+ }
}
inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size) {
@@ -225,6 +251,40 @@ inline void DexCache::SetElementPtrSize(PtrType* ptr_array,
}
}
+template <typename T>
+NativeDexCachePair<T> DexCache::GetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
+ size_t idx,
+ PointerSize ptr_size) {
+ if (ptr_size == PointerSize::k64) {
+ auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
+ ConversionPair64 value = AtomicLoadRelaxed16B(&array[idx]);
+ return NativeDexCachePair<T>(reinterpret_cast64<T*>(value.first),
+ dchecked_integral_cast<size_t>(value.second));
+ } else {
+ auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
+ ConversionPair32 value = array[idx].load(std::memory_order_relaxed);
+ return NativeDexCachePair<T>(reinterpret_cast<T*>(value.first), value.second);
+ }
+}
+
+template <typename T>
+void DexCache::SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
+ size_t idx,
+ NativeDexCachePair<T> pair,
+ PointerSize ptr_size) {
+ if (ptr_size == PointerSize::k64) {
+ auto* array = reinterpret_cast<std::atomic<ConversionPair64>*>(pair_array);
+ ConversionPair64 v(reinterpret_cast64<uint64_t>(pair.object), pair.index);
+ AtomicStoreRelease16B(&array[idx], v);
+ } else {
+ auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
+ ConversionPair32 v(
+ dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(pair.object)),
+ dchecked_integral_cast<uint32_t>(pair.index));
+ array[idx].store(v, std::memory_order_release);
+ }
+}
+
template <typename T,
ReadBarrierOption kReadBarrierOption,
typename Visitor>
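GetResolvedField/SetResolvedField now route through FieldSlotIndex, which turns the resolved-fields array into a small direct-mapped cache: the slot is field_idx % kDexCacheFieldCacheSize, each slot stores both the pointer and the index, and a lookup only hits when the stored index matches. InvalidIndexForSlot makes an empty slot unmistakable (slot 0 stores index 1, every other slot stores index 0). A minimal sketch of the scheme with invented names (SmallCache, kSlots) and no atomics:

    #include <array>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    template <typename T, std::size_t kSlots>  // kSlots must be a power of two.
    class SmallCache {
     public:
      struct Pair {
        T* object;
        uint32_t index;
      };

      // Index 0 would otherwise look valid in slot 0, so an empty slot 0
      // stores index 1 (which can never hash to slot 0) and every other
      // empty slot stores index 0 (which always hashes to slot 0).
      static constexpr uint32_t InvalidIndexForSlot(uint32_t slot) {
        return (slot == 0) ? 1u : 0u;
      }

      SmallCache() {
        for (uint32_t s = 0; s < kSlots; ++s) {
          slots_[s] = Pair{nullptr, InvalidIndexForSlot(s)};
        }
      }

      T* Get(uint32_t idx) const {
        const Pair& p = slots_[idx % kSlots];
        return (p.index == idx) ? p.object : nullptr;  // Miss if another idx owns the slot.
      }

      void Set(uint32_t idx, T* object) {
        assert(object != nullptr);
        slots_[idx % kSlots] = Pair{object, idx};  // May evict a colliding entry.
      }

     private:
      std::array<Pair, kSlots> slots_;
    };

Collisions simply evict the previous entry; correctness is preserved because a miss falls back to normal resolution (see the Field::GetArtField change further down).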
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 1b8b3913b9..c95d92e34b 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -52,8 +52,12 @@ void DexCache::InitializeDexCache(Thread* self,
dex_file->NumTypeIds() != 0u ||
dex_file->NumMethodIds() != 0u ||
dex_file->NumFieldIds() != 0u) {
+ static_assert(ArenaAllocator::kAlignment == 8, "Expecting arena alignment of 8.");
+ DCHECK(layout.Alignment() == 8u || layout.Alignment() == 16u);
// Zero-initialized.
- raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
+ raw_arrays = (layout.Alignment() == 16u)
+ ? reinterpret_cast<uint8_t*>(linear_alloc->AllocAlign16(self, layout.Size()))
+ : reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
}
mirror::StringDexCacheType* strings = (dex_file->NumStringIds() == 0u) ? nullptr :
@@ -62,17 +66,21 @@ void DexCache::InitializeDexCache(Thread* self,
reinterpret_cast<mirror::TypeDexCacheType*>(raw_arrays + layout.TypesOffset());
ArtMethod** methods = (dex_file->NumMethodIds() == 0u) ? nullptr :
reinterpret_cast<ArtMethod**>(raw_arrays + layout.MethodsOffset());
- ArtField** fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
- reinterpret_cast<ArtField**>(raw_arrays + layout.FieldsOffset());
+ mirror::FieldDexCacheType* fields = (dex_file->NumFieldIds() == 0u) ? nullptr :
+ reinterpret_cast<mirror::FieldDexCacheType*>(raw_arrays + layout.FieldsOffset());
- size_t num_strings = mirror::DexCache::kDexCacheStringCacheSize;
+ size_t num_strings = kDexCacheStringCacheSize;
if (dex_file->NumStringIds() < num_strings) {
num_strings = dex_file->NumStringIds();
}
- size_t num_types = mirror::DexCache::kDexCacheTypeCacheSize;
+ size_t num_types = kDexCacheTypeCacheSize;
if (dex_file->NumTypeIds() < num_types) {
num_types = dex_file->NumTypeIds();
}
+ size_t num_fields = kDexCacheFieldCacheSize;
+ if (dex_file->NumFieldIds() < num_fields) {
+ num_fields = dex_file->NumFieldIds();
+ }
// Note that we allocate the method type dex caches regardless of this flag,
// and we make sure here that they're not used by the runtime. This is in the
@@ -80,17 +88,17 @@ void DexCache::InitializeDexCache(Thread* self,
//
// If this needs to be mitigated in a production system running this code,
// DexCache::kDexCacheMethodTypeCacheSize can be set to zero.
- mirror::MethodTypeDexCacheType* method_types = nullptr;
+ MethodTypeDexCacheType* method_types = nullptr;
size_t num_method_types = 0;
- if (dex_file->NumProtoIds() < mirror::DexCache::kDexCacheMethodTypeCacheSize) {
+ if (dex_file->NumProtoIds() < kDexCacheMethodTypeCacheSize) {
num_method_types = dex_file->NumProtoIds();
} else {
- num_method_types = mirror::DexCache::kDexCacheMethodTypeCacheSize;
+ num_method_types = kDexCacheMethodTypeCacheSize;
}
if (num_method_types > 0) {
- method_types = reinterpret_cast<mirror::MethodTypeDexCacheType*>(
+ method_types = reinterpret_cast<MethodTypeDexCacheType*>(
raw_arrays + layout.MethodTypesOffset());
}
@@ -98,13 +106,13 @@ void DexCache::InitializeDexCache(Thread* self,
? nullptr
: reinterpret_cast<GcRoot<mirror::CallSite>*>(raw_arrays + layout.CallSitesOffset());
- DCHECK_ALIGNED(raw_arrays, alignof(mirror::StringDexCacheType)) <<
+ DCHECK_ALIGNED(raw_arrays, alignof(StringDexCacheType)) <<
"Expected raw_arrays to align to StringDexCacheType.";
- DCHECK_ALIGNED(layout.StringsOffset(), alignof(mirror::StringDexCacheType)) <<
+ DCHECK_ALIGNED(layout.StringsOffset(), alignof(StringDexCacheType)) <<
"Expected StringsOffset() to align to StringDexCacheType.";
- DCHECK_ALIGNED(strings, alignof(mirror::StringDexCacheType)) <<
+ DCHECK_ALIGNED(strings, alignof(StringDexCacheType)) <<
"Expected strings to align to StringDexCacheType.";
- static_assert(alignof(mirror::StringDexCacheType) == 8u,
+ static_assert(alignof(StringDexCacheType) == 8u,
"Expected StringDexCacheType to have align of 8.");
if (kIsDebugBuild) {
// Sanity check to make sure all the dex cache arrays are empty. b/28992179
@@ -117,10 +125,11 @@ void DexCache::InitializeDexCache(Thread* self,
CHECK(types[i].load(std::memory_order_relaxed).object.IsNull());
}
for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
- CHECK(mirror::DexCache::GetElementPtrSize(methods, i, image_pointer_size) == nullptr);
+ CHECK(GetElementPtrSize(methods, i, image_pointer_size) == nullptr);
}
- for (size_t i = 0; i < dex_file->NumFieldIds(); ++i) {
- CHECK(mirror::DexCache::GetElementPtrSize(fields, i, image_pointer_size) == nullptr);
+ for (size_t i = 0; i < num_fields; ++i) {
+ CHECK_EQ(GetNativePairPtrSize(fields, i, image_pointer_size).index, 0u);
+ CHECK(GetNativePairPtrSize(fields, i, image_pointer_size).object == nullptr);
}
for (size_t i = 0; i < num_method_types; ++i) {
CHECK_EQ(method_types[i].load(std::memory_order_relaxed).index, 0u);
@@ -136,6 +145,9 @@ void DexCache::InitializeDexCache(Thread* self,
if (types != nullptr) {
mirror::TypeDexCachePair::Initialize(types);
}
+ if (fields != nullptr) {
+ mirror::FieldDexCachePair::Initialize(fields, image_pointer_size);
+ }
if (method_types != nullptr) {
mirror::MethodTypeDexCachePair::Initialize(method_types);
}
@@ -148,7 +160,7 @@ void DexCache::InitializeDexCache(Thread* self,
methods,
dex_file->NumMethodIds(),
fields,
- dex_file->NumFieldIds(),
+ num_fields,
method_types,
num_method_types,
call_sites,
@@ -164,7 +176,7 @@ void DexCache::Init(const DexFile* dex_file,
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
- ArtField** resolved_fields,
+ FieldDexCacheType* resolved_fields,
uint32_t num_resolved_fields,
MethodTypeDexCacheType* resolved_method_types,
uint32_t num_resolved_method_types,
@@ -218,5 +230,23 @@ void DexCache::SetLocation(ObjPtr<mirror::String> location) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location);
}
+#if !defined(__aarch64__) && !defined(__x86_64__)
+static pthread_mutex_t dex_cache_slow_atomic_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+DexCache::ConversionPair64 DexCache::AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target) {
+ pthread_mutex_lock(&dex_cache_slow_atomic_mutex);
+ DexCache::ConversionPair64 value = *reinterpret_cast<ConversionPair64*>(target);
+ pthread_mutex_unlock(&dex_cache_slow_atomic_mutex);
+ return value;
+}
+
+void DexCache::AtomicStoreRelease16B(std::atomic<ConversionPair64>* target,
+ ConversionPair64 value) {
+ pthread_mutex_lock(&dex_cache_slow_atomic_mutex);
+ *reinterpret_cast<ConversionPair64*>(target) = value;
+ pthread_mutex_unlock(&dex_cache_slow_atomic_mutex);
+}
+#endif
+
} // namespace mirror
} // namespace art
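The #if block added at the end of dex_cache.cc supplies the fallback for targets without native 16-byte atomics: one process-wide mutex serializes every wide load and store. A sketch of the same idea in portable C++ (Pair64 and the function names are stand-ins for ART's ConversionPair64 helpers):

    #include <cstdint>
    #include <mutex>

    struct Pair64 { uint64_t first; uint64_t second; };

    static std::mutex g_pair_mutex;  // Serializes all 16-byte accesses.

    Pair64 LoadPair16B(const Pair64* target) {
      std::lock_guard<std::mutex> lock(g_pair_mutex);
      return *target;  // Plain copy is safe because every writer takes the same lock.
    }

    void StorePair16B(Pair64* target, Pair64 value) {
      std::lock_guard<std::mutex> lock(g_pair_mutex);
      *target = value;
    }

This keeps the fast inline paths for aarch64/x86_64 (declared in dex_cache.h below) while still giving the remaining architectures well-defined behaviour.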
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 057919806f..cf570b8be0 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -65,7 +65,7 @@ template <typename T> struct PACKED(8) DexCachePair {
DexCachePair(ObjPtr<T> object, uint32_t index)
: object(object),
index(index) {}
- DexCachePair() = default;
+ DexCachePair() : index(0) {}
DexCachePair(const DexCachePair<T>&) = default;
DexCachePair& operator=(const DexCachePair<T>&) = default;
@@ -91,12 +91,44 @@ template <typename T> struct PACKED(8) DexCachePair {
}
};
+template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
+ T* object;
+ size_t index;
+ // This is similar to DexCachePair except that we're storing a native pointer
+ // instead of a GC root. See DexCachePair for the details.
+ NativeDexCachePair(T* object, uint32_t index)
+ : object(object),
+ index(index) {}
+ NativeDexCachePair() : object(nullptr), index(0u) { }
+ NativeDexCachePair(const NativeDexCachePair<T>&) = default;
+ NativeDexCachePair& operator=(const NativeDexCachePair<T>&) = default;
+
+ static void Initialize(std::atomic<NativeDexCachePair<T>>* dex_cache, PointerSize pointer_size);
+
+ static uint32_t InvalidIndexForSlot(uint32_t slot) {
+ // Since the cache size is a power of two, 0 will always map to slot 0.
+ // Use 1 for slot 0 and 0 for all other slots.
+ return (slot == 0) ? 1u : 0u;
+ }
+
+ T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (idx != index) {
+ return nullptr;
+ }
+ DCHECK(object != nullptr);
+ return object;
+ }
+};
+
using TypeDexCachePair = DexCachePair<Class>;
using TypeDexCacheType = std::atomic<TypeDexCachePair>;
using StringDexCachePair = DexCachePair<String>;
using StringDexCacheType = std::atomic<StringDexCachePair>;
+using FieldDexCachePair = NativeDexCachePair<ArtField>;
+using FieldDexCacheType = std::atomic<FieldDexCachePair>;
+
using MethodTypeDexCachePair = DexCachePair<MethodType>;
using MethodTypeDexCacheType = std::atomic<MethodTypeDexCachePair>;
@@ -116,6 +148,11 @@ class MANAGED DexCache FINAL : public Object {
static_assert(IsPowerOfTwo(kDexCacheStringCacheSize),
"String dex cache size is not a power of 2.");
+ // Size of field dex cache. Needs to be a power of 2 for entrypoint assumptions to hold.
+ static constexpr size_t kDexCacheFieldCacheSize = 1024;
+ static_assert(IsPowerOfTwo(kDexCacheFieldCacheSize),
+ "Field dex cache size is not a power of 2.");
+
// Size of method type dex cache. Needs to be a power of 2 for entrypoint assumptions
// to hold.
static constexpr size_t kDexCacheMethodTypeCacheSize = 1024;
@@ -130,6 +167,10 @@ class MANAGED DexCache FINAL : public Object {
return kDexCacheStringCacheSize;
}
+ static constexpr size_t StaticArtFieldSize() {
+ return kDexCacheFieldCacheSize;
+ }
+
static constexpr size_t StaticMethodTypeSize() {
return kDexCacheMethodTypeCacheSize;
}
@@ -171,10 +212,6 @@ class MANAGED DexCache FINAL : public Object {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
- static MemberOffset DexOffset() {
- return OFFSET_OF_OBJECT_MEMBER(DexCache, dex_);
- }
-
static MemberOffset StringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
}
@@ -255,6 +292,8 @@ class MANAGED DexCache FINAL : public Object {
// Pointer sized variant, used for patching.
ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE void ClearResolvedField(uint32_t idx, PointerSize ptr_size)
+ REQUIRES_SHARED(Locks::mutator_lock_);
MethodType* GetResolvedMethodType(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -299,11 +338,11 @@ class MANAGED DexCache FINAL : public Object {
SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
}
- ArtField** GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
- return GetFieldPtr<ArtField**>(ResolvedFieldsOffset());
+ FieldDexCacheType* GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ return GetFieldPtr<FieldDexCacheType*>(ResolvedFieldsOffset());
}
- void SetResolvedFields(ArtField** resolved_fields)
+ void SetResolvedFields(FieldDexCacheType* resolved_fields)
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
@@ -376,6 +415,22 @@ class MANAGED DexCache FINAL : public Object {
template <typename PtrType>
static void SetElementPtrSize(PtrType* ptr_array, size_t idx, PtrType ptr, PointerSize ptr_size);
+ template <typename T>
+ static NativeDexCachePair<T> GetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
+ size_t idx,
+ PointerSize ptr_size);
+
+ template <typename T>
+ static void SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_array,
+ size_t idx,
+ NativeDexCachePair<T> pair,
+ PointerSize ptr_size);
+
+ uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t FieldSlotIndex(uint32_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+
private:
void Init(const DexFile* dex_file,
ObjPtr<String> location,
@@ -385,7 +440,7 @@ class MANAGED DexCache FINAL : public Object {
uint32_t num_resolved_types,
ArtMethod** resolved_methods,
uint32_t num_resolved_methods,
- ArtField** resolved_fields,
+ FieldDexCacheType* resolved_fields,
uint32_t num_resolved_fields,
MethodTypeDexCacheType* resolved_method_types,
uint32_t num_resolved_method_types,
@@ -394,9 +449,18 @@ class MANAGED DexCache FINAL : public Object {
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t StringSlotIndex(dex::StringIndex string_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t TypeSlotIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t MethodTypeSlotIndex(uint32_t proto_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ // std::pair<> is not trivially copyable and as such it is unsuitable for atomic operations,
+ // so we use a custom pair class for loading and storing the NativeDexCachePair<>.
+ template <typename IntType>
+ struct PACKED(2 * sizeof(IntType)) ConversionPair {
+ ConversionPair(IntType f, IntType s) : first(f), second(s) { }
+ ConversionPair(const ConversionPair&) = default;
+ ConversionPair& operator=(const ConversionPair&) = default;
+ IntType first;
+ IntType second;
+ };
+ using ConversionPair32 = ConversionPair<uint32_t>;
+ using ConversionPair64 = ConversionPair<uint64_t>;
// Visit instance fields of the dex cache as well as its associated arrays.
template <bool kVisitNativeRoots,
@@ -406,12 +470,58 @@ class MANAGED DexCache FINAL : public Object {
void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
- HeapReference<Object> dex_;
+ // Due to lack of 16-byte atomics support, we use hand-crafted routines.
+#if defined(__aarch64__)
+ // 16-byte atomics are supported on aarch64.
+ ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
+ std::atomic<ConversionPair64>* target) {
+ return target->load(std::memory_order_relaxed);
+ }
+
+ ALWAYS_INLINE static void AtomicStoreRelease16B(
+ std::atomic<ConversionPair64>* target, ConversionPair64 value) {
+ target->store(value, std::memory_order_release);
+ }
+#elif defined(__x86_64__)
+ ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
+ std::atomic<ConversionPair64>* target) {
+ uint64_t first, second;
+ __asm__ __volatile__(
+ "lock cmpxchg16b (%2)"
+ : "=&a"(first), "=&d"(second)
+ : "r"(target), "a"(0), "d"(0), "b"(0), "c"(0)
+ : "cc");
+ return ConversionPair64(first, second);
+ }
+
+ ALWAYS_INLINE static void AtomicStoreRelease16B(
+ std::atomic<ConversionPair64>* target, ConversionPair64 value) {
+ uint64_t first, second;
+ __asm__ __volatile__ (
+ "movq (%2), %%rax\n\t"
+ "movq 8(%2), %%rdx\n\t"
+ "1:\n\t"
+ "lock cmpxchg16b (%2)\n\t"
+ "jnz 1b"
+ : "=&a"(first), "=&d"(second)
+ : "r"(target), "b"(value.first), "c"(value.second)
+ : "cc");
+ }
+#else
+ static ConversionPair64 AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target);
+ static void AtomicStoreRelease16B(std::atomic<ConversionPair64>* target, ConversionPair64 value);
+#endif
+
HeapReference<String> location_;
+ // Number of elements in the call_sites_ array. Note that this appears here
+ // because of our packing logic for 32 bit fields.
+ uint32_t num_resolved_call_sites_;
+
uint64_t dex_file_; // const DexFile*
uint64_t resolved_call_sites_; // GcRoot<CallSite>* array with num_resolved_call_sites_
// elements.
- uint64_t resolved_fields_; // ArtField*, array with num_resolved_fields_ elements.
+ uint64_t resolved_fields_; // std::atomic<FieldDexCachePair>*, array with
+ // num_resolved_fields_ elements.
uint64_t resolved_method_types_; // std::atomic<MethodTypeDexCachePair>* array with
// num_resolved_method_types_ elements.
uint64_t resolved_methods_; // ArtMethod*, array with num_resolved_methods_ elements.
@@ -419,7 +529,6 @@ class MANAGED DexCache FINAL : public Object {
uint64_t strings_; // std::atomic<StringDexCachePair>*, array with num_strings_
// elements.
- uint32_t num_resolved_call_sites_; // Number of elements in the call_sites_ array.
uint32_t num_resolved_fields_; // Number of elements in the resolved_fields_ array.
uint32_t num_resolved_method_types_; // Number of elements in the resolved_method_types_ array.
uint32_t num_resolved_methods_; // Number of elements in the resolved_methods_ array.
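The ConversionPair comment above is the key constraint: std::atomic<T> requires a trivially copyable T, and std::pair does not qualify, so a hand-written pair of integers is used for the raw loads and stores. A tiny sketch of the check one could add (RawPair32 is a stand-in name, assuming C++11 type traits):

    #include <atomic>
    #include <cstdint>
    #include <type_traits>

    struct RawPair32 {
      uint32_t first;
      uint32_t second;
    };

    static_assert(std::is_trivially_copyable<RawPair32>::value,
                  "std::atomic<T> requires a trivially copyable T");

    std::atomic<RawPair32> slot;  // 8 bytes; lock-free on typical 32/64-bit targets.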
diff --git a/runtime/mirror/dex_cache_test.cc b/runtime/mirror/dex_cache_test.cc
index ef0aaaaa70..71a47f66a0 100644
--- a/runtime/mirror/dex_cache_test.cc
+++ b/runtime/mirror/dex_cache_test.cc
@@ -54,7 +54,8 @@ TEST_F(DexCacheTest, Open) {
EXPECT_TRUE(dex_cache->StaticTypeSize() == dex_cache->NumResolvedTypes()
|| java_lang_dex_file_->NumTypeIds() == dex_cache->NumResolvedTypes());
EXPECT_EQ(java_lang_dex_file_->NumMethodIds(), dex_cache->NumResolvedMethods());
- EXPECT_EQ(java_lang_dex_file_->NumFieldIds(), dex_cache->NumResolvedFields());
+ EXPECT_TRUE(dex_cache->StaticArtFieldSize() == dex_cache->NumResolvedFields()
+ || java_lang_dex_file_->NumFieldIds() == dex_cache->NumResolvedFields());
EXPECT_TRUE(dex_cache->StaticMethodTypeSize() == dex_cache->NumResolvedMethodTypes()
|| java_lang_dex_file_->NumProtoIds() == dex_cache->NumResolvedMethodTypes());
}
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index f6b64897fa..54034c2bbf 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -68,8 +68,16 @@ ArtField* Field::GetArtField() {
}
}
mirror::DexCache* const dex_cache = declaring_class->GetDexCache();
- ArtField* const art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), kRuntimePointerSize);
- CHECK(art_field != nullptr);
+ ArtField* art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), kRuntimePointerSize);
+ if (UNLIKELY(art_field == nullptr)) {
+ if (IsStatic()) {
+ art_field = declaring_class->FindDeclaredStaticField(dex_cache, GetDexFieldIndex());
+ } else {
+ art_field = declaring_class->FindInstanceField(dex_cache, GetDexFieldIndex());
+ }
+ CHECK(art_field != nullptr);
+ dex_cache->SetResolvedField(GetDexFieldIndex(), art_field, kRuntimePointerSize);
+ }
CHECK_EQ(declaring_class, art_field->GetDeclaringClass());
return art_field;
}
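This Field::GetArtField change is the safety net for the hash-based cache: a bare CHECK on the cached value is no longer valid because a colliding field index may have evicted the entry, so a miss re-finds the field in the declaring class and refills the cache. Sketched against the SmallCache example given earlier (names are stand-ins):

    #include <cstdint>

    template <typename Cache, typename Klass>
    auto GetFieldWithRefill(Cache& cache, Klass& klass, uint32_t field_idx, bool is_static)
        -> decltype(cache.Get(field_idx)) {
      auto* field = cache.Get(field_idx);
      if (field == nullptr) {                        // Slot evicted by a colliding index.
        field = is_static ? klass.FindDeclaredStaticField(field_idx)
                          : klass.FindInstanceField(field_idx);
        cache.Set(field_idx, field);                 // Refill for the next lookup.
      }
      return field;
    }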
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 8e591e4434..811f1ea726 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -187,10 +187,12 @@ inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency)
uint32_t rb_state = lw.ReadBarrierState();
return rb_state;
#else
- // mips/mips64
- LOG(FATAL) << "Unreachable";
- UNREACHABLE();
- UNUSED(fake_address_dependency);
+ // MIPS32/MIPS64: use a memory barrier to prevent load-load reordering.
+ LockWord lw = GetLockWord(false);
+ *fake_address_dependency = 0;
+ std::atomic_thread_fence(std::memory_order_acquire);
+ uint32_t rb_state = lw.ReadBarrierState();
+ return rb_state;
#endif
}
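The object-inl.h hunk replaces the MIPS LOG(FATAL) with a real implementation: lacking the cheap fake-address-dependency trick used on arm/arm64, it loads the lock word and then issues an acquire fence so later reference loads cannot be reordered before the read-barrier state. A simplified sketch of that ordering (the bit position is illustrative only):

    #include <atomic>
    #include <cstdint>

    uint32_t LoadStateThenFence(const std::atomic<uint32_t>& lock_word) {
      uint32_t lw = lock_word.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);  // Orders subsequent loads after lw.
      return lw >> 28;  // Pretend the state lives in the top bits; example only.
    }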
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 4541ce2a42..f7ab26de0d 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -538,10 +538,10 @@ class MANAGED LOCKABLE Object {
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (pointer_size == PointerSize::k32) {
- intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
- DCHECK_EQ(static_cast<int32_t>(ptr), ptr); // Check that we dont lose any non 0 bits.
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(new_value);
+ DCHECK_EQ(static_cast<uint32_t>(ptr), ptr); // Check that we dont lose any non 0 bits.
SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, static_cast<int32_t>(ptr));
+ field_offset, static_cast<int32_t>(static_cast<uint32_t>(ptr)));
} else {
SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
field_offset, reinterpret_cast64<int64_t>(new_value));
@@ -591,7 +591,8 @@ class MANAGED LOCKABLE Object {
ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (pointer_size == PointerSize::k32) {
- return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
+ uint64_t address = static_cast<uint32_t>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
+ return reinterpret_cast<T>(static_cast<uintptr_t>(address));
} else {
int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
return reinterpret_cast64<T>(v);
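The SetFieldPtrWithSize/GetFieldPtrWithSize edits avoid sign-extension when a pointer is stored in a 32-bit field but handled by 64-bit code (e.g. a 64-bit tool preparing data laid out for a 32-bit target): casting the raw int32_t straight to a pointer type would sign-extend addresses with the top bit set, whereas routing through uint32_t zero-extends. A minimal illustration:

    #include <cstdint>

    void* Load32BitPtrField(int32_t raw_field) {
      uint64_t address = static_cast<uint32_t>(raw_field);  // Zero-extend, never sign-extend.
      return reinterpret_cast<void*>(static_cast<uintptr_t>(address));
    }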
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 884b88a6c1..de0e75b083 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -89,16 +89,17 @@ inline bool String::AllASCIIExcept(const uint16_t* chars, int32_t length, uint16
return true;
}
-ObjPtr<String> String::DoReplace(Thread* self, uint16_t old_c, uint16_t new_c) {
- DCHECK(IsCompressed() ? ContainsElement(ArrayRef<uint8_t>(value_compressed_, GetLength()), old_c)
- : ContainsElement(ArrayRef<uint16_t>(value_, GetLength()), old_c));
- int32_t length = GetLength();
+ObjPtr<String> String::DoReplace(Thread* self, Handle<String> src, uint16_t old_c, uint16_t new_c) {
+ int32_t length = src->GetLength();
+ DCHECK(src->IsCompressed()
+ ? ContainsElement(ArrayRef<uint8_t>(src->value_compressed_, length), old_c)
+ : ContainsElement(ArrayRef<uint16_t>(src->value_, length), old_c));
bool compressible =
kUseStringCompression &&
IsASCII(new_c) &&
- (IsCompressed() || (!IsASCII(old_c) && AllASCIIExcept(value_, length, old_c)));
+ (src->IsCompressed() || (!IsASCII(old_c) && AllASCIIExcept(src->value_, length, old_c)));
gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
- const int32_t length_with_flag = String::GetFlaggedCount(GetLength(), compressible);
+ const int32_t length_with_flag = String::GetFlaggedCount(length, compressible);
SetStringCountVisitor visitor(length_with_flag);
ObjPtr<String> string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
if (UNLIKELY(string == nullptr)) {
@@ -109,10 +110,10 @@ ObjPtr<String> String::DoReplace(Thread* self, uint16_t old_c, uint16_t new_c) {
return dchecked_integral_cast<uint8_t>((old_c != c) ? c : new_c);
};
uint8_t* out = string->value_compressed_;
- if (LIKELY(IsCompressed())) { // LIKELY(compressible == IsCompressed())
- std::transform(value_compressed_, value_compressed_ + length, out, replace);
+ if (LIKELY(src->IsCompressed())) { // LIKELY(compressible == src->IsCompressed())
+ std::transform(src->value_compressed_, src->value_compressed_ + length, out, replace);
} else {
- std::transform(value_, value_ + length, out, replace);
+ std::transform(src->value_, src->value_ + length, out, replace);
}
DCHECK(kUseStringCompression && AllASCII(out, length));
} else {
@@ -120,10 +121,10 @@ ObjPtr<String> String::DoReplace(Thread* self, uint16_t old_c, uint16_t new_c) {
return (old_c != c) ? c : new_c;
};
uint16_t* out = string->value_;
- if (UNLIKELY(IsCompressed())) { // LIKELY(compressible == IsCompressed())
- std::transform(value_compressed_, value_compressed_ + length, out, replace);
+ if (UNLIKELY(src->IsCompressed())) { // LIKELY(compressible == src->IsCompressed())
+ std::transform(src->value_compressed_, src->value_compressed_ + length, out, replace);
} else {
- std::transform(value_, value_ + length, out, replace);
+ std::transform(src->value_, src->value_ + length, out, replace);
}
DCHECK(!kUseStringCompression || !AllASCII(out, length));
}
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index dbb5a4c387..b59bbfbd68 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -96,7 +96,7 @@ class MANAGED String FINAL : public Object {
// Create a new string where all occurences of `old_c` are replaced with `new_c`.
// String.doReplace(char, char) is called from String.replace(char, char) when there is a match.
- ObjPtr<String> DoReplace(Thread* self, uint16_t old_c, uint16_t new_c)
+ static ObjPtr<String> DoReplace(Thread* self, Handle<String> src, uint16_t old_c, uint16_t new_c)
REQUIRES_SHARED(Locks::mutator_lock_);
ObjPtr<String> Intern() REQUIRES_SHARED(Locks::mutator_lock_);
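DoReplace becomes static and takes Handle<String> because it allocates the result string, and with a moving collector that allocation can relocate the source; a raw `this` (or cached member pointers) would then be stale, while a handle is updated when the object moves. A toy model of the difference (Handle here is just an extra level of indirection, not ART's type):

    #include <cassert>
    #include <string>

    template <typename T>
    struct Handle {       // Indirection that a moving GC can keep up to date.
      T** slot;
      T* Get() const { return *slot; }
    };

    int main() {
      std::string* obj = new std::string("abc");
      std::string** root = &obj;
      Handle<std::string> h{root};

      std::string* raw = obj;               // Snapshot of the old address.
      // "GC": move the object and patch the root, as a moving collector would.
      std::string* moved = new std::string(*obj);
      delete obj;
      *root = moved;

      assert(h.Get() == moved);             // The handle follows the move.
      (void)raw;                            // `raw` now dangles; using it would be a bug.
      delete moved;
      return 0;
    }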
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 1fa46826eb..e80d31cdd5 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -354,7 +354,7 @@ class MonitorList {
// For use only by the JDWP implementation.
class MonitorInfo {
public:
- MonitorInfo() = default;
+ MonitorInfo() : owner_(nullptr), entry_count_(0) {}
MonitorInfo(const MonitorInfo&) = default;
MonitorInfo& operator=(const MonitorInfo&) = default;
explicit MonitorInfo(mirror::Object* o) REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index efc42fdac7..11f850524d 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -17,6 +17,8 @@
#include "dalvik_system_VMRuntime.h"
#ifdef ART_TARGET_ANDROID
+#include <sys/time.h>
+#include <sys/resource.h>
extern "C" void android_set_application_target_sdk_version(uint32_t version);
#endif
#include <limits.h>
@@ -444,12 +446,17 @@ static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
if (!kPreloadDexCachesCollectStats) {
return;
}
+ // TODO: Update for hash-based DexCache arrays.
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
Thread* const self = Thread::Current();
for (const DexFile* dex_file : class_linker->GetBootClassPath()) {
CHECK(dex_file != nullptr);
+ // In fallback mode, not all boot classpath components might be registered, yet.
+ if (!class_linker->IsDexFileRegistered(self, *dex_file)) {
+ continue;
+ }
ObjPtr<mirror::DexCache> const dex_cache = class_linker->FindDexCache(self, *dex_file);
- CHECK(dex_cache != nullptr); // Boot class path dex caches are never unloaded.
+ DCHECK(dex_cache != nullptr); // Boot class path dex caches are never unloaded.
for (size_t j = 0; j < dex_cache->NumStrings(); j++) {
ObjPtr<mirror::String> string = dex_cache->GetResolvedString(dex::StringIndex(j));
if (string != nullptr) {
@@ -463,7 +470,7 @@ static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
}
}
for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) {
- ArtField* field = class_linker->GetResolvedField(j, dex_cache);
+ ArtField* field = dex_cache->GetResolvedField(j, class_linker->GetImagePointerSize());
if (field != nullptr) {
filled->num_fields++;
}
@@ -580,9 +587,7 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
static void VMRuntime_registerAppInfo(JNIEnv* env,
jclass clazz ATTRIBUTE_UNUSED,
jstring profile_file,
- jstring app_dir,
- jobjectArray code_paths,
- jstring foreign_dex_profile_path) {
+ jobjectArray code_paths) {
std::vector<std::string> code_paths_vec;
int code_paths_length = env->GetArrayLength(code_paths);
for (int i = 0; i < code_paths_length; i++) {
@@ -596,22 +601,7 @@ static void VMRuntime_registerAppInfo(JNIEnv* env,
std::string profile_file_str(raw_profile_file);
env->ReleaseStringUTFChars(profile_file, raw_profile_file);
- std::string foreign_dex_profile_path_str = "";
- if (foreign_dex_profile_path != nullptr) {
- const char* raw_foreign_dex_profile_path =
- env->GetStringUTFChars(foreign_dex_profile_path, nullptr);
- foreign_dex_profile_path_str.assign(raw_foreign_dex_profile_path);
- env->ReleaseStringUTFChars(foreign_dex_profile_path, raw_foreign_dex_profile_path);
- }
-
- const char* raw_app_dir = env->GetStringUTFChars(app_dir, nullptr);
- std::string app_dir_str(raw_app_dir);
- env->ReleaseStringUTFChars(app_dir, raw_app_dir);
-
- Runtime::Current()->RegisterAppInfo(code_paths_vec,
- profile_file_str,
- foreign_dex_profile_path_str,
- app_dir_str);
+ Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file_str);
}
static jboolean VMRuntime_isBootClassPathOnDisk(JNIEnv* env, jclass, jstring java_instruction_set) {
@@ -641,6 +631,23 @@ static jboolean VMRuntime_didPruneDalvikCache(JNIEnv* env ATTRIBUTE_UNUSED,
return Runtime::Current()->GetPrunedDalvikCache() ? JNI_TRUE : JNI_FALSE;
}
+static void VMRuntime_setSystemDaemonThreadPriority(JNIEnv* env ATTRIBUTE_UNUSED,
+ jclass klass ATTRIBUTE_UNUSED) {
+#ifdef ART_TARGET_ANDROID
+ Thread* self = Thread::Current();
+ DCHECK(self != nullptr);
+ pid_t tid = self->GetTid();
+  // We use a priority lower than the default for the system daemon threads (e.g. HeapTaskDaemon)
+  // to avoid jank due to CPU contention between the GC and other UI-related threads. b/36631902.
+ // We may use a native priority that doesn't have a corresponding java.lang.Thread-level priority.
+ static constexpr int kSystemDaemonNiceValue = 4; // priority 124
+ if (setpriority(PRIO_PROCESS, tid, kSystemDaemonNiceValue) != 0) {
+ PLOG(INFO) << *self << " setpriority(PRIO_PROCESS, " << tid << ", "
+ << kSystemDaemonNiceValue << ") failed";
+ }
+#endif
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(VMRuntime, addressOf, "(Ljava/lang/Object;)J"),
NATIVE_METHOD(VMRuntime, bootClassPath, "()Ljava/lang/String;"),
@@ -674,11 +681,11 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(VMRuntime, is64Bit, "()Z"),
FAST_NATIVE_METHOD(VMRuntime, isCheckJniEnabled, "()Z"),
NATIVE_METHOD(VMRuntime, preloadDexCaches, "()V"),
- NATIVE_METHOD(VMRuntime, registerAppInfo,
- "(Ljava/lang/String;Ljava/lang/String;[Ljava/lang/String;Ljava/lang/String;)V"),
+ NATIVE_METHOD(VMRuntime, registerAppInfo, "(Ljava/lang/String;[Ljava/lang/String;)V"),
NATIVE_METHOD(VMRuntime, isBootClassPathOnDisk, "(Ljava/lang/String;)Z"),
NATIVE_METHOD(VMRuntime, getCurrentInstructionSet, "()Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, didPruneDalvikCache, "()Z"),
+ NATIVE_METHOD(VMRuntime, setSystemDaemonThreadPriority, "()V"),
};
void register_dalvik_system_VMRuntime(JNIEnv* env) {
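setSystemDaemonThreadPriority boils down to a plain setpriority(2) call with a nice value of 4 for the calling thread. A self-contained sketch of the same call on Linux (gettid obtained via syscall for portability across libc versions):

    #include <sys/resource.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <cstdio>

    void LowerCurrentThreadPriority() {
      pid_t tid = static_cast<pid_t>(syscall(SYS_gettid));
      constexpr int kNice = 4;  // Slightly below the default nice value of 0.
      if (setpriority(PRIO_PROCESS, tid, kNice) != 0) {
        std::perror("setpriority");  // Non-fatal, matching the patch's PLOG(INFO).
      }
    }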
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 100f476b43..836ba81d8e 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -74,12 +74,40 @@ static void EnableDebugger() {
}
}
-static void DoCollectNonDebuggableCallback(Thread* thread, void* data ATTRIBUTE_UNUSED)
+class ClassSet {
+ public:
+  // The number of classes we reasonably expect to have to look at. Realistically the number is
+  // more like ~10 but there is little harm in having some extra capacity.
+ static constexpr int kClassSetCapacity = 100;
+
+ explicit ClassSet(Thread* const self) : self_(self) {
+ self_->GetJniEnv()->PushFrame(kClassSetCapacity);
+ }
+
+ ~ClassSet() {
+ self_->GetJniEnv()->PopFrame();
+ }
+
+ void AddClass(ObjPtr<mirror::Class> klass) REQUIRES(Locks::mutator_lock_) {
+ class_set_.insert(self_->GetJniEnv()->AddLocalReference<jclass>(klass.Ptr()));
+ }
+
+ const std::unordered_set<jclass>& GetClasses() const {
+ return class_set_;
+ }
+
+ private:
+ Thread* const self_;
+ std::unordered_set<jclass> class_set_;
+};
+
+static void DoCollectNonDebuggableCallback(Thread* thread, void* data)
REQUIRES(Locks::mutator_lock_) {
class NonDebuggableStacksVisitor : public StackVisitor {
public:
- explicit NonDebuggableStacksVisitor(Thread* t)
- : StackVisitor(t, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
+ NonDebuggableStacksVisitor(Thread* t, ClassSet* class_set)
+ : StackVisitor(t, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ class_set_(class_set) {}
~NonDebuggableStacksVisitor() OVERRIDE {}
@@ -87,7 +115,7 @@ static void DoCollectNonDebuggableCallback(Thread* thread, void* data ATTRIBUTE_
if (GetMethod()->IsRuntimeMethod()) {
return true;
}
- NonDebuggableClasses::AddNonDebuggableClass(GetMethod()->GetDeclaringClass());
+ class_set_->AddClass(GetMethod()->GetDeclaringClass());
if (kIsDebugBuild) {
LOG(INFO) << GetMethod()->GetDeclaringClass()->PrettyClass()
<< " might not be fully debuggable/deoptimizable due to "
@@ -95,16 +123,31 @@ static void DoCollectNonDebuggableCallback(Thread* thread, void* data ATTRIBUTE_
}
return true;
}
+
+ private:
+ ClassSet* class_set_;
};
- NonDebuggableStacksVisitor visitor(thread);
+ NonDebuggableStacksVisitor visitor(thread, reinterpret_cast<ClassSet*>(data));
visitor.WalkStack();
}
-static void CollectNonDebuggableClasses() {
+static void CollectNonDebuggableClasses() REQUIRES(!Locks::mutator_lock_) {
Runtime* const runtime = Runtime::Current();
- ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false);
- MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, nullptr);
+ Thread* const self = Thread::Current();
+ // Get the mutator lock.
+ ScopedObjectAccess soa(self);
+ ClassSet classes(self);
+ {
+ // Drop the shared mutator lock.
+ ScopedThreadSuspension sts(self, art::ThreadState::kNative);
+ // Get exclusive mutator lock with suspend all.
+ ScopedSuspendAll suspend("Checking stacks for non-obsoletable methods!", /*long_suspend*/false);
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ runtime->GetThreadList()->ForEach(DoCollectNonDebuggableCallback, &classes);
+ }
+ for (jclass klass : classes.GetClasses()) {
+ NonDebuggableClasses::AddNonDebuggableClass(klass);
+ }
}
static void EnableDebugFeatures(uint32_t debug_flags) {
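The ZygoteHooks rework above follows a collect-then-apply pattern: only the stack walking happens under ScopedSuspendAll, the collected classes are held as local references (bounded by the JNI PushFrame/PopFrame pair), and the actual registration runs after the world resumes. A generic sketch of the pattern (StopTheWorld stands in for ScopedSuspendAll):

    #include <unordered_set>

    template <typename StopTheWorld, typename Walk, typename Register>
    void CollectThenRegister(Walk&& walk_stacks, Register&& register_class) {
      std::unordered_set<void*> classes;
      {
        StopTheWorld stw;                   // Exclusive access to every thread's stack.
        walk_stacks([&classes](void* klass) { classes.insert(klass); });
      }                                     // Threads resume here.
      for (void* klass : classes) {
        register_class(klass);              // Heavier work happens while unsuspended.
      }
    }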
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index c8431c0519..381dc7beb0 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -108,10 +108,50 @@ static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) {
return soa.AddLocalReference<jstring>(mirror::Class::ComputeName(hs.NewHandle(c)));
}
-static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) {
+// TODO: Move this to mirror::Class ? Other mirror types that commonly appear
+// as arrays have a GetArrayClass() method.
+static ObjPtr<mirror::Class> GetClassArrayClass(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
+ return Runtime::Current()->GetClassLinker()->FindArrayClass(self, &class_class);
+}
+
+static jobjectArray Class_getInterfacesInternal(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
- ObjPtr<mirror::Class> c = DecodeClass(soa, javaThis);
- return soa.AddLocalReference<jobjectArray>(c->GetInterfaces()->Clone(soa.Self()));
+ StackHandleScope<4> hs(soa.Self());
+ Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
+
+ if (klass->IsProxyClass()) {
+ return soa.AddLocalReference<jobjectArray>(klass->GetProxyInterfaces()->Clone(soa.Self()));
+ }
+
+ const DexFile::TypeList* iface_list = klass->GetInterfaceTypeList();
+ if (iface_list == nullptr) {
+ return nullptr;
+ }
+
+ const uint32_t num_ifaces = iface_list->Size();
+ Handle<mirror::Class> class_array_class = hs.NewHandle(GetClassArrayClass(soa.Self()));
+ Handle<mirror::ObjectArray<mirror::Class>> ifaces = hs.NewHandle(
+ mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class.Get(), num_ifaces));
+ if (ifaces.IsNull()) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return nullptr;
+ }
+
+ // Check that we aren't in an active transaction, we call SetWithoutChecks
+ // with kActiveTransaction == false.
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
+
+ MutableHandle<mirror::Class> interface(hs.NewHandle<mirror::Class>(nullptr));
+ for (uint32_t i = 0; i < num_ifaces; ++i) {
+ const dex::TypeIndex type_idx = iface_list->GetTypeItem(i).type_idx_;
+ interface.Assign(ClassLinker::LookupResolvedType(
+ type_idx, klass->GetDexCache(), klass->GetClassLoader()));
+ ifaces->SetWithoutChecks<false>(i, interface.Get());
+ }
+
+ return soa.AddLocalReference<jobjectArray>(ifaces.Get());
}
static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
@@ -501,9 +541,7 @@ static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) {
// Pending exception from GetDeclaredClasses.
return nullptr;
}
- ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
- ObjPtr<mirror::Class> class_array_class =
- Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class);
+ ObjPtr<mirror::Class> class_array_class = GetClassArrayClass(soa.Self());
if (class_array_class == nullptr) {
return nullptr;
}
@@ -736,8 +774,8 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Class, getEnclosingMethodNative, "()Ljava/lang/reflect/Method;"),
FAST_NATIVE_METHOD(Class, getInnerClassFlags, "(I)I"),
FAST_NATIVE_METHOD(Class, getInnerClassName, "()Ljava/lang/String;"),
+ FAST_NATIVE_METHOD(Class, getInterfacesInternal, "()[Ljava/lang/Class;"),
FAST_NATIVE_METHOD(Class, getNameNative, "()Ljava/lang/String;"),
- FAST_NATIVE_METHOD(Class, getProxyInterfaces, "()[Ljava/lang/Class;"),
FAST_NATIVE_METHOD(Class, getPublicDeclaredFields, "()[Ljava/lang/reflect/Field;"),
FAST_NATIVE_METHOD(Class, getSignatureAnnotation, "()[Ljava/lang/String;"),
FAST_NATIVE_METHOD(Class, isAnonymousClass, "()Z"),
diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc
deleted file mode 100644
index 8fda4dfaaf..0000000000
--- a/runtime/native/java_lang_DexCache.cc
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "java_lang_DexCache.h"
-
-#include "dex_file.h"
-#include "dex_file_types.h"
-#include "jni_internal.h"
-#include "mirror/class-inl.h"
-#include "mirror/dex_cache-inl.h"
-#include "mirror/object-inl.h"
-#include "scoped_fast_native_object_access-inl.h"
-#include "well_known_classes.h"
-
-namespace art {
-
-static jobject DexCache_getDexNative(JNIEnv* env, jobject javaDexCache) {
- ScopedFastNativeObjectAccess soa(env);
- ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- // Should only be called while holding the lock on the dex cache.
- DCHECK_EQ(dex_cache->GetLockOwnerThreadId(), soa.Self()->GetThreadId());
- const DexFile* dex_file = dex_cache->GetDexFile();
- if (dex_file == nullptr) {
- return nullptr;
- }
- void* address = const_cast<void*>(reinterpret_cast<const void*>(dex_file->Begin()));
- jobject byte_buffer = env->NewDirectByteBuffer(address, dex_file->Size());
- if (byte_buffer == nullptr) {
- DCHECK(soa.Self()->IsExceptionPending());
- return nullptr;
- }
-
- jvalue args[1];
- args[0].l = byte_buffer;
- return env->CallStaticObjectMethodA(WellKnownClasses::com_android_dex_Dex,
- WellKnownClasses::com_android_dex_Dex_create,
- args);
-}
-
-static jobject DexCache_getResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index) {
- ScopedFastNativeObjectAccess soa(env);
- ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- CHECK_LT(static_cast<size_t>(type_index), dex_cache->GetDexFile()->NumTypeIds());
- return soa.AddLocalReference<jobject>(dex_cache->GetResolvedType(dex::TypeIndex(type_index)));
-}
-
-static jobject DexCache_getResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index) {
- ScopedFastNativeObjectAccess soa(env);
- ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
- return soa.AddLocalReference<jobject>(
- dex_cache->GetResolvedString(dex::StringIndex(string_index)));
-}
-
-static void DexCache_setResolvedType(JNIEnv* env,
- jobject javaDexCache,
- jint type_index,
- jobject type) {
- ScopedFastNativeObjectAccess soa(env);
- ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- const DexFile& dex_file = *dex_cache->GetDexFile();
- CHECK_LT(static_cast<size_t>(type_index), dex_file.NumTypeIds());
- ObjPtr<mirror::Class> t = soa.Decode<mirror::Class>(type);
- if (t != nullptr && t->DescriptorEquals(dex_file.StringByTypeIdx(dex::TypeIndex(type_index)))) {
- ClassTable* table =
- Runtime::Current()->GetClassLinker()->FindClassTable(soa.Self(), dex_cache);
- if (table != nullptr && table->TryInsert(t) == t) {
- dex_cache->SetResolvedType(dex::TypeIndex(type_index), t);
- }
- }
-}
-
-static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index,
- jobject string) {
- ScopedFastNativeObjectAccess soa(env);
- ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache);
- CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds());
- ObjPtr<mirror::String> s = soa.Decode<mirror::String>(string);
- if (s != nullptr) {
- dex_cache->SetResolvedString(dex::StringIndex(string_index), s);
- }
-}
-
-static JNINativeMethod gMethods[] = {
- FAST_NATIVE_METHOD(DexCache, getDexNative, "()Lcom/android/dex/Dex;"),
- FAST_NATIVE_METHOD(DexCache, getResolvedType, "(I)Ljava/lang/Class;"),
- FAST_NATIVE_METHOD(DexCache, getResolvedString, "(I)Ljava/lang/String;"),
- FAST_NATIVE_METHOD(DexCache, setResolvedType, "(ILjava/lang/Class;)V"),
- FAST_NATIVE_METHOD(DexCache, setResolvedString, "(ILjava/lang/String;)V"),
-};
-
-void register_java_lang_DexCache(JNIEnv* env) {
- REGISTER_NATIVE_METHODS("java/lang/DexCache");
-}
-
-} // namespace art
diff --git a/runtime/native/java_lang_DexCache.h b/runtime/native/java_lang_DexCache.h
deleted file mode 100644
index b1c1f5e72c..0000000000
--- a/runtime/native/java_lang_DexCache.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_
-#define ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_
-
-#include <jni.h>
-
-namespace art {
-
-void register_java_lang_DexCache(JNIEnv* env);
-
-} // namespace art
-
-#endif // ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 2e561ffa46..bf33bf24a0 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -101,8 +101,9 @@ static jstring String_intern(JNIEnv* env, jobject java_this) {
static jstring String_doReplace(JNIEnv* env, jobject java_this, jchar old_c, jchar new_c) {
ScopedFastNativeObjectAccess soa(env);
- ObjPtr<mirror::String> result =
- soa.Decode<mirror::String>(java_this)->DoReplace(soa.Self(), old_c, new_c);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::String> string = hs.NewHandle(soa.Decode<mirror::String>(java_this));
+ ObjPtr<mirror::String> result = mirror::String::DoReplace(soa.Self(), string, old_c, new_c);
return soa.AddLocalReference<jstring>(result);
}
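
The String_doReplace hunk above pins the receiver in a StackHandleScope before calling the now-static DoReplace: the replacement may allocate, allocation can trigger a copying collection, and a raw decoded pointer would then refer to the string's old location, while a handle is fixed up by the collector. The standalone sketch below (a toy heap and handle table, none of it the ART types) shows why the handle survives an allocation that a raw pointer does not.

    // Toy moving heap and handle table -- illustrative only, not the ART types.
    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct Obj { int value; };

    struct ToyHeap {
      std::vector<Obj*> roots;          // handle slots the collector may rewrite
      Obj* Allocate(int v) {
        MoveEverything();               // pretend every allocation triggers a GC
        return new Obj{v};
      }
      void MoveEverything() {
        for (Obj*& slot : roots) {      // relocate each rooted object
          Obj* moved = new Obj{slot->value};
          delete slot;
          slot = moved;                 // the handle slot now holds the new address
        }
      }
    };

    struct Handle {                     // stable index into the root table
      ToyHeap* heap;
      size_t index;
      Obj* Get() const { return heap->roots[index]; }
    };

    Handle NewHandle(ToyHeap* heap, Obj* obj) {
      heap->roots.push_back(obj);
      return Handle{heap, heap->roots.size() - 1};
    }

    int main() {
      ToyHeap heap;
      Handle receiver = NewHandle(&heap, heap.Allocate(41));
      Obj* raw = receiver.Get();                              // raw pointer, not a root
      Obj* copy = heap.Allocate(receiver.Get()->value + 1);   // moves 'receiver'
      (void)raw;                                              // 'raw' is now dangling
      assert(receiver.Get()->value == 41);                    // the handle still works
      std::printf("copy=%d receiver=%d\n", copy->value, receiver.Get()->value);
    }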
diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc
index bc23bedc77..8f226ce621 100644
--- a/runtime/native/java_lang_reflect_Executable.cc
+++ b/runtime/native/java_lang_reflect_Executable.cc
@@ -194,12 +194,146 @@ static jboolean Executable_isAnnotationPresentNative(JNIEnv* env,
return annotations::IsMethodAnnotationPresent(method, klass);
}
+static jint Executable_compareMethodParametersInternal(JNIEnv* env,
+ jobject thisMethod,
+ jobject otherMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* this_method = ArtMethod::FromReflectedMethod(soa, thisMethod);
+ ArtMethod* other_method = ArtMethod::FromReflectedMethod(soa, otherMethod);
+
+ this_method = this_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ other_method = other_method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+
+ const DexFile::TypeList* this_list = this_method->GetParameterTypeList();
+ const DexFile::TypeList* other_list = other_method->GetParameterTypeList();
+
+ if (this_list == other_list) {
+ return 0;
+ }
+
+ if (this_list == nullptr && other_list != nullptr) {
+ return -1;
+ }
+
+ if (other_list == nullptr && this_list != nullptr) {
+ return 1;
+ }
+
+ const int32_t this_size = this_list->Size();
+ const int32_t other_size = other_list->Size();
+
+ if (this_size != other_size) {
+ return (this_size - other_size);
+ }
+
+ for (int32_t i = 0; i < this_size; ++i) {
+ const DexFile::TypeId& lhs = this_method->GetDexFile()->GetTypeId(
+ this_list->GetTypeItem(i).type_idx_);
+ const DexFile::TypeId& rhs = other_method->GetDexFile()->GetTypeId(
+ other_list->GetTypeItem(i).type_idx_);
+
+ uint32_t lhs_len, rhs_len;
+ const char* lhs_data = this_method->GetDexFile()->StringDataAndUtf16LengthByIdx(
+ lhs.descriptor_idx_, &lhs_len);
+ const char* rhs_data = other_method->GetDexFile()->StringDataAndUtf16LengthByIdx(
+ rhs.descriptor_idx_, &rhs_len);
+
+ int cmp = strcmp(lhs_data, rhs_data);
+ if (cmp != 0) {
+ return (cmp < 0) ? -1 : 1;
+ }
+ }
+
+ return 0;
+}
+
+static jobject Executable_getMethodNameInternal(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ return soa.AddLocalReference<jobject>(method->GetNameAsString(soa.Self()));
+}
+
+static jobject Executable_getMethodReturnTypeInternal(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ ObjPtr<mirror::Class> return_type(method->GetReturnType(true /* resolve */));
+ if (return_type.IsNull()) {
+ CHECK(soa.Self()->IsExceptionPending());
+ return nullptr;
+ }
+
+ return soa.AddLocalReference<jobject>(return_type);
+}
+
+// TODO: Move this to mirror::Class ? Other mirror types that commonly appear
+// as arrays have a GetArrayClass() method. This is duplicated in
+// java_lang_Class.cc as well.
+static ObjPtr<mirror::Class> GetClassArrayClass(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass();
+ return Runtime::Current()->GetClassLinker()->FindArrayClass(self, &class_class);
+}
+
+static jobjectArray Executable_getParameterTypesInternal(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+
+ const DexFile::TypeList* params = method->GetParameterTypeList();
+ if (params == nullptr) {
+ return nullptr;
+ }
+
+ const uint32_t num_params = params->Size();
+
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::Class> class_array_class = hs.NewHandle(GetClassArrayClass(soa.Self()));
+ Handle<mirror::ObjectArray<mirror::Class>> ptypes = hs.NewHandle(
+ mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class.Get(), num_params));
+ if (ptypes.IsNull()) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return nullptr;
+ }
+
+ MutableHandle<mirror::Class> param(hs.NewHandle<mirror::Class>(nullptr));
+ for (uint32_t i = 0; i < num_params; ++i) {
+ const dex::TypeIndex type_idx = params->GetTypeItem(i).type_idx_;
+ param.Assign(Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method));
+ if (param.Get() == nullptr) {
+ DCHECK(soa.Self()->IsExceptionPending());
+ return nullptr;
+ }
+ ptypes->SetWithoutChecks<false>(i, param.Get());
+ }
+
+ return soa.AddLocalReference<jobjectArray>(ptypes.Get());
+}
+
+static jint Executable_getParameterCountInternal(JNIEnv* env, jobject javaMethod) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
+ method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+
+ const DexFile::TypeList* params = method->GetParameterTypeList();
+ return (params == nullptr) ? 0 : params->Size();
+}
+
+
static JNINativeMethod gMethods[] = {
+ FAST_NATIVE_METHOD(Executable, compareMethodParametersInternal,
+ "(Ljava/lang/reflect/Method;)I"),
FAST_NATIVE_METHOD(Executable, getAnnotationNative,
- "(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
- FAST_NATIVE_METHOD(Executable, getDeclaredAnnotationsNative, "()[Ljava/lang/annotation/Annotation;"),
+ "(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
+ FAST_NATIVE_METHOD(Executable, getDeclaredAnnotationsNative,
+ "()[Ljava/lang/annotation/Annotation;"),
FAST_NATIVE_METHOD(Executable, getParameterAnnotationsNative,
- "()[[Ljava/lang/annotation/Annotation;"),
+ "()[[Ljava/lang/annotation/Annotation;"),
+ FAST_NATIVE_METHOD(Executable, getMethodNameInternal, "()Ljava/lang/String;"),
+ FAST_NATIVE_METHOD(Executable, getMethodReturnTypeInternal, "()Ljava/lang/Class;"),
+ FAST_NATIVE_METHOD(Executable, getParameterTypesInternal, "()[Ljava/lang/Class;"),
+ FAST_NATIVE_METHOD(Executable, getParameterCountInternal, "()I"),
FAST_NATIVE_METHOD(Executable, getParameters0, "()[Ljava/lang/reflect/Parameter;"),
FAST_NATIVE_METHOD(Executable, getSignatureAnnotation, "()[Ljava/lang/String;"),
FAST_NATIVE_METHOD(Executable, isAnnotationPresentNative, "(Ljava/lang/Class;)Z"),
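
Executable_compareMethodParametersInternal, added above, orders two methods by their parameter lists: identical or absent lists compare equal or sort first, shorter lists sort before longer ones, and otherwise the dex type descriptors are compared element-wise. A minimal sketch of that ordering, using plain descriptor strings in place of DexFile::TypeList (the helper name CompareParameterLists is made up for the example):

    #include <cstring>
    #include <iostream>
    #include <string>
    #include <vector>

    using ParamList = std::vector<std::string>;  // stand-in for DexFile::TypeList

    int CompareParameterLists(const ParamList* lhs, const ParamList* rhs) {
      if (lhs == rhs) return 0;                    // same list (or both absent)
      if (lhs == nullptr) return -1;               // a missing list sorts first
      if (rhs == nullptr) return 1;
      if (lhs->size() != rhs->size()) {            // then by arity
        return static_cast<int>(lhs->size()) - static_cast<int>(rhs->size());
      }
      for (size_t i = 0; i < lhs->size(); ++i) {   // then element-wise by descriptor
        int cmp = std::strcmp((*lhs)[i].c_str(), (*rhs)[i].c_str());
        if (cmp != 0) return cmp < 0 ? -1 : 1;
      }
      return 0;
    }

    int main() {
      ParamList a = {"I", "Ljava/lang/String;"};
      ParamList b = {"I", "Ljava/lang/Object;"};
      std::cout << CompareParameterLists(&a, &b) << "\n";       // 1: "String" > "Object"
      std::cout << CompareParameterLists(nullptr, &a) << "\n";  // -1: no parameter list
    }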
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 9cf80a5bf5..9198964f87 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -456,6 +456,13 @@ static jlong Field_getArtField(JNIEnv* env, jobject javaField) {
return reinterpret_cast<jlong>(field);
}
+static jobject Field_getNameInternal(JNIEnv* env, jobject javaField) {
+ ScopedFastNativeObjectAccess soa(env);
+ ArtField* field = soa.Decode<mirror::Field>(javaField)->GetArtField();
+ return soa.AddLocalReference<jobject>(
+ field->GetStringName(soa.Self(), true /* resolve */));
+}
+
static jobjectArray Field_getDeclaredAnnotations(JNIEnv* env, jobject javaField) {
ScopedFastNativeObjectAccess soa(env);
ArtField* field = soa.Decode<mirror::Field>(javaField)->GetArtField();
@@ -506,6 +513,7 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Field, getFloat, "(Ljava/lang/Object;)F"),
FAST_NATIVE_METHOD(Field, getInt, "(Ljava/lang/Object;)I"),
FAST_NATIVE_METHOD(Field, getLong, "(Ljava/lang/Object;)J"),
+ FAST_NATIVE_METHOD(Field, getNameInternal, "()Ljava/lang/String;"),
FAST_NATIVE_METHOD(Field, getShort, "(Ljava/lang/Object;)S"),
FAST_NATIVE_METHOD(Field, isAnnotationPresentNative, "(Ljava/lang/Class;)Z"),
FAST_NATIVE_METHOD(Field, set, "(Ljava/lang/Object;Ljava/lang/Object;)V"),
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 6e5e3d9337..6f0130eb15 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -55,7 +55,8 @@ static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
++i;
}
CHECK_NE(throws_index, -1);
- mirror::ObjectArray<mirror::Class>* declared_exceptions = klass->GetThrows()->Get(throws_index);
+ mirror::ObjectArray<mirror::Class>* declared_exceptions =
+ klass->GetProxyThrows()->Get(throws_index);
return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self()));
} else {
mirror::ObjectArray<mirror::Class>* result_array =
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index c58854b13e..d77cfa1d35 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -118,7 +118,7 @@ void InitializeNativeBridge(JNIEnv* env, const char* instruction_set) {
for (int signal = 0; signal < _NSIG; ++signal) {
android::NativeBridgeSignalHandlerFn fn = android::NativeBridgeGetSignalHandler(signal);
if (fn != nullptr) {
- SetSpecialSignalHandlerFn(signal, fn);
+ AddSpecialSignalHandlerFn(signal, fn);
}
}
#endif
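
The rename from SetSpecialSignalHandlerFn to AddSpecialSignalHandlerFn tracks the fault-handler rework elsewhere in this patch: a signal no longer has a single special-handler slot that later callers overwrite, but an ordered chain that each registration appends to. A minimal sketch of that dispatch model follows; the types are simplified stand-ins, not the fault_handler.cc interface.

    #include <csignal>
    #include <functional>
    #include <iostream>
    #include <map>
    #include <vector>

    using SpecialHandler = std::function<bool(int)>;

    std::map<int, std::vector<SpecialHandler>> g_chains;

    void AddSpecialSignalHandler(int sig, SpecialHandler fn) {
      g_chains[sig].push_back(std::move(fn));   // append; nothing is overwritten
    }

    bool Dispatch(int sig) {
      for (const SpecialHandler& fn : g_chains[sig]) {
        if (fn(sig)) return true;               // first handler to claim the fault wins
      }
      return false;                             // otherwise fall through to the default
    }

    int main() {
      AddSpecialSignalHandler(SIGSEGV, [](int) {
        std::cout << "first registered handler declines\n";
        return false;
      });
      AddSpecialSignalHandler(SIGSEGV, [](int) {
        std::cout << "second registered handler claims\n";
        return true;
      });
      std::cout << (Dispatch(SIGSEGV) ? "handled" : "unhandled") << "\n";
    }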
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 7460d622b5..cbc502487f 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -105,7 +105,7 @@ static std::unique_ptr<Addr2linePipe> Connect(const std::string& name, const cha
if (pid == -1) {
close(caller_to_addr2line[0]);
close(caller_to_addr2line[1]);
- close(addr2line_to_caller[1]);
+ close(addr2line_to_caller[0]);
close(addr2line_to_caller[1]);
return nullptr;
}
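
The one-line change above is a descriptor-leak fix: on fork() failure the old code closed addr2line_to_caller[1] twice and never closed addr2line_to_caller[0]. Reduced to a standalone sketch with generic pipe names, the intended failure-path cleanup looks like this:

    #include <unistd.h>
    #include <cstdio>

    int main() {
      int caller_to_child[2];
      int child_to_caller[2];
      if (pipe(caller_to_child) == -1) return 1;
      if (pipe(child_to_caller) == -1) {
        close(caller_to_child[0]);
        close(caller_to_child[1]);
        return 1;
      }
      pid_t pid = fork();
      if (pid == -1) {
        // Release all four descriptors exactly once; closing one end twice while
        // leaving the other open (the bug fixed above) leaks a file descriptor.
        close(caller_to_child[0]);
        close(caller_to_child[1]);
        close(child_to_caller[0]);
        close(child_to_caller[1]);
        return 1;
      }
      std::printf("fork succeeded, pid=%d\n", static_cast<int>(pid));
      return 0;
    }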
diff --git a/runtime/non_debuggable_classes.cc b/runtime/non_debuggable_classes.cc
index db121a90e2..829ea65876 100644
--- a/runtime/non_debuggable_classes.cc
+++ b/runtime/non_debuggable_classes.cc
@@ -27,16 +27,16 @@ namespace art {
std::vector<jclass> NonDebuggableClasses::non_debuggable_classes;
-void NonDebuggableClasses::AddNonDebuggableClass(ObjPtr<mirror::Class> klass) {
+void NonDebuggableClasses::AddNonDebuggableClass(jclass klass) {
Thread* self = Thread::Current();
JNIEnvExt* env = self->GetJniEnv();
+ ObjPtr<mirror::Class> mirror_klass(self->DecodeJObject(klass)->AsClass());
for (jclass c : non_debuggable_classes) {
- if (self->DecodeJObject(c)->AsClass() == klass.Ptr()) {
+ if (self->DecodeJObject(c)->AsClass() == mirror_klass.Ptr()) {
return;
}
}
- ScopedLocalRef<jclass> lr(env, env->AddLocalReference<jclass>(klass));
- non_debuggable_classes.push_back(reinterpret_cast<jclass>(env->NewGlobalRef(lr.get())));
+ non_debuggable_classes.push_back(reinterpret_cast<jclass>(env->NewGlobalRef(klass)));
}
} // namespace art
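
AddNonDebuggableClass now accepts the caller's jclass and promotes it to a global reference directly, instead of decoding an ObjPtr and minting a fresh local reference first. A rough equivalent of the pattern in plain JNI is sketched below; ART's version compares decoded classes under the mutator lock rather than calling IsSameObject, so this is an approximation, not the runtime code.

    #include <jni.h>
    #include <vector>

    static std::vector<jclass> g_non_debuggable_classes;

    // Called from JNI code; 'klass' is a local reference supplied by the caller.
    void AddNonDebuggableClass(JNIEnv* env, jclass klass) {
      for (jclass c : g_non_debuggable_classes) {
        if (env->IsSameObject(c, klass)) {   // already recorded, nothing to do
          return;
        }
      }
      // Promote the caller's local reference straight to a global one.
      g_non_debuggable_classes.push_back(
          reinterpret_cast<jclass>(env->NewGlobalRef(klass)));
    }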
diff --git a/runtime/non_debuggable_classes.h b/runtime/non_debuggable_classes.h
index b72afd8299..e1b563339d 100644
--- a/runtime/non_debuggable_classes.h
+++ b/runtime/non_debuggable_classes.h
@@ -21,21 +21,17 @@
#include "base/mutex.h"
#include "jni.h"
-#include "obj_ptr.h"
namespace art {
-namespace mirror {
-class Class;
-} // namespace mirror
-
struct NonDebuggableClasses {
public:
static const std::vector<jclass>& GetNonDebuggableClasses() {
return non_debuggable_classes;
}
- static void AddNonDebuggableClass(ObjPtr<mirror::Class> klass) REQUIRES(Locks::mutator_lock_);
+ static void AddNonDebuggableClass(jclass klass)
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
static std::vector<jclass> non_debuggable_classes;
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index f72a853393..71c6a82dc5 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -31,7 +31,8 @@ struct NthCallerVisitor : public StackVisitor {
n(n_in),
include_runtime_and_upcalls_(include_runtime_and_upcalls),
count(0),
- caller(nullptr) {}
+ caller(nullptr),
+ caller_pc(0) {}
bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
diff --git a/runtime/oat.h b/runtime/oat.h
index 1544121ed1..faa0129d6b 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '1', '1', '4', '\0' }; // hash-based DexCache types.
+ static constexpr uint8_t kOatVersion[] = { '1', '1', '7', '\0' }; // Read barriers on MIPS.
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 5ae2fc51b7..db6f8ee488 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -430,8 +430,7 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile&
// starts up.
LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. "
<< "Allow oat file use. This is potentially dangerous.";
- } else if (file.GetOatHeader().GetImageFileLocationOatChecksum()
- != GetCombinedImageChecksum()) {
+ } else if (file.GetOatHeader().GetImageFileLocationOatChecksum() != image_info->oat_checksum) {
VLOG(oat) << "Oat image checksum does not match image checksum.";
return kOatBootImageOutOfDate;
}
@@ -641,15 +640,8 @@ bool OatFileAssistant::DexLocationToOdexFilename(const std::string& location,
std::string dir = location.substr(0, pos+1);
dir += "oat/" + std::string(GetInstructionSetString(isa));
- // Find the file portion of the dex location.
- std::string file;
- if (pos == std::string::npos) {
- file = location;
- } else {
- file = location.substr(pos+1);
- }
-
// Get the base part of the file without the extension.
+ std::string file = location.substr(pos+1);
pos = file.rfind('.');
if (pos == std::string::npos) {
*error_msg = "Dex location " + location + " has no extension.";
@@ -726,68 +718,36 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() {
return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr;
}
-const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
- if (!image_info_load_attempted_) {
- image_info_load_attempted_ = true;
+std::unique_ptr<OatFileAssistant::ImageInfo>
+OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) {
+ CHECK(error_msg != nullptr);
- Runtime* runtime = Runtime::Current();
- std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
- if (!image_spaces.empty()) {
- cached_image_info_.location = image_spaces[0]->GetImageLocation();
-
- if (isa_ == kRuntimeISA) {
- const ImageHeader& image_header = image_spaces[0]->GetImageHeader();
- cached_image_info_.oat_checksum = image_header.GetOatChecksum();
- cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
- image_header.GetOatDataBegin());
- cached_image_info_.patch_delta = image_header.GetPatchDelta();
- } else {
- std::string error_msg;
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(cached_image_info_.location.c_str(),
- isa_,
- &error_msg));
- CHECK(image_header != nullptr) << error_msg;
- cached_image_info_.oat_checksum = image_header->GetOatChecksum();
- cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(
- image_header->GetOatDataBegin());
- cached_image_info_.patch_delta = image_header->GetPatchDelta();
- }
- }
- image_info_load_succeeded_ = (!image_spaces.empty());
+ Runtime* runtime = Runtime::Current();
+ std::unique_ptr<ImageInfo> info(new ImageInfo());
+ info->location = runtime->GetImageLocation();
- combined_image_checksum_ = CalculateCombinedImageChecksum(isa_);
+ std::unique_ptr<ImageHeader> image_header(
+ gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg));
+ if (image_header == nullptr) {
+ return nullptr;
}
- return image_info_load_succeeded_ ? &cached_image_info_ : nullptr;
-}
-// TODO: Use something better than xor.
-uint32_t OatFileAssistant::CalculateCombinedImageChecksum(InstructionSet isa) {
- uint32_t checksum = 0;
- std::vector<gc::space::ImageSpace*> image_spaces =
- Runtime::Current()->GetHeap()->GetBootImageSpaces();
- if (isa == kRuntimeISA) {
- for (gc::space::ImageSpace* image_space : image_spaces) {
- checksum ^= image_space->GetImageHeader().GetOatChecksum();
- }
- } else {
- for (gc::space::ImageSpace* image_space : image_spaces) {
- std::string location = image_space->GetImageLocation();
- std::string error_msg;
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(location.c_str(), isa, &error_msg));
- CHECK(image_header != nullptr) << error_msg;
- checksum ^= image_header->GetOatChecksum();
- }
- }
- return checksum;
+ info->oat_checksum = image_header->GetOatChecksum();
+ info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
+ info->patch_delta = image_header->GetPatchDelta();
+ return info;
}
-uint32_t OatFileAssistant::GetCombinedImageChecksum() {
+const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
if (!image_info_load_attempted_) {
- GetImageInfo();
+ image_info_load_attempted_ = true;
+ std::string error_msg;
+ cached_image_info_ = ImageInfo::GetRuntimeImageInfo(isa_, &error_msg);
+ if (cached_image_info_ == nullptr) {
+ LOG(WARNING) << "Unable to get runtime image info: " << error_msg;
+ }
}
- return combined_image_checksum_;
+ return cached_image_info_.get();
}
OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() {
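
GetImageInfo now owns its cache through a unique_ptr and records only whether a load was attempted: the ImageInfo is built at most once, a failed load is remembered as a null cache entry, and later calls return whatever the first attempt produced. The shape of that pattern, with made-up LoadInfo/Cache names standing in for ImageInfo::GetRuntimeImageInfo and the assistant itself:

    #include <iostream>
    #include <memory>
    #include <string>

    struct Info { int checksum = 0; };

    // Stand-in for the expensive load; here it always fails.
    std::unique_ptr<Info> LoadInfo(std::string* error_msg) {
      *error_msg = "no boot image header";
      return nullptr;
    }

    class Cache {
     public:
      const Info* Get() {
        if (!load_attempted_) {
          load_attempted_ = true;            // never retried, even after failure
          std::string error_msg;
          cached_ = LoadInfo(&error_msg);
          if (cached_ == nullptr) {
            std::cerr << "Unable to get image info: " << error_msg << "\n";
          }
        }
        return cached_.get();                // null doubles as "first attempt failed"
      }

     private:
      bool load_attempted_ = false;
      std::unique_ptr<Info> cached_;
    };

    int main() {
      Cache cache;
      std::cout << (cache.Get() == nullptr) << "\n";  // 1, logs the failure once
      std::cout << (cache.Get() == nullptr) << "\n";  // 1, no second load attempt
    }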
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 3ede29f5e0..b84e711daa 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -276,14 +276,15 @@ class OatFileAssistant {
std::string* oat_filename,
std::string* error_msg);
- static uint32_t CalculateCombinedImageChecksum(InstructionSet isa = kRuntimeISA);
-
private:
struct ImageInfo {
uint32_t oat_checksum = 0;
uintptr_t oat_data_begin = 0;
int32_t patch_delta = 0;
std::string location;
+
+ static std::unique_ptr<ImageInfo> GetRuntimeImageInfo(InstructionSet isa,
+ std::string* error_msg);
};
class OatFileInfo {
@@ -368,7 +369,7 @@ class OatFileAssistant {
std::unique_ptr<OatFile> file_;
bool status_attempted_ = false;
- OatStatus status_;
+ OatStatus status_ = OatStatus::kOatCannotOpen;
// For debugging only.
// If this flag is set, the file has been released to the user and the
@@ -414,8 +415,6 @@ class OatFileAssistant {
// The caller shouldn't clean up or free the returned pointer.
const ImageInfo* GetImageInfo();
- uint32_t GetCombinedImageChecksum();
-
// To implement Lock(), we lock a dummy file where the oat file would go
// (adding ".flock" to the target file name) and retain the lock for the
// remaining lifetime of the OatFileAssistant object.
@@ -445,9 +444,7 @@ class OatFileAssistant {
// TODO: The image info should probably be moved out of the oat file
// assistant to an image file manager.
bool image_info_load_attempted_ = false;
- bool image_info_load_succeeded_ = false;
- ImageInfo cached_image_info_;
- uint32_t combined_image_checksum_ = 0;
+ std::unique_ptr<ImageInfo> cached_image_info_;
DISALLOW_COPY_AND_ASSIGN(OatFileAssistant);
};
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 70796148a4..d04dbbee04 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -23,6 +23,7 @@
#include "android-base/stringprintf.h"
#include "art_field-inl.h"
+#include "base/bit_vector-inl.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -145,13 +146,52 @@ std::vector<const OatFile*> OatFileManager::RegisterImageOatFiles(
return oat_files;
}
+class TypeIndexInfo {
+ public:
+ explicit TypeIndexInfo(const DexFile* dex_file)
+ : type_indexes_(GenerateTypeIndexes(dex_file)),
+ iter_(type_indexes_.Indexes().begin()),
+ end_(type_indexes_.Indexes().end()) { }
+
+ BitVector& GetTypeIndexes() {
+ return type_indexes_;
+ }
+ BitVector::IndexIterator& GetIterator() {
+ return iter_;
+ }
+ BitVector::IndexIterator& GetIteratorEnd() {
+ return end_;
+ }
+ void AdvanceIterator() {
+ iter_++;
+ }
+
+ private:
+ static BitVector GenerateTypeIndexes(const DexFile* dex_file) {
+ BitVector type_indexes(/*start_bits*/0, /*expandable*/true, Allocator::GetMallocAllocator());
+ for (uint16_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ uint16_t type_idx = class_def.class_idx_.index_;
+ type_indexes.SetBit(type_idx);
+ }
+ return type_indexes;
+ }
+
+ // BitVector with bits set for the type indexes of all classes in the input dex file.
+ BitVector type_indexes_;
+ BitVector::IndexIterator iter_;
+ BitVector::IndexIterator end_;
+};
+
class DexFileAndClassPair : ValueObject {
public:
- DexFileAndClassPair(const DexFile* dex_file, size_t current_class_index, bool from_loaded_oat)
- : cached_descriptor_(GetClassDescriptor(dex_file, current_class_index)),
+ DexFileAndClassPair(const DexFile* dex_file, TypeIndexInfo* type_info, bool from_loaded_oat)
+ : type_info_(type_info),
dex_file_(dex_file),
- current_class_index_(current_class_index),
- from_loaded_oat_(from_loaded_oat) {}
+ cached_descriptor_(dex_file_->StringByTypeIdx(dex::TypeIndex(*type_info->GetIterator()))),
+ from_loaded_oat_(from_loaded_oat) {
+ type_info_->AdvanceIterator();
+ }
DexFileAndClassPair(const DexFileAndClassPair& rhs) = default;
@@ -172,16 +212,12 @@ class DexFileAndClassPair : ValueObject {
}
bool DexFileHasMoreClasses() const {
- return current_class_index_ + 1 < dex_file_->NumClassDefs();
+ return type_info_->GetIterator() != type_info_->GetIteratorEnd();
}
void Next() {
- ++current_class_index_;
- cached_descriptor_ = GetClassDescriptor(dex_file_, current_class_index_);
- }
-
- size_t GetCurrentClassIndex() const {
- return current_class_index_;
+ cached_descriptor_ = dex_file_->StringByTypeIdx(dex::TypeIndex(*type_info_->GetIterator()));
+ type_info_->AdvanceIterator();
}
bool FromLoadedOat() const {
@@ -193,42 +229,36 @@ class DexFileAndClassPair : ValueObject {
}
private:
- static const char* GetClassDescriptor(const DexFile* dex_file, size_t index) {
- DCHECK(IsUint<16>(index));
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(static_cast<uint16_t>(index));
- return dex_file->StringByTypeIdx(class_def.class_idx_);
- }
-
- const char* cached_descriptor_;
+ TypeIndexInfo* type_info_;
const DexFile* dex_file_;
- size_t current_class_index_;
+ const char* cached_descriptor_;
bool from_loaded_oat_; // We only need to compare mismatches between what we load now
// and what was loaded before. Any old duplicates must have been
// OK, and any new "internal" duplicates are as well (they must
// be from multidex, which resolves correctly).
};
-static void AddDexFilesFromOat(const OatFile* oat_file,
- bool already_loaded,
- /*out*/std::priority_queue<DexFileAndClassPair>* heap,
- std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
+static void AddDexFilesFromOat(
+ const OatFile* oat_file,
+ /*out*/std::vector<const DexFile*>* dex_files,
+ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
for (const OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
std::string error;
std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error);
if (dex_file == nullptr) {
LOG(WARNING) << "Could not create dex file from oat file: " << error;
} else if (dex_file->NumClassDefs() > 0U) {
- heap->emplace(dex_file.get(), /*current_class_index*/0U, already_loaded);
+ dex_files->push_back(dex_file.get());
opened_dex_files->push_back(std::move(dex_file));
}
}
}
-static void AddNext(/*inout*/DexFileAndClassPair* original,
- /*inout*/std::priority_queue<DexFileAndClassPair>* heap) {
- if (original->DexFileHasMoreClasses()) {
- original->Next();
- heap->push(std::move(*original));
+static void AddNext(/*inout*/DexFileAndClassPair& original,
+ /*inout*/std::priority_queue<DexFileAndClassPair>& heap) {
+ if (original.DexFileHasMoreClasses()) {
+ original.Next();
+ heap.push(std::move(original));
}
}
@@ -297,7 +327,8 @@ static void IterateOverPathClassLoader(
static bool GetDexFilesFromClassLoader(
ScopedObjectAccessAlreadyRunnable& soa,
mirror::ClassLoader* class_loader,
- std::priority_queue<DexFileAndClassPair>* queue) REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::vector<const DexFile*>* dex_files)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
// The boot class loader. We don't load any of these files, as we know we compiled against
// them correctly.
@@ -312,7 +343,7 @@ static bool GetDexFilesFromClassLoader(
return false;
}
- bool recursive_result = GetDexFilesFromClassLoader(soa, class_loader->GetParent(), queue);
+ bool recursive_result = GetDexFilesFromClassLoader(soa, class_loader->GetParent(), dex_files);
if (!recursive_result) {
// Something wrong up the chain.
return false;
@@ -322,7 +353,7 @@ static bool GetDexFilesFromClassLoader(
auto GetDexFilesFn = [&] (const DexFile* cp_dex_file)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (cp_dex_file->NumClassDefs() > 0) {
- queue->emplace(cp_dex_file, 0U, true);
+ dex_files->push_back(cp_dex_file);
}
return true; // Continue looking.
};
@@ -341,7 +372,8 @@ static bool GetDexFilesFromClassLoader(
static void GetDexFilesFromDexElementsArray(
ScopedObjectAccessAlreadyRunnable& soa,
Handle<mirror::ObjectArray<mirror::Object>> dex_elements,
- std::priority_queue<DexFileAndClassPair>* queue) REQUIRES_SHARED(Locks::mutator_lock_) {
+ std::vector<const DexFile*>* dex_files)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (dex_elements == nullptr) {
// Nothing to do.
return;
@@ -360,7 +392,7 @@ static void GetDexFilesFromDexElementsArray(
auto GetDexFilesFn = [&] (const DexFile* cp_dex_file)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (cp_dex_file != nullptr && cp_dex_file->NumClassDefs() > 0) {
- queue->emplace(cp_dex_file, 0U, true);
+ dex_files->push_back(cp_dex_file);
}
return true; // Continue looking.
};
@@ -389,43 +421,95 @@ static void GetDexFilesFromDexElementsArray(
}
static bool AreSharedLibrariesOk(const std::string& shared_libraries,
- std::priority_queue<DexFileAndClassPair>& queue) {
+ std::vector<const DexFile*>& dex_files) {
+ // If no shared libraries, we expect no dex files.
if (shared_libraries.empty()) {
- if (queue.empty()) {
- // No shared libraries or oat files, as expected.
- return true;
+ return dex_files.empty();
+ }
+ // If we find the special shared library, skip the shared libraries check.
+ if (shared_libraries.compare(OatFile::kSpecialSharedLibrary) == 0) {
+ return true;
+ }
+ // Shared libraries is a series of dex file paths and their checksums, each separated by '*'.
+ std::vector<std::string> shared_libraries_split;
+ Split(shared_libraries, '*', &shared_libraries_split);
+
+ // Sanity check size of dex files and split shared libraries. Should be 2x as many entries in
+ // the split shared libraries since it contains pairs of filename/checksum.
+ if (dex_files.size() * 2 != shared_libraries_split.size()) {
+ return false;
+ }
+
+ for (size_t i = 0; i < dex_files.size(); ++i) {
+ if (dex_files[i]->GetLocation() != shared_libraries_split[i * 2]) {
+ return false;
}
- } else {
- if (shared_libraries.compare(OatFile::kSpecialSharedLibrary) == 0) {
- // If we find the special shared library, skip the shared libraries check.
- return true;
+ char* end;
+ size_t shared_lib_checksum = strtoul(shared_libraries_split[i * 2 + 1].c_str(), &end, 10);
+ uint32_t dex_checksum = dex_files[i]->GetLocationChecksum();
+ if (*end != '\0' || dex_checksum != shared_lib_checksum) {
+ return false;
}
- // Shared libraries is a series of dex file paths and their checksums, each separated by '*'.
- std::vector<std::string> shared_libraries_split;
- Split(shared_libraries, '*', &shared_libraries_split);
-
- size_t index = 0;
- std::priority_queue<DexFileAndClassPair> temp = queue;
- while (!temp.empty() && index < shared_libraries_split.size() - 1) {
- DexFileAndClassPair pair(temp.top());
- const DexFile* dex_file = pair.GetDexFile();
- const std::string& dex_filename = dex_file->GetLocation();
- if (dex_filename != shared_libraries_split[index]) {
- break;
- }
- char* end;
- size_t shared_lib_checksum = strtoul(shared_libraries_split[index + 1].c_str(), &end, 10);
- uint32_t dex_checksum = dex_file->GetLocationChecksum();
- if (*end != '\0' || dex_checksum != shared_lib_checksum) {
+ }
+
+ return true;
+}
+
+static bool CollisionCheck(std::vector<const DexFile*>& dex_files_loaded,
+ std::vector<const DexFile*>& dex_files_unloaded,
+ std::string* error_msg /*out*/) {
+ // Generate type index information for each dex file.
+ std::vector<TypeIndexInfo> loaded_types;
+ for (const DexFile* dex_file : dex_files_loaded) {
+ loaded_types.push_back(TypeIndexInfo(dex_file));
+ }
+ std::vector<TypeIndexInfo> unloaded_types;
+ for (const DexFile* dex_file : dex_files_unloaded) {
+ unloaded_types.push_back(TypeIndexInfo(dex_file));
+ }
+
+ // Populate the queue of dex file and class pairs with the loaded and unloaded dex files.
+ std::priority_queue<DexFileAndClassPair> queue;
+ for (size_t i = 0; i < dex_files_loaded.size(); ++i) {
+ if (loaded_types[i].GetIterator() != loaded_types[i].GetIteratorEnd()) {
+ queue.emplace(dex_files_loaded[i], &loaded_types[i], /*from_loaded_oat*/true);
+ }
+ }
+ for (size_t i = 0; i < dex_files_unloaded.size(); ++i) {
+ if (unloaded_types[i].GetIterator() != unloaded_types[i].GetIteratorEnd()) {
+ queue.emplace(dex_files_unloaded[i], &unloaded_types[i], /*from_loaded_oat*/false);
+ }
+ }
+
+ // Now drain the queue.
+ while (!queue.empty()) {
+ // Modifying the top element is only safe if we pop right after.
+ DexFileAndClassPair compare_pop(queue.top());
+ queue.pop();
+
+ // Compare against the following elements.
+ while (!queue.empty()) {
+ DexFileAndClassPair top(queue.top());
+ if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
+ // Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
+ if (compare_pop.FromLoadedOat() != top.FromLoadedOat()) {
+ *error_msg =
+ StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s",
+ compare_pop.GetCachedDescriptor(),
+ compare_pop.GetDexFile()->GetLocation().c_str(),
+ top.GetDexFile()->GetLocation().c_str());
+ return true;
+ }
+ queue.pop();
+ AddNext(top, queue);
+ } else {
+ // Something else. Done here.
break;
}
- temp.pop();
- index += 2;
}
-
- // Check is successful if it made it through the queue and all the shared libraries.
- return temp.empty() && index == shared_libraries_split.size();
+ AddNext(compare_pop, queue);
}
+
return false;
}
@@ -450,7 +534,7 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
DCHECK(oat_file != nullptr);
DCHECK(error_msg != nullptr);
- std::priority_queue<DexFileAndClassPair> queue;
+ std::vector<const DexFile*> dex_files_loaded;
// Try to get dex files from the given class loader. If the class loader is null, or we do
// not support one of the class loaders in the chain, conservatively compare against all
@@ -464,12 +548,12 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
Handle<mirror::ObjectArray<mirror::Object>> h_dex_elements =
hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements));
if (h_class_loader != nullptr &&
- GetDexFilesFromClassLoader(soa, h_class_loader.Get(), &queue)) {
+ GetDexFilesFromClassLoader(soa, h_class_loader.Get(), &dex_files_loaded)) {
class_loader_ok = true;
// In this case, also take into account the dex_elements array, if given. We don't need to
// read it otherwise, as we'll compare against all open oat files anyways.
- GetDexFilesFromDexElementsArray(soa, h_dex_elements, &queue);
+ GetDexFilesFromDexElementsArray(soa, h_dex_elements, &dex_files_loaded);
} else if (h_class_loader != nullptr) {
VLOG(class_linker) << "Something unsupported with "
<< mirror::Class::PrettyClass(h_class_loader->GetClass());
@@ -486,10 +570,8 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
if (!class_loader_ok) {
// Add dex files from already loaded oat files, but skip boot.
- // Clean up the queue.
- while (!queue.empty()) {
- queue.pop();
- }
+ // Clean up the dex files.
+ dex_files_loaded.clear();
std::vector<const OatFile*> boot_oat_files = GetBootOatFiles();
// The same OatFile can be loaded multiple times at different addresses. In this case, we don't
@@ -503,10 +585,7 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
boot_oat_files.end() && location != oat_file->GetLocation() &&
unique_locations.find(location) == unique_locations.end()) {
unique_locations.insert(location);
- AddDexFilesFromOat(loaded_oat_file.get(),
- /*already_loaded*/true,
- &queue,
- /*out*/&opened_dex_files);
+ AddDexFilesFromOat(loaded_oat_file.get(), &dex_files_loaded, &opened_dex_files);
}
}
}
@@ -514,46 +593,15 @@ bool OatFileManager::HasCollisions(const OatFile* oat_file,
// Exit if shared libraries are ok. Do a full duplicate classes check otherwise.
const std::string
shared_libraries(oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
- if (AreSharedLibrariesOk(shared_libraries, queue)) {
+ if (AreSharedLibrariesOk(shared_libraries, dex_files_loaded)) {
return false;
}
ScopedTrace st("Collision check");
-
// Add dex files from the oat file to check.
- AddDexFilesFromOat(oat_file, /*already_loaded*/false, &queue, &opened_dex_files);
-
- // Now drain the queue.
- while (!queue.empty()) {
- // Modifying the top element is only safe if we pop right after.
- DexFileAndClassPair compare_pop(queue.top());
- queue.pop();
-
- // Compare against the following elements.
- while (!queue.empty()) {
- DexFileAndClassPair top(queue.top());
-
- if (strcmp(compare_pop.GetCachedDescriptor(), top.GetCachedDescriptor()) == 0) {
- // Same descriptor. Check whether it's crossing old-oat-files to new-oat-files.
- if (compare_pop.FromLoadedOat() != top.FromLoadedOat()) {
- *error_msg =
- StringPrintf("Found duplicated class when checking oat files: '%s' in %s and %s",
- compare_pop.GetCachedDescriptor(),
- compare_pop.GetDexFile()->GetLocation().c_str(),
- top.GetDexFile()->GetLocation().c_str());
- return true;
- }
- queue.pop();
- AddNext(&top, &queue);
- } else {
- // Something else. Done here.
- break;
- }
- }
- AddNext(&compare_pop, &queue);
- }
-
- return false;
+ std::vector<const DexFile*> dex_files_unloaded;
+ AddDexFilesFromOat(oat_file, &dex_files_unloaded, &opened_dex_files);
+ return CollisionCheck(dex_files_loaded, dex_files_unloaded, error_msg);
}
std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
@@ -729,9 +777,6 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
}
}
- // TODO(calin): Consider optimizing this knowing that is useless to record the
- // use of fully compiled apks.
- Runtime::Current()->NotifyDexLoaded(dex_location);
return dex_files;
}
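
The rewritten HasCollisions path first gathers plain DexFile pointers, checks them against the oat file's shared-library class path, and only then runs CollisionCheck: per-file TypeIndexInfo bit vectors yield each file's class descriptors in sorted order, all streams are merged through one priority queue, and a duplicate descriptor only counts when it pairs an already-loaded dex file with one from the oat file under test. The sketch below reproduces that merge with ordinary strings and a from_loaded flag; Source and Entry are invented for the example.

    #include <iostream>
    #include <queue>
    #include <string>
    #include <vector>

    struct Source {
      std::vector<std::string> descriptors;  // sorted, like iterating a type-index BitVector
      bool from_loaded;
      size_t next = 0;
    };

    struct Entry {
      std::string descriptor;
      Source* source;
      bool operator>(const Entry& other) const { return descriptor > other.descriptor; }
    };

    bool HasCollision(std::vector<Source>& sources, std::string* error_msg) {
      std::priority_queue<Entry, std::vector<Entry>, std::greater<Entry>> queue;
      for (Source& s : sources) {
        if (!s.descriptors.empty()) queue.push({s.descriptors[s.next++], &s});
      }
      while (!queue.empty()) {
        Entry current = queue.top();
        queue.pop();
        // Compare against following entries with the same descriptor.
        while (!queue.empty() && queue.top().descriptor == current.descriptor) {
          Entry dup = queue.top();
          if (dup.source->from_loaded != current.source->from_loaded) {
            *error_msg = "duplicate class " + current.descriptor;
            return true;                      // crosses the loaded/unloaded boundary
          }
          queue.pop();
          if (dup.source->next < dup.source->descriptors.size()) {
            queue.push({dup.source->descriptors[dup.source->next++], dup.source});
          }
        }
        if (current.source->next < current.source->descriptors.size()) {
          queue.push({current.source->descriptors[current.source->next++], current.source});
        }
      }
      return false;
    }

    int main() {
      std::vector<Source> sources = {
          {{"LFoo;", "LShared;"}, /*from_loaded=*/true},
          {{"LBar;", "LShared;"}, /*from_loaded=*/false},
      };
      std::string error_msg;
      std::cout << HasCollision(sources, &error_msg) << " " << error_msg << "\n";
      return 0;
    }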
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index b4e4285dc7..8eef5867e2 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -22,13 +22,14 @@
namespace art {
-OatQuickMethodHeader::OatQuickMethodHeader(
- uint32_t vmap_table_offset,
- uint32_t frame_size_in_bytes,
- uint32_t core_spill_mask,
- uint32_t fp_spill_mask,
- uint32_t code_size)
+OatQuickMethodHeader::OatQuickMethodHeader(uint32_t vmap_table_offset,
+ uint32_t method_info_offset,
+ uint32_t frame_size_in_bytes,
+ uint32_t core_spill_mask,
+ uint32_t fp_spill_mask,
+ uint32_t code_size)
: vmap_table_offset_(vmap_table_offset),
+ method_info_offset_(method_info_offset),
frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask),
code_size_(code_size) {}
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 3cdde5a065..f2a2af2a5f 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -20,6 +20,7 @@
#include "arch/instruction_set.h"
#include "base/macros.h"
#include "quick/quick_method_frame_info.h"
+#include "method_info.h"
#include "stack_map.h"
#include "utils.h"
@@ -30,11 +31,13 @@ class ArtMethod;
// OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
class PACKED(4) OatQuickMethodHeader {
public:
- explicit OatQuickMethodHeader(uint32_t vmap_table_offset = 0U,
- uint32_t frame_size_in_bytes = 0U,
- uint32_t core_spill_mask = 0U,
- uint32_t fp_spill_mask = 0U,
- uint32_t code_size = 0U);
+ OatQuickMethodHeader() = default;
+ explicit OatQuickMethodHeader(uint32_t vmap_table_offset,
+ uint32_t method_info_offset,
+ uint32_t frame_size_in_bytes,
+ uint32_t core_spill_mask,
+ uint32_t fp_spill_mask,
+ uint32_t code_size);
~OatQuickMethodHeader();
@@ -63,8 +66,7 @@ class PACKED(4) OatQuickMethodHeader {
const void* GetOptimizedCodeInfoPtr() const {
DCHECK(IsOptimized());
- const void* data = reinterpret_cast<const void*>(code_ - vmap_table_offset_);
- return data;
+ return reinterpret_cast<const void*>(code_ - vmap_table_offset_);
}
uint8_t* GetOptimizedCodeInfoPtr() {
@@ -76,6 +78,20 @@ class PACKED(4) OatQuickMethodHeader {
return CodeInfo(GetOptimizedCodeInfoPtr());
}
+ const void* GetOptimizedMethodInfoPtr() const {
+ DCHECK(IsOptimized());
+ return reinterpret_cast<const void*>(code_ - method_info_offset_);
+ }
+
+ uint8_t* GetOptimizedMethodInfoPtr() {
+ DCHECK(IsOptimized());
+ return code_ - method_info_offset_;
+ }
+
+ MethodInfo GetOptimizedMethodInfo() const {
+ return MethodInfo(reinterpret_cast<const uint8_t*>(GetOptimizedMethodInfoPtr()));
+ }
+
const uint8_t* GetCode() const {
return code_;
}
@@ -100,6 +116,18 @@ class PACKED(4) OatQuickMethodHeader {
return &vmap_table_offset_;
}
+ uint32_t GetMethodInfoOffset() const {
+ return method_info_offset_;
+ }
+
+ void SetMethodInfoOffset(uint32_t offset) {
+ method_info_offset_ = offset;
+ }
+
+ const uint32_t* GetMethodInfoOffsetAddr() const {
+ return &method_info_offset_;
+ }
+
const uint8_t* GetVmapTable() const {
CHECK(!IsOptimized()) << "Unimplemented vmap table for optimizing compiler";
return (vmap_table_offset_ == 0) ? nullptr : code_ - vmap_table_offset_;
@@ -160,12 +188,17 @@ class PACKED(4) OatQuickMethodHeader {
static constexpr uint32_t kCodeSizeMask = ~kShouldDeoptimizeMask;
// The offset in bytes from the start of the vmap table to the end of the header.
- uint32_t vmap_table_offset_;
+ uint32_t vmap_table_offset_ = 0u;
+ // The offset in bytes from the start of the method info to the end of the header.
+ // The method info offset is not in the CodeInfo since CodeInfo has good dedupe properties that
+ // would be lost from doing so. The method info memory region contains method indices since they
+ // are hard to dedupe.
+ uint32_t method_info_offset_ = 0u;
// The stack frame information.
QuickMethodFrameInfo frame_info_;
// The code size in bytes. The highest bit is used to signify if the compiled
// code with the method header has should_deoptimize flag.
- uint32_t code_size_;
+ uint32_t code_size_ = 0u;
// The actual code.
uint8_t code_[0];
};
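
The new method_info_offset_ follows the same addressing scheme as vmap_table_offset_: the metadata is emitted in front of the method header, and the header locates it by subtracting the stored offset from code_, the first byte of the compiled code. A small standalone illustration of that arithmetic (the block sizes are arbitrary; only the subtraction mirrors the header):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      // Layout: [ method info ][ vmap table ][ header fields ][ code ... ]
      //                                                       ^ code points here
      const uint8_t method_info[4] = {1, 2, 3, 4};
      const uint8_t vmap_table[8]  = {9, 9, 9, 9, 9, 9, 9, 9};
      const uint8_t header[12]     = {};            // stand-in for the header fields
      std::vector<uint8_t> image;
      image.insert(image.end(), method_info, method_info + sizeof(method_info));
      image.insert(image.end(), vmap_table, vmap_table + sizeof(vmap_table));
      image.insert(image.end(), header, header + sizeof(header));
      const uint8_t* code = image.data() + image.size();   // start of the (empty) code

      // Offsets are measured back from the start of the code.
      uint32_t vmap_table_offset  = sizeof(vmap_table) + sizeof(header);
      uint32_t method_info_offset = sizeof(method_info) + vmap_table_offset;

      const uint8_t* vmap  = code - vmap_table_offset;
      const uint8_t* minfo = code - method_info_offset;
      std::printf("vmap[0]=%u method_info[0]=%u\n", vmap[0], minfo[0]);  // 9 and 1
      return 0;
    }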
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index c01e3f4152..e38f265c5a 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -13,11 +13,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+cc_library_headers {
+ name: "libopenjdkjvmti_headers",
+ host_supported: true,
+ export_include_dirs: ["include"],
+}
+
cc_defaults {
name: "libopenjdkjvmti_defaults",
defaults: ["art_defaults"],
host_supported: true,
srcs: ["events.cc",
+ "fixed_up_dex_file.cc",
"object_tagging.cc",
"OpenjdkJvmTi.cc",
"ti_class.cc",
@@ -40,6 +47,7 @@ cc_defaults {
"ti_timers.cc",
"transform.cc"],
include_dirs: ["art/runtime"],
+ header_libs: ["libopenjdkjvmti_headers"],
shared_libs: [
"libbase",
"libnativehelper",
@@ -49,7 +57,10 @@ cc_defaults {
art_cc_library {
name: "libopenjdkjvmti",
defaults: ["libopenjdkjvmti_defaults"],
- shared_libs: ["libart"],
+ shared_libs: [
+ "libart",
+ "libart-compiler",
+ ],
}
art_cc_library {
@@ -58,5 +69,8 @@ art_cc_library {
"art_debug_defaults",
"libopenjdkjvmti_defaults",
],
- shared_libs: ["libartd"],
+ shared_libs: [
+ "libartd",
+ "libartd-compiler",
+ ],
}
diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
index c016728fa5..39e603e1e7 100644
--- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc
@@ -35,7 +35,7 @@
#include <jni.h>
-#include "openjdkjvmti/jvmti.h"
+#include "jvmti.h"
#include "art_jvmti.h"
#include "base/logging.h"
@@ -79,20 +79,26 @@ EventHandler gEventHandler;
class JvmtiFunctions {
private:
- static bool IsValidEnv(jvmtiEnv* env) {
- return env != nullptr;
+ static jvmtiError getEnvironmentError(jvmtiEnv* env) {
+ if (env == nullptr) {
+ return ERR(INVALID_ENVIRONMENT);
+ } else if (art::Thread::Current() == nullptr) {
+ return ERR(UNATTACHED_THREAD);
+ } else {
+ return OK;
+ }
}
-#define ENSURE_VALID_ENV(env) \
- do { \
- if (!IsValidEnv(env)) { \
- return ERR(INVALID_ENVIRONMENT); \
- } \
+#define ENSURE_VALID_ENV(env) \
+ do { \
+ jvmtiError ensure_valid_env_ ## __LINE__ = getEnvironmentError(env); \
+ if (ensure_valid_env_ ## __LINE__ != OK) { \
+ return ensure_valid_env_ ## __LINE__ ; \
+ } \
} while (false)
#define ENSURE_HAS_CAP(env, cap) \
do { \
- ENSURE_VALID_ENV(env); \
if (ArtJvmTiEnv::AsArtJvmTiEnv(env)->capabilities.cap != 1) { \
return ERR(MUST_POSSESS_CAPABILITY); \
} \
@@ -121,18 +127,22 @@ class JvmtiFunctions {
}
static jvmtiError GetThreadState(jvmtiEnv* env, jthread thread, jint* thread_state_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetThreadState(env, thread, thread_state_ptr);
}
static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetCurrentThread(env, thread_ptr);
}
static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetAllThreads(env, threads_count_ptr, threads_ptr);
}
static jvmtiError SuspendThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -141,11 +151,13 @@ class JvmtiFunctions {
jint request_count ATTRIBUTE_UNUSED,
const jthread* request_list ATTRIBUTE_UNUSED,
jvmtiError* results ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ResumeThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -154,6 +166,7 @@ class JvmtiFunctions {
jint request_count ATTRIBUTE_UNUSED,
const jthread* request_list ATTRIBUTE_UNUSED,
jvmtiError* results ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_suspend);
return ERR(NOT_IMPLEMENTED);
}
@@ -161,16 +174,19 @@ class JvmtiFunctions {
static jvmtiError StopThread(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jobject exception ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError InterruptThread(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_signal_thread);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadInfo* info_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetThreadInfo(env, thread, info_ptr);
}
@@ -178,6 +194,7 @@ class JvmtiFunctions {
jthread thread ATTRIBUTE_UNUSED,
jint* owned_monitor_count_ptr ATTRIBUTE_UNUSED,
jobject** owned_monitors_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_owned_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -187,6 +204,7 @@ class JvmtiFunctions {
jthread thread ATTRIBUTE_UNUSED,
jint* monitor_info_count_ptr ATTRIBUTE_UNUSED,
jvmtiMonitorStackDepthInfo** monitor_info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_owned_monitor_stack_depth_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -194,6 +212,7 @@ class JvmtiFunctions {
static jvmtiError GetCurrentContendedMonitor(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jobject* monitor_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_current_contended_monitor);
return ERR(NOT_IMPLEMENTED);
}
@@ -203,26 +222,31 @@ class JvmtiFunctions {
jvmtiStartFunction proc,
const void* arg,
jint priority) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::RunAgentThread(env, thread, proc, arg, priority);
}
static jvmtiError SetThreadLocalStorage(jvmtiEnv* env, jthread thread, const void* data) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::SetThreadLocalStorage(env, thread, data);
}
static jvmtiError GetThreadLocalStorage(jvmtiEnv* env, jthread thread, void** data_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadUtil::GetThreadLocalStorage(env, thread, data_ptr);
}
static jvmtiError GetTopThreadGroups(jvmtiEnv* env,
jint* group_count_ptr,
jthreadGroup** groups_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadGroupUtil::GetTopThreadGroups(env, group_count_ptr, groups_ptr);
}
static jvmtiError GetThreadGroupInfo(jvmtiEnv* env,
jthreadGroup group,
jvmtiThreadGroupInfo* info_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadGroupUtil::GetThreadGroupInfo(env, group, info_ptr);
}
@@ -232,6 +256,7 @@ class JvmtiFunctions {
jthread** threads_ptr,
jint* group_count_ptr,
jthreadGroup** groups_ptr) {
+ ENSURE_VALID_ENV(env);
return ThreadGroupUtil::GetThreadGroupChildren(env,
group,
thread_count_ptr,
@@ -246,6 +271,7 @@ class JvmtiFunctions {
jint max_frame_count,
jvmtiFrameInfo* frame_buffer,
jint* count_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetStackTrace(env,
thread,
start_depth,
@@ -258,6 +284,7 @@ class JvmtiFunctions {
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr,
jint* thread_count_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetAllStackTraces(env, max_frame_count, stack_info_ptr, thread_count_ptr);
}
@@ -266,6 +293,7 @@ class JvmtiFunctions {
const jthread* thread_list,
jint max_frame_count,
jvmtiStackInfo** stack_info_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetThreadListStackTraces(env,
thread_count,
thread_list,
@@ -274,10 +302,12 @@ class JvmtiFunctions {
}
static jvmtiError GetFrameCount(jvmtiEnv* env, jthread thread, jint* count_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetFrameCount(env, thread, count_ptr);
}
static jvmtiError PopFrame(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_pop_frame);
return ERR(NOT_IMPLEMENTED);
}
@@ -287,12 +317,14 @@ class JvmtiFunctions {
jint depth,
jmethodID* method_ptr,
jlocation* location_ptr) {
+ ENSURE_VALID_ENV(env);
return StackUtil::GetFrameLocation(env, thread, depth, method_ptr, location_ptr);
}
static jvmtiError NotifyFramePop(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jint depth ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_frame_pop_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -300,6 +332,7 @@ class JvmtiFunctions {
static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jobject value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -307,6 +340,7 @@ class JvmtiFunctions {
static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jint value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -314,6 +348,7 @@ class JvmtiFunctions {
static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jlong value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -321,6 +356,7 @@ class JvmtiFunctions {
static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jfloat value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -328,11 +364,13 @@ class JvmtiFunctions {
static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jdouble value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_force_early_return);
return ERR(NOT_IMPLEMENTED);
}
@@ -343,6 +381,7 @@ class JvmtiFunctions {
jobject initial_object,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.FollowReferences(env,
@@ -358,12 +397,14 @@ class JvmtiFunctions {
jclass klass,
const jvmtiHeapCallbacks* callbacks,
const void* user_data) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.IterateThroughHeap(env, heap_filter, klass, callbacks, user_data);
}
static jvmtiError GetTag(jvmtiEnv* env, jobject object, jlong* tag_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
JNIEnv* jni_env = GetJniEnv(env);
@@ -381,6 +422,7 @@ class JvmtiFunctions {
}
static jvmtiError SetTag(jvmtiEnv* env, jobject object, jlong tag) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
if (object == nullptr) {
@@ -405,6 +447,7 @@ class JvmtiFunctions {
jint* count_ptr,
jobject** object_result_ptr,
jlong** tag_result_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
JNIEnv* jni_env = GetJniEnv(env);
@@ -422,6 +465,7 @@ class JvmtiFunctions {
}
static jvmtiError ForceGarbageCollection(jvmtiEnv* env) {
+ ENSURE_VALID_ENV(env);
return HeapUtil::ForceGarbageCollection(env);
}
@@ -430,6 +474,7 @@ class JvmtiFunctions {
jobject object ATTRIBUTE_UNUSED,
jvmtiObjectReferenceCallback object_reference_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -440,6 +485,7 @@ class JvmtiFunctions {
jvmtiStackReferenceCallback stack_ref_callback ATTRIBUTE_UNUSED,
jvmtiObjectReferenceCallback object_ref_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -448,6 +494,7 @@ class JvmtiFunctions {
jvmtiHeapObjectFilter object_filter ATTRIBUTE_UNUSED,
jvmtiHeapObjectCallback heap_object_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -458,6 +505,7 @@ class JvmtiFunctions {
jvmtiHeapObjectFilter object_filter ATTRIBUTE_UNUSED,
jvmtiHeapObjectCallback heap_object_callback ATTRIBUTE_UNUSED,
const void* user_data ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_tag_objects);
return ERR(NOT_IMPLEMENTED);
}
@@ -467,6 +515,7 @@ class JvmtiFunctions {
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jobject* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -475,6 +524,7 @@ class JvmtiFunctions {
jthread thread ATTRIBUTE_UNUSED,
jint depth ATTRIBUTE_UNUSED,
jobject* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -484,6 +534,7 @@ class JvmtiFunctions {
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jint* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -493,6 +544,7 @@ class JvmtiFunctions {
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jlong* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -502,6 +554,7 @@ class JvmtiFunctions {
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jfloat* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -511,6 +564,7 @@ class JvmtiFunctions {
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jdouble* value_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -520,6 +574,7 @@ class JvmtiFunctions {
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jobject value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -529,6 +584,7 @@ class JvmtiFunctions {
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jint value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -538,6 +594,7 @@ class JvmtiFunctions {
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jlong value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -547,6 +604,7 @@ class JvmtiFunctions {
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jfloat value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -556,6 +614,7 @@ class JvmtiFunctions {
jint depth ATTRIBUTE_UNUSED,
jint slot ATTRIBUTE_UNUSED,
jdouble value ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -563,6 +622,7 @@ class JvmtiFunctions {
static jvmtiError SetBreakpoint(jvmtiEnv* env,
jmethodID method ATTRIBUTE_UNUSED,
jlocation location ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -570,6 +630,7 @@ class JvmtiFunctions {
static jvmtiError ClearBreakpoint(jvmtiEnv* env,
jmethodID method ATTRIBUTE_UNUSED,
jlocation location ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_breakpoint_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -577,6 +638,7 @@ class JvmtiFunctions {
static jvmtiError SetFieldAccessWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -584,6 +646,7 @@ class JvmtiFunctions {
static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_access_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -591,6 +654,7 @@ class JvmtiFunctions {
static jvmtiError SetFieldModificationWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
@@ -598,11 +662,13 @@ class JvmtiFunctions {
static jvmtiError ClearFieldModificationWatch(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
jfieldID field ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_generate_field_modification_events);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetLoadedClasses(jvmtiEnv* env, jint* class_count_ptr, jclass** classes_ptr) {
+ ENSURE_VALID_ENV(env);
HeapUtil heap_util(ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get());
return heap_util.GetLoadedClasses(env, class_count_ptr, classes_ptr);
}
@@ -611,6 +677,7 @@ class JvmtiFunctions {
jobject initiating_loader,
jint* class_count_ptr,
jclass** classes_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassLoaderClasses(env, initiating_loader, class_count_ptr, classes_ptr);
}
@@ -618,21 +685,25 @@ class JvmtiFunctions {
jclass klass,
char** signature_ptr,
char** generic_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassSignature(env, klass, signature_ptr, generic_ptr);
}
static jvmtiError GetClassStatus(jvmtiEnv* env, jclass klass, jint* status_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassStatus(env, klass, status_ptr);
}
static jvmtiError GetSourceFileName(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
char** source_name_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_source_file_name);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetClassModifiers(jvmtiEnv* env, jclass klass, jint* modifiers_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassModifiers(env, klass, modifiers_ptr);
}
@@ -640,6 +711,7 @@ class JvmtiFunctions {
jclass klass,
jint* method_count_ptr,
jmethodID** methods_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassMethods(env, klass, method_count_ptr, methods_ptr);
}
@@ -647,6 +719,7 @@ class JvmtiFunctions {
jclass klass,
jint* field_count_ptr,
jfieldID** fields_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassFields(env, klass, field_count_ptr, fields_ptr);
}
@@ -654,6 +727,7 @@ class JvmtiFunctions {
jclass klass,
jint* interface_count_ptr,
jclass** interfaces_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetImplementedInterfaces(env, klass, interface_count_ptr, interfaces_ptr);
}
@@ -661,6 +735,7 @@ class JvmtiFunctions {
jclass klass,
jint* minor_version_ptr,
jint* major_version_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassVersionNumbers(env, klass, minor_version_ptr, major_version_ptr);
}
@@ -669,38 +744,45 @@ class JvmtiFunctions {
jint* constant_pool_count_ptr ATTRIBUTE_UNUSED,
jint* constant_pool_byte_count_ptr ATTRIBUTE_UNUSED,
unsigned char** constant_pool_bytes_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_constant_pool);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError IsInterface(jvmtiEnv* env, jclass klass, jboolean* is_interface_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::IsInterface(env, klass, is_interface_ptr);
}
static jvmtiError IsArrayClass(jvmtiEnv* env,
jclass klass,
jboolean* is_array_class_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::IsArrayClass(env, klass, is_array_class_ptr);
}
static jvmtiError IsModifiableClass(jvmtiEnv* env,
jclass klass,
jboolean* is_modifiable_class_ptr) {
+ ENSURE_VALID_ENV(env);
return Redefiner::IsModifiableClass(env, klass, is_modifiable_class_ptr);
}
static jvmtiError GetClassLoader(jvmtiEnv* env, jclass klass, jobject* classloader_ptr) {
+ ENSURE_VALID_ENV(env);
return ClassUtil::GetClassLoader(env, klass, classloader_ptr);
}
static jvmtiError GetSourceDebugExtension(jvmtiEnv* env,
jclass klass ATTRIBUTE_UNUSED,
char** source_debug_extension_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_source_debug_extension);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError RetransformClasses(jvmtiEnv* env, jint class_count, const jclass* classes) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_retransform_classes);
std::string error_msg;
jvmtiError res = Transformer::RetransformClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
@@ -719,6 +801,7 @@ class JvmtiFunctions {
static jvmtiError RedefineClasses(jvmtiEnv* env,
jint class_count,
const jvmtiClassDefinition* class_definitions) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_redefine_classes);
std::string error_msg;
jvmtiError res = Redefiner::RedefineClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
@@ -735,16 +818,19 @@ class JvmtiFunctions {
}
static jvmtiError GetObjectSize(jvmtiEnv* env, jobject object, jlong* size_ptr) {
+ ENSURE_VALID_ENV(env);
return ObjectUtil::GetObjectSize(env, object, size_ptr);
}
static jvmtiError GetObjectHashCode(jvmtiEnv* env, jobject object, jint* hash_code_ptr) {
+ ENSURE_VALID_ENV(env);
return ObjectUtil::GetObjectHashCode(env, object, hash_code_ptr);
}
static jvmtiError GetObjectMonitorUsage(jvmtiEnv* env,
jobject object ATTRIBUTE_UNUSED,
jvmtiMonitorUsage* info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_monitor_info);
return ERR(NOT_IMPLEMENTED);
}
@@ -755,6 +841,7 @@ class JvmtiFunctions {
char** name_ptr,
char** signature_ptr,
char** generic_ptr) {
+ ENSURE_VALID_ENV(env);
return FieldUtil::GetFieldName(env, klass, field, name_ptr, signature_ptr, generic_ptr);
}
@@ -762,6 +849,7 @@ class JvmtiFunctions {
jclass klass,
jfieldID field,
jclass* declaring_class_ptr) {
+ ENSURE_VALID_ENV(env);
return FieldUtil::GetFieldDeclaringClass(env, klass, field, declaring_class_ptr);
}
@@ -769,6 +857,7 @@ class JvmtiFunctions {
jclass klass,
jfieldID field,
jint* modifiers_ptr) {
+ ENSURE_VALID_ENV(env);
return FieldUtil::GetFieldModifiers(env, klass, field, modifiers_ptr);
}
@@ -776,6 +865,7 @@ class JvmtiFunctions {
jclass klass,
jfieldID field,
jboolean* is_synthetic_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_synthetic_attribute);
return FieldUtil::IsFieldSynthetic(env, klass, field, is_synthetic_ptr);
}
@@ -785,30 +875,35 @@ class JvmtiFunctions {
char** name_ptr,
char** signature_ptr,
char** generic_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodName(env, method, name_ptr, signature_ptr, generic_ptr);
}
static jvmtiError GetMethodDeclaringClass(jvmtiEnv* env,
jmethodID method,
jclass* declaring_class_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodDeclaringClass(env, method, declaring_class_ptr);
}
static jvmtiError GetMethodModifiers(jvmtiEnv* env,
jmethodID method,
jint* modifiers_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodModifiers(env, method, modifiers_ptr);
}
static jvmtiError GetMaxLocals(jvmtiEnv* env,
jmethodID method,
jint* max_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMaxLocals(env, method, max_ptr);
}
static jvmtiError GetArgumentsSize(jvmtiEnv* env,
jmethodID method,
jint* size_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetArgumentsSize(env, method, size_ptr);
}
@@ -816,6 +911,7 @@ class JvmtiFunctions {
jmethodID method,
jint* entry_count_ptr,
jvmtiLineNumberEntry** table_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_line_numbers);
return MethodUtil::GetLineNumberTable(env, method, entry_count_ptr, table_ptr);
}
@@ -824,6 +920,7 @@ class JvmtiFunctions {
jmethodID method,
jlocation* start_location_ptr,
jlocation* end_location_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::GetMethodLocation(env, method, start_location_ptr, end_location_ptr);
}
@@ -831,6 +928,7 @@ class JvmtiFunctions {
jmethodID method ATTRIBUTE_UNUSED,
jint* entry_count_ptr ATTRIBUTE_UNUSED,
jvmtiLocalVariableEntry** table_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_access_local_variables);
return ERR(NOT_IMPLEMENTED);
}
@@ -839,24 +937,29 @@ class JvmtiFunctions {
jmethodID method ATTRIBUTE_UNUSED,
jint* bytecode_count_ptr ATTRIBUTE_UNUSED,
unsigned char** bytecodes_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_bytecodes);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError IsMethodNative(jvmtiEnv* env, jmethodID method, jboolean* is_native_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::IsMethodNative(env, method, is_native_ptr);
}
static jvmtiError IsMethodSynthetic(jvmtiEnv* env, jmethodID method, jboolean* is_synthetic_ptr) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_synthetic_attribute);
return MethodUtil::IsMethodSynthetic(env, method, is_synthetic_ptr);
}
static jvmtiError IsMethodObsolete(jvmtiEnv* env, jmethodID method, jboolean* is_obsolete_ptr) {
+ ENSURE_VALID_ENV(env);
return MethodUtil::IsMethodObsolete(env, method, is_obsolete_ptr);
}
static jvmtiError SetNativeMethodPrefix(jvmtiEnv* env, const char* prefix ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
@@ -864,43 +967,53 @@ class JvmtiFunctions {
static jvmtiError SetNativeMethodPrefixes(jvmtiEnv* env,
jint prefix_count ATTRIBUTE_UNUSED,
char** prefixes ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_set_native_method_prefix);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError CreateRawMonitor(jvmtiEnv* env, const char* name, jrawMonitorID* monitor_ptr) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::CreateRawMonitor(env, name, monitor_ptr);
}
static jvmtiError DestroyRawMonitor(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::DestroyRawMonitor(env, monitor);
}
static jvmtiError RawMonitorEnter(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorEnter(env, monitor);
}
static jvmtiError RawMonitorExit(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorExit(env, monitor);
}
static jvmtiError RawMonitorWait(jvmtiEnv* env, jrawMonitorID monitor, jlong millis) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorWait(env, monitor, millis);
}
static jvmtiError RawMonitorNotify(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorNotify(env, monitor);
}
static jvmtiError RawMonitorNotifyAll(jvmtiEnv* env, jrawMonitorID monitor) {
+ ENSURE_VALID_ENV(env);
return MonitorUtil::RawMonitorNotifyAll(env, monitor);
}
static jvmtiError SetJNIFunctionTable(jvmtiEnv* env, const jniNativeInterface* function_table) {
+ ENSURE_VALID_ENV(env);
return JNIUtil::SetJNIFunctionTable(env, function_table);
}
static jvmtiError GetJNIFunctionTable(jvmtiEnv* env, jniNativeInterface** function_table) {
+ ENSURE_VALID_ENV(env);
return JNIUtil::GetJNIFunctionTable(env, function_table);
}
@@ -955,14 +1068,16 @@ class JvmtiFunctions {
return gEventHandler.SetEvent(art_env, art_thread, GetArtJvmtiEvent(art_env, event_type), mode);
}
- static jvmtiError GenerateEvents(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError GenerateEvents(jvmtiEnv* env,
jvmtiEvent event_type ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
return OK;
}
- static jvmtiError GetExtensionFunctions(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError GetExtensionFunctions(jvmtiEnv* env,
jint* extension_count_ptr,
jvmtiExtensionFunctionInfo** extensions) {
+ ENSURE_VALID_ENV(env);
// We do not have any extension functions.
*extension_count_ptr = 0;
*extensions = nullptr;
@@ -970,9 +1085,10 @@ class JvmtiFunctions {
return ERR(NONE);
}
- static jvmtiError GetExtensionEvents(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError GetExtensionEvents(jvmtiEnv* env,
jint* extension_count_ptr,
jvmtiExtensionEventInfo** extensions) {
+ ENSURE_VALID_ENV(env);
// We do not have any extension events.
*extension_count_ptr = 0;
*extensions = nullptr;
@@ -980,9 +1096,10 @@ class JvmtiFunctions {
return ERR(NONE);
}
- static jvmtiError SetExtensionEventCallback(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError SetExtensionEventCallback(jvmtiEnv* env,
jint extension_event_index ATTRIBUTE_UNUSED,
jvmtiExtensionEvent callback ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
// We do not have any extension events, so any call is illegal.
return ERR(ILLEGAL_ARGUMENT);
}
@@ -999,8 +1116,8 @@ class JvmtiFunctions {
ENSURE_NON_NULL(capabilities_ptr);
ArtJvmTiEnv* art_env = static_cast<ArtJvmTiEnv*>(env);
jvmtiError ret = OK;
- jvmtiCapabilities changed;
- jvmtiCapabilities potential_capabilities;
+ jvmtiCapabilities changed = {};
+ jvmtiCapabilities potential_capabilities = {};
ret = env->GetPotentialCapabilities(&potential_capabilities);
if (ret != OK) {
return ret;
@@ -1072,7 +1189,7 @@ class JvmtiFunctions {
ENSURE_VALID_ENV(env);
ENSURE_NON_NULL(capabilities_ptr);
ArtJvmTiEnv* art_env = reinterpret_cast<ArtJvmTiEnv*>(env);
- jvmtiCapabilities changed;
+ jvmtiCapabilities changed = {};
#define DEL_CAPABILITY(e) \
do { \
if (capabilities_ptr->e == 1) { \
@@ -1141,17 +1258,20 @@ class JvmtiFunctions {
static jvmtiError GetCurrentThreadCpuTimerInfo(jvmtiEnv* env,
jvmtiTimerInfo* info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetCurrentThreadCpuTime(jvmtiEnv* env, jlong* nanos_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_current_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetThreadCpuTimerInfo(jvmtiEnv* env,
jvmtiTimerInfo* info_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
@@ -1159,43 +1279,53 @@ class JvmtiFunctions {
static jvmtiError GetThreadCpuTime(jvmtiEnv* env,
jthread thread ATTRIBUTE_UNUSED,
jlong* nanos_ptr ATTRIBUTE_UNUSED) {
+ ENSURE_VALID_ENV(env);
ENSURE_HAS_CAP(env, can_get_thread_cpu_time);
return ERR(NOT_IMPLEMENTED);
}
static jvmtiError GetTimerInfo(jvmtiEnv* env, jvmtiTimerInfo* info_ptr) {
+ ENSURE_VALID_ENV(env);
return TimerUtil::GetTimerInfo(env, info_ptr);
}
static jvmtiError GetTime(jvmtiEnv* env, jlong* nanos_ptr) {
+ ENSURE_VALID_ENV(env);
return TimerUtil::GetTime(env, nanos_ptr);
}
static jvmtiError GetAvailableProcessors(jvmtiEnv* env, jint* processor_count_ptr) {
+ ENSURE_VALID_ENV(env);
return TimerUtil::GetAvailableProcessors(env, processor_count_ptr);
}
static jvmtiError AddToBootstrapClassLoaderSearch(jvmtiEnv* env, const char* segment) {
+ ENSURE_VALID_ENV(env);
return SearchUtil::AddToBootstrapClassLoaderSearch(env, segment);
}
static jvmtiError AddToSystemClassLoaderSearch(jvmtiEnv* env, const char* segment) {
+ ENSURE_VALID_ENV(env);
return SearchUtil::AddToSystemClassLoaderSearch(env, segment);
}
static jvmtiError GetSystemProperties(jvmtiEnv* env, jint* count_ptr, char*** property_ptr) {
+ ENSURE_VALID_ENV(env);
return PropertiesUtil::GetSystemProperties(env, count_ptr, property_ptr);
}
static jvmtiError GetSystemProperty(jvmtiEnv* env, const char* property, char** value_ptr) {
+ ENSURE_VALID_ENV(env);
return PropertiesUtil::GetSystemProperty(env, property, value_ptr);
}
static jvmtiError SetSystemProperty(jvmtiEnv* env, const char* property, const char* value) {
+ ENSURE_VALID_ENV(env);
return PropertiesUtil::SetSystemProperty(env, property, value);
}
static jvmtiError GetPhase(jvmtiEnv* env, jvmtiPhase* phase_ptr) {
+ ENSURE_VALID_ENV(env);
return PhaseUtil::GetPhase(env, phase_ptr);
}
@@ -1303,9 +1433,10 @@ class JvmtiFunctions {
}
}
- static jvmtiError SetVerboseFlag(jvmtiEnv* env ATTRIBUTE_UNUSED,
+ static jvmtiError SetVerboseFlag(jvmtiEnv* env,
jvmtiVerboseFlag flag,
jboolean value) {
+ ENSURE_VALID_ENV(env);
if (flag == jvmtiVerboseFlag::JVMTI_VERBOSE_OTHER) {
// OTHER is special, as it's 0, so can't do a bit check.
bool val = (value == JNI_TRUE) ? true : false;
@@ -1359,8 +1490,8 @@ class JvmtiFunctions {
return ERR(NONE);
}
- static jvmtiError GetJLocationFormat(jvmtiEnv* env ATTRIBUTE_UNUSED,
- jvmtiJlocationFormat* format_ptr) {
+ static jvmtiError GetJLocationFormat(jvmtiEnv* env, jvmtiJlocationFormat* format_ptr) {
+ ENSURE_VALID_ENV(env);
// Report BCI as jlocation format. We report dex bytecode indices.
if (format_ptr == nullptr) {
return ERR(NULL_POINTER);
@@ -1382,8 +1513,8 @@ extern const jvmtiInterface_1 gJvmtiInterface;
ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler)
: art_vm(runtime),
local_data(nullptr),
- capabilities(),
- object_tag_table(new ObjectTagTable(event_handler)) {
+ capabilities() {
+ object_tag_table = std::unique_ptr<ObjectTagTable>(new ObjectTagTable(event_handler, this));
functions = &gJvmtiInterface;
}
@@ -1426,6 +1557,7 @@ extern "C" bool ArtPlugin_Initialize() {
ClassUtil::Register(&gEventHandler);
DumpUtil::Register(&gEventHandler);
SearchUtil::Register();
+ HeapUtil::Register();
runtime->GetJavaVM()->AddEnvironmentHook(GetEnvHandler);
@@ -1438,6 +1570,7 @@ extern "C" bool ArtPlugin_Deinitialize() {
ClassUtil::Unregister();
DumpUtil::Unregister();
SearchUtil::Unregister();
+ HeapUtil::Unregister();
return true;
}
diff --git a/runtime/openjdkjvmti/events-inl.h b/runtime/openjdkjvmti/events-inl.h
index 4f5eb0c33f..1ddbb869f9 100644
--- a/runtime/openjdkjvmti/events-inl.h
+++ b/runtime/openjdkjvmti/events-inl.h
@@ -126,6 +126,7 @@ inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread,
unsigned char** new_class_data) const {
static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable ||
kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable, "Unsupported event");
+ DCHECK(*new_class_data == nullptr);
jint current_len = class_data_len;
unsigned char* current_class_data = const_cast<unsigned char*>(class_data);
ArtJvmTiEnv* last_env = nullptr;
@@ -168,15 +169,19 @@ inline void EventHandler::DispatchClassFileLoadHookEvent(art::Thread* thread,
// exactly the argument types of the corresponding Jvmti kEvent function pointer.
template <ArtJvmtiEvent kEvent, typename ...Args>
-inline void EventHandler::DispatchEvent(art::Thread* thread,
- Args... args) const {
- using FnType = void(jvmtiEnv*, Args...);
+inline void EventHandler::DispatchEvent(art::Thread* thread, Args... args) const {
for (ArtJvmTiEnv* env : envs) {
- if (ShouldDispatch<kEvent>(env, thread)) {
- FnType* callback = impl::GetCallback<kEvent>(env);
- if (callback != nullptr) {
- (*callback)(env, args...);
- }
+ DispatchEvent<kEvent, Args...>(env, thread, args...);
+ }
+}
+
+template <ArtJvmtiEvent kEvent, typename ...Args>
+inline void EventHandler::DispatchEvent(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const {
+ using FnType = void(jvmtiEnv*, Args...);
+ if (ShouldDispatch<kEvent>(env, thread)) {
+ FnType* callback = impl::GetCallback<kEvent>(env);
+ if (callback != nullptr) {
+ (*callback)(env, args...);
}
}
}
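With the split above, code that already knows which environment should receive an event can skip the all-environments loop. A minimal sketch of a call through the new single-environment overload (the handler, env, and tag names are just placeholders; the same pattern appears in the object_tagging.cc change further down):

  // Dispatch kObjectFree only to the environment that owns this tag table.
  handler->DispatchEvent<ArtJvmtiEvent::kObjectFree>(env, /* thread */ nullptr, tag);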
diff --git a/runtime/openjdkjvmti/events.h b/runtime/openjdkjvmti/events.h
index 4e20d1776b..ae8bf0f803 100644
--- a/runtime/openjdkjvmti/events.h
+++ b/runtime/openjdkjvmti/events.h
@@ -156,9 +156,14 @@ class EventHandler {
ArtJvmtiEvent event,
jvmtiEventMode mode);
+ // Dispatch event to all registered environments.
template <ArtJvmtiEvent kEvent, typename ...Args>
ALWAYS_INLINE
inline void DispatchEvent(art::Thread* thread, Args... args) const;
+ // Dispatch the event only to the given environment.
+ template <ArtJvmtiEvent kEvent, typename ...Args>
+ ALWAYS_INLINE
+ inline void DispatchEvent(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const;
// Tell the event handler that capabilities were added or lost so it can adjust the sent events. If
// caps_added is true, then caps is all the newly set capabilities of the jvmtiEnv. If it is false
diff --git a/runtime/openjdkjvmti/fixed_up_dex_file.cc b/runtime/openjdkjvmti/fixed_up_dex_file.cc
new file mode 100644
index 0000000000..3338358796
--- /dev/null
+++ b/runtime/openjdkjvmti/fixed_up_dex_file.cc
@@ -0,0 +1,145 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "fixed_up_dex_file.h"
+#include "dex_file-inl.h"
+
+// Compiler includes.
+#include "dex/dex_to_dex_decompiler.h"
+
+// Runtime includes.
+#include "oat_file.h"
+#include "vdex_file.h"
+
+namespace openjdkjvmti {
+
+static void RecomputeDexChecksum(art::DexFile* dex_file)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ reinterpret_cast<art::DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_ =
+ dex_file->CalculateChecksum();
+}
+
+// TODO: This is more complicated than it seems like it should be.
+// Because we do not keep track of where each dex file's data starts in the flat binary log of
+// dex-quickening changes, we have to search for it. Since JVMTI is the exceptional case, we do
+// not put in the effort to optimize for it.
+static void DoDexUnquicken(const art::DexFile& new_dex_file,
+ const art::DexFile& original_dex_file)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ const art::OatDexFile* oat_dex = original_dex_file.GetOatDexFile();
+ if (oat_dex == nullptr) {
+ return;
+ }
+ const art::OatFile* oat_file = oat_dex->GetOatFile();
+ if (oat_file == nullptr) {
+ return;
+ }
+ const art::VdexFile* vdex = oat_file->GetVdexFile();
+ if (vdex == nullptr || vdex->GetQuickeningInfo().size() == 0) {
+ return;
+ }
+ const art::ArrayRef<const uint8_t> quickening_info(vdex->GetQuickeningInfo());
+ const uint8_t* quickening_info_ptr = quickening_info.data();
+ for (const art::OatDexFile* cur_oat_dex : oat_file->GetOatDexFiles()) {
+ std::string error;
+ std::unique_ptr<const art::DexFile> cur_dex_file(cur_oat_dex->OpenDexFile(&error));
+ DCHECK(cur_dex_file.get() != nullptr);
+ // Is this the dex file we are looking for?
+ if (UNLIKELY(cur_dex_file->Begin() == original_dex_file.Begin())) {
+ // Simple sanity check.
+ CHECK_EQ(new_dex_file.NumClassDefs(), original_dex_file.NumClassDefs());
+ for (uint32_t i = 0; i < new_dex_file.NumClassDefs(); ++i) {
+ const art::DexFile::ClassDef& class_def = new_dex_file.GetClassDef(i);
+ const uint8_t* class_data = new_dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ continue;
+ }
+ for (art::ClassDataItemIterator it(new_dex_file, class_data); it.HasNext(); it.Next()) {
+ if (it.IsAtMethod() && it.GetMethodCodeItem() != nullptr) {
+ uint32_t quickening_size = *reinterpret_cast<const uint32_t*>(quickening_info_ptr);
+ quickening_info_ptr += sizeof(uint32_t);
+ art::optimizer::ArtDecompileDEX(
+ *it.GetMethodCodeItem(),
+ art::ArrayRef<const uint8_t>(quickening_info_ptr, quickening_size),
+ /*decompile_return_instruction*/true);
+ quickening_info_ptr += quickening_size;
+ }
+ }
+ }
+ // We don't need to bother looking through the rest of the dex-files.
+ break;
+ } else {
+ // Not the dex file we want. Skip over all the quickening info for all its classes.
+ for (uint32_t i = 0; i < cur_dex_file->NumClassDefs(); ++i) {
+ const art::DexFile::ClassDef& class_def = cur_dex_file->GetClassDef(i);
+ const uint8_t* class_data = cur_dex_file->GetClassData(class_def);
+ if (class_data == nullptr) {
+ continue;
+ }
+ for (art::ClassDataItemIterator it(*cur_dex_file, class_data); it.HasNext(); it.Next()) {
+ if (it.IsAtMethod() && it.GetMethodCodeItem() != nullptr) {
+ uint32_t quickening_size = *reinterpret_cast<const uint32_t*>(quickening_info_ptr);
+ quickening_info_ptr += sizeof(uint32_t);
+ quickening_info_ptr += quickening_size;
+ }
+ }
+ }
+ }
+ }
+}
+
+std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& original) {
+ // Copy the data into mutable memory.
+ std::vector<unsigned char> data;
+ data.resize(original.Size());
+ memcpy(data.data(), original.Begin(), original.Size());
+ std::string error;
+ std::unique_ptr<const art::DexFile> new_dex_file(art::DexFile::Open(
+ data.data(),
+ data.size(),
+ /*location*/"Unquickening_dexfile.dex",
+ /*location_checksum*/0,
+ /*oat_dex_file*/nullptr,
+ /*verify*/false,
+ /*verify_checksum*/false,
+ &error));
+ if (new_dex_file.get() == nullptr) {
+ LOG(ERROR) << "Unable to open dex file from memory for unquickening! error: " << error;
+ return nullptr;
+ }
+
+ DoDexUnquicken(*new_dex_file, original);
+ RecomputeDexChecksum(const_cast<art::DexFile*>(new_dex_file.get()));
+ std::unique_ptr<FixedUpDexFile> ret(new FixedUpDexFile(std::move(new_dex_file), std::move(data)));
+ return ret;
+}
+
+} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/fixed_up_dex_file.h b/runtime/openjdkjvmti/fixed_up_dex_file.h
new file mode 100644
index 0000000000..db12f489e9
--- /dev/null
+++ b/runtime/openjdkjvmti/fixed_up_dex_file.h
@@ -0,0 +1,82 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_FIXED_UP_DEX_FILE_H_
+#define ART_RUNTIME_OPENJDKJVMTI_FIXED_UP_DEX_FILE_H_
+
+#include <memory>
+#include <vector>
+
+#include "jni.h"
+#include "jvmti.h"
+#include "base/mutex.h"
+#include "dex_file.h"
+
+namespace openjdkjvmti {
+
+// A holder for a DexFile that has been 'fixed up' to ensure it is fully compliant with the
+// published standard (no internal/quick opcodes, all fields have the defined values, etc.). This
+// is used to ensure that agents get a consistent dex file regardless of what version of Android
+// they are running on.
+class FixedUpDexFile {
+ public:
+ static std::unique_ptr<FixedUpDexFile> Create(const art::DexFile& original)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ const art::DexFile& GetDexFile() {
+ return *dex_file_;
+ }
+
+ const unsigned char* Begin() {
+ return data_.data();
+ }
+
+ size_t Size() {
+ return data_.size();
+ }
+
+ private:
+ explicit FixedUpDexFile(std::unique_ptr<const art::DexFile> fixed_up_dex_file,
+ std::vector<unsigned char> data)
+ : dex_file_(std::move(fixed_up_dex_file)),
+ data_(std::move(data)) {}
+
+ // The fixed-up DexFile.
+ std::unique_ptr<const art::DexFile> dex_file_;
+ // The backing data for dex_file_.
+ const std::vector<unsigned char> data_;
+
+ DISALLOW_COPY_AND_ASSIGN(FixedUpDexFile);
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_FIXED_UP_DEX_FILE_H_
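A short usage sketch for the new class, assuming the caller holds the mutator lock and wants standard-compliant dex bytes to hand to an agent (the surrounding variable names are hypothetical):

  std::unique_ptr<FixedUpDexFile> fixed = FixedUpDexFile::Create(original_dex_file);
  if (fixed != nullptr) {
    // The backing bytes are unquickened and the header checksum has been recomputed.
    const unsigned char* dex_data = fixed->Begin();
    size_t dex_len = fixed->Size();
    // ... hand dex_data / dex_len to the ClassFileLoadHook dispatch ...
  }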
diff --git a/runtime/openjdkjvmti/jvmti.h b/runtime/openjdkjvmti/include/jvmti.h
index de07c163fc..de07c163fc 100644
--- a/runtime/openjdkjvmti/jvmti.h
+++ b/runtime/openjdkjvmti/include/jvmti.h
diff --git a/runtime/openjdkjvmti/jvmti_weak_table-inl.h b/runtime/openjdkjvmti/jvmti_weak_table-inl.h
new file mode 100644
index 0000000000..f67fffccbb
--- /dev/null
+++ b/runtime/openjdkjvmti/jvmti_weak_table-inl.h
@@ -0,0 +1,389 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
+#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
+
+#include "jvmti_weak_table.h"
+
+#include <limits>
+
+#include "art_jvmti.h"
+#include "base/logging.h"
+#include "gc/allocation_listener.h"
+#include "instrumentation.h"
+#include "jni_env_ext-inl.h"
+#include "jvmti_allocator.h"
+#include "mirror/class.h"
+#include "mirror/object.h"
+#include "runtime.h"
+#include "ScopedLocalRef.h"
+
+namespace openjdkjvmti {
+
+template <typename T>
+void JvmtiWeakTable<T>::Lock() {
+ allow_disallow_lock_.ExclusiveLock(art::Thread::Current());
+}
+template <typename T>
+void JvmtiWeakTable<T>::Unlock() {
+ allow_disallow_lock_.ExclusiveUnlock(art::Thread::Current());
+}
+template <typename T>
+void JvmtiWeakTable<T>::AssertLocked() {
+ allow_disallow_lock_.AssertHeld(art::Thread::Current());
+}
+
+template <typename T>
+void JvmtiWeakTable<T>::UpdateTableWithReadBarrier() {
+ update_since_last_sweep_ = true;
+
+ auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
+ art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return original_root.Read<art::kWithReadBarrier>();
+ };
+
+ UpdateTableWith<decltype(WithReadBarrierUpdater), kIgnoreNull>(WithReadBarrierUpdater);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, T* result) {
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+ UpdateTableWithReadBarrier();
+ return GetTagLocked(self, obj, result);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::Remove(art::mirror::Object* obj, /* out */ T* tag) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ return RemoveLocked(self, obj, tag);
+}
+template <typename T>
+bool JvmtiWeakTable<T>::RemoveLocked(art::mirror::Object* obj, T* tag) {
+ art::Thread* self = art::Thread::Current();
+ allow_disallow_lock_.AssertHeld(self);
+ Wait(self);
+
+ return RemoveLocked(self, obj, tag);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::RemoveLocked(art::Thread* self, art::mirror::Object* obj, T* tag) {
+ auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+ if (it != tagged_objects_.end()) {
+ if (tag != nullptr) {
+ *tag = it->second;
+ }
+ tagged_objects_.erase(it);
+ return true;
+ }
+
+ if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+
+ // Update the table.
+ UpdateTableWithReadBarrier();
+
+ // And try again.
+ return RemoveLocked(self, obj, tag);
+ }
+
+ // Not in here.
+ return false;
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::Set(art::mirror::Object* obj, T new_tag) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ return SetLocked(self, obj, new_tag);
+}
+template <typename T>
+bool JvmtiWeakTable<T>::SetLocked(art::mirror::Object* obj, T new_tag) {
+ art::Thread* self = art::Thread::Current();
+ allow_disallow_lock_.AssertHeld(self);
+ Wait(self);
+
+ return SetLocked(self, obj, new_tag);
+}
+
+template <typename T>
+bool JvmtiWeakTable<T>::SetLocked(art::Thread* self, art::mirror::Object* obj, T new_tag) {
+ auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+ if (it != tagged_objects_.end()) {
+ it->second = new_tag;
+ return true;
+ }
+
+ if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. Explicitly update the table once.
+ // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
+
+ // Update the table.
+ UpdateTableWithReadBarrier();
+
+ // And try again.
+ return SetLocked(self, obj, new_tag);
+ }
+
+ // New element.
+ auto insert_it = tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(obj), new_tag);
+ DCHECK(insert_it.second);
+ return false;
+}
+
+template <typename T>
+void JvmtiWeakTable<T>::Sweep(art::IsMarkedVisitor* visitor) {
+ if (DoesHandleNullOnSweep()) {
+ SweepImpl<true>(visitor);
+ } else {
+ SweepImpl<false>(visitor);
+ }
+
+ // Under concurrent GC, there is a window between moving objects and sweeping of system
+ // weaks in which mutators are active. We may receive a to-space object pointer in obj,
+ // but still have from-space pointers in the table. We explicitly update the table then
+ // to ensure we compare against to-space pointers. But we want to do this only once. Once
+ // sweeping is done, we know all objects are to-space pointers until the next GC cycle,
+ // so we re-enable the explicit update for the next marking.
+ update_since_last_sweep_ = false;
+}
+
+template <typename T>
+template <bool kHandleNull>
+void JvmtiWeakTable<T>::SweepImpl(art::IsMarkedVisitor* visitor) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+
+ auto IsMarkedUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root ATTRIBUTE_UNUSED,
+ art::mirror::Object* original_obj) {
+ return visitor->IsMarked(original_obj);
+ };
+
+ UpdateTableWith<decltype(IsMarkedUpdater),
+ kHandleNull ? kCallHandleNull : kRemoveNull>(IsMarkedUpdater);
+}
+
+template <typename T>
+template <typename Updater, typename JvmtiWeakTable<T>::TableUpdateNullTarget kTargetNull>
+ALWAYS_INLINE inline void JvmtiWeakTable<T>::UpdateTableWith(Updater& updater) {
+ // We optimistically hope that elements will still be well-distributed when re-inserting them.
+ // So play with the map mechanics, and postpone rehashing. This avoids the need for a side
+ // vector and two passes.
+ float original_max_load_factor = tagged_objects_.max_load_factor();
+ tagged_objects_.max_load_factor(std::numeric_limits<float>::max());
+ // For checking that a max load-factor actually does what we expect.
+ size_t original_bucket_count = tagged_objects_.bucket_count();
+
+ for (auto it = tagged_objects_.begin(); it != tagged_objects_.end();) {
+ DCHECK(!it->first.IsNull());
+ art::mirror::Object* original_obj = it->first.template Read<art::kWithoutReadBarrier>();
+ art::mirror::Object* target_obj = updater(it->first, original_obj);
+ if (original_obj != target_obj) {
+ if (kTargetNull == kIgnoreNull && target_obj == nullptr) {
+ // Ignore null target, don't do anything.
+ } else {
+ T tag = it->second;
+ it = tagged_objects_.erase(it);
+ if (target_obj != nullptr) {
+ tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(target_obj), tag);
+ DCHECK_EQ(original_bucket_count, tagged_objects_.bucket_count());
+ } else if (kTargetNull == kCallHandleNull) {
+ HandleNullSweep(tag);
+ }
+ continue; // Iterator was implicitly updated by erase.
+ }
+ }
+ it++;
+ }
+
+ tagged_objects_.max_load_factor(original_max_load_factor);
+ // TODO: consider rehash here.
+}
+
+template <typename T>
+template <typename Storage, class Allocator>
+struct JvmtiWeakTable<T>::ReleasableContainer {
+ using allocator_type = Allocator;
+
+ explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
+ : allocator(alloc),
+ data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
+ size(0),
+ capacity(reserve) {
+ }
+
+ ~ReleasableContainer() {
+ if (data != nullptr) {
+ allocator.deallocate(data, capacity);
+ capacity = 0;
+ size = 0;
+ }
+ }
+
+ Storage* Release() {
+ Storage* tmp = data;
+
+ data = nullptr;
+ size = 0;
+ capacity = 0;
+
+ return tmp;
+ }
+
+ void Resize(size_t new_capacity) {
+ CHECK_GT(new_capacity, capacity);
+
+ Storage* tmp = allocator.allocate(new_capacity);
+ DCHECK(tmp != nullptr);
+ if (data != nullptr) {
+ memcpy(tmp, data, sizeof(Storage) * size);
+ }
+ Storage* old = data;
+ data = tmp;
+ allocator.deallocate(old, capacity);
+ capacity = new_capacity;
+ }
+
+ void Pushback(const Storage& elem) {
+ if (size == capacity) {
+ size_t new_capacity = 2 * capacity + 1;
+ Resize(new_capacity);
+ }
+ data[size++] = elem;
+ }
+
+ Allocator allocator;
+ Storage* data;
+ size_t size;
+ size_t capacity;
+};
+
+template <typename T>
+jvmtiError JvmtiWeakTable<T>::GetTaggedObjects(jvmtiEnv* jvmti_env,
+ jint tag_count,
+ const T* tags,
+ jint* count_ptr,
+ jobject** object_result_ptr,
+ T** tag_result_ptr) {
+ if (tag_count < 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ if (tag_count > 0) {
+ for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
+ if (tags[i] == 0) {
+ return ERR(ILLEGAL_ARGUMENT);
+ }
+ }
+ }
+ if (tags == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+ if (count_ptr == nullptr) {
+ return ERR(NULL_POINTER);
+ }
+
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ art::JNIEnvExt* jni_env = self->GetJniEnv();
+
+ constexpr size_t kDefaultSize = 10;
+ size_t initial_object_size;
+ size_t initial_tag_size;
+ if (tag_count == 0) {
+ initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
+ initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
+ } else {
+ initial_object_size = initial_tag_size = kDefaultSize;
+ }
+ JvmtiAllocator<void> allocator(jvmti_env);
+ ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(allocator,
+ initial_object_size);
+ ReleasableContainer<T, JvmtiAllocator<T>> selected_tags(allocator, initial_tag_size);
+
+ size_t count = 0;
+ for (auto& pair : tagged_objects_) {
+ bool select;
+ if (tag_count > 0) {
+ select = false;
+ for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
+ if (tags[i] == pair.second) {
+ select = true;
+ break;
+ }
+ }
+ } else {
+ select = true;
+ }
+
+ if (select) {
+ art::mirror::Object* obj = pair.first.template Read<art::kWithReadBarrier>();
+ if (obj != nullptr) {
+ count++;
+ if (object_result_ptr != nullptr) {
+ selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
+ }
+ if (tag_result_ptr != nullptr) {
+ selected_tags.Pushback(pair.second);
+ }
+ }
+ }
+ }
+
+ if (object_result_ptr != nullptr) {
+ *object_result_ptr = selected_objects.Release();
+ }
+ if (tag_result_ptr != nullptr) {
+ *tag_result_ptr = selected_tags.Release();
+ }
+ *count_ptr = static_cast<jint>(count);
+ return ERR(NONE);
+}
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_INL_H_
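The Lock/Unlock/AssertLocked helpers defined above exist so a caller can take the table lock once and then issue a batch of *Locked operations. A hedged sketch of that amortization pattern, with a hypothetical table and object list:

  table.Lock();
  for (art::mirror::Object* obj : objects) {
    table.SetLocked(obj, /* new_tag */ 1);  // No per-call lock/wait overhead.
  }
  table.Unlock();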
diff --git a/runtime/openjdkjvmti/jvmti_weak_table.h b/runtime/openjdkjvmti/jvmti_weak_table.h
new file mode 100644
index 0000000000..eeea75aa9d
--- /dev/null
+++ b/runtime/openjdkjvmti/jvmti_weak_table.h
@@ -0,0 +1,215 @@
+/* Copyright (C) 2017 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
+#define ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
+
+#include <unordered_map>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "gc/system_weak.h"
+#include "gc_root-inl.h"
+#include "globals.h"
+#include "jvmti.h"
+#include "mirror/object.h"
+#include "thread-inl.h"
+
+namespace openjdkjvmti {
+
+class EventHandler;
+
+// A system-weak container mapping objects to elements of the template type. This corresponds
+// to a weak hash map. For historical reasons the stored value is called "tag."
+template <typename T>
+class JvmtiWeakTable : public art::gc::SystemWeakHolder {
+ public:
+ JvmtiWeakTable()
+ : art::gc::SystemWeakHolder(art::kTaggingLockLevel),
+ update_since_last_sweep_(false) {
+ }
+
+ // Remove the mapping for the given object, returning whether such a mapping existed (and the old
+ // value).
+ bool Remove(art::mirror::Object* obj, /* out */ T* tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+ bool RemoveLocked(art::mirror::Object* obj, /* out */ T* tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ // Set the mapping for the given object. Returns true if this overwrites an already existing
+ // mapping.
+ virtual bool Set(art::mirror::Object* obj, T tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+ virtual bool SetLocked(art::mirror::Object* obj, T tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ // Return the value associated with the given object. Returns true if the mapping exists, false
+ // otherwise.
+ bool GetTag(art::mirror::Object* obj, /* out */ T* result)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_) {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, allow_disallow_lock_);
+ Wait(self);
+
+ return GetTagLocked(self, obj, result);
+ }
+ bool GetTagLocked(art::mirror::Object* obj, /* out */ T* result)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_) {
+ art::Thread* self = art::Thread::Current();
+ allow_disallow_lock_.AssertHeld(self);
+ Wait(self);
+
+ return GetTagLocked(self, obj, result);
+ }
+
+ // Sweep the container. DO NOT CALL MANUALLY.
+ void Sweep(art::IsMarkedVisitor* visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+
+ // Return all objects whose tag matches one of the given tags, or all tagged objects if no
+ // tags are given.
+ jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
+ jint tag_count,
+ const T* tags,
+ /* out */ jint* count_ptr,
+ /* out */ jobject** object_result_ptr,
+ /* out */ T** tag_result_ptr)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+
+ // Locking functions, to allow coarse-grained locking and amortization.
+ void Lock() ACQUIRE(allow_disallow_lock_);
+ void Unlock() RELEASE(allow_disallow_lock_);
+ void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);
+
+ protected:
+ // Should HandleNullSweep be called when Sweep detects the release of an object?
+ virtual bool DoesHandleNullOnSweep() {
+ return false;
+ }
+ // If DoesHandleNullOnSweep returns true, this function will be called.
+ virtual void HandleNullSweep(T tag ATTRIBUTE_UNUSED) {}
+
+ private:
+ bool SetLocked(art::Thread* self, art::mirror::Object* obj, T tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ bool RemoveLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* tag)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ bool GetTagLocked(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_) {
+ auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
+ if (it != tagged_objects_.end()) {
+ *result = it->second;
+ return true;
+ }
+
+ // Performance optimization: To avoid multiple table updates, ensure that during GC we
+ // only update once. See the comment on the implementation of GetTagSlowPath.
+ if (art::kUseReadBarrier &&
+ self != nullptr &&
+ self->GetIsGcMarking() &&
+ !update_since_last_sweep_) {
+ return GetTagSlowPath(self, obj, result);
+ }
+
+ return false;
+ }
+
+ // Slow path for GetTag. We did not find the object, but the table may still hold from-space
+ // pointers while we were handed a to-space pointer.
+ bool GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, /* out */ T* result)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ // Update the table by doing read barriers on each element, ensuring that to-space pointers
+ // are stored.
+ void UpdateTableWithReadBarrier()
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ template <bool kHandleNull>
+ void SweepImpl(art::IsMarkedVisitor* visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!allow_disallow_lock_);
+
+ enum TableUpdateNullTarget {
+ kIgnoreNull,
+ kRemoveNull,
+ kCallHandleNull
+ };
+
+ template <typename Updater, TableUpdateNullTarget kTargetNull>
+ void UpdateTableWith(Updater& updater)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(allow_disallow_lock_);
+
+ template <typename Storage, class Allocator = std::allocator<T>>
+ struct ReleasableContainer;
+
+ struct HashGcRoot {
+ size_t operator()(const art::GcRoot<art::mirror::Object>& r) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return reinterpret_cast<uintptr_t>(r.Read<art::kWithoutReadBarrier>());
+ }
+ };
+
+ struct EqGcRoot {
+ bool operator()(const art::GcRoot<art::mirror::Object>& r1,
+ const art::GcRoot<art::mirror::Object>& r2) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return r1.Read<art::kWithoutReadBarrier>() == r2.Read<art::kWithoutReadBarrier>();
+ }
+ };
+
+ std::unordered_map<art::GcRoot<art::mirror::Object>,
+ T,
+ HashGcRoot,
+ EqGcRoot> tagged_objects_
+ GUARDED_BY(allow_disallow_lock_)
+ GUARDED_BY(art::Locks::mutator_lock_);
+ // To avoid repeatedly scanning the whole table, remember if we did that since the last sweep.
+ bool update_since_last_sweep_;
+};
+
+} // namespace openjdkjvmti
+
+#endif // ART_RUNTIME_OPENJDKJVMTI_JVMTI_WEAK_TABLE_H_
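The table is meant to be specialized per value type; the ObjectTagTable rewrite below instantiates it for jlong and overrides the null-sweep hooks. As an illustrative sketch only, a hypothetical specialization that just logs collected entries would look like:

  class LoggingWeakTable : public JvmtiWeakTable<jlong> {
   protected:
    bool DoesHandleNullOnSweep() override { return true; }
    void HandleNullSweep(jlong tag) override {
      LOG(INFO) << "Object with tag " << tag << " was collected.";
    }
  };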
diff --git a/runtime/openjdkjvmti/object_tagging.cc b/runtime/openjdkjvmti/object_tagging.cc
index b27c2a3834..dcdd3ede13 100644
--- a/runtime/openjdkjvmti/object_tagging.cc
+++ b/runtime/openjdkjvmti/object_tagging.cc
@@ -34,354 +34,34 @@
#include <limits>
#include "art_jvmti.h"
-#include "base/logging.h"
#include "events-inl.h"
-#include "gc/allocation_listener.h"
-#include "instrumentation.h"
-#include "jni_env_ext-inl.h"
-#include "jvmti_allocator.h"
-#include "mirror/class.h"
-#include "mirror/object.h"
-#include "runtime.h"
-#include "ScopedLocalRef.h"
+#include "jvmti_weak_table-inl.h"
namespace openjdkjvmti {
-void ObjectTagTable::Lock() {
- allow_disallow_lock_.ExclusiveLock(art::Thread::Current());
-}
-void ObjectTagTable::Unlock() {
- allow_disallow_lock_.ExclusiveUnlock(art::Thread::Current());
-}
-void ObjectTagTable::AssertLocked() {
- allow_disallow_lock_.AssertHeld(art::Thread::Current());
-}
-
-void ObjectTagTable::UpdateTableWithReadBarrier() {
- update_since_last_sweep_ = true;
-
- auto WithReadBarrierUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root,
- art::mirror::Object* original_obj ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return original_root.Read<art::kWithReadBarrier>();
- };
-
- UpdateTableWith<decltype(WithReadBarrierUpdater), kIgnoreNull>(WithReadBarrierUpdater);
-}
-
-bool ObjectTagTable::GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result) {
- // Under concurrent GC, there is a window between moving objects and sweeping of system
- // weaks in which mutators are active. We may receive a to-space object pointer in obj,
- // but still have from-space pointers in the table. Explicitly update the table once.
- // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
- UpdateTableWithReadBarrier();
- return GetTagLocked(self, obj, result);
-}
-
-void ObjectTagTable::Add(art::mirror::Object* obj, jlong tag) {
- // Same as Set(), as we don't have duplicates in an unordered_map.
- Set(obj, tag);
-}
-
-bool ObjectTagTable::Remove(art::mirror::Object* obj, jlong* tag) {
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
-
- return RemoveLocked(self, obj, tag);
-}
-bool ObjectTagTable::RemoveLocked(art::mirror::Object* obj, jlong* tag) {
- art::Thread* self = art::Thread::Current();
- allow_disallow_lock_.AssertHeld(self);
- Wait(self);
-
- return RemoveLocked(self, obj, tag);
-}
-
-bool ObjectTagTable::RemoveLocked(art::Thread* self, art::mirror::Object* obj, jlong* tag) {
- auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
- if (it != tagged_objects_.end()) {
- if (tag != nullptr) {
- *tag = it->second;
- }
- tagged_objects_.erase(it);
- return true;
- }
-
- if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
- // Under concurrent GC, there is a window between moving objects and sweeping of system
- // weaks in which mutators are active. We may receive a to-space object pointer in obj,
- // but still have from-space pointers in the table. Explicitly update the table once.
- // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
-
- // Update the table.
- UpdateTableWithReadBarrier();
-
- // And try again.
- return RemoveLocked(self, obj, tag);
- }
-
- // Not in here.
- return false;
-}
+// Instantiate for jlong = JVMTI tags.
+template class JvmtiWeakTable<jlong>;
bool ObjectTagTable::Set(art::mirror::Object* obj, jlong new_tag) {
if (new_tag == 0) {
jlong tmp;
return Remove(obj, &tmp);
}
-
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
-
- return SetLocked(self, obj, new_tag);
+ return JvmtiWeakTable<jlong>::Set(obj, new_tag);
}
bool ObjectTagTable::SetLocked(art::mirror::Object* obj, jlong new_tag) {
if (new_tag == 0) {
jlong tmp;
return RemoveLocked(obj, &tmp);
}
-
- art::Thread* self = art::Thread::Current();
- allow_disallow_lock_.AssertHeld(self);
- Wait(self);
-
- return SetLocked(self, obj, new_tag);
-}
-
-bool ObjectTagTable::SetLocked(art::Thread* self, art::mirror::Object* obj, jlong new_tag) {
- auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
- if (it != tagged_objects_.end()) {
- it->second = new_tag;
- return true;
- }
-
- if (art::kUseReadBarrier && self->GetIsGcMarking() && !update_since_last_sweep_) {
- // Under concurrent GC, there is a window between moving objects and sweeping of system
- // weaks in which mutators are active. We may receive a to-space object pointer in obj,
- // but still have from-space pointers in the table. Explicitly update the table once.
- // Note: this will keep *all* objects in the table live, but should be a rare occurrence.
-
- // Update the table.
- UpdateTableWithReadBarrier();
-
- // And try again.
- return SetLocked(self, obj, new_tag);
- }
-
- // New element.
- auto insert_it = tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(obj), new_tag);
- DCHECK(insert_it.second);
- return false;
+ return JvmtiWeakTable<jlong>::SetLocked(obj, new_tag);
}
-void ObjectTagTable::Sweep(art::IsMarkedVisitor* visitor) {
- if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree)) {
- SweepImpl<true>(visitor);
- } else {
- SweepImpl<false>(visitor);
- }
-
- // Under concurrent GC, there is a window between moving objects and sweeping of system
- // weaks in which mutators are active. We may receive a to-space object pointer in obj,
- // but still have from-space pointers in the table. We explicitly update the table then
- // to ensure we compare against to-space pointers. But we want to do this only once. Once
- // sweeping is done, we know all objects are to-space pointers until the next GC cycle,
- // so we re-enable the explicit update for the next marking.
- update_since_last_sweep_ = false;
+bool ObjectTagTable::DoesHandleNullOnSweep() {
+ return event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kObjectFree);
}
-
-template <bool kHandleNull>
-void ObjectTagTable::SweepImpl(art::IsMarkedVisitor* visitor) {
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
-
- auto IsMarkedUpdater = [&](const art::GcRoot<art::mirror::Object>& original_root ATTRIBUTE_UNUSED,
- art::mirror::Object* original_obj) {
- return visitor->IsMarked(original_obj);
- };
-
- UpdateTableWith<decltype(IsMarkedUpdater),
- kHandleNull ? kCallHandleNull : kRemoveNull>(IsMarkedUpdater);
-}
-
void ObjectTagTable::HandleNullSweep(jlong tag) {
- event_handler_->DispatchEvent<ArtJvmtiEvent::kObjectFree>(nullptr, tag);
-}
-
-template <typename T, ObjectTagTable::TableUpdateNullTarget kTargetNull>
-ALWAYS_INLINE inline void ObjectTagTable::UpdateTableWith(T& updater) {
- // We optimistically hope that elements will still be well-distributed when re-inserting them.
- // So play with the map mechanics, and postpone rehashing. This avoids the need of a side
- // vector and two passes.
- float original_max_load_factor = tagged_objects_.max_load_factor();
- tagged_objects_.max_load_factor(std::numeric_limits<float>::max());
- // For checking that a max load-factor actually does what we expect.
- size_t original_bucket_count = tagged_objects_.bucket_count();
-
- for (auto it = tagged_objects_.begin(); it != tagged_objects_.end();) {
- DCHECK(!it->first.IsNull());
- art::mirror::Object* original_obj = it->first.Read<art::kWithoutReadBarrier>();
- art::mirror::Object* target_obj = updater(it->first, original_obj);
- if (original_obj != target_obj) {
- if (kTargetNull == kIgnoreNull && target_obj == nullptr) {
- // Ignore null target, don't do anything.
- } else {
- jlong tag = it->second;
- it = tagged_objects_.erase(it);
- if (target_obj != nullptr) {
- tagged_objects_.emplace(art::GcRoot<art::mirror::Object>(target_obj), tag);
- DCHECK_EQ(original_bucket_count, tagged_objects_.bucket_count());
- } else if (kTargetNull == kCallHandleNull) {
- HandleNullSweep(tag);
- }
- continue; // Iterator was implicitly updated by erase.
- }
- }
- it++;
- }
-
- tagged_objects_.max_load_factor(original_max_load_factor);
- // TODO: consider rehash here.
-}
-
-template <typename T, class Allocator = std::allocator<T>>
-struct ReleasableContainer {
- using allocator_type = Allocator;
-
- explicit ReleasableContainer(const allocator_type& alloc, size_t reserve = 10)
- : allocator(alloc),
- data(reserve > 0 ? allocator.allocate(reserve) : nullptr),
- size(0),
- capacity(reserve) {
- }
-
- ~ReleasableContainer() {
- if (data != nullptr) {
- allocator.deallocate(data, capacity);
- capacity = 0;
- size = 0;
- }
- }
-
- T* Release() {
- T* tmp = data;
-
- data = nullptr;
- size = 0;
- capacity = 0;
-
- return tmp;
- }
-
- void Resize(size_t new_capacity) {
- CHECK_GT(new_capacity, capacity);
-
- T* tmp = allocator.allocate(new_capacity);
- DCHECK(tmp != nullptr);
- if (data != nullptr) {
- memcpy(tmp, data, sizeof(T) * size);
- }
- T* old = data;
- data = tmp;
- allocator.deallocate(old, capacity);
- capacity = new_capacity;
- }
-
- void Pushback(const T& elem) {
- if (size == capacity) {
- size_t new_capacity = 2 * capacity + 1;
- Resize(new_capacity);
- }
- data[size++] = elem;
- }
-
- Allocator allocator;
- T* data;
- size_t size;
- size_t capacity;
-};
-
-jvmtiError ObjectTagTable::GetTaggedObjects(jvmtiEnv* jvmti_env,
- jint tag_count,
- const jlong* tags,
- jint* count_ptr,
- jobject** object_result_ptr,
- jlong** tag_result_ptr) {
- if (tag_count < 0) {
- return ERR(ILLEGAL_ARGUMENT);
- }
- if (tag_count > 0) {
- for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
- if (tags[i] == 0) {
- return ERR(ILLEGAL_ARGUMENT);
- }
- }
- }
- if (tags == nullptr) {
- return ERR(NULL_POINTER);
- }
- if (count_ptr == nullptr) {
- return ERR(NULL_POINTER);
- }
-
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
-
- art::JNIEnvExt* jni_env = self->GetJniEnv();
-
- constexpr size_t kDefaultSize = 10;
- size_t initial_object_size;
- size_t initial_tag_size;
- if (tag_count == 0) {
- initial_object_size = (object_result_ptr != nullptr) ? tagged_objects_.size() : 0;
- initial_tag_size = (tag_result_ptr != nullptr) ? tagged_objects_.size() : 0;
- } else {
- initial_object_size = initial_tag_size = kDefaultSize;
- }
- JvmtiAllocator<void> allocator(jvmti_env);
- ReleasableContainer<jobject, JvmtiAllocator<jobject>> selected_objects(allocator, initial_object_size);
- ReleasableContainer<jlong, JvmtiAllocator<jlong>> selected_tags(allocator, initial_tag_size);
-
- size_t count = 0;
- for (auto& pair : tagged_objects_) {
- bool select;
- if (tag_count > 0) {
- select = false;
- for (size_t i = 0; i != static_cast<size_t>(tag_count); ++i) {
- if (tags[i] == pair.second) {
- select = true;
- break;
- }
- }
- } else {
- select = true;
- }
-
- if (select) {
- art::mirror::Object* obj = pair.first.Read<art::kWithReadBarrier>();
- if (obj != nullptr) {
- count++;
- if (object_result_ptr != nullptr) {
- selected_objects.Pushback(jni_env->AddLocalReference<jobject>(obj));
- }
- if (tag_result_ptr != nullptr) {
- selected_tags.Pushback(pair.second);
- }
- }
- }
- }
-
- if (object_result_ptr != nullptr) {
- *object_result_ptr = selected_objects.Release();
- }
- if (tag_result_ptr != nullptr) {
- *tag_result_ptr = selected_tags.Release();
- }
- *count_ptr = static_cast<jint>(count);
- return ERR(NONE);
+ event_handler_->DispatchEvent<ArtJvmtiEvent::kObjectFree>(jvmti_env_, nullptr, tag);
}
} // namespace openjdkjvmti
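
The Sweep/SweepImpl/UpdateTableWith machinery deleted above moves into the shared
JvmtiWeakTable<T> base class (jvmti_weak_table-inl.h, which is not part of this hunk).
As a rough sketch only, assuming the base table keeps the removed SweepImpl<kHandleNull>
shape and merely consults the two new virtuals, the dispatch presumably looks like this:

    template <typename T>
    void JvmtiWeakTable<T>::Sweep(art::IsMarkedVisitor* visitor) {
      if (DoesHandleNullOnSweep()) {
        // ObjectTagTable returns true only while an ObjectFree event is enabled anywhere,
        // so HandleNullSweep(tag) fires for every entry whose object was not marked.
        SweepImpl<true>(visitor);
      } else {
        // Otherwise dead entries are silently dropped from the table.
        SweepImpl<false>(visitor);
      }
    }
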
diff --git a/runtime/openjdkjvmti/object_tagging.h b/runtime/openjdkjvmti/object_tagging.h
index 0296f1ad80..ca84e442dc 100644
--- a/runtime/openjdkjvmti/object_tagging.h
+++ b/runtime/openjdkjvmti/object_tagging.h
@@ -1,17 +1,32 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
+/* Copyright (C) 2016 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h. The
+ * copyright and license information for the file jvmti.h follows.
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
*/
#ifndef ART_RUNTIME_OPENJDKJVMTI_OBJECT_TAGGING_H_
@@ -20,62 +35,28 @@
#include <unordered_map>
#include "base/mutex.h"
-#include "gc/system_weak.h"
-#include "gc_root-inl.h"
#include "globals.h"
#include "jvmti.h"
+#include "jvmti_weak_table.h"
#include "mirror/object.h"
-#include "thread-inl.h"
namespace openjdkjvmti {
+struct ArtJvmTiEnv;
class EventHandler;
-class ObjectTagTable : public art::gc::SystemWeakHolder {
+class ObjectTagTable FINAL : public JvmtiWeakTable<jlong> {
public:
- explicit ObjectTagTable(EventHandler* event_handler)
- : art::gc::SystemWeakHolder(kTaggingLockLevel),
- update_since_last_sweep_(false),
- event_handler_(event_handler) {
- }
-
- void Add(art::mirror::Object* obj, jlong tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
+ ObjectTagTable(EventHandler* event_handler, ArtJvmTiEnv* env)
+ : event_handler_(event_handler), jvmti_env_(env) {}
- bool Remove(art::mirror::Object* obj, jlong* tag)
+ bool Set(art::mirror::Object* obj, jlong tag) OVERRIDE
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_);
- bool RemoveLocked(art::mirror::Object* obj, jlong* tag)
+ bool SetLocked(art::mirror::Object* obj, jlong tag) OVERRIDE
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_);
- bool Set(art::mirror::Object* obj, jlong tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
- bool SetLocked(art::mirror::Object* obj, jlong tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- bool GetTag(art::mirror::Object* obj, jlong* result)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_) {
- art::Thread* self = art::Thread::Current();
- art::MutexLock mu(self, allow_disallow_lock_);
- Wait(self);
-
- return GetTagLocked(self, obj, result);
- }
- bool GetTagLocked(art::mirror::Object* obj, jlong* result)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_) {
- art::Thread* self = art::Thread::Current();
- allow_disallow_lock_.AssertHeld(self);
- Wait(self);
-
- return GetTagLocked(self, obj, result);
- }
-
jlong GetTagOrZero(art::mirror::Object* obj)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
@@ -91,109 +72,13 @@ class ObjectTagTable : public art::gc::SystemWeakHolder {
return tmp;
}
- void Sweep(art::IsMarkedVisitor* visitor)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
-
- jvmtiError GetTaggedObjects(jvmtiEnv* jvmti_env,
- jint tag_count,
- const jlong* tags,
- jint* count_ptr,
- jobject** object_result_ptr,
- jlong** tag_result_ptr)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
-
- void Lock() ACQUIRE(allow_disallow_lock_);
- void Unlock() RELEASE(allow_disallow_lock_);
- void AssertLocked() ASSERT_CAPABILITY(allow_disallow_lock_);
+ protected:
+ bool DoesHandleNullOnSweep() OVERRIDE;
+ void HandleNullSweep(jlong tag) OVERRIDE;
private:
- bool SetLocked(art::Thread* self, art::mirror::Object* obj, jlong tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- bool RemoveLocked(art::Thread* self, art::mirror::Object* obj, jlong* tag)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- bool GetTagLocked(art::Thread* self, art::mirror::Object* obj, jlong* result)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_) {
- auto it = tagged_objects_.find(art::GcRoot<art::mirror::Object>(obj));
- if (it != tagged_objects_.end()) {
- *result = it->second;
- return true;
- }
-
- if (art::kUseReadBarrier &&
- self != nullptr &&
- self->GetIsGcMarking() &&
- !update_since_last_sweep_) {
- return GetTagSlowPath(self, obj, result);
- }
-
- return false;
- }
-
- // Slow-path for GetTag. We didn't find the object, but we might be storing from-pointers and
- // are asked to retrieve with a to-pointer.
- bool GetTagSlowPath(art::Thread* self, art::mirror::Object* obj, jlong* result)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- // Update the table by doing read barriers on each element, ensuring that to-space pointers
- // are stored.
- void UpdateTableWithReadBarrier()
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- template <bool kHandleNull>
- void SweepImpl(art::IsMarkedVisitor* visitor)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!allow_disallow_lock_);
- void HandleNullSweep(jlong tag);
-
- enum TableUpdateNullTarget {
- kIgnoreNull,
- kRemoveNull,
- kCallHandleNull
- };
-
- template <typename T, TableUpdateNullTarget kTargetNull>
- void UpdateTableWith(T& updater)
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(allow_disallow_lock_);
-
- struct HashGcRoot {
- size_t operator()(const art::GcRoot<art::mirror::Object>& r) const
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return reinterpret_cast<uintptr_t>(r.Read<art::kWithoutReadBarrier>());
- }
- };
-
- struct EqGcRoot {
- bool operator()(const art::GcRoot<art::mirror::Object>& r1,
- const art::GcRoot<art::mirror::Object>& r2) const
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return r1.Read<art::kWithoutReadBarrier>() == r2.Read<art::kWithoutReadBarrier>();
- }
- };
-
- // The tag table is used when visiting roots. So it needs to have a low lock level.
- static constexpr art::LockLevel kTaggingLockLevel =
- static_cast<art::LockLevel>(art::LockLevel::kAbortLock + 1);
-
- std::unordered_map<art::GcRoot<art::mirror::Object>,
- jlong,
- HashGcRoot,
- EqGcRoot> tagged_objects_
- GUARDED_BY(allow_disallow_lock_)
- GUARDED_BY(art::Locks::mutator_lock_);
- // To avoid repeatedly scanning the whole table, remember if we did that since the last sweep.
- bool update_since_last_sweep_;
-
EventHandler* event_handler_;
+ ArtJvmTiEnv* jvmti_env_;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc
index 4282e38b17..e94c4e6112 100644
--- a/runtime/openjdkjvmti/ti_class.cc
+++ b/runtime/openjdkjvmti/ti_class.cc
@@ -43,6 +43,7 @@
#include "common_throws.h"
#include "dex_file_annotations.h"
#include "events-inl.h"
+#include "fixed_up_dex_file.h"
#include "gc/heap.h"
#include "gc_root.h"
#include "handle.h"
@@ -55,6 +56,8 @@
#include "mirror/object_reference.h"
#include "mirror/object-inl.h"
#include "mirror/reference.h"
+#include "primitive.h"
+#include "reflection.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "ScopedLocalRef.h"
@@ -62,6 +65,7 @@
#include "thread-inl.h"
#include "thread_list.h"
#include "ti_class_loader.h"
+#include "ti_phase.h"
#include "ti_redefine.h"
#include "utils.h"
@@ -77,9 +81,9 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
REQUIRES_SHARED(art::Locks::mutator_lock_) {
// Make the mmap
std::string error_msg;
+ art::ArraySlice<const unsigned char> final_data(final_dex_data, final_len);
std::unique_ptr<art::MemMap> map(Redefiner::MoveDataToMemMap(orig_location,
- final_len,
- final_dex_data,
+ final_data,
&error_msg));
if (map.get() == nullptr) {
LOG(WARNING) << "Unable to allocate mmap for redefined dex file! Error was: " << error_msg;
@@ -142,12 +146,26 @@ struct ClassCallback : public art::ClassLoadCallback {
// It is a primitive or array. Just return
return;
}
+ jvmtiPhase phase = PhaseUtil::GetPhaseUnchecked();
+ if (UNLIKELY(phase != JVMTI_PHASE_START && phase != JVMTI_PHASE_LIVE)) {
+ // We want to wait until we are at least in the START phase so that all WellKnownClasses and
+ // mirror classes have been initialized and loaded. The runtime relies on these classes having
+ // specific fields and methods present. Since PreDefine hooks don't need to abide by this
+ // restriction we will simply not send the event for these classes.
+ LOG(WARNING) << "Ignoring load of class <" << descriptor << "> as it is being loaded during "
+ << "runtime initialization.";
+ return;
+ }
+
+ // Strip the 'L' and ';' from the descriptor
std::string name(std::string(descriptor).substr(1, strlen(descriptor) - 2));
art::Thread* self = art::Thread::Current();
art::JNIEnvExt* env = self->GetJniEnv();
ScopedLocalRef<jobject> loader(
env, class_loader.IsNull() ? nullptr : env->AddLocalReference<jobject>(class_loader.Get()));
+ std::unique_ptr<FixedUpDexFile> dex_file_copy(FixedUpDexFile::Create(initial_dex_file));
+
// Go back to native.
art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
// Call all Non-retransformable agents.
@@ -161,14 +179,14 @@ struct ClassCallback : public art::ClassLoadCallback {
loader.get(),
name.c_str(),
static_cast<jobject>(nullptr), // Android doesn't seem to have protection domains
- static_cast<jint>(initial_dex_file.Size()),
- static_cast<const unsigned char*>(initial_dex_file.Begin()),
+ static_cast<jint>(dex_file_copy->Size()),
+ static_cast<const unsigned char*>(dex_file_copy->Begin()),
static_cast<jint*>(&post_no_redefine_len),
static_cast<unsigned char**>(&post_no_redefine_dex_data));
if (post_no_redefine_dex_data == nullptr) {
DCHECK_EQ(post_no_redefine_len, 0);
- post_no_redefine_dex_data = const_cast<unsigned char*>(initial_dex_file.Begin());
- post_no_redefine_len = initial_dex_file.Size();
+ post_no_redefine_dex_data = const_cast<unsigned char*>(dex_file_copy->Begin());
+ post_no_redefine_len = dex_file_copy->Size();
} else {
post_no_redefine_unique_ptr = std::unique_ptr<const unsigned char>(post_no_redefine_dex_data);
DCHECK_GT(post_no_redefine_len, 0);
@@ -197,7 +215,7 @@ struct ClassCallback : public art::ClassLoadCallback {
DCHECK_GT(final_len, 0);
}
- if (final_dex_data != initial_dex_file.Begin()) {
+ if (final_dex_data != dex_file_copy->Begin()) {
LOG(WARNING) << "Changing class " << descriptor;
art::ScopedObjectAccess soa(self);
art::StackHandleScope<2> hs(self);
@@ -215,14 +233,22 @@ struct ClassCallback : public art::ClassLoadCallback {
}
// Allocate the byte array to store the dex file bytes in.
- art::Handle<art::mirror::ByteArray> arr(hs.NewHandle(
- art::mirror::ByteArray::AllocateAndFill(
- self,
- reinterpret_cast<const signed char*>(post_no_redefine_dex_data),
- post_no_redefine_len)));
+ art::MutableHandle<art::mirror::Object> arr(hs.NewHandle<art::mirror::Object>(nullptr));
+ if (post_no_redefine_dex_data == dex_file_copy->Begin() && name != "java/lang/Long") {
+ // we didn't have any non-retransformable agents. We can just cache a pointer to the
+ // initial_dex_file. It will be kept live by the class_loader.
+ jlong dex_ptr = reinterpret_cast<uintptr_t>(&initial_dex_file);
+ art::JValue val;
+ val.SetJ(dex_ptr);
+ arr.Assign(art::BoxPrimitive(art::Primitive::kPrimLong, val));
+ } else {
+ arr.Assign(art::mirror::ByteArray::AllocateAndFill(
+ self,
+ reinterpret_cast<const signed char*>(post_no_redefine_dex_data),
+ post_no_redefine_len));
+ }
if (arr.IsNull()) {
- LOG(WARNING) << "Unable to allocate byte array for initial dex-file bytes. Aborting "
- << "transformation";
+ LOG(WARNING) << "Unable to allocate memory for initial dex-file. Aborting transformation";
self->AssertPendingOOMException();
return;
}
@@ -246,7 +272,7 @@ struct ClassCallback : public art::ClassLoadCallback {
}
// Actually set the ClassExt's original bytes once we have actually succeeded.
- ext->SetOriginalDexFileBytes(arr.Get());
+ ext->SetOriginalDexFile(arr.Get());
// Set the return values
*final_class_def = &dex_file->GetClassDef(0);
*final_dex_file = dex_file.release();
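
With this change the "original dex file" remembered in ClassExt can be one of three
things: a byte[] of dex bytes, the class's own DexCache, or (the new fast path above)
a boxed java.lang.Long holding the address of the initial DexFile. A condensed sketch
of that boxing round trip, pieced together from the hunk above and the
GetDexDataForRetransformation hunk below (error handling elided; prim_long_class is
obtained from the class linker exactly as in that hunk):

    // Store side (ti_class.cc): box the DexFile pointer as a java.lang.Long.
    art::JValue val;
    val.SetJ(static_cast<jlong>(reinterpret_cast<uintptr_t>(&initial_dex_file)));
    art::ObjPtr<art::mirror::Object> boxed = art::BoxPrimitive(art::Primitive::kPrimLong, val);
    ext->SetOriginalDexFile(boxed.Ptr());

    // Load side (ti_class_definition.cc): unbox it when the raw dex data is needed.
    art::JValue out;
    if (art::UnboxPrimitiveForResult(ext->GetOriginalDexFile(), prim_long_class, &out)) {
      const art::DexFile* dex_file =
          reinterpret_cast<const art::DexFile*>(static_cast<uintptr_t>(out.GetJ()));
      // dex_file now points back at the DexFile the class was originally defined from.
    }
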
diff --git a/runtime/openjdkjvmti/ti_class_definition.cc b/runtime/openjdkjvmti/ti_class_definition.cc
index 2c2a79bc58..153692b2fe 100644
--- a/runtime/openjdkjvmti/ti_class_definition.cc
+++ b/runtime/openjdkjvmti/ti_class_definition.cc
@@ -31,25 +31,145 @@
#include "ti_class_definition.h"
+#include "base/array_slice.h"
#include "dex_file.h"
+#include "fixed_up_dex_file.h"
#include "handle_scope-inl.h"
#include "handle.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "reflection.h"
#include "thread.h"
namespace openjdkjvmti {
-bool ArtClassDefinition::IsModified(art::Thread* self) const {
- if (modified) {
+bool ArtClassDefinition::IsModified() const {
+  // RedefineClasses calls are always 'modified' since they need to change the original_dex_file of
+ // the class.
+ if (redefined_) {
return true;
}
// Check if the dex file we want to set is the same as the current one.
+ // Unfortunately we need to do this check even if no modifications have been done since it could
+  // be that agents were removed in the meantime, so we still have a different dex file. The dex
+ // checksum means this is likely to be fairly fast.
+ return static_cast<jint>(original_dex_file_.size()) != dex_len_ ||
+ memcmp(&original_dex_file_.At(0), dex_data_.get(), dex_len_) != 0;
+}
+
+jvmtiError ArtClassDefinition::InitCommon(ArtJvmTiEnv* env, jclass klass) {
+ JNIEnv* jni_env = GetJniEnv(env);
+ if (jni_env == nullptr) {
+ return ERR(INTERNAL);
+ }
+ art::ScopedObjectAccess soa(jni_env);
+ art::ObjPtr<art::mirror::Class> m_klass(soa.Decode<art::mirror::Class>(klass));
+ if (m_klass.IsNull()) {
+ return ERR(INVALID_CLASS);
+ }
+ klass_ = klass;
+ loader_ = soa.AddLocalReference<jobject>(m_klass->GetClassLoader());
+ std::string descriptor_store;
+ std::string descriptor(m_klass->GetDescriptor(&descriptor_store));
+ name_ = descriptor.substr(1, descriptor.size() - 2);
+ // Android doesn't really have protection domains.
+ protection_domain_ = nullptr;
+ return OK;
+}
+
+// Gets the data surrounding the given class.
+static jvmtiError GetDexDataForRetransformation(ArtJvmTiEnv* env,
+ art::Handle<art::mirror::Class> klass,
+ /*out*/jint* dex_data_len,
+ /*out*/unsigned char** dex_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::StackHandleScope<3> hs(art::Thread::Current());
+ art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->GetExtData()));
+ const art::DexFile* dex_file = nullptr;
+ if (!ext.IsNull()) {
+ art::Handle<art::mirror::Object> orig_dex(hs.NewHandle(ext->GetOriginalDexFile()));
+ if (!orig_dex.IsNull()) {
+ if (orig_dex->IsArrayInstance()) {
+ DCHECK(orig_dex->GetClass()->GetComponentType()->IsPrimitiveByte());
+ art::Handle<art::mirror::ByteArray> orig_dex_bytes(
+ hs.NewHandle(art::down_cast<art::mirror::ByteArray*>(orig_dex->AsArray())));
+ *dex_data_len = static_cast<jint>(orig_dex_bytes->GetLength());
+ return CopyDataIntoJvmtiBuffer(
+ env,
+ reinterpret_cast<const unsigned char*>(orig_dex_bytes->GetData()),
+ *dex_data_len,
+ /*out*/dex_data);
+ } else if (orig_dex->IsDexCache()) {
+ dex_file = orig_dex->AsDexCache()->GetDexFile();
+ } else {
+ DCHECK_EQ(orig_dex->GetClass()->GetPrimitiveType(), art::Primitive::kPrimLong);
+ art::ObjPtr<art::mirror::Class> prim_long_class(
+ art::Runtime::Current()->GetClassLinker()->GetClassRoot(
+ art::ClassLinker::kPrimitiveLong));
+ art::JValue val;
+ if (!art::UnboxPrimitiveForResult(orig_dex.Get(), prim_long_class, &val)) {
+ // This should never happen.
+ return ERR(INTERNAL);
+ }
+ dex_file = reinterpret_cast<const art::DexFile*>(static_cast<uintptr_t>(val.GetJ()));
+ }
+ }
+ }
+ if (dex_file == nullptr) {
+ dex_file = &klass->GetDexFile();
+ }
+ std::unique_ptr<FixedUpDexFile> fixed_dex_file(FixedUpDexFile::Create(*dex_file));
+ *dex_data_len = static_cast<jint>(fixed_dex_file->Size());
+ return CopyDataIntoJvmtiBuffer(env,
+ fixed_dex_file->Begin(),
+ fixed_dex_file->Size(),
+ /*out*/dex_data);
+}
+
+jvmtiError ArtClassDefinition::Init(ArtJvmTiEnv* env, jclass klass) {
+ jvmtiError res = InitCommon(env, klass);
+ if (res != OK) {
+ return res;
+ }
+ unsigned char* new_data = nullptr;
+ art::Thread* self = art::Thread::Current();
+ art::ScopedObjectAccess soa(self);
art::StackHandleScope<1> hs(self);
- art::Handle<art::mirror::Class> h_klass(hs.NewHandle(self->DecodeJObject(klass)->AsClass()));
- const art::DexFile& cur_dex_file = h_klass->GetDexFile();
- return static_cast<jint>(cur_dex_file.Size()) != dex_len ||
- memcmp(cur_dex_file.Begin(), dex_data.get(), dex_len) != 0;
+ art::Handle<art::mirror::Class> m_klass(hs.NewHandle(self->DecodeJObject(klass)->AsClass()));
+ res = GetDexDataForRetransformation(env, m_klass, &dex_len_, &new_data);
+ if (res != OK) {
+ return res;
+ }
+ dex_data_ = MakeJvmtiUniquePtr(env, new_data);
+ if (m_klass->GetExtData() == nullptr || m_klass->GetExtData()->GetOriginalDexFile() == nullptr) {
+    // We have never redefined this class yet. Keep track of what the (de-quickened) dex file looks
+ // like so we can tell if anything has changed. Really we would like to just always do the
+ // 'else' block but the fact that we de-quickened stuff screws us over.
+ unsigned char* original_data_memory = nullptr;
+ res = CopyDataIntoJvmtiBuffer(env, dex_data_.get(), dex_len_, &original_data_memory);
+ original_dex_file_memory_ = MakeJvmtiUniquePtr(env, original_data_memory);
+ original_dex_file_ = art::ArraySlice<const unsigned char>(original_data_memory, dex_len_);
+ } else {
+ // We know that we have been redefined at least once (there is an original_dex_file set in
+ // the class) so we can just use the current dex file directly.
+ const art::DexFile& dex_file = m_klass->GetDexFile();
+ original_dex_file_ = art::ArraySlice<const unsigned char>(dex_file.Begin(), dex_file.Size());
+ }
+ return res;
+}
+
+jvmtiError ArtClassDefinition::Init(ArtJvmTiEnv* env, const jvmtiClassDefinition& def) {
+ jvmtiError res = InitCommon(env, def.klass);
+ if (res != OK) {
+ return res;
+ }
+ unsigned char* new_data = nullptr;
+ original_dex_file_ = art::ArraySlice<const unsigned char>(def.class_bytes, def.class_byte_count);
+ redefined_ = true;
+ dex_len_ = def.class_byte_count;
+ res = CopyDataIntoJvmtiBuffer(env, def.class_bytes, def.class_byte_count, /*out*/ &new_data);
+ dex_data_ = MakeJvmtiUniquePtr(env, new_data);
+ return res;
}
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_class_definition.h b/runtime/openjdkjvmti/ti_class_definition.h
index 3c251d4b44..43d0c3fc62 100644
--- a/runtime/openjdkjvmti/ti_class_definition.h
+++ b/runtime/openjdkjvmti/ti_class_definition.h
@@ -39,37 +39,89 @@ namespace openjdkjvmti {
// A struct that stores data needed for redefining/transforming classes. This structure should only
 // ever be accessed from a single thread and must not survive past the completion of the
// redefinition/retransformation function that created it.
-struct ArtClassDefinition {
+class ArtClassDefinition {
public:
- jclass klass;
- jobject loader;
- std::string name;
- jobject protection_domain;
- jint dex_len;
- JvmtiUniquePtr<unsigned char> dex_data;
- art::ArraySlice<const unsigned char> original_dex_file;
-
- ArtClassDefinition() = default;
+ ArtClassDefinition()
+ : klass_(nullptr),
+ loader_(nullptr),
+ name_(),
+ protection_domain_(nullptr),
+ dex_len_(0),
+ dex_data_(nullptr),
+ original_dex_file_memory_(nullptr),
+ original_dex_file_(),
+ redefined_(false) {}
+
+ jvmtiError Init(ArtJvmTiEnv* env, jclass klass);
+ jvmtiError Init(ArtJvmTiEnv* env, const jvmtiClassDefinition& def);
+
ArtClassDefinition(ArtClassDefinition&& o) = default;
+ ArtClassDefinition& operator=(ArtClassDefinition&& o) = default;
void SetNewDexData(ArtJvmTiEnv* env, jint new_dex_len, unsigned char* new_dex_data) {
+ DCHECK(IsInitialized());
if (new_dex_data == nullptr) {
return;
- } else if (new_dex_data != dex_data.get() || new_dex_len != dex_len) {
- SetModified();
- dex_len = new_dex_len;
- dex_data = MakeJvmtiUniquePtr(env, new_dex_data);
+ } else if (new_dex_data != dex_data_.get() || new_dex_len != dex_len_) {
+ dex_len_ = new_dex_len;
+ dex_data_ = MakeJvmtiUniquePtr(env, new_dex_data);
}
}
- void SetModified() {
- modified = true;
+ art::ArraySlice<const unsigned char> GetNewOriginalDexFile() const {
+ DCHECK(IsInitialized());
+ if (redefined_) {
+ return original_dex_file_;
+ } else {
+ return art::ArraySlice<const unsigned char>();
+ }
}
- bool IsModified(art::Thread* self) const REQUIRES_SHARED(art::Locks::mutator_lock_);
+ bool IsModified() const;
+
+ bool IsInitialized() const {
+ return klass_ != nullptr;
+ }
+
+ jclass GetClass() const {
+ DCHECK(IsInitialized());
+ return klass_;
+ }
+
+ jobject GetLoader() const {
+ DCHECK(IsInitialized());
+ return loader_;
+ }
+
+ const std::string& GetName() const {
+ DCHECK(IsInitialized());
+ return name_;
+ }
+
+ jobject GetProtectionDomain() const {
+ DCHECK(IsInitialized());
+ return protection_domain_;
+ }
+
+ art::ArraySlice<const unsigned char> GetDexData() const {
+ DCHECK(IsInitialized());
+ return art::ArraySlice<const unsigned char>(dex_data_.get(), dex_len_);
+ }
private:
- bool modified;
+ jvmtiError InitCommon(ArtJvmTiEnv* env, jclass klass);
+
+ jclass klass_;
+ jobject loader_;
+ std::string name_;
+ jobject protection_domain_;
+ jint dex_len_;
+ JvmtiUniquePtr<unsigned char> dex_data_;
+ JvmtiUniquePtr<unsigned char> original_dex_file_memory_;
+ art::ArraySlice<const unsigned char> original_dex_file_;
+ bool redefined_;
+
+ DISALLOW_COPY_AND_ASSIGN(ArtClassDefinition);
};
} // namespace openjdkjvmti
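
For reference, the intended call pattern for the reworked ArtClassDefinition, condensed
from the ti_redefine.cc hunks later in this patch (error handling abbreviated;
"redefiner" stands in for the Redefiner instance used there):

    ArtClassDefinition def;
    // RedefineClasses path: initialize from the caller-supplied jvmtiClassDefinition,
    // which marks the definition as redefined_ so IsModified() is always true.
    jvmtiError res = def.Init(env, definitions[i]);
    // (The RetransformClasses path would instead call def.Init(env, klass), which
    //  snapshots the de-quickened dex data so IsModified() can compare against it.)
    if (res != OK) {
      return res;
    }
    if (def.IsModified()) {
      // Only definitions whose dex bytes actually differ are turned into redefinitions;
      // AddRedefinition() consumes GetClass(), GetName(), GetDexData() and friends.
      res = redefiner.AddRedefinition(env, def);
    }
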
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index c7294a9b8d..49d9aca46e 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -25,6 +25,7 @@
#include "gc_root-inl.h"
#include "jni_env_ext.h"
#include "jni_internal.h"
+#include "jvmti_weak_table-inl.h"
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
@@ -41,6 +42,21 @@ namespace openjdkjvmti {
namespace {
+struct IndexCache {
+ // The number of interface fields implemented by the class. This is a prefix to all assigned
+ // field indices.
+ size_t interface_fields;
+
+ // It would be nice to also cache the following, but it is complicated to wire up into the
+ // generic visit:
+ // The number of fields in interfaces and superclasses. This is the first index assigned to
+ // fields of the class.
+ // size_t superclass_fields;
+};
+using IndexCachingTable = JvmtiWeakTable<IndexCache>;
+
+static IndexCachingTable gIndexCachingTable;
+
// Report the contents of a string, if a callback is set.
jint ReportString(art::ObjPtr<art::mirror::Object> obj,
jvmtiEnv* env,
@@ -162,6 +178,433 @@ jint ReportPrimitiveArray(art::ObjPtr<art::mirror::Object> obj,
return 0;
}
+template <typename UserData>
+bool VisitorFalse(art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED,
+ art::ObjPtr<art::mirror::Class> klass ATTRIBUTE_UNUSED,
+ art::ArtField& field ATTRIBUTE_UNUSED,
+ size_t field_index ATTRIBUTE_UNUSED,
+ UserData* user_data ATTRIBUTE_UNUSED) {
+ return false;
+}
+
+template <typename UserData, bool kCallVisitorOnRecursion>
+class FieldVisitor {
+ public:
+  // Report the contents of the primitive fields of the given object, if a callback is set.
+ template <typename StaticPrimitiveVisitor,
+ typename StaticReferenceVisitor,
+ typename InstancePrimitiveVisitor,
+ typename InstanceReferenceVisitor>
+ static bool ReportFields(art::ObjPtr<art::mirror::Object> obj,
+ UserData* user_data,
+ StaticPrimitiveVisitor& static_prim_visitor,
+ StaticReferenceVisitor& static_ref_visitor,
+ InstancePrimitiveVisitor& instance_prim_visitor,
+ InstanceReferenceVisitor& instance_ref_visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ FieldVisitor fv(user_data);
+
+ if (obj->IsClass()) {
+ // When visiting a class, we only visit the static fields of the given class. No field of
+ // superclasses is visited.
+ art::ObjPtr<art::mirror::Class> klass = obj->AsClass();
+ // Only report fields on resolved classes. We need valid field data.
+ if (!klass->IsResolved()) {
+ return false;
+ }
+ return fv.ReportFieldsImpl(nullptr,
+ obj->AsClass(),
+ obj->AsClass()->IsInterface(),
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor);
+ } else {
+ // See comment above. Just double-checking here, but an instance *should* mean the class was
+ // resolved.
+ DCHECK(obj->GetClass()->IsResolved() || obj->GetClass()->IsErroneousResolved());
+ return fv.ReportFieldsImpl(obj,
+ obj->GetClass(),
+ false,
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor);
+ }
+ }
+
+ private:
+ explicit FieldVisitor(UserData* user_data) : user_data_(user_data) {}
+
+ // Report the contents of fields of the given object. If obj is null, report the static fields,
+ // otherwise the instance fields.
+ template <typename StaticPrimitiveVisitor,
+ typename StaticReferenceVisitor,
+ typename InstancePrimitiveVisitor,
+ typename InstanceReferenceVisitor>
+ bool ReportFieldsImpl(art::ObjPtr<art::mirror::Object> obj,
+ art::ObjPtr<art::mirror::Class> klass,
+ bool skip_java_lang_object,
+ StaticPrimitiveVisitor& static_prim_visitor,
+ StaticReferenceVisitor& static_ref_visitor,
+ InstancePrimitiveVisitor& instance_prim_visitor,
+ InstanceReferenceVisitor& instance_ref_visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Compute the offset of field indices.
+ size_t interface_field_count = CountInterfaceFields(klass);
+
+ size_t tmp;
+ bool aborted = ReportFieldsRecursive(obj,
+ klass,
+ interface_field_count,
+ skip_java_lang_object,
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor,
+ &tmp);
+ return aborted;
+ }
+
+  // Visit the fields of the given class and its superclasses. Return true if the visit was aborted.
+ template <typename StaticPrimitiveVisitor,
+ typename StaticReferenceVisitor,
+ typename InstancePrimitiveVisitor,
+ typename InstanceReferenceVisitor>
+ bool ReportFieldsRecursive(art::ObjPtr<art::mirror::Object> obj,
+ art::ObjPtr<art::mirror::Class> klass,
+ size_t interface_fields,
+ bool skip_java_lang_object,
+ StaticPrimitiveVisitor& static_prim_visitor,
+ StaticReferenceVisitor& static_ref_visitor,
+ InstancePrimitiveVisitor& instance_prim_visitor,
+ InstanceReferenceVisitor& instance_ref_visitor,
+ size_t* field_index_out)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK(klass != nullptr);
+ size_t field_index;
+ if (klass->GetSuperClass() == nullptr) {
+ // j.l.Object. Start with the fields from interfaces.
+ field_index = interface_fields;
+ if (skip_java_lang_object) {
+ *field_index_out = field_index;
+ return false;
+ }
+ } else {
+ // Report superclass fields.
+ if (kCallVisitorOnRecursion) {
+ if (ReportFieldsRecursive(obj,
+ klass->GetSuperClass(),
+ interface_fields,
+ skip_java_lang_object,
+ static_prim_visitor,
+ static_ref_visitor,
+ instance_prim_visitor,
+ instance_ref_visitor,
+ &field_index)) {
+ return true;
+ }
+ } else {
+ // Still call, but with empty visitor. This is required for correct counting.
+ ReportFieldsRecursive(obj,
+ klass->GetSuperClass(),
+ interface_fields,
+ skip_java_lang_object,
+ VisitorFalse<UserData>,
+ VisitorFalse<UserData>,
+ VisitorFalse<UserData>,
+ VisitorFalse<UserData>,
+ &field_index);
+ }
+ }
+
+ // Now visit fields for the current klass.
+
+ for (auto& static_field : klass->GetSFields()) {
+ if (static_field.IsPrimitiveType()) {
+ if (static_prim_visitor(obj,
+ klass,
+ static_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ } else {
+ if (static_ref_visitor(obj,
+ klass,
+ static_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ }
+ field_index++;
+ }
+
+ for (auto& instance_field : klass->GetIFields()) {
+ if (instance_field.IsPrimitiveType()) {
+ if (instance_prim_visitor(obj,
+ klass,
+ instance_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ } else {
+ if (instance_ref_visitor(obj,
+ klass,
+ instance_field,
+ field_index,
+ user_data_)) {
+ return true;
+ }
+ }
+ field_index++;
+ }
+
+ *field_index_out = field_index;
+ return false;
+ }
+
+ // Implements a visit of the implemented interfaces of a given class.
+ template <typename T>
+ struct RecursiveInterfaceVisit {
+ static void VisitStatic(art::Thread* self, art::ObjPtr<art::mirror::Class> klass, T& visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ RecursiveInterfaceVisit rv;
+ rv.Visit(self, klass, visitor);
+ }
+
+ void Visit(art::Thread* self, art::ObjPtr<art::mirror::Class> klass, T& visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // First visit the parent, to get the order right.
+ // (We do this in preparation for actual visiting of interface fields.)
+ if (klass->GetSuperClass() != nullptr) {
+ Visit(self, klass->GetSuperClass(), visitor);
+ }
+ for (uint32_t i = 0; i != klass->NumDirectInterfaces(); ++i) {
+ art::ObjPtr<art::mirror::Class> inf_klass =
+ art::mirror::Class::GetDirectInterface(self, klass, i);
+ DCHECK(inf_klass != nullptr);
+ VisitInterface(self, inf_klass, visitor);
+ }
+ }
+
+ void VisitInterface(art::Thread* self, art::ObjPtr<art::mirror::Class> inf_klass, T& visitor)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ auto it = visited_interfaces.find(inf_klass.Ptr());
+ if (it != visited_interfaces.end()) {
+ return;
+ }
+ visited_interfaces.insert(inf_klass.Ptr());
+
+ // Let the visitor know about this one. Note that this order is acceptable, as the ordering
+ // of these fields never matters for known visitors.
+ visitor(inf_klass);
+
+ // Now visit the superinterfaces.
+ for (uint32_t i = 0; i != inf_klass->NumDirectInterfaces(); ++i) {
+ art::ObjPtr<art::mirror::Class> super_inf_klass =
+ art::mirror::Class::GetDirectInterface(self, inf_klass, i);
+ DCHECK(super_inf_klass != nullptr);
+ VisitInterface(self, super_inf_klass, visitor);
+ }
+ }
+
+ std::unordered_set<art::mirror::Class*> visited_interfaces;
+ };
+
+ // Counting interface fields. Note that we cannot use the interface table, as that only contains
+ // "non-marker" interfaces (= interfaces with methods).
+ static size_t CountInterfaceFields(art::ObjPtr<art::mirror::Class> klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ // Do we have a cached value?
+ IndexCache tmp;
+ if (gIndexCachingTable.GetTag(klass.Ptr(), &tmp)) {
+ return tmp.interface_fields;
+ }
+
+ size_t count = 0;
+ auto visitor = [&count](art::ObjPtr<art::mirror::Class> inf_klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK(inf_klass->IsInterface());
+ DCHECK_EQ(0u, inf_klass->NumInstanceFields());
+ count += inf_klass->NumStaticFields();
+ };
+ RecursiveInterfaceVisit<decltype(visitor)>::VisitStatic(art::Thread::Current(), klass, visitor);
+
+ // Store this into the cache.
+ tmp.interface_fields = count;
+ gIndexCachingTable.Set(klass.Ptr(), tmp);
+
+ return count;
+ }
+
+ UserData* user_data_;
+};
+
+// Debug helper. Prints the structure of an object.
+template <bool kStatic, bool kRef>
+struct DumpVisitor {
+ static bool Callback(art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED,
+ art::ObjPtr<art::mirror::Class> klass ATTRIBUTE_UNUSED,
+ art::ArtField& field,
+ size_t field_index,
+ void* user_data ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ LOG(ERROR) << (kStatic ? "static " : "instance ")
+ << (kRef ? "ref " : "primitive ")
+ << field.PrettyField()
+ << " @ "
+ << field_index;
+ return false;
+ }
+};
+ATTRIBUTE_UNUSED
+void DumpObjectFields(art::ObjPtr<art::mirror::Object> obj)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (obj->IsClass()) {
+ FieldVisitor<void, false>:: ReportFields(obj,
+ nullptr,
+ DumpVisitor<true, false>::Callback,
+ DumpVisitor<true, true>::Callback,
+ DumpVisitor<false, false>::Callback,
+ DumpVisitor<false, true>::Callback);
+ } else {
+ FieldVisitor<void, true>::ReportFields(obj,
+ nullptr,
+ DumpVisitor<true, false>::Callback,
+ DumpVisitor<true, true>::Callback,
+ DumpVisitor<false, false>::Callback,
+ DumpVisitor<false, true>::Callback);
+ }
+}
+
+class ReportPrimitiveField {
+ public:
+ static bool Report(art::ObjPtr<art::mirror::Object> obj,
+ ObjectTagTable* tag_table,
+ const jvmtiHeapCallbacks* cb,
+ const void* user_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (UNLIKELY(cb->primitive_field_callback != nullptr)) {
+ jlong class_tag = tag_table->GetTagOrZero(obj->GetClass());
+ ReportPrimitiveField rpf(tag_table, class_tag, cb, user_data);
+ if (obj->IsClass()) {
+ return FieldVisitor<ReportPrimitiveField, false>::ReportFields(
+ obj,
+ &rpf,
+ ReportPrimitiveFieldCallback<true>,
+ VisitorFalse<ReportPrimitiveField>,
+ VisitorFalse<ReportPrimitiveField>,
+ VisitorFalse<ReportPrimitiveField>);
+ } else {
+ return FieldVisitor<ReportPrimitiveField, true>::ReportFields(
+ obj,
+ &rpf,
+ VisitorFalse<ReportPrimitiveField>,
+ VisitorFalse<ReportPrimitiveField>,
+ ReportPrimitiveFieldCallback<false>,
+ VisitorFalse<ReportPrimitiveField>);
+ }
+ }
+ return false;
+ }
+
+
+ private:
+ ReportPrimitiveField(ObjectTagTable* tag_table,
+ jlong class_tag,
+ const jvmtiHeapCallbacks* cb,
+ const void* user_data)
+ : tag_table_(tag_table), class_tag_(class_tag), cb_(cb), user_data_(user_data) {}
+
+ template <bool kReportStatic>
+ static bool ReportPrimitiveFieldCallback(art::ObjPtr<art::mirror::Object> obj,
+ art::ObjPtr<art::mirror::Class> klass,
+ art::ArtField& field,
+ size_t field_index,
+ ReportPrimitiveField* user_data)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::Primitive::Type art_prim_type = field.GetTypeAsPrimitiveType();
+ jvmtiPrimitiveType prim_type =
+ static_cast<jvmtiPrimitiveType>(art::Primitive::Descriptor(art_prim_type)[0]);
+ DCHECK(prim_type == JVMTI_PRIMITIVE_TYPE_BOOLEAN ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_BYTE ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_CHAR ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_SHORT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_INT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_LONG ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_FLOAT ||
+ prim_type == JVMTI_PRIMITIVE_TYPE_DOUBLE);
+ jvmtiHeapReferenceInfo info;
+ info.field.index = field_index;
+
+ jvalue value;
+ memset(&value, 0, sizeof(jvalue));
+ art::ObjPtr<art::mirror::Object> src = kReportStatic ? klass : obj;
+ switch (art_prim_type) {
+ case art::Primitive::Type::kPrimBoolean:
+ value.z = field.GetBoolean(src) == 0 ? JNI_FALSE : JNI_TRUE;
+ break;
+ case art::Primitive::Type::kPrimByte:
+ value.b = field.GetByte(src);
+ break;
+ case art::Primitive::Type::kPrimChar:
+ value.c = field.GetChar(src);
+ break;
+ case art::Primitive::Type::kPrimShort:
+ value.s = field.GetShort(src);
+ break;
+ case art::Primitive::Type::kPrimInt:
+ value.i = field.GetInt(src);
+ break;
+ case art::Primitive::Type::kPrimLong:
+ value.j = field.GetLong(src);
+ break;
+ case art::Primitive::Type::kPrimFloat:
+ value.f = field.GetFloat(src);
+ break;
+ case art::Primitive::Type::kPrimDouble:
+ value.d = field.GetDouble(src);
+ break;
+ case art::Primitive::Type::kPrimVoid:
+ case art::Primitive::Type::kPrimNot: {
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+ }
+ }
+
+ jlong obj_tag = user_data->tag_table_->GetTagOrZero(src.Ptr());
+ const jlong saved_obj_tag = obj_tag;
+
+ jint ret = user_data->cb_->primitive_field_callback(kReportStatic
+ ? JVMTI_HEAP_REFERENCE_STATIC_FIELD
+ : JVMTI_HEAP_REFERENCE_FIELD,
+ &info,
+ user_data->class_tag_,
+ &obj_tag,
+ value,
+ prim_type,
+ const_cast<void*>(user_data->user_data_));
+
+ if (saved_obj_tag != obj_tag) {
+ user_data->tag_table_->Set(src.Ptr(), obj_tag);
+ }
+
+ if ((ret & JVMTI_VISIT_ABORT) != 0) {
+ return true;
+ }
+
+ return false;
+ }
+
+ ObjectTagTable* tag_table_;
+ jlong class_tag_;
+ const jvmtiHeapCallbacks* cb_;
+ const void* user_data_;
+};
+
struct HeapFilter {
explicit HeapFilter(jint heap_filter)
: filter_out_tagged((heap_filter & JVMTI_HEAP_FILTER_TAGGED) != 0),
@@ -200,6 +643,14 @@ struct HeapFilter {
} // namespace
+void HeapUtil::Register() {
+ art::Runtime::Current()->AddSystemWeakHolder(&gIndexCachingTable);
+}
+
+void HeapUtil::Unregister() {
+ art::Runtime::Current()->RemoveSystemWeakHolder(&gIndexCachingTable);
+}
+
struct IterateThroughHeapData {
IterateThroughHeapData(HeapUtil* _heap_util,
jvmtiEnv* _env,
@@ -292,7 +743,12 @@ static void IterateThroughHeapObjectCallback(art::mirror::Object* obj, void* arg
ithd->stop_reports = (array_ret & JVMTI_VISIT_ABORT) != 0;
}
- // TODO Implement primitive field callback.
+ if (!ithd->stop_reports) {
+ ithd->stop_reports = ReportPrimitiveField::Report(obj,
+ ithd->heap_util->GetTags(),
+ ithd->callbacks,
+ ithd->user_data);
+ }
}
jvmtiError HeapUtil::IterateThroughHeap(jvmtiEnv* env,
@@ -426,12 +882,17 @@ class FollowReferencesHelper FINAL {
void AddRoot(art::mirror::Object* root_obj, const art::RootInfo& info)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+ if (stop_reports_) {
+ return;
+ }
+ bool add_to_worklist = ReportRoot(root_obj, info);
// We use visited_ to mark roots already so we do not need another set.
if (visited_->find(root_obj) == visited_->end()) {
visited_->insert(root_obj);
- worklist_->push_back(root_obj);
+ if (add_to_worklist) {
+ worklist_->push_back(root_obj);
+ }
}
- ReportRoot(root_obj, info);
}
// Remove NO_THREAD_SAFETY_ANALYSIS once ASSERT_CAPABILITY works correctly.
@@ -537,7 +998,7 @@ class FollowReferencesHelper FINAL {
UNREACHABLE();
}
- void ReportRoot(art::mirror::Object* root_obj, const art::RootInfo& info)
+ bool ReportRoot(art::mirror::Object* root_obj, const art::RootInfo& info)
REQUIRES_SHARED(art::Locks::mutator_lock_)
REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
jvmtiHeapReferenceInfo ref_info;
@@ -546,6 +1007,7 @@ class FollowReferencesHelper FINAL {
if ((result & JVMTI_VISIT_ABORT) != 0) {
stop_reports_ = true;
}
+ return (result & JVMTI_VISIT_OBJECTS) != 0;
}
private:
@@ -568,64 +1030,49 @@ class FollowReferencesHelper FINAL {
return;
}
- // TODO: We'll probably have to rewrite this completely with our own visiting logic, if we
- // want to have a chance of getting the field indices computed halfway efficiently. For
- // now, ignore them altogether.
-
- struct InstanceReferenceVisitor {
- explicit InstanceReferenceVisitor(FollowReferencesHelper* helper_)
- : helper(helper_), stop_reports(false) {}
-
- void operator()(art::mirror::Object* src,
- art::MemberOffset field_offset,
- bool is_static ATTRIBUTE_UNUSED) const
- REQUIRES_SHARED(art::Locks::mutator_lock_)
- REQUIRES(!*helper->tag_table_->GetAllowDisallowLock()) {
- if (stop_reports) {
- return;
- }
-
- art::mirror::Object* trg = src->GetFieldObjectReferenceAddr(field_offset)->AsMirrorPtr();
+ // All instance fields.
+ auto report_instance_field = [&](art::ObjPtr<art::mirror::Object> src,
+ art::ObjPtr<art::mirror::Class> obj_klass ATTRIBUTE_UNUSED,
+ art::ArtField& field,
+ size_t field_index,
+ void* user_data ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+ art::ObjPtr<art::mirror::Object> field_value = field.GetObject(src);
+ if (field_value != nullptr) {
jvmtiHeapReferenceInfo reference_info;
memset(&reference_info, 0, sizeof(reference_info));
- // TODO: Implement spec-compliant numbering.
- reference_info.field.index = field_offset.Int32Value();
+ reference_info.field.index = field_index;
jvmtiHeapReferenceKind kind =
- field_offset.Int32Value() == art::mirror::Object::ClassOffset().Int32Value()
+ field.GetOffset().Int32Value() == art::mirror::Object::ClassOffset().Int32Value()
? JVMTI_HEAP_REFERENCE_CLASS
: JVMTI_HEAP_REFERENCE_FIELD;
const jvmtiHeapReferenceInfo* reference_info_ptr =
kind == JVMTI_HEAP_REFERENCE_CLASS ? nullptr : &reference_info;
- stop_reports = !helper->ReportReferenceMaybeEnqueue(kind, reference_info_ptr, src, trg);
- }
-
- void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED)
- const {
- LOG(FATAL) << "Unreachable";
+ return !ReportReferenceMaybeEnqueue(kind, reference_info_ptr, src.Ptr(), field_value.Ptr());
}
- void VisitRootIfNonNull(
- art::mirror::CompressedReference<art::mirror::Object>* root ATTRIBUTE_UNUSED) const {
- LOG(FATAL) << "Unreachable";
- }
-
- // "mutable" required by the visitor API.
- mutable FollowReferencesHelper* helper;
- mutable bool stop_reports;
+ return false;
};
+ stop_reports_ = FieldVisitor<void, true>::ReportFields(obj,
+ nullptr,
+ VisitorFalse<void>,
+ VisitorFalse<void>,
+ VisitorFalse<void>,
+ report_instance_field);
+ if (stop_reports_) {
+ return;
+ }
- InstanceReferenceVisitor visitor(this);
- // Visit references, not native roots.
- obj->VisitReferences<false>(visitor, art::VoidFunctor());
-
- stop_reports_ = visitor.stop_reports;
-
- if (!stop_reports_) {
- jint string_ret = ReportString(obj, env, tag_table_, callbacks_, user_data_);
- stop_reports_ = (string_ret & JVMTI_VISIT_ABORT) != 0;
+ jint string_ret = ReportString(obj, env, tag_table_, callbacks_, user_data_);
+ stop_reports_ = (string_ret & JVMTI_VISIT_ABORT) != 0;
+ if (stop_reports_) {
+ return;
}
+
+ stop_reports_ = ReportPrimitiveField::Report(obj, tag_table_, callbacks_, user_data_);
}
void VisitArray(art::mirror::Object* array)
@@ -719,26 +1166,38 @@ class FollowReferencesHelper FINAL {
DCHECK_EQ(h_klass.Get(), klass);
// Declared static fields.
- for (auto& field : klass->GetSFields()) {
- if (!field.IsPrimitiveType()) {
- art::ObjPtr<art::mirror::Object> field_value = field.GetObject(klass);
- if (field_value != nullptr) {
- jvmtiHeapReferenceInfo reference_info;
- memset(&reference_info, 0, sizeof(reference_info));
+ auto report_static_field = [&](art::ObjPtr<art::mirror::Object> obj ATTRIBUTE_UNUSED,
+ art::ObjPtr<art::mirror::Class> obj_klass,
+ art::ArtField& field,
+ size_t field_index,
+ void* user_data ATTRIBUTE_UNUSED)
+ REQUIRES_SHARED(art::Locks::mutator_lock_)
+ REQUIRES(!*tag_table_->GetAllowDisallowLock()) {
+ art::ObjPtr<art::mirror::Object> field_value = field.GetObject(obj_klass);
+ if (field_value != nullptr) {
+ jvmtiHeapReferenceInfo reference_info;
+ memset(&reference_info, 0, sizeof(reference_info));
- // TODO: Implement spec-compliant numbering.
- reference_info.field.index = field.GetOffset().Int32Value();
+ reference_info.field.index = static_cast<jint>(field_index);
- stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_STATIC_FIELD,
- &reference_info,
- klass,
- field_value.Ptr());
- if (stop_reports_) {
- return;
- }
- }
+ return !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_STATIC_FIELD,
+ &reference_info,
+ obj_klass.Ptr(),
+ field_value.Ptr());
}
+ return false;
+ };
+ stop_reports_ = FieldVisitor<void, false>::ReportFields(klass,
+ nullptr,
+ VisitorFalse<void>,
+ report_static_field,
+ VisitorFalse<void>,
+ VisitorFalse<void>);
+ if (stop_reports_) {
+ return;
}
+
+ stop_reports_ = ReportPrimitiveField::Report(klass, tag_table_, callbacks_, user_data_);
}
void MaybeEnqueue(art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
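
The field indices reported above follow the numbering FieldVisitor implements: fields
of implemented interfaces form a reserved prefix, then superclass fields, then the
class's own static fields followed by its instance fields. A small worked example under
an assumed hierarchy (java.lang.Object's own fields are ignored for brevity):

    // Illustrative hierarchy, not taken from the patch:
    //   interface I { static int SF; }
    //   class A implements I { static int AS; int ai; }
    //   class B extends A    { static int BS; int bi; }
    //
    // Visiting an instance of B assigns:
    //   index 0 : I.SF  (interface prefix; this visitor only counts it, via CountInterfaceFields)
    //   index 1 : A.AS  (per class: static fields first, then instance fields)
    //   index 2 : A.ai
    //   index 3 : B.BS
    //   index 4 : B.bi
    //
    // Visiting the class object B uses FieldVisitor<..., kCallVisitorOnRecursion = false>:
    // only B's own fields reach the callbacks, but interface and superclass fields are
    // still counted, so B.BS is still reported with index 3.
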
diff --git a/runtime/openjdkjvmti/ti_heap.h b/runtime/openjdkjvmti/ti_heap.h
index 72ee097566..dccecb4aa3 100644
--- a/runtime/openjdkjvmti/ti_heap.h
+++ b/runtime/openjdkjvmti/ti_heap.h
@@ -49,6 +49,9 @@ class HeapUtil {
return tags_;
}
+ static void Register();
+ static void Unregister();
+
private:
ObjectTagTable* tags_;
};
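
HeapUtil::Register()/Unregister() exist because the new gIndexCachingTable in ti_heap.cc
holds weak GC roots and therefore has to be announced to the runtime as a system-weak
holder. A minimal sketch of the lookup/update pattern it supports, condensed from the
CountInterfaceFields hunk above (the helper name is invented for illustration):

    // Hypothetical helper mirroring the caching done by CountInterfaceFields.
    size_t LookupInterfaceFieldCount(art::ObjPtr<art::mirror::Class> klass)
        REQUIRES_SHARED(art::Locks::mutator_lock_) {
      IndexCache entry;
      if (gIndexCachingTable.GetTag(klass.Ptr(), &entry)) {
        return entry.interface_fields;             // cache hit
      }
      entry.interface_fields = 0;                  // recompute via RecursiveInterfaceVisit here
      gIndexCachingTable.Set(klass.Ptr(), entry);  // cache miss: remember for next time
      return entry.interface_fields;
    }

    // Lifecycle, exactly as the HeapUtil hunk above wires it up:
    //   Register()   -> art::Runtime::Current()->AddSystemWeakHolder(&gIndexCachingTable);
    //   Unregister() -> art::Runtime::Current()->RemoveSystemWeakHolder(&gIndexCachingTable);
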
diff --git a/runtime/openjdkjvmti/ti_phase.cc b/runtime/openjdkjvmti/ti_phase.cc
index e494cb6530..941cf7b73b 100644
--- a/runtime/openjdkjvmti/ti_phase.cc
+++ b/runtime/openjdkjvmti/ti_phase.cc
@@ -40,6 +40,7 @@
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
+#include "ti_thread.h"
namespace openjdkjvmti {
@@ -69,6 +70,7 @@ struct PhaseUtil::PhaseCallback : public art::RuntimePhaseCallback {
break;
case RuntimePhase::kInit:
{
+ ThreadUtil::CacheData();
ScopedLocalRef<jthread> thread(GetJniEnv(), GetCurrentJThread());
art::ScopedThreadSuspension sts(art::Thread::Current(), art::ThreadState::kNative);
event_handler->DispatchEvent<ArtJvmtiEvent::kVmInit>(nullptr, GetJniEnv(), thread.get());
@@ -105,6 +107,16 @@ jvmtiError PhaseUtil::GetPhase(jvmtiEnv* env ATTRIBUTE_UNUSED, jvmtiPhase* phase
return ERR(NONE);
}
+bool PhaseUtil::IsLivePhase() {
+ jvmtiPhase now = PhaseUtil::current_phase_;
+ DCHECK(now == JVMTI_PHASE_ONLOAD ||
+ now == JVMTI_PHASE_PRIMORDIAL ||
+ now == JVMTI_PHASE_START ||
+ now == JVMTI_PHASE_LIVE ||
+ now == JVMTI_PHASE_DEAD);
+ return now == JVMTI_PHASE_LIVE;
+}
+
void PhaseUtil::SetToOnLoad() {
DCHECK_EQ(0u, static_cast<size_t>(PhaseUtil::current_phase_));
PhaseUtil::current_phase_ = JVMTI_PHASE_ONLOAD;
@@ -117,6 +129,7 @@ void PhaseUtil::SetToPrimordial() {
void PhaseUtil::SetToLive() {
DCHECK_EQ(static_cast<size_t>(0), static_cast<size_t>(PhaseUtil::current_phase_));
+ ThreadUtil::CacheData();
PhaseUtil::current_phase_ = JVMTI_PHASE_LIVE;
}
diff --git a/runtime/openjdkjvmti/ti_phase.h b/runtime/openjdkjvmti/ti_phase.h
index 851fc27de5..a2c0d114ef 100644
--- a/runtime/openjdkjvmti/ti_phase.h
+++ b/runtime/openjdkjvmti/ti_phase.h
@@ -42,6 +42,7 @@ class EventHandler;
class PhaseUtil {
public:
static jvmtiError GetPhase(jvmtiEnv* env, jvmtiPhase* phase_ptr);
+ static bool IsLivePhase();
static void Register(EventHandler* event_handler);
static void Unregister();
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index c4d20c007e..7d95de823e 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -178,7 +178,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
art::ClassLinker* cl = runtime->GetClassLinker();
auto ptr_size = cl->GetImagePointerSize();
const size_t method_size = art::ArtMethod::Size(ptr_size);
- auto* method_storage = allocator_->Alloc(GetThread(), method_size);
+ auto* method_storage = allocator_->Alloc(art::Thread::Current(), method_size);
CHECK(method_storage != nullptr) << "Unable to allocate storage for obsolete version of '"
<< old_method->PrettyMethod() << "'";
new_obsolete_method = new (method_storage) art::ArtMethod();
@@ -186,6 +186,7 @@ class ObsoleteMethodStackVisitor : public art::StackVisitor {
DCHECK_EQ(new_obsolete_method->GetDeclaringClass(), old_method->GetDeclaringClass());
new_obsolete_method->SetIsObsolete();
new_obsolete_method->SetDontCompile();
+ cl->SetEntryPointsForObsoleteMethod(new_obsolete_method);
obsolete_maps_->RecordObsolete(old_method, new_obsolete_method);
// Update JIT Data structures to point to the new method.
art::jit::Jit* jit = art::Runtime::Current()->GetJit();
@@ -261,13 +262,12 @@ jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class>
// Moves dex data to an anonymous, read-only mmap'd region.
std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& original_location,
- jint data_len,
- const unsigned char* dex_data,
+ art::ArraySlice<const unsigned char> data,
std::string* error_msg) {
std::unique_ptr<art::MemMap> map(art::MemMap::MapAnonymous(
StringPrintf("%s-transformed", original_location.c_str()).c_str(),
nullptr,
- data_len,
+ data.size(),
PROT_READ|PROT_WRITE,
/*low_4gb*/false,
/*reuse*/false,
@@ -275,7 +275,7 @@ std::unique_ptr<art::MemMap> Redefiner::MoveDataToMemMap(const std::string& orig
if (map == nullptr) {
return map;
}
- memcpy(map->Begin(), dex_data, data_len);
+ memcpy(map->Begin(), &data.At(0), data.size());
// Make the dex files mmap read only. This matches how other DexFiles are mmaped and prevents
// programs from corrupting it.
map->Protect(PROT_READ);
@@ -325,25 +325,26 @@ jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
std::vector<ArtClassDefinition> def_vector;
def_vector.reserve(class_count);
for (jint i = 0; i < class_count; i++) {
+ jboolean is_modifiable = JNI_FALSE;
+ jvmtiError res = env->IsModifiableClass(definitions[i].klass, &is_modifiable);
+ if (res != OK) {
+ return res;
+ } else if (!is_modifiable) {
+ return ERR(UNMODIFIABLE_CLASS);
+ }
// We make a copy of the class_bytes to pass into the retransformation.
// This makes cleanup easier (since we unambiguously own the bytes) and also is useful since we
// will need to keep the original bytes around unaltered for subsequent RetransformClasses calls
// to get the passed in bytes.
unsigned char* class_bytes_copy = nullptr;
- jvmtiError res = env->Allocate(definitions[i].class_byte_count, &class_bytes_copy);
+ res = env->Allocate(definitions[i].class_byte_count, &class_bytes_copy);
if (res != OK) {
return res;
}
memcpy(class_bytes_copy, definitions[i].class_bytes, definitions[i].class_byte_count);
ArtClassDefinition def;
- def.dex_len = definitions[i].class_byte_count;
- def.dex_data = MakeJvmtiUniquePtr(env, class_bytes_copy);
- // We are definitely modified.
- def.SetModified();
- def.original_dex_file = art::ArraySlice<const unsigned char>(definitions[i].class_bytes,
- definitions[i].class_byte_count);
- res = Transformer::FillInTransformationData(env, definitions[i].klass, &def);
+ res = def.Init(env, definitions[i]);
if (res != OK) {
return res;
}
@@ -379,7 +380,7 @@ jvmtiError Redefiner::RedefineClassesDirect(ArtJvmTiEnv* env,
Redefiner r(runtime, self, error_msg);
for (const ArtClassDefinition& def : definitions) {
// Only try to transform classes that have been modified.
- if (def.IsModified(self)) {
+ if (def.IsModified()) {
jvmtiError res = r.AddRedefinition(env, def);
if (res != OK) {
return res;
@@ -392,25 +393,24 @@ jvmtiError Redefiner::RedefineClassesDirect(ArtJvmTiEnv* env,
jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const ArtClassDefinition& def) {
std::string original_dex_location;
jvmtiError ret = OK;
- if ((ret = GetClassLocation(env, def.klass, &original_dex_location))) {
+ if ((ret = GetClassLocation(env, def.GetClass(), &original_dex_location))) {
*error_msg_ = "Unable to get original dex file location!";
return ret;
}
char* generic_ptr_unused = nullptr;
char* signature_ptr = nullptr;
- if ((ret = env->GetClassSignature(def.klass, &signature_ptr, &generic_ptr_unused)) != OK) {
+ if ((ret = env->GetClassSignature(def.GetClass(), &signature_ptr, &generic_ptr_unused)) != OK) {
*error_msg_ = "Unable to get class signature!";
return ret;
}
JvmtiUniquePtr<char> generic_unique_ptr(MakeJvmtiUniquePtr(env, generic_ptr_unused));
JvmtiUniquePtr<char> signature_unique_ptr(MakeJvmtiUniquePtr(env, signature_ptr));
std::unique_ptr<art::MemMap> map(MoveDataToMemMap(original_dex_location,
- def.dex_len,
- def.dex_data.get(),
+ def.GetDexData(),
error_msg_));
std::ostringstream os;
if (map.get() == nullptr) {
- os << "Failed to create anonymous mmap for modified dex file of class " << def.name
+ os << "Failed to create anonymous mmap for modified dex file of class " << def.GetName()
<< "in dex file " << original_dex_location << " because: " << *error_msg_;
*error_msg_ = os.str();
return ERR(OUT_OF_MEMORY);
@@ -427,16 +427,16 @@ jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const ArtClassDefinition
/*verify_checksum*/true,
error_msg_));
if (dex_file.get() == nullptr) {
- os << "Unable to load modified dex file for " << def.name << ": " << *error_msg_;
+ os << "Unable to load modified dex file for " << def.GetName() << ": " << *error_msg_;
*error_msg_ = os.str();
return ERR(INVALID_CLASS_FORMAT);
}
redefinitions_.push_back(
Redefiner::ClassRedefinition(this,
- def.klass,
+ def.GetClass(),
dex_file.release(),
signature_ptr,
- def.original_dex_file));
+ def.GetNewOriginalDexFile()));
return OK;
}
@@ -462,7 +462,7 @@ void Redefiner::RecordFailure(jvmtiError result,
result_ = result;
}
-art::mirror::ByteArray* Redefiner::ClassRedefinition::AllocateOrGetOriginalDexFileBytes() {
+art::mirror::Object* Redefiner::ClassRedefinition::AllocateOrGetOriginalDexFile() {
// If we have been specifically given a new set of bytes use that
if (original_dex_file_.size() != 0) {
return art::mirror::ByteArray::AllocateAndFill(
@@ -474,24 +474,21 @@ art::mirror::ByteArray* Redefiner::ClassRedefinition::AllocateOrGetOriginalDexFi
// See if we already have one set.
art::ObjPtr<art::mirror::ClassExt> ext(GetMirrorClass()->GetExtData());
if (!ext.IsNull()) {
- art::ObjPtr<art::mirror::ByteArray> old_original_bytes(ext->GetOriginalDexFileBytes());
- if (!old_original_bytes.IsNull()) {
+ art::ObjPtr<art::mirror::Object> old_original_dex_file(ext->GetOriginalDexFile());
+ if (!old_original_dex_file.IsNull()) {
// We do. Use it.
- return old_original_bytes.Ptr();
+ return old_original_dex_file.Ptr();
}
}
- // Copy the current dex_file
- const art::DexFile& current_dex_file = GetMirrorClass()->GetDexFile();
+ // Return the current dex_cache, which has the dex file in it.
+ art::ObjPtr<art::mirror::DexCache> current_dex_cache(GetMirrorClass()->GetDexCache());
// TODO Handle this or make it so it cannot happen.
- if (current_dex_file.NumClassDefs() != 1) {
+ if (current_dex_cache->GetDexFile()->NumClassDefs() != 1) {
LOG(WARNING) << "Current dex file has more than one class in it. Calling RetransformClasses "
<< "on this class might fail if no transformations are applied to it!";
}
- return art::mirror::ByteArray::AllocateAndFill(
- driver_->self_,
- reinterpret_cast<const signed char*>(current_dex_file.Begin()),
- current_dex_file.Size());
+ return current_dex_cache.Ptr();
}
struct CallbackCtx {
@@ -779,6 +776,8 @@ bool Redefiner::ClassRedefinition::CheckRedefinitionIsValid() {
CheckSameMethods();
}
+class RedefinitionDataIter;
+
// A wrapper that lets us hold onto the arbitrary sized data needed for redefinitions in a
// reasonably sane way. This adds no fields to the normal ObjectArray. By doing this we can avoid
// having to deal with the fact that we need to hold an arbitrary number of references live.
@@ -802,13 +801,15 @@ class RedefinitionDataHolder {
RedefinitionDataHolder(art::StackHandleScope<1>* hs,
art::Runtime* runtime,
art::Thread* self,
- int32_t num_redefinitions) REQUIRES_SHARED(art::Locks::mutator_lock_) :
+ std::vector<Redefiner::ClassRedefinition>* redefinitions)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) :
arr_(
hs->NewHandle(
art::mirror::ObjectArray<art::mirror::Object>::Alloc(
self,
runtime->GetClassLinker()->GetClassRoot(art::ClassLinker::kObjectArrayClass),
- num_redefinitions * kNumSlots))) {}
+ redefinitions->size() * kNumSlots))),
+ redefinitions_(redefinitions) {}
bool IsNull() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
return arr_.IsNull();
@@ -836,9 +837,9 @@ class RedefinitionDataHolder {
return art::down_cast<art::mirror::Class*>(GetSlot(klass_index, kSlotMirrorClass));
}
- art::mirror::ByteArray* GetOriginalDexFileBytes(jint klass_index) const
+ art::mirror::Object* GetOriginalDexFile(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return art::down_cast<art::mirror::ByteArray*>(GetSlot(klass_index, kSlotOrigDexFile));
+ return art::down_cast<art::mirror::Object*>(GetSlot(klass_index, kSlotOrigDexFile));
}
void SetSourceClassLoader(jint klass_index, art::mirror::ClassLoader* loader)
@@ -861,7 +862,7 @@ class RedefinitionDataHolder {
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotMirrorClass, klass);
}
- void SetOriginalDexFileBytes(jint klass_index, art::mirror::ByteArray* bytes)
+ void SetOriginalDexFile(jint klass_index, art::mirror::Object* bytes)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotOrigDexFile, bytes);
}
@@ -870,8 +871,27 @@ class RedefinitionDataHolder {
return arr_->GetLength() / kNumSlots;
}
+ std::vector<Redefiner::ClassRedefinition>* GetRedefinitions()
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return redefinitions_;
+ }
+
+ bool operator==(const RedefinitionDataHolder& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return arr_.Get() == other.arr_.Get();
+ }
+
+ bool operator!=(const RedefinitionDataHolder& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return !(*this == other);
+ }
+
+ RedefinitionDataIter begin() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ RedefinitionDataIter end() REQUIRES_SHARED(art::Locks::mutator_lock_);
+
private:
mutable art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
+ std::vector<Redefiner::ClassRedefinition>* redefinitions_;
art::mirror::Object* GetSlot(jint klass_index,
DataSlot slot) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -890,8 +910,115 @@ class RedefinitionDataHolder {
DISALLOW_COPY_AND_ASSIGN(RedefinitionDataHolder);
};
-bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
- const RedefinitionDataHolder& holder) {
+class RedefinitionDataIter {
+ public:
+ RedefinitionDataIter(int32_t idx, RedefinitionDataHolder& holder) : idx_(idx), holder_(holder) {}
+
+ RedefinitionDataIter(const RedefinitionDataIter&) = default;
+ RedefinitionDataIter(RedefinitionDataIter&&) = default;
+ RedefinitionDataIter& operator=(const RedefinitionDataIter&) = default;
+ RedefinitionDataIter& operator=(RedefinitionDataIter&&) = default;
+
+ bool operator==(const RedefinitionDataIter& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return idx_ == other.idx_ && holder_ == other.holder_;
+ }
+
+ bool operator!=(const RedefinitionDataIter& other) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return !(*this == other);
+ }
+
+ RedefinitionDataIter operator++() { // Value after modification.
+ idx_++;
+ return *this;
+ }
+
+ RedefinitionDataIter operator++(int) {
+ RedefinitionDataIter temp = *this;
+ idx_++;
+ return temp;
+ }
+
+ RedefinitionDataIter operator+(ssize_t delta) const {
+ RedefinitionDataIter temp = *this;
+ temp += delta;
+ return temp;
+ }
+
+ RedefinitionDataIter& operator+=(ssize_t delta) {
+ idx_ += delta;
+ return *this;
+ }
+
+ Redefiner::ClassRedefinition& GetRedefinition() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return (*holder_.GetRedefinitions())[idx_];
+ }
+
+ RedefinitionDataHolder& GetHolder() {
+ return holder_;
+ }
+
+ art::mirror::ClassLoader* GetSourceClassLoader() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetSourceClassLoader(idx_);
+ }
+ art::mirror::Object* GetJavaDexFile() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetJavaDexFile(idx_);
+ }
+ art::mirror::LongArray* GetNewDexFileCookie() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetNewDexFileCookie(idx_);
+ }
+ art::mirror::DexCache* GetNewDexCache() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetNewDexCache(idx_);
+ }
+ art::mirror::Class* GetMirrorClass() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetMirrorClass(idx_);
+ }
+ art::mirror::Object* GetOriginalDexFile() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return holder_.GetOriginalDexFile(idx_);
+ }
+ int32_t GetIndex() const {
+ return idx_;
+ }
+
+ void SetSourceClassLoader(art::mirror::ClassLoader* loader)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetSourceClassLoader(idx_, loader);
+ }
+ void SetJavaDexFile(art::mirror::Object* dexfile) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetJavaDexFile(idx_, dexfile);
+ }
+ void SetNewDexFileCookie(art::mirror::LongArray* cookie)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetNewDexFileCookie(idx_, cookie);
+ }
+ void SetNewDexCache(art::mirror::DexCache* cache) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetNewDexCache(idx_, cache);
+ }
+ void SetMirrorClass(art::mirror::Class* klass) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetMirrorClass(idx_, klass);
+ }
+ void SetOriginalDexFile(art::mirror::Object* bytes)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ holder_.SetOriginalDexFile(idx_, bytes);
+ }
+
+ private:
+ int32_t idx_;
+ RedefinitionDataHolder& holder_;
+};
+
+RedefinitionDataIter RedefinitionDataHolder::begin() {
+ return RedefinitionDataIter(0, *this);
+}
+
+RedefinitionDataIter RedefinitionDataHolder::end() {
+ return RedefinitionDataIter(Length(), *this);
+}
+
+bool Redefiner::ClassRedefinition::CheckVerification(const RedefinitionDataIter& iter) {
DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
art::StackHandleScope<2> hs(driver_->self_);
std::string error;
@@ -899,7 +1026,7 @@ bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
art::verifier::MethodVerifier::FailureKind failure =
art::verifier::MethodVerifier::VerifyClass(driver_->self_,
dex_file_.get(),
- hs.NewHandle(holder.GetNewDexCache(klass_index)),
+ hs.NewHandle(iter.GetNewDexCache()),
hs.NewHandle(GetClassLoader()),
dex_file_->GetClassDef(0), /*class_def*/
nullptr, /*compiler_callbacks*/
@@ -918,21 +1045,20 @@ bool Redefiner::ClassRedefinition::CheckVerification(int32_t klass_index,
// dexfile. This is so that even if multiple classes with the same classloader are redefined at
// once they are all added to the classloader.
bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
- int32_t klass_index,
art::Handle<art::mirror::ClassLoader> source_class_loader,
art::Handle<art::mirror::Object> dex_file_obj,
- /*out*/RedefinitionDataHolder* holder) {
+ /*out*/RedefinitionDataIter* cur_data) {
art::StackHandleScope<2> hs(driver_->self_);
art::MutableHandle<art::mirror::LongArray> old_cookie(
hs.NewHandle<art::mirror::LongArray>(nullptr));
bool has_older_cookie = false;
// See if we already have a cookie that a previous redefinition got from the same classloader.
- for (int32_t i = 0; i < klass_index; i++) {
- if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
+ for (auto old_data = cur_data->GetHolder().begin(); old_data != *cur_data; ++old_data) {
+ if (old_data.GetSourceClassLoader() == source_class_loader.Get()) {
// Since every instance of this classloader should have the same cookie associated with it we
// can stop looking here.
has_older_cookie = true;
- old_cookie.Assign(holder->GetNewDexFileCookie(i));
+ old_cookie.Assign(old_data.GetNewDexFileCookie());
break;
}
}
@@ -953,14 +1079,14 @@ bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
}
// Save the cookie.
- holder->SetNewDexFileCookie(klass_index, new_cookie.Get());
+ cur_data->SetNewDexFileCookie(new_cookie.Get());
// If there are other copies of this same classloader we need to make sure that we all have the
// same cookie.
if (has_older_cookie) {
- for (int32_t i = 0; i < klass_index; i++) {
+ for (auto old_data = cur_data->GetHolder().begin(); old_data != *cur_data; ++old_data) {
// We will let the GC take care of the cookie we allocated for this one.
- if (holder->GetSourceClassLoader(i) == source_class_loader.Get()) {
- holder->SetNewDexFileCookie(i, new_cookie.Get());
+ if (old_data.GetSourceClassLoader() == source_class_loader.Get()) {
+ old_data.SetNewDexFileCookie(new_cookie.Get());
}
}
}
@@ -969,32 +1095,32 @@ bool Redefiner::ClassRedefinition::AllocateAndRememberNewDexFileCookie(
}
bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
- int32_t klass_index, /*out*/RedefinitionDataHolder* holder) {
+ /*out*/RedefinitionDataIter* cur_data) {
art::ScopedObjectAccessUnchecked soa(driver_->self_);
art::StackHandleScope<2> hs(driver_->self_);
- holder->SetMirrorClass(klass_index, GetMirrorClass());
+ cur_data->SetMirrorClass(GetMirrorClass());
// This shouldn't allocate
art::Handle<art::mirror::ClassLoader> loader(hs.NewHandle(GetClassLoader()));
// The bootclasspath is handled specially so it doesn't have a j.l.DexFile.
if (!art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
- holder->SetSourceClassLoader(klass_index, loader.Get());
+ cur_data->SetSourceClassLoader(loader.Get());
art::Handle<art::mirror::Object> dex_file_obj(hs.NewHandle(
ClassLoaderHelper::FindSourceDexFileObject(driver_->self_, loader)));
- holder->SetJavaDexFile(klass_index, dex_file_obj.Get());
+ cur_data->SetJavaDexFile(dex_file_obj.Get());
if (dex_file_obj == nullptr) {
RecordFailure(ERR(INTERNAL), "Unable to find dex file!");
return false;
}
// Allocate the new dex file cookie.
- if (!AllocateAndRememberNewDexFileCookie(klass_index, loader, dex_file_obj, holder)) {
+ if (!AllocateAndRememberNewDexFileCookie(loader, dex_file_obj, cur_data)) {
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate dex file array for class loader");
return false;
}
}
- holder->SetNewDexCache(klass_index, CreateNewDexCache(loader));
- if (holder->GetNewDexCache(klass_index) == nullptr) {
+ cur_data->SetNewDexCache(CreateNewDexCache(loader));
+ if (cur_data->GetNewDexCache() == nullptr) {
driver_->self_->AssertPendingException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate DexCache");
@@ -1002,8 +1128,8 @@ bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
}
// We won't always need to set this field.
- holder->SetOriginalDexFileBytes(klass_index, AllocateOrGetOriginalDexFileBytes());
- if (holder->GetOriginalDexFileBytes(klass_index) == nullptr) {
+ cur_data->SetOriginalDexFile(AllocateOrGetOriginalDexFile());
+ if (cur_data->GetOriginalDexFile() == nullptr) {
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate array for original dex file");
@@ -1048,13 +1174,11 @@ bool Redefiner::EnsureAllClassAllocationsFinished() {
}
bool Redefiner::FinishAllRemainingAllocations(RedefinitionDataHolder& holder) {
- int32_t cnt = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
// Allocate the data this redefinition requires.
- if (!redef.FinishRemainingAllocations(cnt, &holder)) {
+ if (!data.GetRedefinition().FinishRemainingAllocations(&data)) {
return false;
}
- cnt++;
}
return true;
}
@@ -1069,22 +1193,39 @@ void Redefiner::ReleaseAllDexFiles() {
}
}
-bool Redefiner::CheckAllClassesAreVerified(const RedefinitionDataHolder& holder) {
- int32_t cnt = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- if (!redef.CheckVerification(cnt, holder)) {
+bool Redefiner::CheckAllClassesAreVerified(RedefinitionDataHolder& holder) {
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ if (!data.GetRedefinition().CheckVerification(data)) {
return false;
}
- cnt++;
}
return true;
}
+class ScopedDisableConcurrentAndMovingGc {
+ public:
+ ScopedDisableConcurrentAndMovingGc(art::gc::Heap* heap, art::Thread* self)
+ : heap_(heap), self_(self) {
+ if (heap_->IsGcConcurrentAndMoving()) {
+ heap_->IncrementDisableMovingGC(self_);
+ }
+ }
+
+ ~ScopedDisableConcurrentAndMovingGc() {
+ if (heap_->IsGcConcurrentAndMoving()) {
+ heap_->DecrementDisableMovingGC(self_);
+ }
+ }
+ private:
+ art::gc::Heap* heap_;
+ art::Thread* self_;
+};
+
jvmtiError Redefiner::Run() {
art::StackHandleScope<1> hs(self_);
// Allocate an array to hold onto all java temporary objects associated with this redefinition.
// We will let this be collected after the end of this function.
- RedefinitionDataHolder holder(&hs, runtime_, self_, redefinitions_.size());
+ RedefinitionDataHolder holder(&hs, runtime_, self_, &redefinitions_);
if (holder.IsNull()) {
self_->AssertPendingOOMException();
self_->ClearException();
@@ -1107,57 +1248,43 @@ jvmtiError Redefiner::Run() {
// cleaned up by the GC eventually.
return result_;
}
+
// At this point we can no longer fail without corrupting the runtime state.
- int32_t counter = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
- if (holder.GetSourceClassLoader(counter) == nullptr) {
- runtime_->GetClassLinker()->AppendToBootClassPath(self_, redef.GetDexFile());
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+ if (data.GetSourceClassLoader() == nullptr) {
+ runtime_->GetClassLinker()->AppendToBootClassPath(self_, data.GetRedefinition().GetDexFile());
}
- counter++;
}
UnregisterAllBreakpoints();
+
// Disable GC and wait for it to be done if we are a moving GC. This is fine since we are done
// allocating so no deadlocks.
- art::gc::Heap* heap = runtime_->GetHeap();
- if (heap->IsGcConcurrentAndMoving()) {
- // GC moving objects can cause deadlocks as we are deoptimizing the stack.
- heap->IncrementDisableMovingGC(self_);
- }
+ ScopedDisableConcurrentAndMovingGc sdcamgc(runtime_->GetHeap(), self_);
+
// Do transition to final suspension
// TODO We might want to give this its own suspended state!
// TODO This isn't right. We need to change state without any chance of suspend ideally!
- self_->TransitionFromRunnableToSuspended(art::ThreadState::kNative);
- runtime_->GetThreadList()->SuspendAll(
- "Final installation of redefined Classes!", /*long_suspend*/true);
- counter = 0;
- for (Redefiner::ClassRedefinition& redef : redefinitions_) {
+ art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
+ art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend*/true);
+ for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
- if (holder.GetSourceClassLoader(counter) != nullptr) {
- ClassLoaderHelper::UpdateJavaDexFile(holder.GetJavaDexFile(counter),
- holder.GetNewDexFileCookie(counter));
+ ClassRedefinition& redef = data.GetRedefinition();
+ if (data.GetSourceClassLoader() != nullptr) {
+ ClassLoaderHelper::UpdateJavaDexFile(data.GetJavaDexFile(), data.GetNewDexFileCookie());
}
- art::mirror::Class* klass = holder.GetMirrorClass(counter);
+ art::mirror::Class* klass = data.GetMirrorClass();
// TODO Rewrite so we don't do a stack walk for each and every class.
redef.FindAndAllocateObsoleteMethods(klass);
- redef.UpdateClass(klass, holder.GetNewDexCache(counter),
- holder.GetOriginalDexFileBytes(counter));
- counter++;
+ redef.UpdateClass(klass, data.GetNewDexCache(), data.GetOriginalDexFile());
}
// TODO We should check for if any of the redefined methods are intrinsic methods here and, if any
// are, force a full-world deoptimization before finishing redefinition. If we don't do this then
// methods that have been jitted prior to the current redefinition being applied might continue
// to use the old versions of the intrinsics!
// TODO Shrink the obsolete method maps if possible?
- // TODO Put this into a scoped thing.
- runtime_->GetThreadList()->ResumeAll();
- // Get back shared mutator lock as expected for return.
- self_->TransitionFromSuspendedToRunnable();
// TODO Do the dex_file release at a more reasonable place. This works but it muddles who really
// owns the DexFile and when ownership is transferred.
ReleaseAllDexFiles();
- if (heap->IsGcConcurrentAndMoving()) {
- heap->DecrementDisableMovingGC(self_);
- }
return OK;
}
@@ -1228,7 +1355,7 @@ void Redefiner::ClassRedefinition::UpdateFields(art::ObjPtr<art::mirror::Class>
void Redefiner::ClassRedefinition::UpdateClass(
art::ObjPtr<art::mirror::Class> mclass,
art::ObjPtr<art::mirror::DexCache> new_dex_cache,
- art::ObjPtr<art::mirror::ByteArray> original_dex_file) {
+ art::ObjPtr<art::mirror::Object> original_dex_file) {
DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
const art::DexFile::ClassDef& class_def = dex_file_->GetClassDef(0);
UpdateMethods(mclass, new_dex_cache, class_def);
@@ -1242,7 +1369,7 @@ void Redefiner::ClassRedefinition::UpdateClass(
mclass->SetDexTypeIndex(dex_file_->GetIndexForTypeId(*dex_file_->FindTypeId(class_sig_.c_str())));
art::ObjPtr<art::mirror::ClassExt> ext(mclass->GetExtData());
CHECK(!ext.IsNull());
- ext->SetOriginalDexFileBytes(original_dex_file);
+ ext->SetOriginalDexFile(original_dex_file);
}
// This function does all (java) allocations we need to do for the Class being redefined.
@@ -1259,8 +1386,6 @@ bool Redefiner::ClassRedefinition::EnsureClassAllocationsFinished() {
art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(driver_->self_)));
if (ext == nullptr) {
// No memory. Clear exception (it's not useful) and return error.
- // TODO This doesn't need to be fatal. We could just not support obsolete methods after hitting
- // this case.
driver_->self_->AssertPendingOOMException();
driver_->self_->ClearException();
RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate ClassExt");
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 4e6d05f056..809a681902 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -66,6 +66,7 @@
namespace openjdkjvmti {
class RedefinitionDataHolder;
+class RedefinitionDataIter;
// Class that can redefine a single class's methods.
// TODO We should really make this be driven by an outside class so we can do multiple classes at
@@ -98,8 +99,7 @@ class Redefiner {
static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
static std::unique_ptr<art::MemMap> MoveDataToMemMap(const std::string& original_location,
- jint data_len,
- const unsigned char* dex_data,
+ art::ArraySlice<const unsigned char> data,
std::string* error_msg);
private:
@@ -136,21 +136,20 @@ class Redefiner {
REQUIRES_SHARED(art::Locks::mutator_lock_);
// This may return nullptr with a OOME pending if allocation fails.
- art::mirror::ByteArray* AllocateOrGetOriginalDexFileBytes()
+ art::mirror::Object* AllocateOrGetOriginalDexFile()
REQUIRES_SHARED(art::Locks::mutator_lock_);
void RecordFailure(jvmtiError e, const std::string& err) {
driver_->RecordFailure(e, class_sig_, err);
}
- bool FinishRemainingAllocations(int32_t klass_index, /*out*/RedefinitionDataHolder* holder)
+ bool FinishRemainingAllocations(/*out*/RedefinitionDataIter* cur_data)
REQUIRES_SHARED(art::Locks::mutator_lock_);
bool AllocateAndRememberNewDexFileCookie(
- int32_t klass_index,
art::Handle<art::mirror::ClassLoader> source_class_loader,
art::Handle<art::mirror::Object> dex_file_obj,
- /*out*/RedefinitionDataHolder* holder)
+ /*out*/RedefinitionDataIter* cur_data)
REQUIRES_SHARED(art::Locks::mutator_lock_);
void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
@@ -161,8 +160,7 @@ class Redefiner {
bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
// Checks that the contained class can be successfully verified.
- bool CheckVerification(int32_t klass_index,
- const RedefinitionDataHolder& holder)
+ bool CheckVerification(const RedefinitionDataIter& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
// Preallocates all needed allocations in klass so that we can pause execution safely.
@@ -197,7 +195,7 @@ class Redefiner {
void UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
art::ObjPtr<art::mirror::DexCache> new_dex_cache,
- art::ObjPtr<art::mirror::ByteArray> original_dex_file)
+ art::ObjPtr<art::mirror::Object> original_dex_file)
REQUIRES(art::Locks::mutator_lock_);
void ReleaseDexFile() REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -241,7 +239,7 @@ class Redefiner {
jvmtiError Run() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
- bool CheckAllClassesAreVerified(const RedefinitionDataHolder& holder)
+ bool CheckAllClassesAreVerified(RedefinitionDataHolder& holder)
REQUIRES_SHARED(art::Locks::mutator_lock_);
bool EnsureAllClassAllocationsFinished() REQUIRES_SHARED(art::Locks::mutator_lock_);
bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
@@ -255,6 +253,8 @@ class Redefiner {
}
friend struct CallbackCtx;
+ friend class RedefinitionDataHolder;
+ friend class RedefinitionDataIter;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
index 788ac30873..e5ff090ddf 100644
--- a/runtime/openjdkjvmti/ti_thread.cc
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -44,6 +44,7 @@
#include "mirror/object-inl.h"
#include "mirror/string.h"
#include "obj_ptr.h"
+#include "ti_phase.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "ScopedLocalRef.h"
@@ -54,6 +55,8 @@
namespace openjdkjvmti {
+art::ArtField* ThreadUtil::context_class_loader_ = nullptr;
+
struct ThreadCallback : public art::ThreadLifecycleCallback, public art::RuntimePhaseCallback {
jthread GetThreadObject(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
if (self->GetPeer() == nullptr) {
@@ -121,6 +124,16 @@ void ThreadUtil::Register(EventHandler* handler) {
runtime->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&gThreadCallback);
}
+void ThreadUtil::CacheData() {
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ art::ObjPtr<art::mirror::Class> thread_class =
+ soa.Decode<art::mirror::Class>(art::WellKnownClasses::java_lang_Thread);
+ CHECK(thread_class != nullptr);
+ context_class_loader_ = thread_class->FindDeclaredInstanceField("contextClassLoader",
+ "Ljava/lang/ClassLoader;");
+ CHECK(context_class_loader_ != nullptr);
+}
+
void ThreadUtil::Unregister() {
art::ScopedThreadStateChange stsc(art::Thread::Current(),
art::ThreadState::kWaitingForDebuggerToAttach);
@@ -146,22 +159,6 @@ jvmtiError ThreadUtil::GetCurrentThread(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread*
return ERR(NONE);
}
-// Read the context classloader from a Java thread object. This is a lazy implementation
-// that assumes GetThreadInfo isn't called too often. If we instead cache the ArtField,
-// we will have to add synchronization as this can't be cached on startup (which is
-// potentially runtime startup).
-static art::ObjPtr<art::mirror::Object> GetContextClassLoader(art::ObjPtr<art::mirror::Object> peer)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- if (peer == nullptr) {
- return nullptr;
- }
- art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
- art::ArtField* cc_field = klass->FindDeclaredInstanceField("contextClassLoader",
- "Ljava/lang/ClassLoader;");
- CHECK(cc_field != nullptr);
- return cc_field->GetObject(peer);
-}
-
// Get the native thread. The spec says a null object denotes the current thread.
static art::Thread* GetNativeThread(jthread thread,
const art::ScopedObjectAccessAlreadyRunnable& soa)
@@ -178,6 +175,9 @@ jvmtiError ThreadUtil::GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadI
if (info_ptr == nullptr) {
return ERR(NULL_POINTER);
}
+ if (!PhaseUtil::IsLivePhase()) {
+ return JVMTI_ERROR_WRONG_PHASE;
+ }
art::ScopedObjectAccess soa(art::Thread::Current());
@@ -217,7 +217,10 @@ jvmtiError ThreadUtil::GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadI
}
// Context classloader.
- art::ObjPtr<art::mirror::Object> ccl = GetContextClassLoader(peer);
+ DCHECK(context_class_loader_ != nullptr);
+ art::ObjPtr<art::mirror::Object> ccl = peer != nullptr
+ ? context_class_loader_->GetObject(peer)
+ : nullptr;
info_ptr->context_class_loader = ccl == nullptr
? nullptr
: soa.AddLocalReference<jobject>(ccl);
@@ -272,7 +275,10 @@ jvmtiError ThreadUtil::GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadI
}
// Context classloader.
- art::ObjPtr<art::mirror::Object> ccl = GetContextClassLoader(peer);
+ DCHECK(context_class_loader_ != nullptr);
+ art::ObjPtr<art::mirror::Object> ccl = peer != nullptr
+ ? context_class_loader_->GetObject(peer)
+ : nullptr;
info_ptr->context_class_loader = ccl == nullptr
? nullptr
: soa.AddLocalReference<jobject>(ccl);
diff --git a/runtime/openjdkjvmti/ti_thread.h b/runtime/openjdkjvmti/ti_thread.h
index f6f93ee91a..c7f75d8aec 100644
--- a/runtime/openjdkjvmti/ti_thread.h
+++ b/runtime/openjdkjvmti/ti_thread.h
@@ -35,6 +35,10 @@
#include "jni.h"
#include "jvmti.h"
+namespace art {
+class ArtField;
+}
+
namespace openjdkjvmti {
class EventHandler;
@@ -44,6 +48,9 @@ class ThreadUtil {
static void Register(EventHandler* event_handler);
static void Unregister();
+ // To be called when it is safe to cache data.
+ static void CacheData();
+
static jvmtiError GetAllThreads(jvmtiEnv* env, jint* threads_count_ptr, jthread** threads_ptr);
static jvmtiError GetCurrentThread(jvmtiEnv* env, jthread* thread_ptr);
@@ -60,6 +67,9 @@ class ThreadUtil {
jvmtiStartFunction proc,
const void* arg,
jint priority);
+
+ private:
+ static art::ArtField* context_class_loader_;
};
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/transform.cc b/runtime/openjdkjvmti/transform.cc
index 36421b9137..15d8dd0fc2 100644
--- a/runtime/openjdkjvmti/transform.cc
+++ b/runtime/openjdkjvmti/transform.cc
@@ -42,6 +42,7 @@
#include "gc_root-inl.h"
#include "globals.h"
#include "jni_env_ext-inl.h"
+#include "jvalue.h"
#include "jvmti.h"
#include "linear_alloc.h"
#include "mem_map.h"
@@ -69,17 +70,18 @@ jvmtiError Transformer::RetransformClassesDirect(
for (ArtClassDefinition& def : *definitions) {
jint new_len = -1;
unsigned char* new_data = nullptr;
+ art::ArraySlice<const unsigned char> dex_data = def.GetDexData();
event_handler->DispatchEvent<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
self,
GetJniEnv(env),
- def.klass,
- def.loader,
- def.name.c_str(),
- def.protection_domain,
- def.dex_len,
- static_cast<const unsigned char*>(def.dex_data.get()),
- &new_len,
- &new_data);
+ def.GetClass(),
+ def.GetLoader(),
+ def.GetName().c_str(),
+ def.GetProtectionDomain(),
+ static_cast<jint>(dex_data.size()),
+ &dex_data.At(0),
+ /*out*/&new_len,
+ /*out*/&new_data);
def.SetNewDexData(env, new_len, new_data);
}
return OK;
@@ -109,8 +111,15 @@ jvmtiError Transformer::RetransformClasses(ArtJvmTiEnv* env,
std::vector<ArtClassDefinition> definitions;
jvmtiError res = OK;
for (jint i = 0; i < class_count; i++) {
+ jboolean is_modifiable = JNI_FALSE;
+ res = env->IsModifiableClass(classes[i], &is_modifiable);
+ if (res != OK) {
+ return res;
+ } else if (!is_modifiable) {
+ return ERR(UNMODIFIABLE_CLASS);
+ }
ArtClassDefinition def;
- res = FillInTransformationData(env, classes[i], &def);
+ res = def.Init(env, classes[i]);
if (res != OK) {
return res;
}
@@ -139,63 +148,4 @@ jvmtiError GetClassLocation(ArtJvmTiEnv* env, jclass klass, /*out*/std::string*
return OK;
}
-jvmtiError Transformer::GetDexDataForRetransformation(ArtJvmTiEnv* env,
- art::Handle<art::mirror::Class> klass,
- /*out*/jint* dex_data_len,
- /*out*/unsigned char** dex_data) {
- art::StackHandleScope<2> hs(art::Thread::Current());
- art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->GetExtData()));
- if (!ext.IsNull()) {
- art::Handle<art::mirror::ByteArray> orig_dex(hs.NewHandle(ext->GetOriginalDexFileBytes()));
- if (!orig_dex.IsNull()) {
- *dex_data_len = static_cast<jint>(orig_dex->GetLength());
- return CopyDataIntoJvmtiBuffer(env,
- reinterpret_cast<const unsigned char*>(orig_dex->GetData()),
- *dex_data_len,
- /*out*/dex_data);
- }
- }
- // TODO De-quicken the dex file before passing it to the agents.
- LOG(WARNING) << "Dex file is not de-quickened yet! Quickened dex instructions might be present";
- const art::DexFile& dex = klass->GetDexFile();
- *dex_data_len = static_cast<jint>(dex.Size());
- return CopyDataIntoJvmtiBuffer(env, dex.Begin(), *dex_data_len, /*out*/dex_data);
-}
-
-// TODO Move this function somewhere more appropriate.
-// Gets the data surrounding the given class.
-// TODO Make this less magical.
-jvmtiError Transformer::FillInTransformationData(ArtJvmTiEnv* env,
- jclass klass,
- ArtClassDefinition* def) {
- JNIEnv* jni_env = GetJniEnv(env);
- if (jni_env == nullptr) {
- // TODO Different error might be better?
- return ERR(INTERNAL);
- }
- art::ScopedObjectAccess soa(jni_env);
- art::StackHandleScope<3> hs(art::Thread::Current());
- art::Handle<art::mirror::Class> hs_klass(hs.NewHandle(soa.Decode<art::mirror::Class>(klass)));
- if (hs_klass.IsNull()) {
- return ERR(INVALID_CLASS);
- }
- def->klass = klass;
- def->loader = soa.AddLocalReference<jobject>(hs_klass->GetClassLoader());
- std::string descriptor_store;
- std::string descriptor(hs_klass->GetDescriptor(&descriptor_store));
- def->name = descriptor.substr(1, descriptor.size() - 2);
- // TODO is this always null?
- def->protection_domain = nullptr;
- if (def->dex_data.get() == nullptr) {
- unsigned char* new_data;
- jvmtiError res = GetDexDataForRetransformation(env, hs_klass, &def->dex_len, &new_data);
- if (res == OK) {
- def->dex_data = MakeJvmtiUniquePtr(env, new_data);
- } else {
- return res;
- }
- }
- return OK;
-}
-
} // namespace openjdkjvmti
diff --git a/runtime/openjdkjvmti/transform.h b/runtime/openjdkjvmti/transform.h
index c6a36e8e20..ba40e04b44 100644
--- a/runtime/openjdkjvmti/transform.h
+++ b/runtime/openjdkjvmti/transform.h
@@ -61,18 +61,6 @@ class Transformer {
jint class_count,
const jclass* classes,
/*out*/std::string* error_msg);
-
- // Gets the data surrounding the given class.
- static jvmtiError FillInTransformationData(ArtJvmTiEnv* env,
- jclass klass,
- ArtClassDefinition* def);
-
- private:
- static jvmtiError GetDexDataForRetransformation(ArtJvmTiEnv* env,
- art::Handle<art::mirror::Class> klass,
- /*out*/jint* dex_data_length,
- /*out*/unsigned char** dex_data)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
};
} // namespace openjdkjvmti
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 9113f83cd4..0784e599b5 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -96,7 +96,7 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
// .WithType<std::vector<ti::Agent>>().AppendValues()
// .IntoKey(M::AgentLib)
.Define("-agentpath:_")
- .WithType<std::vector<ti::Agent>>().AppendValues()
+ .WithType<std::list<ti::Agent>>().AppendValues()
.IntoKey(M::AgentPath)
.Define("-Xms_")
.WithType<MemoryKiB>()
@@ -708,6 +708,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xps-min-classes-to-save:integervalue\n");
UsageMessage(stream, " -Xps-min-notification-before-wake:integervalue\n");
UsageMessage(stream, " -Xps-max-notification-before-wake:integervalue\n");
+ UsageMessage(stream, " -Xps-profile-path:file-path\n");
UsageMessage(stream, " -Xcompiler:filename\n");
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
diff --git a/runtime/primitive.cc b/runtime/primitive.cc
index 2380284535..1ec345a359 100644
--- a/runtime/primitive.cc
+++ b/runtime/primitive.cc
@@ -44,7 +44,7 @@ static const char* kBoxedDescriptors[] = {
"Ljava/lang/Void;",
};
-#define COUNT_OF(x) (sizeof(x) / sizeof(x[0]))
+#define COUNT_OF(x) (sizeof(x) / sizeof((x)[0]))
const char* Primitive::PrettyDescriptor(Primitive::Type type) {
static_assert(COUNT_OF(kTypeNames) == static_cast<size_t>(Primitive::kPrimLast) + 1,
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index b009b47195..3347070468 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -215,9 +215,8 @@ bool RecordConstructorIPut(ArtMethod* method,
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsInstructionIPut(new_iput->Opcode()));
uint32_t field_index = new_iput->VRegC_22c();
- PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- mirror::DexCache* dex_cache = method->GetDexCache();
- ArtField* field = dex_cache->GetResolvedField(field_index, pointer_size);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtField* field = class_linker->LookupResolvedField(field_index, method, /* is_static */ false);
if (UNLIKELY(field == nullptr)) {
return false;
}
@@ -227,7 +226,9 @@ bool RecordConstructorIPut(ArtMethod* method,
if (iputs[old_pos].field_index == DexFile::kDexNoIndex16) {
break;
}
- ArtField* f = dex_cache->GetResolvedField(iputs[old_pos].field_index, pointer_size);
+ ArtField* f = class_linker->LookupResolvedField(iputs[old_pos].field_index,
+ method,
+ /* is_static */ false);
DCHECK(f != nullptr);
if (f == field) {
auto back_it = std::copy(iputs + old_pos + 1, iputs + arraysize(iputs), iputs + old_pos);
@@ -732,9 +733,9 @@ bool InlineMethodAnalyser::ComputeSpecialAccessorInfo(ArtMethod* method,
if (method == nullptr) {
return false;
}
- mirror::DexCache* dex_cache = method->GetDexCache();
- PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- ArtField* field = dex_cache->GetResolvedField(field_idx, pointer_size);
+ ObjPtr<mirror::DexCache> dex_cache = method->GetDexCache();
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ ArtField* field = class_linker->LookupResolvedField(field_idx, method, /* is_static */ false);
if (field == nullptr || field->IsStatic()) {
return false;
}
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 3c64d40720..87bc7df214 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -671,14 +671,14 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
soa.Self()->ClearException();
jclass exception_class = soa.Env()->FindClass("java/lang/reflect/InvocationTargetException");
if (exception_class == nullptr) {
- soa.Self()->AssertPendingOOMException();
+ soa.Self()->AssertPendingException();
return nullptr;
}
jmethodID mid = soa.Env()->GetMethodID(exception_class, "<init>", "(Ljava/lang/Throwable;)V");
CHECK(mid != nullptr);
jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th);
if (exception_instance == nullptr) {
- soa.Self()->AssertPendingOOMException();
+ soa.Self()->AssertPendingException();
return nullptr;
}
soa.Env()->Throw(reinterpret_cast<jthrowable>(exception_instance));
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index e254dfe627..2f70ded8eb 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -509,7 +509,6 @@ class ReflectionTest : public CommonCompilerTest {
};
TEST_F(ReflectionTest, StaticMainMethod) {
- TEST_DISABLED_FOR_READ_BARRIER_WITH_OPTIMIZING_FOR_UNSUPPORTED_INSTRUCTION_SETS();
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Main");
StackHandleScope<1> hs(soa.Self());
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 42a0ca9373..48efbe5413 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -106,7 +106,6 @@
#include "native/dalvik_system_VMStack.h"
#include "native/dalvik_system_ZygoteHooks.h"
#include "native/java_lang_Class.h"
-#include "native/java_lang_DexCache.h"
#include "native/java_lang_Object.h"
#include "native/java_lang_String.h"
#include "native/java_lang_StringFactory.h"
@@ -286,6 +285,13 @@ Runtime::~Runtime() {
LOG(WARNING) << "Current thread not detached in Runtime shutdown";
}
+ if (jit_ != nullptr) {
+ // Stop the profile saver thread before marking the runtime as shutting down.
+ // The saver will try to dump the profiles before being stopped and that
+ // requires holding the mutator lock.
+ jit_->StopProfileSaver();
+ }
+
{
ScopedTrace trace2("Wait for shutdown cond");
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
@@ -327,8 +333,6 @@ Runtime::~Runtime() {
// Delete thread pool before the thread list since we don't want to wait forever on the
// JIT compiler threads.
jit_->DeleteThreadPool();
- // Similarly, stop the profile saver thread before deleting the thread list.
- jit_->StopProfileSaver();
}
// TODO Maybe do some locking.
@@ -802,11 +806,11 @@ void Runtime::InitNonZygoteOrPostFork(
// before fork aren't attributed to an app.
heap_->ResetGcPerformanceInfo();
-
- if (!is_system_server &&
+ // We may want to collect profiling samples for system server, but we never want to JIT there.
+ if ((!is_system_server || !jit_options_->UseJitCompilation()) &&
!safe_mode_ &&
(jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
- jit_.get() == nullptr) {
+ jit_ == nullptr) {
// Note that when running ART standalone (not zygote, nor zygote fork),
// the jit may have already been created.
CreateJit();
@@ -1539,7 +1543,6 @@ void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
register_dalvik_system_VMStack(env);
register_dalvik_system_ZygoteHooks(env);
register_java_lang_Class(env);
- register_java_lang_DexCache(env);
register_java_lang_Object(env);
register_java_lang_invoke_MethodHandleImpl(env);
register_java_lang_ref_FinalizerReference(env);
@@ -1963,9 +1966,7 @@ void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
}
void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
- const std::string& profile_output_filename,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir) {
+ const std::string& profile_output_filename) {
if (jit_.get() == nullptr) {
// We are not JITing. Nothing to do.
return;
@@ -1987,18 +1988,7 @@ void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
return;
}
- jit_->StartProfileSaver(profile_output_filename,
- code_paths,
- foreign_dex_profile_path,
- app_dir);
-}
-
-void Runtime::NotifyDexLoaded(const std::string& dex_location) {
- VLOG(profiler) << "Notify dex loaded: " << dex_location;
- // We know that if the ProfileSaver is started then we can record profile information.
- if (ProfileSaver::IsStarted()) {
- ProfileSaver::NotifyDexUse(dex_location);
- }
+ jit_->StartProfileSaver(profile_output_filename, code_paths);
}
// Transaction support.
@@ -2165,6 +2155,19 @@ void Runtime::CreateJit() {
jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
if (jit_.get() == nullptr) {
LOG(WARNING) << "Failed to create JIT " << error_msg;
+ return;
+ }
+
+ // In case we have a profile path passed as a command line argument,
+ // register the current class path for profiling now. Note that we cannot do
+ // this before we create the JIT, and having it here is the most convenient way.
+ // This is used when testing profiles with the dalvikvm command, as there is no
+ // framework to register the dex files for profiling.
+ if (jit_options_->GetSaveProfilingInfo() &&
+ !jit_options_->GetProfileSaverOptions().GetProfilePath().empty()) {
+ std::vector<std::string> dex_filenames;
+ Split(class_path_string_, ':', &dex_filenames);
+ RegisterAppInfo(dex_filenames, jit_options_->GetProfileSaverOptions().GetProfilePath());
}
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 4a0169db68..92feabb459 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -454,10 +454,7 @@ class Runtime {
}
void RegisterAppInfo(const std::vector<std::string>& code_paths,
- const std::string& profile_output_filename,
- const std::string& foreign_dex_profile_path,
- const std::string& app_dir);
- void NotifyDexLoaded(const std::string& dex_location);
+ const std::string& profile_output_filename);
// Transaction support.
bool IsActiveTransaction() const {
@@ -736,7 +733,7 @@ class Runtime {
std::string class_path_string_;
std::vector<std::string> properties_;
- std::vector<ti::Agent> agents_;
+ std::list<ti::Agent> agents_;
std::vector<Plugin> plugins_;
// The default stack size for managed threads created by the runtime.
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index e68a1b2681..16190cd3c4 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -120,8 +120,8 @@ RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
RUNTIME_OPTIONS_KEY (std::string, CpuAbiList)
RUNTIME_OPTIONS_KEY (std::string, Fingerprint)
RUNTIME_OPTIONS_KEY (ExperimentalFlags, Experimental, ExperimentalFlags::kNone) // -Xexperimental:{...}
-RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>
-RUNTIME_OPTIONS_KEY (std::vector<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>
+RUNTIME_OPTIONS_KEY (std::list<ti::Agent>, AgentLib) // -agentlib:<libname>=<options>
+RUNTIME_OPTIONS_KEY (std::list<ti::Agent>, AgentPath) // -agentpath:<libname>=<options>
RUNTIME_OPTIONS_KEY (std::vector<Plugin>, Plugins) // -Xplugin:<library>
// Not parse-able from command line, but can be provided explicitly.
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index a3286ac3d4..5f03741d1f 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -141,6 +141,8 @@ class ScopedObjectAccessUnchecked : public ScopedObjectAccessAlreadyRunnable {
ALWAYS_INLINE explicit ScopedObjectAccessUnchecked(Thread* self)
REQUIRES(!Locks::thread_suspend_count_lock_);
+ ALWAYS_INLINE ~ScopedObjectAccessUnchecked() REQUIRES(!Locks::thread_suspend_count_lock_) {}
+
// Used when we want a scoped JNI thread state but have no thread/JNIEnv. Consequently doesn't
// change into Runnable or acquire a share on the mutator_lock_.
explicit ScopedObjectAccessUnchecked(JavaVM* vm) ALWAYS_INLINE
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 51a24e4e01..333128b8c0 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -142,8 +142,10 @@ ArtMethod* StackVisitor::GetMethod() const {
InlineInfo inline_info = GetCurrentInlineInfo();
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
+ MethodInfo method_info = method_header->GetOptimizedMethodInfo();
DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
return GetResolvedMethod(*GetCurrentQuickFrame(),
+ method_info,
inline_info,
encoding.inline_info.encoding,
depth_in_stack_map);
@@ -647,7 +649,7 @@ static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
}
const void* code = method->GetEntryPointFromQuickCompiledCode();
- if (code == GetQuickInstrumentationEntryPoint()) {
+ if (code == GetQuickInstrumentationEntryPoint() || code == GetInvokeObsoleteMethodStub()) {
return;
}
diff --git a/runtime/stack.h b/runtime/stack.h
index 90a0aee353..bdaa4c3ca2 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -197,6 +197,11 @@ class ShadowFrame {
return *reinterpret_cast<const int32_t*>(vreg);
}
+ // Shorts are extended to Ints in VRegs. Interpreter intrinsics need them as shorts.
+ int16_t GetVRegShort(size_t i) const {
+ return static_cast<int16_t>(GetVReg(i));
+ }
+
uint32_t* GetVRegAddr(size_t i) {
return &vregs_[i];
}
@@ -425,8 +430,15 @@ class ShadowFrame {
private:
ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
uint32_t dex_pc, bool has_reference_array)
- : link_(link), method_(method), result_register_(nullptr), dex_pc_ptr_(nullptr),
- code_item_(nullptr), number_of_vregs_(num_vregs), dex_pc_(dex_pc) {
+ : link_(link),
+ method_(method),
+ result_register_(nullptr),
+ dex_pc_ptr_(nullptr),
+ code_item_(nullptr),
+ number_of_vregs_(num_vregs),
+ dex_pc_(dex_pc),
+ cached_hotness_countdown_(0),
+ hotness_countdown_(0) {
// TODO(iam): Remove this parameter, it's an artifact of portable removal
DCHECK(has_reference_array);
if (has_reference_array) {
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index d657311ae9..250ff2af1a 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -118,7 +118,8 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios,
uint32_t code_offset,
uint16_t number_of_dex_registers,
bool dump_stack_maps,
- InstructionSet instruction_set) const {
+ InstructionSet instruction_set,
+ const MethodInfo& method_info) const {
CodeInfoEncoding encoding = ExtractEncoding();
size_t number_of_stack_maps = GetNumberOfStackMaps(encoding);
vios->Stream()
@@ -139,6 +140,7 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios,
stack_map.Dump(vios,
*this,
encoding,
+ method_info,
code_offset,
number_of_dex_registers,
instruction_set,
@@ -189,6 +191,7 @@ void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
void StackMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
const CodeInfoEncoding& encoding,
+ const MethodInfo& method_info,
uint32_t code_offset,
uint16_t number_of_dex_registers,
InstructionSet instruction_set,
@@ -222,12 +225,13 @@ void StackMap::Dump(VariableIndentationOutputStream* vios,
// We do not know the length of the dex register maps of inlined frames
// at this level, so we just pass null to `InlineInfo::Dump` to tell
// it not to look at these maps.
- inline_info.Dump(vios, code_info, nullptr);
+ inline_info.Dump(vios, code_info, method_info, nullptr);
}
}
void InlineInfo::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
+ const MethodInfo& method_info,
uint16_t number_of_dex_registers[]) const {
InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info.encoding;
vios->Stream() << "InlineInfo with depth "
@@ -245,7 +249,7 @@ void InlineInfo::Dump(VariableIndentationOutputStream* vios,
} else {
vios->Stream()
<< std::dec
- << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, i);
+ << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, method_info, i);
}
vios->Stream() << ")\n";
if (HasDexRegisterMapAtDepth(inline_info_encoding, i) && (number_of_dex_registers != nullptr)) {
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 67f0b5715d..a22498661e 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -23,6 +23,7 @@
#include "bit_memory_region.h"
#include "dex_file.h"
#include "memory_region.h"
+#include "method_info.h"
#include "leb128.h"
namespace art {
@@ -367,7 +368,8 @@ class DexRegisterLocationCatalog {
return region_.size();
}
- void Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info);
+ void Dump(VariableIndentationOutputStream* vios,
+ const CodeInfo& code_info);
// Special (invalid) Dex register location catalog entry index meaning
// that there is no location for a given Dex register (i.e., it is
@@ -571,7 +573,7 @@ class DexRegisterMap {
}
}
- bool IsDexRegisterLive(uint16_t dex_register_number) const {
+ ALWAYS_INLINE bool IsDexRegisterLive(uint16_t dex_register_number) const {
size_t live_bit_mask_offset_in_bits = GetLiveBitMaskOffset() * kBitsPerByte;
return region_.LoadBit(live_bit_mask_offset_in_bits + dex_register_number);
}
@@ -686,7 +688,13 @@ struct FieldEncoding {
class StackMapEncoding {
public:
- StackMapEncoding() {}
+ StackMapEncoding()
+ : dex_pc_bit_offset_(0),
+ dex_register_map_bit_offset_(0),
+ inline_info_bit_offset_(0),
+ register_mask_index_bit_offset_(0),
+ stack_mask_index_bit_offset_(0),
+ total_bit_size_(0) {}
// Set stack map bit layout based on given sizes.
// Returns the size of stack map in bits.
@@ -862,6 +870,7 @@ class StackMap {
void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
const CodeInfoEncoding& encoding,
+ const MethodInfo& method_info,
uint32_t code_offset,
uint16_t number_of_dex_registers,
InstructionSet instruction_set,
@@ -885,12 +894,12 @@ class StackMap {
class InlineInfoEncoding {
public:
- void SetFromSizes(size_t method_index_max,
+ void SetFromSizes(size_t method_index_idx_max,
size_t dex_pc_max,
size_t extra_data_max,
size_t dex_register_map_size) {
total_bit_size_ = kMethodIndexBitOffset;
- total_bit_size_ += MinimumBitsToStore(method_index_max);
+ total_bit_size_ += MinimumBitsToStore(method_index_idx_max);
dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_);
// Note: We're not encoding the dex pc if there is none. That's the case
@@ -908,7 +917,7 @@ class InlineInfoEncoding {
total_bit_size_ += MinimumBitsToStore(dex_register_map_size);
}
- ALWAYS_INLINE FieldEncoding GetMethodIndexEncoding() const {
+ ALWAYS_INLINE FieldEncoding GetMethodIndexIdxEncoding() const {
return FieldEncoding(kMethodIndexBitOffset, dex_pc_bit_offset_);
}
ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
@@ -975,16 +984,23 @@ class InlineInfo {
}
}
- ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth) const {
+ ALWAYS_INLINE uint32_t GetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
+ uint32_t depth) const {
DCHECK(!EncodesArtMethodAtDepth(encoding, depth));
- return encoding.GetMethodIndexEncoding().Load(GetRegionAtDepth(encoding, depth));
+ return encoding.GetMethodIndexIdxEncoding().Load(GetRegionAtDepth(encoding, depth));
}
- ALWAYS_INLINE void SetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
- uint32_t depth,
- uint32_t index) {
- encoding.GetMethodIndexEncoding().Store(GetRegionAtDepth(encoding, depth), index);
+ ALWAYS_INLINE void SetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding,
+ uint32_t depth,
+ uint32_t index) {
+ encoding.GetMethodIndexIdxEncoding().Store(GetRegionAtDepth(encoding, depth), index);
+ }
+
+
+ ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding,
+ const MethodInfo& method_info,
+ uint32_t depth) const {
+ return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(encoding, depth));
}
ALWAYS_INLINE uint32_t GetDexPcAtDepth(const InlineInfoEncoding& encoding,
@@ -1012,7 +1028,8 @@ class InlineInfo {
ALWAYS_INLINE ArtMethod* GetArtMethodAtDepth(const InlineInfoEncoding& encoding,
uint32_t depth) const {
uint32_t low_bits = encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth));
- uint32_t high_bits = encoding.GetMethodIndexEncoding().Load(GetRegionAtDepth(encoding, depth));
+ uint32_t high_bits = encoding.GetMethodIndexIdxEncoding().Load(
+ GetRegionAtDepth(encoding, depth));
if (high_bits == 0) {
return reinterpret_cast<ArtMethod*>(low_bits);
} else {
@@ -1040,6 +1057,7 @@ class InlineInfo {
void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& info,
+ const MethodInfo& method_info,
uint16_t* number_of_dex_registers) const;
private:
@@ -1219,12 +1237,18 @@ class InvokeInfo {
encoding.GetInvokeTypeEncoding().Store(region_, invoke_type);
}
- ALWAYS_INLINE uint32_t GetMethodIndex(const InvokeInfoEncoding& encoding) const {
+ ALWAYS_INLINE uint32_t GetMethodIndexIdx(const InvokeInfoEncoding& encoding) const {
return encoding.GetMethodIndexEncoding().Load(region_);
}
- ALWAYS_INLINE void SetMethodIndex(const InvokeInfoEncoding& encoding, uint32_t method_index) {
- encoding.GetMethodIndexEncoding().Store(region_, method_index);
+ ALWAYS_INLINE void SetMethodIndexIdx(const InvokeInfoEncoding& encoding,
+ uint32_t method_index_idx) {
+ encoding.GetMethodIndexEncoding().Store(region_, method_index_idx);
+ }
+
+ ALWAYS_INLINE uint32_t GetMethodIndex(const InvokeInfoEncoding& encoding,
+ MethodInfo method_info) const {
+ return method_info.GetMethodIndex(GetMethodIndexIdx(encoding));
}
bool IsValid() const { return region_.pointer() != nullptr; }
@@ -1542,7 +1566,8 @@ class CodeInfo {
uint32_t code_offset,
uint16_t number_of_dex_registers,
bool dump_stack_maps,
- InstructionSet instruction_set) const;
+ InstructionSet instruction_set,
+ const MethodInfo& method_info) const;
// Check that the code info has valid stack map and abort if it does not.
void AssertValidStackMap(const CodeInfoEncoding& encoding) const {
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 482e0e39a9..02a1e4d8a5 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -94,9 +94,7 @@ inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mu
if (held_mutex != nullptr &&
held_mutex != Locks::mutator_lock_ &&
held_mutex != cond_var_mutex) {
- std::vector<BaseMutex*>& expected_mutexes = Locks::expected_mutexes_on_weak_ref_access_;
- CHECK(std::find(expected_mutexes.begin(), expected_mutexes.end(), held_mutex) !=
- expected_mutexes.end())
+ CHECK(Locks::IsExpectedOnWeakRefAccess(held_mutex))
<< "Holding unexpected mutex " << held_mutex->GetName()
<< " when accessing weak ref";
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index ff66cc1697..008c388229 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -16,6 +16,10 @@
#include "thread.h"
+#if !defined(__APPLE__)
+#include <sched.h>
+#endif
+
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
@@ -1591,8 +1595,21 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
if (thread != nullptr) {
int policy;
sched_param sp;
+#if !defined(__APPLE__)
+ // b/36445592 Don't use pthread_getschedparam since pthread may have exited.
+ policy = sched_getscheduler(tid);
+ if (policy == -1) {
+ PLOG(WARNING) << "sched_getscheduler(" << tid << ")";
+ }
+ int sched_getparam_result = sched_getparam(tid, &sp);
+ if (sched_getparam_result == -1) {
+ PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)";
+ sp.sched_priority = -1;
+ }
+#else
CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
__FUNCTION__);
+#endif
os << " sched=" << policy << "/" << sp.sched_priority
<< " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
}
@@ -1934,7 +1951,6 @@ Thread::Thread(bool daemon)
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
tlsPtr_.name = new std::string(kThreadNameDuringStartup);
- tlsPtr_.nested_signal_state = static_cast<jmp_buf*>(malloc(sizeof(jmp_buf)));
static_assert((sizeof(Thread) % 4) == 0U,
"art::Thread has a size which is not a multiple of 4.");
@@ -2118,7 +2134,6 @@ Thread::~Thread() {
delete tlsPtr_.instrumentation_stack;
delete tlsPtr_.name;
delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
- free(tlsPtr_.nested_signal_state);
Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
diff --git a/runtime/thread.h b/runtime/thread.h
index d5fd9e9e51..de0b892f5f 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1115,21 +1115,12 @@ class Thread {
return tlsPtr_.mterp_alt_ibase;
}
- // Notify that a signal is being handled. This is to protect us from doing recursive
- // NPE handling after a SIGSEGV.
- void NoteSignalBeingHandled() {
- if (tls32_.handling_signal_) {
- LOG(FATAL) << "Detected signal while processing a signal";
- }
- tls32_.handling_signal_ = true;
- }
-
- void NoteSignalHandlerDone() {
- tls32_.handling_signal_ = false;
+ bool HandlingSignal() const {
+ return tls32_.handling_signal_;
}
- jmp_buf* GetNestedSignalState() {
- return tlsPtr_.nested_signal_state;
+ void SetHandlingSignal(bool handling_signal) {
+ tls32_.handling_signal_ = handling_signal;
}
bool IsTransitioningToRunnable() const {
@@ -1460,7 +1451,7 @@ class Thread {
thread_local_start(nullptr), thread_local_pos(nullptr), thread_local_end(nullptr),
thread_local_objects(0), mterp_current_ibase(nullptr), mterp_default_ibase(nullptr),
mterp_alt_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
- thread_local_alloc_stack_end(nullptr), nested_signal_state(nullptr),
+ thread_local_alloc_stack_end(nullptr),
flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr) {
std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
}
@@ -1606,9 +1597,6 @@ class Thread {
// Support for Mutex lock hierarchy bug detection.
BaseMutex* held_mutexes[kLockLevelCount];
- // Recorded thread state for nested signals.
- jmp_buf* nested_signal_state;
-
// The function used for thread flip.
Closure* flip_function;
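Editor's note: the thread.h changes drop the jmp_buf-based nested_signal_state and the fatal NoteSignalBeingHandled()/NoteSignalHandlerDone() pair in favor of plain HandlingSignal()/SetHandlingSignal() accessors. A hedged sketch of the calling pattern these accessors enable; the real logic is in runtime/fault_handler.cc (not shown here) and the helper name below is hypothetical:

// Sketch only -- illustrates bailing out of recursive signals instead of
// recovering via setjmp/longjmp. TryGenerateNullPointerException() is a
// hypothetical stand-in for the handler's actual work.
bool HandleFault(Thread* self) {
  if (self->HandlingSignal()) {
    // Already inside a signal handler: treat this as an unrecoverable crash.
    return false;
  }
  self->SetHandlingSignal(true);
  bool handled = TryGenerateNullPointerException(self);
  self->SetHandlingSignal(false);
  return handled;
}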
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index caed36936a..8d72fe80c2 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -736,7 +736,7 @@ void ThreadList::SuspendAllInternal(Thread* self,
// EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
if ((errno != EAGAIN) && (errno != EINTR)) {
if (errno == ETIMEDOUT) {
- LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
+ LOG(::android::base::FATAL)
<< "Timed out waiting for threads to suspend, waited for "
<< PrettyDuration(NanoTime() - start_time);
} else {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 7aa98cd33d..0333fe8db4 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -162,7 +162,7 @@ class Transaction FINAL {
FieldValueKind kind;
bool is_volatile;
- FieldValue() = default;
+ FieldValue() : value(0), kind(FieldValueKind::kBoolean), is_volatile(false) {}
FieldValue(FieldValue&& log) = default;
private:
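Editor's note: the FieldValue change above replaces "= default" with an initializing constructor. A minimal illustration of why that matters for records created before a field write is logged; the struct name here is illustrative only:

struct ValueRecord {
  uint64_t value;
  bool is_volatile;
};

void Example() {
  ValueRecord a;    // automatic storage, "= default" semantics: members indeterminate, reading them is UB
  ValueRecord b{};  // value-initialized: value == 0, is_volatile == false
  (void)a;
  (void)b;
}
// The new FieldValue() constructor makes the first case behave like the second.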
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index 97c1228038..920629276a 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -508,7 +508,7 @@ TEST_F(TransactionTest, ResolveString) {
dex::StringIndex string_idx = dex_file->GetIndexForStringId(*string_id);
ASSERT_TRUE(string_idx.IsValid());
// String should only get resolved by the initializer.
- EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache) == nullptr);
+ EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache.Get()) == nullptr);
EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
// Do the transaction, then roll back.
Transaction transaction;
@@ -518,7 +518,7 @@ TEST_F(TransactionTest, ResolveString) {
ASSERT_TRUE(h_klass->IsInitialized());
// Make sure the string got resolved by the transaction.
{
- mirror::String* s = class_linker_->LookupString(*dex_file, string_idx, h_dex_cache);
+ mirror::String* s = class_linker_->LookupString(*dex_file, string_idx, h_dex_cache.Get());
ASSERT_TRUE(s != nullptr);
EXPECT_STREQ(s->ToModifiedUtf8().c_str(), kResolvedString);
EXPECT_EQ(s, h_dex_cache->GetResolvedString(string_idx));
@@ -526,7 +526,7 @@ TEST_F(TransactionTest, ResolveString) {
Runtime::Current()->ExitTransactionMode();
transaction.Rollback();
// Check that the string did not stay resolved.
- EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache) == nullptr);
+ EXPECT_TRUE(class_linker_->LookupString(*dex_file, string_idx, h_dex_cache.Get()) == nullptr);
EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
ASSERT_FALSE(h_klass->IsInitialized());
ASSERT_FALSE(soa.Self()->IsExceptionPending());
diff --git a/runtime/type_lookup_table.h b/runtime/type_lookup_table.h
index 3f6f76f510..fd68deb71c 100644
--- a/runtime/type_lookup_table.h
+++ b/runtime/type_lookup_table.h
@@ -148,7 +148,7 @@ class TypeLookupTable {
return mask_;
}
- // Attempt to set an entry on it's hash' slot. If there is alrady something there, return false.
+ // Attempt to set an entry on its hash's slot. If there is already something there, return false.
// Otherwise return true.
bool SetOnInitialPos(const Entry& entry, uint32_t hash);
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 6a20eaf9e0..8d216cef0c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -52,77 +52,6 @@ namespace art {
using android::base::StringAppendF;
using android::base::StringPrintf;
-static const uint8_t kBase64Map[256] = {
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255,
- 255, 254, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, // NOLINT
- 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255, // NOLINT
- 255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
- 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, // NOLINT
- 49, 50, 51, 255, 255, 255, 255, 255, 255, 255, 255, 255, // NOLINT
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255
-};
-
-uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
- std::vector<uint8_t> tmp;
- uint32_t t = 0, y = 0;
- int g = 3;
- for (size_t i = 0; src[i] != '\0'; ++i) {
- uint8_t c = kBase64Map[src[i] & 0xFF];
- if (c == 255) continue;
- // the final = symbols are read and used to trim the remaining bytes
- if (c == 254) {
- c = 0;
- // prevent g < 0 which would potentially allow an overflow later
- if (--g < 0) {
- *dst_size = 0;
- return nullptr;
- }
- } else if (g != 3) {
- // we only allow = to be at the end
- *dst_size = 0;
- return nullptr;
- }
- t = (t << 6) | c;
- if (++y == 4) {
- tmp.push_back((t >> 16) & 255);
- if (g > 1) {
- tmp.push_back((t >> 8) & 255);
- }
- if (g > 2) {
- tmp.push_back(t & 255);
- }
- y = t = 0;
- }
- }
- if (y != 0) {
- *dst_size = 0;
- return nullptr;
- }
- std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
- if (dst_size != nullptr) {
- *dst_size = tmp.size();
- } else {
- *dst_size = 0;
- }
- std::copy(tmp.begin(), tmp.end(), dst.get());
- return dst.release();
-}
-
pid_t GetTid() {
#if defined(__APPLE__)
uint64_t owner;
diff --git a/runtime/utils.h b/runtime/utils.h
index 96e5bfa8ec..2011d9eba4 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -70,8 +70,6 @@ static inline uint32_t PointerToLowMemUInt32(const void* p) {
return intp & 0xFFFFFFFFU;
}
-uint8_t* DecodeBase64(const char* src, size_t* dst_size);
-
std::string PrintableChar(uint16_t ch);
// Returns an ASCII string corresponding to the given UTF-8 string.
@@ -325,6 +323,18 @@ constexpr size_t ArrayCount(const T (&)[size]) {
return size;
}
+// Return -1 if <, 0 if ==, 1 if >.
+template <typename T>
+inline static int32_t Compare(T lhs, T rhs) {
+ return (lhs < rhs) ? -1 : ((lhs == rhs) ? 0 : 1);
+}
+
+// Return -1 if < 0, 0 if == 0, 1 if > 0.
+template <typename T>
+inline static int32_t Signum(T opnd) {
+ return (opnd < 0) ? -1 : ((opnd == 0) ? 0 : 1);
+}
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
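Editor's note: a short usage example for the Compare/Signum helpers added above, assuming only the utils.h declarations shown in the hunk:

#include <cstdint>

void CompareSignumExample() {
  int32_t a = art::Compare<int64_t>(3, 7);   // -1: 3 < 7
  int32_t b = art::Compare<uint32_t>(5, 5);  //  0: equal
  int32_t c = art::Signum(-42);              // -1: negative
  int32_t d = art::Signum<int64_t>(0);       //  0: zero
  (void)a; (void)b; (void)c; (void)d;
}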
diff --git a/runtime/utils/dex_cache_arrays_layout-inl.h b/runtime/utils/dex_cache_arrays_layout-inl.h
index f9a1405354..95904af011 100644
--- a/runtime/utils/dex_cache_arrays_layout-inl.h
+++ b/runtime/utils/dex_cache_arrays_layout-inl.h
@@ -51,7 +51,11 @@ inline DexCacheArraysLayout::DexCacheArraysLayout(PointerSize pointer_size, cons
: DexCacheArraysLayout(pointer_size, dex_file->GetHeader(), dex_file->NumCallSiteIds()) {
}
-constexpr size_t DexCacheArraysLayout::Alignment() {
+inline size_t DexCacheArraysLayout::Alignment() const {
+ return Alignment(pointer_size_);
+}
+
+inline constexpr size_t DexCacheArraysLayout::Alignment(PointerSize pointer_size) {
// mirror::Type/String/MethodTypeDexCacheType alignment is 8,
// i.e. higher than or equal to the pointer alignment.
static_assert(alignof(mirror::TypeDexCacheType) == 8,
@@ -60,8 +64,8 @@ constexpr size_t DexCacheArraysLayout::Alignment() {
"Expecting alignof(StringDexCacheType) == 8");
static_assert(alignof(mirror::MethodTypeDexCacheType) == 8,
"Expecting alignof(MethodTypeDexCacheType) == 8");
- // This is the same as alignof(MethodTypeDexCacheType).
- return alignof(mirror::StringDexCacheType);
+ // This is the same as alignof(FieldDexCacheType) for the given pointer size.
+ return 2u * static_cast<size_t>(pointer_size);
}
template <typename T>
@@ -100,8 +104,8 @@ inline size_t DexCacheArraysLayout::MethodsAlignment() const {
}
inline size_t DexCacheArraysLayout::StringOffset(uint32_t string_idx) const {
- return strings_offset_ + ElementOffset(PointerSize::k64,
- string_idx % mirror::DexCache::kDexCacheStringCacheSize);
+ uint32_t string_hash = string_idx % mirror::DexCache::kDexCacheStringCacheSize;
+ return strings_offset_ + ElementOffset(PointerSize::k64, string_hash);
}
inline size_t DexCacheArraysLayout::StringsSize(size_t num_elements) const {
@@ -119,15 +123,20 @@ inline size_t DexCacheArraysLayout::StringsAlignment() const {
}
inline size_t DexCacheArraysLayout::FieldOffset(uint32_t field_idx) const {
- return fields_offset_ + ElementOffset(pointer_size_, field_idx);
+ uint32_t field_hash = field_idx % mirror::DexCache::kDexCacheFieldCacheSize;
+ return fields_offset_ + 2u * static_cast<size_t>(pointer_size_) * field_hash;
}
inline size_t DexCacheArraysLayout::FieldsSize(size_t num_elements) const {
- return ArraySize(pointer_size_, num_elements);
+ size_t cache_size = mirror::DexCache::kDexCacheFieldCacheSize;
+ if (num_elements < cache_size) {
+ cache_size = num_elements;
+ }
+ return 2u * static_cast<size_t>(pointer_size_) * num_elements;
}
inline size_t DexCacheArraysLayout::FieldsAlignment() const {
- return static_cast<size_t>(pointer_size_);
+ return 2u * static_cast<size_t>(pointer_size_);
}
inline size_t DexCacheArraysLayout::MethodTypesSize(size_t num_elements) const {
diff --git a/runtime/utils/dex_cache_arrays_layout.h b/runtime/utils/dex_cache_arrays_layout.h
index ed677ed3f4..377a3749a6 100644
--- a/runtime/utils/dex_cache_arrays_layout.h
+++ b/runtime/utils/dex_cache_arrays_layout.h
@@ -57,7 +57,9 @@ class DexCacheArraysLayout {
return size_;
}
- static constexpr size_t Alignment();
+ size_t Alignment() const;
+
+ static constexpr size_t Alignment(PointerSize pointer_size);
size_t TypesOffset() const {
return types_offset_;
@@ -125,8 +127,6 @@ class DexCacheArraysLayout {
const size_t call_sites_offset_;
const size_t size_;
- static size_t Alignment(PointerSize pointer_size);
-
static size_t ElementOffset(PointerSize element_size, uint32_t idx);
static size_t ArraySize(PointerSize element_size, uint32_t num_elements);
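Editor's note: the dex cache field slots now hold a pair per entry, so both FieldOffset() and FieldsAlignment() use 2 * pointer size, and the field index is hashed into kDexCacheFieldCacheSize buckets. A small arithmetic sketch under stated assumptions (the concrete cache size and pointer size below are assumptions for the example, not taken from this diff):

// Sketch of the new field-slot arithmetic for a 64-bit pointer size.
constexpr size_t kPointerSize = 8u;            // assumed PointerSize::k64
constexpr size_t kFieldCacheSize = 1024u;      // assumed kDexCacheFieldCacheSize
constexpr size_t kSlotSize = 2u * kPointerSize;  // one <index, field> pair per slot

size_t FieldSlotOffset(size_t fields_offset, uint32_t field_idx) {
  uint32_t hash = field_idx % kFieldCacheSize;
  return fields_offset + kSlotSize * hash;     // mirrors FieldOffset() above
}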
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 2481c8ba46..9ff104bbe1 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -120,4 +120,30 @@ const uint8_t* VdexFile::GetNextDexFileData(const uint8_t* cursor) const {
}
}
+bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::string* error_msg) {
+ size_t i = 0;
+ for (const uint8_t* dex_file_start = GetNextDexFileData(nullptr);
+ dex_file_start != nullptr;
+ dex_file_start = GetNextDexFileData(dex_file_start), ++i) {
+ size_t size = reinterpret_cast<const DexFile::Header*>(dex_file_start)->file_size_;
+ // TODO: Supply the location information for a vdex file.
+ static constexpr char kVdexLocation[] = "";
+ std::string location = DexFile::GetMultiDexLocation(i, kVdexLocation);
+ std::unique_ptr<const DexFile> dex(DexFile::Open(dex_file_start,
+ size,
+ location,
+ GetLocationChecksum(i),
+ nullptr /*oat_dex_file*/,
+ false /*verify*/,
+ false /*verify_checksum*/,
+ error_msg));
+ if (dex == nullptr) {
+ return false;
+ }
+ dex_files->push_back(std::move(dex));
+ }
+ return true;
+}
+
} // namespace art
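Editor's note: a hedged usage sketch for the new VdexFile::OpenAllDexFiles() helper, with error handling simplified; "vdex" is assumed to be an already-opened VdexFile*:

std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
if (!vdex->OpenAllDexFiles(&dex_files, &error_msg)) {
  LOG(ERROR) << "Failed to open dex files from vdex: " << error_msg;
} else {
  for (const std::unique_ptr<const DexFile>& dex : dex_files) {
    LOG(INFO) << "Opened " << dex->GetLocation();
  }
}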
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 7daf2f8d7b..9840555bbd 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -27,6 +27,8 @@
namespace art {
+class DexFile;
+
// VDEX files contain extracted DEX files. The VdexFile class maps the file to
// memory and provides tools for accessing its individual sections.
//
@@ -61,7 +63,7 @@ class VdexFile {
private:
static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
- static constexpr uint8_t kVdexVersion[] = { '0', '0', '3', '\0' }; // Remove verify-profile
+ static constexpr uint8_t kVdexVersion[] = { '0', '0', '5', '\0' }; // access flags
uint8_t magic_[4];
uint8_t version_[4];
@@ -122,6 +124,12 @@ class VdexFile {
return reinterpret_cast<const uint32_t*>(Begin() + sizeof(Header))[dex_file_index];
}
+ // Opens all the dex files contained in this vdex file. This is currently
+ // used for dumping tools only, and has not been tested for use by the
+ // remainder of the runtime.
+ bool OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ std::string* error_msg);
+
private:
explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 52f7e348ce..740b7dd7d4 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -309,6 +309,7 @@ PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const StringPie
// Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError
// would be thrown at runtime, but we need to continue verification and *not* create a
// hard failure or abort.
+ CheckConstructorInvariants(this);
}
std::string UnresolvedMergedType::Dump() const {
@@ -789,7 +790,7 @@ void RegType::CheckInvariants() const {
if (!klass_.IsNull()) {
CHECK(!descriptor_.empty()) << *this;
std::string temp;
- CHECK_EQ(descriptor_.ToString(), klass_.Read()->GetDescriptor(&temp)) << *this;
+ CHECK_EQ(descriptor_, klass_.Read()->GetDescriptor(&temp)) << *this;
}
}
@@ -820,9 +821,7 @@ UnresolvedMergedType::UnresolvedMergedType(const RegType& resolved,
reg_type_cache_(reg_type_cache),
resolved_part_(resolved),
unresolved_types_(unresolved, false, unresolved.GetAllocator()) {
- if (kIsDebugBuild) {
- CheckInvariants();
- }
+ CheckConstructorInvariants(this);
}
void UnresolvedMergedType::CheckInvariants() const {
CHECK(reg_type_cache_ != nullptr);
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 472381dd9b..dedf77f7db 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -274,14 +274,17 @@ class RegType {
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: descriptor_(descriptor),
klass_(klass),
- cache_id_(cache_id) {
+ cache_id_(cache_id) {}
+
+ template <typename Class>
+ void CheckConstructorInvariants(Class* this_ ATTRIBUTE_UNUSED) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ static_assert(std::is_final<Class>::value, "Class must be final.");
if (kIsDebugBuild) {
CheckInvariants();
}
}
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
-
const StringPiece descriptor_;
mutable GcRoot<mirror::Class> klass_; // Non-const only due to moving classes.
const uint16_t cache_id_;
@@ -289,6 +292,8 @@ class RegType {
friend class RegTypeCache;
private:
+ virtual void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
+
/*
* A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is
* S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of
@@ -339,7 +344,9 @@ class ConflictType FINAL : public RegType {
private:
ConflictType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : RegType(klass, descriptor, cache_id) {}
+ : RegType(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const ConflictType* instance_;
};
@@ -368,7 +375,9 @@ class UndefinedType FINAL : public RegType {
private:
UndefinedType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : RegType(klass, descriptor, cache_id) {}
+ : RegType(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const UndefinedType* instance_;
};
@@ -387,7 +396,7 @@ class Cat1Type : public PrimitiveType {
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
-class IntegerType : public Cat1Type {
+class IntegerType FINAL : public Cat1Type {
public:
bool IsInteger() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
@@ -401,7 +410,9 @@ class IntegerType : public Cat1Type {
private:
IntegerType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : Cat1Type(klass, descriptor, cache_id) {}
+ : Cat1Type(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const IntegerType* instance_;
};
@@ -419,7 +430,9 @@ class BooleanType FINAL : public Cat1Type {
private:
BooleanType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : Cat1Type(klass, descriptor, cache_id) {}
+ : Cat1Type(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const BooleanType* instance_;
};
@@ -438,7 +451,9 @@ class ByteType FINAL : public Cat1Type {
private:
ByteType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : Cat1Type(klass, descriptor, cache_id) {}
+ : Cat1Type(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const ByteType* instance_;
};
@@ -456,7 +471,9 @@ class ShortType FINAL : public Cat1Type {
private:
ShortType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : Cat1Type(klass, descriptor, cache_id) {}
+ : Cat1Type(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const ShortType* instance_;
};
@@ -474,7 +491,9 @@ class CharType FINAL : public Cat1Type {
private:
CharType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : Cat1Type(klass, descriptor, cache_id) {}
+ : Cat1Type(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const CharType* instance_;
};
@@ -492,7 +511,9 @@ class FloatType FINAL : public Cat1Type {
private:
FloatType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : Cat1Type(klass, descriptor, cache_id) {}
+ : Cat1Type(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const FloatType* instance_;
};
@@ -517,7 +538,9 @@ class LongLoType FINAL : public Cat2Type {
private:
LongLoType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : Cat2Type(klass, descriptor, cache_id) {}
+ : Cat2Type(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const LongLoType* instance_;
};
@@ -535,7 +558,9 @@ class LongHiType FINAL : public Cat2Type {
private:
LongHiType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : Cat2Type(klass, descriptor, cache_id) {}
+ : Cat2Type(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const LongHiType* instance_;
};
@@ -554,7 +579,9 @@ class DoubleLoType FINAL : public Cat2Type {
private:
DoubleLoType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : Cat2Type(klass, descriptor, cache_id) {}
+ : Cat2Type(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const DoubleLoType* instance_;
};
@@ -572,7 +599,9 @@ class DoubleHiType FINAL : public Cat2Type {
private:
DoubleHiType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : Cat2Type(klass, descriptor, cache_id) {}
+ : Cat2Type(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
static const DoubleHiType* instance_;
};
@@ -637,7 +666,9 @@ class PreciseConstType FINAL : public ConstantType {
public:
PreciseConstType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
- : ConstantType(constant, cache_id) {}
+ : ConstantType(constant, cache_id) {
+ CheckConstructorInvariants(this);
+ }
bool IsPreciseConstant() const OVERRIDE { return true; }
@@ -648,7 +679,9 @@ class PreciseConstLoType FINAL : public ConstantType {
public:
PreciseConstLoType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
- : ConstantType(constant, cache_id) {}
+ : ConstantType(constant, cache_id) {
+ CheckConstructorInvariants(this);
+ }
bool IsPreciseConstantLo() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
@@ -657,7 +690,9 @@ class PreciseConstHiType FINAL : public ConstantType {
public:
PreciseConstHiType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
- : ConstantType(constant, cache_id) {}
+ : ConstantType(constant, cache_id) {
+ CheckConstructorInvariants(this);
+ }
bool IsPreciseConstantHi() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
@@ -667,6 +702,7 @@ class ImpreciseConstType FINAL : public ConstantType {
ImpreciseConstType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {
+ CheckConstructorInvariants(this);
}
bool IsImpreciseConstant() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
@@ -676,7 +712,9 @@ class ImpreciseConstLoType FINAL : public ConstantType {
public:
ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
- : ConstantType(constant, cache_id) {}
+ : ConstantType(constant, cache_id) {
+ CheckConstructorInvariants(this);
+ }
bool IsImpreciseConstantLo() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
@@ -685,7 +723,9 @@ class ImpreciseConstHiType FINAL : public ConstantType {
public:
ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
- : ConstantType(constant, cache_id) {}
+ : ConstantType(constant, cache_id) {
+ CheckConstructorInvariants(this);
+ }
bool IsImpreciseConstantHi() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
@@ -718,7 +758,9 @@ class UninitializedReferenceType FINAL : public UninitializedType {
const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
- : UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
+ : UninitializedType(klass, descriptor, allocation_pc, cache_id) {
+ CheckConstructorInvariants(this);
+ }
bool IsUninitializedReference() const OVERRIDE { return true; }
@@ -735,9 +777,7 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType {
uint32_t allocation_pc, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
- if (kIsDebugBuild) {
- CheckInvariants();
- }
+ CheckConstructorInvariants(this);
}
bool IsUnresolvedAndUninitializedReference() const OVERRIDE { return true; }
@@ -747,7 +787,7 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType {
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
};
// Similar to UninitializedReferenceType but special case for the this argument
@@ -759,9 +799,7 @@ class UninitializedThisReferenceType FINAL : public UninitializedType {
uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, 0, cache_id) {
- if (kIsDebugBuild) {
- CheckInvariants();
- }
+ CheckConstructorInvariants(this);
}
virtual bool IsUninitializedThisReference() const OVERRIDE { return true; }
@@ -771,7 +809,7 @@ class UninitializedThisReferenceType FINAL : public UninitializedType {
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
};
class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
@@ -780,9 +818,7 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, 0, cache_id) {
- if (kIsDebugBuild) {
- CheckInvariants();
- }
+ CheckConstructorInvariants(this);
}
bool IsUnresolvedAndUninitializedThisReference() const OVERRIDE { return true; }
@@ -792,7 +828,7 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
};
// A type of register holding a reference to an Object of type GetClass or a
@@ -801,7 +837,9 @@ class ReferenceType FINAL : public RegType {
public:
ReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
- : RegType(klass, descriptor, cache_id) {}
+ : RegType(klass, descriptor, cache_id) {
+ CheckConstructorInvariants(this);
+ }
bool IsReference() const OVERRIDE { return true; }
@@ -848,9 +886,7 @@ class UnresolvedReferenceType FINAL : public UnresolvedType {
UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
REQUIRES_SHARED(Locks::mutator_lock_)
: UnresolvedType(descriptor, cache_id) {
- if (kIsDebugBuild) {
- CheckInvariants();
- }
+ CheckConstructorInvariants(this);
}
bool IsUnresolvedReference() const OVERRIDE { return true; }
@@ -860,7 +896,7 @@ class UnresolvedReferenceType FINAL : public UnresolvedType {
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
};
// Type representing the super-class of an unresolved type.
@@ -872,9 +908,7 @@ class UnresolvedSuperClass FINAL : public UnresolvedType {
: UnresolvedType("", cache_id),
unresolved_child_id_(child_id),
reg_type_cache_(reg_type_cache) {
- if (kIsDebugBuild) {
- CheckInvariants();
- }
+ CheckConstructorInvariants(this);
}
bool IsUnresolvedSuperClass() const OVERRIDE { return true; }
@@ -889,7 +923,7 @@ class UnresolvedSuperClass FINAL : public UnresolvedType {
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
const uint16_t unresolved_child_id_;
const RegTypeCache* const reg_type_cache_;
@@ -925,7 +959,7 @@ class UnresolvedMergedType FINAL : public UnresolvedType {
std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
const RegTypeCache* const reg_type_cache_;
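Editor's note: the reg_type.h changes centralize the debug-build invariant check into a shared CheckConstructorInvariants() template and make CheckInvariants() a private virtual, with a static_assert that the caller is a final class so the virtual dispatch from a constructor body is well defined. A reduced, self-contained sketch of the pattern; names below are illustrative, not ART's:

#include <type_traits>

class Base {
 protected:
  template <typename Class>
  void CheckConstructorInvariants(Class* /*this_*/) const {
    // A virtual call made during construction only sees the constructing
    // class's override, so require the caller to be the most-derived type.
    static_assert(std::is_final<Class>::value, "Class must be final.");
    if (kIsDebugBuild) {
      CheckInvariants();
    }
  }

 private:
  virtual void CheckInvariants() const {}
  static constexpr bool kIsDebugBuild = true;  // stand-in for ART's flag
};

class Derived final : public Base {
 public:
  Derived() { CheckConstructorInvariants(this); }

 private:
  void CheckInvariants() const override { /* class-specific checks */ }
};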
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index 000cf7c393..8e4c166492 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -68,13 +68,17 @@ const VerifierDeps::DexFileDeps* VerifierDeps::GetDexFileDeps(const DexFile& dex
return (it == dex_deps_.end()) ? nullptr : it->second.get();
}
+// Access flags that impact vdex verification.
+static constexpr uint32_t kAccVdexAccessFlags =
+ kAccPublic | kAccPrivate | kAccProtected | kAccStatic | kAccInterface;
+
template <typename T>
uint16_t VerifierDeps::GetAccessFlags(T* element) {
static_assert(kAccJavaFlagsMask == 0xFFFF, "Unexpected value of a constant");
if (element == nullptr) {
return VerifierDeps::kUnresolvedMarker;
} else {
- uint16_t access_flags = Low16Bits(element->GetAccessFlags());
+ uint16_t access_flags = Low16Bits(element->GetAccessFlags()) & kAccVdexAccessFlags;
CHECK_NE(access_flags, VerifierDeps::kUnresolvedMarker);
return access_flags;
}
@@ -458,8 +462,7 @@ void VerifierDeps::AddAssignability(const DexFile& dex_file,
}
if (!IsInClassPath(source)) {
- if (!destination->IsInterface()) {
- DCHECK(!source->IsInterface());
+ if (!destination->IsInterface() && !source->IsInterface()) {
// Find the super class at the classpath boundary. Only that class
// can change the assignability.
do {
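Editor's note: the verifier_deps.cc change masks recorded access flags with kAccVdexAccessFlags, so only flags that influence verification (visibility, static, interface) end up in the vdex dependencies. An illustrative sketch of the masking; the flag constants are ART's public names, with their standard dex values written out here only for the example:

constexpr uint32_t kAccPublic    = 0x0001;
constexpr uint32_t kAccPrivate   = 0x0002;
constexpr uint32_t kAccProtected = 0x0004;
constexpr uint32_t kAccStatic    = 0x0008;
constexpr uint32_t kAccInterface = 0x0200;
constexpr uint32_t kAccVdexAccessFlags =
    kAccPublic | kAccPrivate | kAccProtected | kAccStatic | kAccInterface;

uint16_t RecordedFlags(uint32_t raw_access_flags) {
  // Drop flags that do not influence verification (e.g. kAccFinal,
  // kAccSynthetic), so unrelated flag changes do not perturb the vdex deps.
  return static_cast<uint16_t>(raw_access_flags & kAccVdexAccessFlags);
}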
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index 2610252aa7..5aef062728 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -34,7 +34,6 @@
namespace art {
-jclass WellKnownClasses::com_android_dex_Dex;
jclass WellKnownClasses::dalvik_annotation_optimization_CriticalNative;
jclass WellKnownClasses::dalvik_annotation_optimization_FastNative;
jclass WellKnownClasses::dalvik_system_BaseDexClassLoader;
@@ -80,7 +79,6 @@ jclass WellKnownClasses::libcore_util_EmptyArray;
jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk;
jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer;
-jmethodID WellKnownClasses::com_android_dex_Dex_create;
jmethodID WellKnownClasses::dalvik_system_VMRuntime_runFinalization;
jmethodID WellKnownClasses::java_lang_Boolean_valueOf;
jmethodID WellKnownClasses::java_lang_Byte_valueOf;
@@ -268,7 +266,6 @@ uint32_t WellKnownClasses::StringInitToEntryPoint(ArtMethod* string_init) {
#undef STRING_INIT_LIST
void WellKnownClasses::Init(JNIEnv* env) {
- com_android_dex_Dex = CacheClass(env, "com/android/dex/Dex");
dalvik_annotation_optimization_CriticalNative =
CacheClass(env, "dalvik/annotation/optimization/CriticalNative");
dalvik_annotation_optimization_FastNative = CacheClass(env, "dalvik/annotation/optimization/FastNative");
@@ -317,7 +314,6 @@ void WellKnownClasses::Init(JNIEnv* env) {
org_apache_harmony_dalvik_ddmc_DdmServer = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer");
dalvik_system_VMRuntime_runFinalization = CacheMethod(env, dalvik_system_VMRuntime, true, "runFinalization", "(J)V");
- com_android_dex_Dex_create = CacheMethod(env, com_android_dex_Dex, true, "create", "(Ljava/nio/ByteBuffer;)Lcom/android/dex/Dex;");
java_lang_ClassNotFoundException_init = CacheMethod(env, java_lang_ClassNotFoundException, false, "<init>", "(Ljava/lang/String;Ljava/lang/Throwable;)V");
java_lang_ClassLoader_loadClass = CacheMethod(env, java_lang_ClassLoader, false, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;");
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index db8a53c44c..c18473197b 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -44,7 +44,6 @@ struct WellKnownClasses {
static ObjPtr<mirror::Class> ToClass(jclass global_jclass) REQUIRES_SHARED(Locks::mutator_lock_);
- static jclass com_android_dex_Dex;
static jclass dalvik_annotation_optimization_CriticalNative;
static jclass dalvik_annotation_optimization_FastNative;
static jclass dalvik_system_BaseDexClassLoader;
@@ -90,7 +89,6 @@ struct WellKnownClasses {
static jclass org_apache_harmony_dalvik_ddmc_Chunk;
static jclass org_apache_harmony_dalvik_ddmc_DdmServer;
- static jmethodID com_android_dex_Dex_create;
static jmethodID dalvik_system_VMRuntime_runFinalization;
static jmethodID java_lang_Boolean_valueOf;
static jmethodID java_lang_Byte_valueOf;