Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.bp | 25
-rw-r--r--  runtime/alloc_instrumentation.md | 2
-rw-r--r--  runtime/app_info.cc | 13
-rw-r--r--  runtime/app_info.h | 7
-rw-r--r--  runtime/app_info_test.cc | 14
-rw-r--r--  runtime/arch/arm/asm_support_arm.S | 4
-rw-r--r--  runtime/arch/arm/asm_support_arm.h | 20
-rw-r--r--  runtime/arch/arm/entrypoints_init_arm.cc | 2
-rw-r--r--  runtime/arch/arm/jni_entrypoints_arm.S | 7
-rw-r--r--  runtime/arch/arm/quick_entrypoints_arm.S | 282
-rw-r--r--  runtime/arch/arm/quick_entrypoints_cc_arm.cc | 3
-rw-r--r--  runtime/arch/arm64/asm_support_arm64.h | 13
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.cc | 12
-rw-r--r--  runtime/arch/arm64/instruction_set_features_arm64.h | 4
-rw-r--r--  runtime/arch/arm64/jni_entrypoints_arm64.S | 7
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S | 268
-rw-r--r--  runtime/arch/instruction_set_features.cc | 27
-rw-r--r--  runtime/arch/instruction_set_features.h | 6
-rw-r--r--  runtime/arch/quick_alloc_entrypoints.S | 48
-rw-r--r--  runtime/arch/x86/asm_support_x86.h | 2
-rw-r--r--  runtime/arch/x86/jni_entrypoints_x86.S | 8
-rw-r--r--  runtime/arch/x86/quick_entrypoints_x86.S | 329
-rw-r--r--  runtime/arch/x86_64/asm_support_x86_64.h | 2
-rw-r--r--  runtime/arch/x86_64/jni_entrypoints_x86_64.S | 8
-rw-r--r--  runtime/arch/x86_64/quick_entrypoints_x86_64.S | 302
-rw-r--r--  runtime/art_method-inl.h | 18
-rw-r--r--  runtime/art_method.cc | 78
-rw-r--r--  runtime/art_method.h | 14
-rw-r--r--  runtime/art_standalone_runtime_tests.xml | 10
-rw-r--r--  runtime/backtrace_helper.h | 2
-rw-r--r--  runtime/base/locks.h | 2
-rw-r--r--  runtime/base/mutex.cc | 12
-rw-r--r--  runtime/check_reference_map_visitor.h | 2
-rw-r--r--  runtime/class_linker-inl.h | 6
-rw-r--r--  runtime/class_linker.cc | 317
-rw-r--r--  runtime/class_linker.h | 22
-rw-r--r--  runtime/class_linker_test.cc | 19
-rw-r--r--  runtime/class_loader_context.cc | 9
-rw-r--r--  runtime/class_loader_context.h | 5
-rw-r--r--  runtime/class_loader_utils.h | 2
-rw-r--r--  runtime/class_table-inl.h | 37
-rw-r--r--  runtime/class_table.h | 9
-rw-r--r--  runtime/common_runtime_test.cc | 3
-rw-r--r--  runtime/common_runtime_test.h | 4
-rw-r--r--  runtime/common_throws.cc | 21
-rw-r--r--  runtime/common_throws.h | 5
-rw-r--r--  runtime/debug_print.cc | 56
-rw-r--r--  runtime/debug_print.h | 3
-rw-r--r--  runtime/dex/dex_file_annotations.cc | 97
-rw-r--r--  runtime/dex/dex_file_annotations.h | 6
-rw-r--r--  runtime/dex2oat_environment_test.h | 21
-rw-r--r--  runtime/dexopt_test.cc | 32
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h | 241
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h | 11
-rw-r--r--  runtime/entrypoints/jni/jni_entrypoints.cc | 4
-rw-r--r--  runtime/entrypoints/quick/quick_default_externs.h | 1
-rw-r--r--  runtime/entrypoints/quick/quick_default_init_entrypoints.h | 1
-rw-r--r--  runtime/entrypoints/quick/quick_dexcache_entrypoints.cc | 1
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints.h | 1
-rw-r--r--  runtime/entrypoints/quick/quick_entrypoints_list.h | 1
-rw-r--r--  runtime/entrypoints/quick/quick_field_entrypoints.cc | 14
-rw-r--r--  runtime/entrypoints/quick/quick_jni_entrypoints.cc | 42
-rw-r--r--  runtime/entrypoints/quick/quick_thread_entrypoints.cc | 30
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 154
-rw-r--r--  runtime/entrypoints_order_test.cc | 4
-rw-r--r--  runtime/exec_utils.cc | 258
-rw-r--r--  runtime/exec_utils.h | 49
-rw-r--r--  runtime/exec_utils_test.cc | 116
-rw-r--r--  runtime/gc/accounting/atomic_stack.h | 41
-rw-r--r--  runtime/gc/accounting/bitmap.cc | 2
-rw-r--r--  runtime/gc/accounting/bitmap.h | 42
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc | 10
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h | 48
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc | 34
-rw-r--r--  runtime/gc/accounting/space_bitmap.h | 16
-rw-r--r--  runtime/gc/allocation_record.cc | 20
-rw-r--r--  runtime/gc/allocator/art-dlmalloc.cc (renamed from runtime/gc/allocator/dlmalloc.cc) | 6
-rw-r--r--  runtime/gc/allocator/art-dlmalloc.h (renamed from runtime/gc/allocator/dlmalloc.h) | 8
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc | 6
-rw-r--r--  runtime/gc/collector/garbage_collector.cc | 16
-rw-r--r--  runtime/gc/collector/garbage_collector.h | 3
-rw-r--r--  runtime/gc/collector/immune_spaces_test.cc | 2
-rw-r--r--  runtime/gc/collector/mark_compact-inl.h | 350
-rw-r--r--  runtime/gc/collector/mark_compact.cc | 2588
-rw-r--r--  runtime/gc/collector/mark_compact.h | 560
-rw-r--r--  runtime/gc/collector_type.h | 8
-rw-r--r--  runtime/gc/heap-inl.h | 10
-rw-r--r--  runtime/gc/heap-visit-objects-inl.h | 2
-rw-r--r--  runtime/gc/heap.cc | 185
-rw-r--r--  runtime/gc/heap.h | 43
-rw-r--r--  runtime/gc/heap_verification_test.cc | 14
-rw-r--r--  runtime/gc/reference_processor.cc | 14
-rw-r--r--  runtime/gc/space/bump_pointer_space-inl.h | 6
-rw-r--r--  runtime/gc/space/bump_pointer_space-walk-inl.h | 47
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc | 81
-rw-r--r--  runtime/gc/space/bump_pointer_space.h | 65
-rw-r--r--  runtime/gc/space/dlmalloc_space-inl.h | 2
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc | 20
-rw-r--r--  runtime/gc/space/image_space.cc | 419
-rw-r--r--  runtime/gc/space/image_space.h | 214
-rw-r--r--  runtime/gc/space/image_space_test.cc | 50
-rw-r--r--  runtime/gc/space/malloc_space.h | 2
-rw-r--r--  runtime/gc/space/region_space.h | 2
-rw-r--r--  runtime/gc/system_weak.h | 8
-rw-r--r--  runtime/gc/system_weak_test.cc | 25
-rw-r--r--  runtime/gc/verification-inl.h | 63
-rw-r--r--  runtime/gc/verification.cc | 44
-rw-r--r--  runtime/gc/verification.h | 12
-rw-r--r--  runtime/handle.cc | 2
-rw-r--r--  runtime/instrumentation.cc | 679
-rw-r--r--  runtime/instrumentation.h | 136
-rw-r--r--  runtime/instrumentation_test.cc | 3
-rw-r--r--  runtime/intern_table.cc | 15
-rw-r--r--  runtime/interpreter/interpreter.cc | 65
-rw-r--r--  runtime/interpreter/interpreter_cache-inl.h | 4
-rw-r--r--  runtime/interpreter/interpreter_cache.h | 2
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 43
-rw-r--r--  runtime/interpreter/interpreter_common.h | 100
-rw-r--r--  runtime/interpreter/interpreter_intrinsics.cc | 678
-rw-r--r--  runtime/interpreter/interpreter_intrinsics.h | 41
-rw-r--r--  runtime/interpreter/interpreter_switch_impl-inl.h | 7
-rw-r--r--  runtime/interpreter/mterp/arm64ng/main.S | 8
-rw-r--r--  runtime/interpreter/mterp/armng/main.S | 8
-rw-r--r--  runtime/interpreter/mterp/nterp.cc | 45
-rw-r--r--  runtime/interpreter/mterp/nterp.h | 1
-rw-r--r--  runtime/interpreter/mterp/x86_64ng/main.S | 7
-rw-r--r--  runtime/interpreter/mterp/x86ng/main.S | 9
-rw-r--r--  runtime/interpreter/shadow_frame.h | 21
-rw-r--r--  runtime/interpreter/unstarted_runtime.cc | 3
-rw-r--r--  runtime/interpreter/unstarted_runtime_test.cc | 6
-rw-r--r--  runtime/jit/jit.cc | 189
-rw-r--r--  runtime/jit/jit.h | 14
-rw-r--r--  runtime/jit/jit_code_cache.cc | 47
-rw-r--r--  runtime/jit/jit_code_cache.h | 28
-rw-r--r--  runtime/jit/jit_memory_region.cc | 2
-rw-r--r--  runtime/jni/java_vm_ext-inl.h | 2
-rw-r--r--  runtime/jni/java_vm_ext.cc | 10
-rw-r--r--  runtime/jni/jni_internal.cc | 39
-rw-r--r--  runtime/lock_word.h | 5
-rw-r--r--  runtime/managed_stack.h | 26
-rw-r--r--  runtime/method_handles.cc | 8
-rw-r--r--  runtime/metrics/reporter.cc | 12
-rw-r--r--  runtime/metrics/reporter.h | 3
-rw-r--r--  runtime/metrics/reporter_test.cc | 8
-rw-r--r--  runtime/metrics/statsd.cc | 28
-rw-r--r--  runtime/mirror/array-inl.h | 9
-rw-r--r--  runtime/mirror/array.h | 3
-rw-r--r--  runtime/mirror/class-inl.h | 11
-rw-r--r--  runtime/mirror/class-refvisitor-inl.h | 25
-rw-r--r--  runtime/mirror/class.h | 32
-rw-r--r--  runtime/mirror/class_ext-inl.h | 20
-rw-r--r--  runtime/mirror/class_ext.h | 23
-rw-r--r--  runtime/mirror/dex_cache-inl.h | 31
-rw-r--r--  runtime/mirror/dex_cache.h | 6
-rw-r--r--  runtime/mirror/object-inl.h | 4
-rw-r--r--  runtime/mirror/object-refvisitor-inl.h | 98
-rw-r--r--  runtime/mirror/object.cc | 2
-rw-r--r--  runtime/mirror/object.h | 11
-rw-r--r--  runtime/mirror/object_array-inl.h | 23
-rw-r--r--  runtime/mirror/object_array.h | 4
-rw-r--r--  runtime/mirror/object_reference.h | 1
-rw-r--r--  runtime/mirror/stack_frame_info.cc | 67
-rw-r--r--  runtime/mirror/stack_frame_info.h | 71
-rw-r--r--  runtime/mirror/string-inl.h | 4
-rw-r--r--  runtime/mirror/var_handle.cc | 2
-rw-r--r--  runtime/monitor.cc | 14
-rw-r--r--  runtime/monitor_objects_stack_visitor.cc | 2
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc | 2
-rw-r--r--  runtime/native/java_lang_Class.cc | 70
-rw-r--r--  runtime/native/java_lang_StackStreamFactory.cc | 53
-rw-r--r--  runtime/native/java_lang_StackStreamFactory.h | 28
-rw-r--r--  runtime/native/java_lang_ref_Reference.cc | 4
-rw-r--r--  runtime/native/java_lang_reflect_Method.cc | 1
-rw-r--r--  runtime/native/jdk_internal_misc_Unsafe.cc | 2
-rw-r--r--  runtime/native/sun_misc_Unsafe.cc | 2
-rw-r--r--  runtime/native_stack_dump.cc | 97
-rw-r--r--  runtime/native_stack_dump.h | 14
-rw-r--r--  runtime/oat.h | 4
-rw-r--r--  runtime/oat_file.cc | 15
-rw-r--r--  runtime/oat_file.h | 2
-rw-r--r--  runtime/oat_file_assistant.cc | 424
-rw-r--r--  runtime/oat_file_assistant.h | 155
-rw-r--r--  runtime/oat_file_assistant_context.cc | 168
-rw-r--r--  runtime/oat_file_assistant_context.h | 85
-rw-r--r--  runtime/oat_file_assistant_test.cc | 1725
-rw-r--r--  runtime/oat_file_manager.cc | 61
-rw-r--r--  runtime/oat_quick_method_header.h | 16
-rw-r--r--  runtime/offsets.h | 16
-rw-r--r--  runtime/parsed_options.cc | 4
-rw-r--r--  runtime/quick_exception_handler.cc | 70
-rw-r--r--  runtime/quick_exception_handler.h | 3
-rw-r--r--  runtime/read_barrier-inl.h | 15
-rw-r--r--  runtime/read_barrier.h | 2
-rw-r--r--  runtime/read_barrier_config.h | 13
-rw-r--r--  runtime/read_barrier_option.h | 1
-rw-r--r--  runtime/reflection.cc | 2
-rw-r--r--  runtime/reflection.h | 1
-rw-r--r--  runtime/runtime.cc | 201
-rw-r--r--  runtime/runtime.h | 30
-rw-r--r--  runtime/runtime_callbacks.cc | 4
-rw-r--r--  runtime/runtime_callbacks.h | 11
-rw-r--r--  runtime/runtime_common.h | 2
-rw-r--r--  runtime/runtime_options.def | 2
-rw-r--r--  runtime/stack.cc | 43
-rw-r--r--  runtime/stack.h | 10
-rw-r--r--  runtime/stack_map.h | 11
-rw-r--r--  runtime/subtype_check_info.h | 2
-rw-r--r--  runtime/thread-inl.h | 4
-rw-r--r--  runtime/thread.cc | 214
-rw-r--r--  runtime/thread.h | 84
-rw-r--r--  runtime/thread_list.cc | 35
-rw-r--r--  runtime/thread_pool.cc | 5
-rw-r--r--  runtime/thread_pool.h | 3
-rw-r--r--  runtime/trace.cc | 1
-rw-r--r--  runtime/trace.h | 1
-rw-r--r--  runtime/transaction.cc | 1
-rw-r--r--  runtime/verifier/register_line-inl.h | 9
217 files changed, 10432 insertions, 4306 deletions
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 9e0e78ebdb..d42848f678 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -130,7 +130,7 @@ libart_cc_defaults {
"exec_utils.cc",
"fault_handler.cc",
"gc/allocation_record.cc",
- "gc/allocator/dlmalloc.cc",
+ "gc/allocator/art-dlmalloc.cc",
"gc/allocator/rosalloc.cc",
"gc/accounting/bitmap.cc",
"gc/accounting/card_table.cc",
@@ -142,6 +142,7 @@ libart_cc_defaults {
"gc/collector/garbage_collector.cc",
"gc/collector/immune_region.cc",
"gc/collector/immune_spaces.cc",
+ "gc/collector/mark_compact.cc",
"gc/collector/mark_sweep.cc",
"gc/collector/partial_mark_sweep.cc",
"gc/collector/semi_space.cc",
@@ -173,7 +174,6 @@ libart_cc_defaults {
"interpreter/interpreter.cc",
"interpreter/interpreter_cache.cc",
"interpreter/interpreter_common.cc",
- "interpreter/interpreter_intrinsics.cc",
"interpreter/interpreter_switch_impl0.cc",
"interpreter/interpreter_switch_impl1.cc",
"interpreter/interpreter_switch_impl2.cc",
@@ -209,6 +209,7 @@ libart_cc_defaults {
"mirror/method_handles_lookup.cc",
"mirror/method_type.cc",
"mirror/object.cc",
+ "mirror/stack_frame_info.cc",
"mirror/stack_trace_element.cc",
"mirror/string.cc",
"mirror/throwable.cc",
@@ -225,6 +226,7 @@ libart_cc_defaults {
"native/dalvik_system_ZygoteHooks.cc",
"native/java_lang_Class.cc",
"native/java_lang_Object.cc",
+ "native/java_lang_StackStreamFactory.cc",
"native/java_lang_String.cc",
"native/java_lang_StringFactory.cc",
"native/java_lang_System.cc",
@@ -254,6 +256,7 @@ libart_cc_defaults {
"oat.cc",
"oat_file.cc",
"oat_file_assistant.cc",
+ "oat_file_assistant_context.cc",
"oat_file_manager.cc",
"oat_quick_method_header.cc",
"object_lock.cc",
@@ -415,7 +418,6 @@ libart_cc_defaults {
],
static_libs: [
"libstatslog_art",
- "libtinyxml2",
],
generated_sources: [
"apex-info-list-tinyxml",
@@ -456,17 +458,20 @@ libart_cc_defaults {
header_libs: [
"art_cmdlineparser_headers",
"cpp-define-generator-definitions",
+ "dlmalloc",
"jni_platform_headers",
"libart_headers",
"libnativehelper_header_only",
],
- export_header_lib_headers: ["libart_headers"],
+ export_header_lib_headers: [
+ "dlmalloc",
+ "libart_headers",
+ ],
whole_static_libs: [
"libcpu_features",
],
shared_libs: [
"libartpalette",
- "libbacktrace",
"libbase", // For common macros.
"liblog",
"liblz4",
@@ -496,7 +501,6 @@ libart_static_cc_defaults {
name: "libart_static_base_defaults",
whole_static_libs: [
"libartpalette",
- "libbacktrace",
"libbase",
"liblog",
"liblz4",
@@ -679,6 +683,7 @@ art_cc_defaults {
],
shared_libs: [
"libbase",
+ "libziparchive",
],
static_libs: [
"libprocinfo",
@@ -848,7 +853,11 @@ art_cc_defaults {
"verifier/reg_type_test.cc",
],
shared_libs: [
- "libbacktrace",
+ "libunwindstack",
+ "libziparchive",
+ ],
+ static_libs: [
+ "libgmock",
],
header_libs: [
"art_cmdlineparser_headers", // For parsed_options_test.
@@ -877,6 +886,7 @@ art_cc_test {
"art_standalone_gtest_defaults",
"art_runtime_tests_defaults",
],
+ data: [":generate-boot-image"],
target: {
host: {
required: ["dex2oat"],
@@ -930,6 +940,7 @@ art_cc_test {
"art_standalone_gtest_defaults",
"art_runtime_compiler_tests_defaults",
],
+ data: [":generate-boot-image"],
shared_libs: [
"libart-compiler",
],
diff --git a/runtime/alloc_instrumentation.md b/runtime/alloc_instrumentation.md
index 513bbe3809..66e9a6a8b9 100644
--- a/runtime/alloc_instrumentation.md
+++ b/runtime/alloc_instrumentation.md
@@ -17,7 +17,7 @@ corresponding `UninstrumentQuickAlloc`... function.
- These in turn are called by `SetStatsEnabled()`, `SetAllocationListener()`, et al, which
require the mutator lock is not held.
-- With a started runtime, `SetEntrypointsInstrumented()` calls `ScopedSupendAll(`) before updating
+- With a started runtime, `SetEntrypointsInstrumented()` calls `ScopedSuspendAll(`) before updating
the function table.
Mutual exclusion in the dispatch table is thus ensured by the fact that it is only updated while
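The invariant this hunk describes — the allocation entrypoint table is only rewritten while every other thread is suspended — can be pictured with a small self-contained C++ analogue. Everything below is illustrative: ART's real `ScopedSuspendAll` suspends the whole thread list and the entrypoints live per-thread; a `std::shared_mutex` merely plays the same role here (shared = runnable mutator, exclusive = "all threads suspended").

#include <cstddef>
#include <mutex>
#include <new>
#include <shared_mutex>

using AllocFn = void* (*)(std::size_t);

void* AllocNormal(std::size_t n) { return ::operator new(n); }
void* AllocInstrumented(std::size_t n) {
  // ...record the allocation for listeners/stats here...
  return ::operator new(n);
}

std::shared_mutex mutator_lock;          // exclusive acquisition ~ ScopedSuspendAll
AllocFn alloc_entrypoint = AllocNormal;  // toy one-slot dispatch table

void* MutatorAllocate(std::size_t n) {
  std::shared_lock<std::shared_mutex> runnable(mutator_lock);
  return alloc_entrypoint(n);            // reads the table only while "runnable"
}

// Analogue of SetEntrypointsInstrumented(): no mutator can be mid-call through
// the table while we hold exclusive access, so a plain pointer swap is safe.
void SetEntrypointsInstrumented(bool instrumented) {
  std::unique_lock<std::shared_mutex> ssa(mutator_lock);
  alloc_entrypoint = instrumented ? AllocInstrumented : AllocNormal;
}

int main() {
  SetEntrypointsInstrumented(true);
  ::operator delete(MutatorAllocate(16));
}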
diff --git a/runtime/app_info.cc b/runtime/app_info.cc
index c72951eebc..2dbbbf6a90 100644
--- a/runtime/app_info.cc
+++ b/runtime/app_info.cc
@@ -93,6 +93,12 @@ void AppInfo::RegisterOdexStatus(const std::string& code_path,
<< "\nodex_status=" << odex_status;
}
+bool AppInfo::HasRegisteredAppInfo() {
+ MutexLock mu(Thread::Current(), update_mutex_);
+
+ return package_name_.has_value();
+}
+
void AppInfo::GetPrimaryApkOptimizationStatus(
std::string* out_compiler_filter,
std::string* out_compilation_reason) {
@@ -110,6 +116,13 @@ void AppInfo::GetPrimaryApkOptimizationStatus(
*out_compilation_reason = kUnknownValue;
}
+AppInfo::CodeType AppInfo::GetRegisteredCodeType(const std::string& code_path) {
+ MutexLock mu(Thread::Current(), update_mutex_);
+
+ const auto it = registered_code_locations_.find(code_path);
+ return it != registered_code_locations_.end() ? it->second.code_type : CodeType::kUnknown;
+}
+
std::ostream& operator<<(std::ostream& os, AppInfo& rhs) {
MutexLock mu(Thread::Current(), rhs.update_mutex_);
diff --git a/runtime/app_info.h b/runtime/app_info.h
index 68f2c586da..43e2ef320b 100644
--- a/runtime/app_info.h
+++ b/runtime/app_info.h
@@ -77,6 +77,13 @@ class AppInfo {
void GetPrimaryApkOptimizationStatus(std::string* out_compiler_filter,
std::string* out_compilation_reason);
+ // Whether we've received a call to RegisterAppInfo.
+ bool HasRegisteredAppInfo();
+
+ // The registered code type for a given code path. Note that this will
+ // be kUnknown until an explicit registration for that path has been made.
+ CodeType GetRegisteredCodeType(const std::string& code_path);
+
private:
// Encapsulates optimization information about a particular code location.
struct CodeLocationInfo {
diff --git a/runtime/app_info_test.cc b/runtime/app_info_test.cc
index 4a365dec96..51dd42f6fb 100644
--- a/runtime/app_info_test.cc
+++ b/runtime/app_info_test.cc
@@ -24,12 +24,17 @@ namespace art {
TEST(AppInfoTest, RegisterAppInfo) {
AppInfo app_info;
+ EXPECT_FALSE(app_info.HasRegisteredAppInfo());
+ EXPECT_EQ(app_info.GetRegisteredCodeType("code_location"), AppInfo::CodeType::kUnknown);
+
app_info.RegisterAppInfo(
"package_name",
std::vector<std::string>({"code_location"}),
"",
"",
AppInfo::CodeType::kPrimaryApk);
+ EXPECT_TRUE(app_info.HasRegisteredAppInfo());
+ EXPECT_EQ(app_info.GetRegisteredCodeType("code_location"), AppInfo::CodeType::kPrimaryApk);
std::string filter;
std::string reason;
@@ -48,11 +53,13 @@ TEST(AppInfoTest, RegisterAppInfoWithOdexStatus) {
"",
"",
AppInfo::CodeType::kPrimaryApk);
+ EXPECT_EQ(app_info.GetRegisteredCodeType("code_location"), AppInfo::CodeType::kPrimaryApk);
app_info.RegisterOdexStatus(
"code_location",
"filter",
"reason",
"odex_status");
+ EXPECT_EQ(app_info.GetRegisteredCodeType("code_location"), AppInfo::CodeType::kPrimaryApk);
std::string filter;
std::string reason;
@@ -69,17 +76,22 @@ TEST(AppInfoTest, RegisterAppInfoWithOdexStatusMultiplePrimary) {
"filter",
"reason",
"odex_status");
+ EXPECT_FALSE(app_info.HasRegisteredAppInfo());
app_info.RegisterOdexStatus(
"code_location2",
"filter2",
"reason2",
"odex_status");
+ EXPECT_FALSE(app_info.HasRegisteredAppInfo());
app_info.RegisterAppInfo(
"package_name",
std::vector<std::string>({"code_location"}),
"",
"",
AppInfo::CodeType::kPrimaryApk);
+ EXPECT_TRUE(app_info.HasRegisteredAppInfo());
+ EXPECT_EQ(app_info.GetRegisteredCodeType("code_location"), AppInfo::CodeType::kPrimaryApk);
+ EXPECT_EQ(app_info.GetRegisteredCodeType("code_location2"), AppInfo::CodeType::kUnknown);
std::string filter;
std::string reason;
@@ -110,7 +122,7 @@ TEST(AppInfoTest, RegisterAppInfoWithOdexStatusNoPrimary) {
"filter",
"reason",
"odex_status");
-
+ EXPECT_EQ(app_info.GetRegisteredCodeType("code_location"), AppInfo::CodeType::kSplitApk);
// The optimization status is unknown since we don't have primary apks.
app_info.GetPrimaryApkOptimizationStatus(&filter, &reason);
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index 23d82bac38..68afc24091 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -315,10 +315,6 @@
DELIVER_PENDING_EXCEPTION
.endm
-.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
- RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r1
-.endm
-
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
ldr ip, [rSELF, #THREAD_EXCEPTION_OFFSET] @ Get exception field.
cmp ip, #0
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
index aff055e611..a3d46c22d3 100644
--- a/runtime/arch/arm/asm_support_arm.h
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -25,6 +25,8 @@
#define FRAME_SIZE_SAVE_EVERYTHING 192
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
+#define SAVE_EVERYTHING_FRAME_R0_OFFSET \
+ (FRAME_SIZE_SAVE_EVERYTHING - CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS * POINTER_SIZE)
// The offset from the art_quick_read_barrier_mark_introspection (used for field
// loads with 32-bit LDR) to the entrypoint for field loads with 16-bit LDR,
@@ -43,22 +45,22 @@
// The offset of the reference load LDR from the return address in LR for field loads.
#ifdef USE_HEAP_POISONING
-#define BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET -8
-#define BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET -4
+#define BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET (-8)
+#define BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET (-4)
#else
-#define BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET -4
-#define BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET -2
+#define BAKER_MARK_INTROSPECTION_FIELD_LDR_WIDE_OFFSET (-4)
+#define BAKER_MARK_INTROSPECTION_FIELD_LDR_NARROW_OFFSET (-2)
#endif
// The offset of the reference load LDR from the return address in LR for array loads.
#ifdef USE_HEAP_POISONING
-#define BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET -8
+#define BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET (-8)
#else
-#define BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET -4
+#define BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET (-4)
#endif
// The offset of the reference load LDR from the return address in LR for GC root loads.
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_OFFSET -8
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET -6
+#define BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_WIDE_OFFSET (-8)
+#define BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_NARROW_OFFSET (-6)
// The offset of the MOV from the return address in LR for intrinsic CAS.
-#define BAKER_MARK_INTROSPECTION_INTRINSIC_CAS_MOV_OFFSET -8
+#define BAKER_MARK_INTROSPECTION_INTRINSIC_CAS_MOV_OFFSET (-8)
#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
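The new `SAVE_EVERYTHING_FRAME_R0_OFFSET` locates the spilled R0 slot so the deopt helpers further down in quick_entrypoints_arm.S can rewrite the result inside a save-everything frame. A quick sanity check of the arithmetic as stand-alone C++, under the assumption (not stated in this hunk) that the ARM save-everything frame spills 14 core registers, r0–r12 plus LR, at 4 bytes each:

// Values: 192 comes from this header; the spill count and pointer size are
// assumptions inferred from the '[sp, #136]' literal used later in
// quick_entrypoints_arm.S.
constexpr int kFrameSizeSaveEverything = 192;
constexpr int kCalleeSaveEverythingNumCoreSpills = 14;  // assumed: r0-r12, lr
constexpr int kPointerSize = 4;                         // AArch32

constexpr int kSaveEverythingFrameR0Offset =
    kFrameSizeSaveEverything - kCalleeSaveEverythingNumCoreSpills * kPointerSize;
static_assert(kSaveEverythingFrameR0Offset == 136,
              "matches the literal store 'str r0, [sp, #136]' in this diff");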
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index b0b0064643..555babec78 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -91,7 +91,7 @@ void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) {
qpoints->SetReadBarrierMarkReg10(is_active ? art_quick_read_barrier_mark_reg10 : nullptr);
qpoints->SetReadBarrierMarkReg11(is_active ? art_quick_read_barrier_mark_reg11 : nullptr);
- if (kUseReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
// For the alignment check, strip the Thumb mode bit.
DCHECK_ALIGNED(reinterpret_cast<intptr_t>(art_quick_read_barrier_mark_introspection) - 1u,
256u);
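The only substantive change in this hunk is `kUseReadBarrier` (a compile-time constant) becoming `gUseReadBarrier`. Read together with the new gc/collector/mark_compact.* files in the diffstat, this suggests the read-barrier decision now happens at runtime startup rather than being baked into the build. A minimal sketch of what the migration means at a call site — the declarations are assumptions, only the two names come from the diff:

// Before this series: a compile-time constant, so the branch folds away.
//   constexpr bool kUseReadBarrier = true;
// After: a plain global that startup code can set, letting one binary run
// either with read barriers (concurrent copying) or without them.
bool gUseReadBarrier = true;

bool ShouldUseIntrospectionEntrypoints(bool use_baker_read_barrier) {
  return gUseReadBarrier && use_baker_read_barrier;  // evaluated at runtime now
}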
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
index fc57df76d3..d91882c95d 100644
--- a/runtime/arch/arm/jni_entrypoints_arm.S
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -99,7 +99,7 @@ ENTRY art_jni_dlsym_lookup_stub
// Call artFindNativeMethod() for normal native and artFindNativeMethodRunnable()
// for @FastNative or @CriticalNative.
ldr ip, [r0, #THREAD_TOP_QUICK_FRAME_OFFSET] // uintptr_t tagged_quick_frame
- bic ip, #1 // ArtMethod** sp
+ bic ip, #TAGGED_JNI_SP_MASK // ArtMethod** sp
ldr ip, [ip] // ArtMethod* method
ldr ip, [ip, #ART_METHOD_ACCESS_FLAGS_OFFSET] // uint32_t access_flags
tst ip, #(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE)
@@ -327,6 +327,11 @@ JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_read_barrier, artJniReadBarrier
JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_method_start, artJniMethodStart, rSELF
/*
+ * Trampoline to `artJniMethodEntryHook()` that preserves all managed arguments.
+ */
+JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_method_entry_hook, artJniMethodEntryHook, rSELF
+
+ /*
* Trampoline to `artJniMonitoredMethodStart()` that preserves all managed arguments.
*/
JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_monitored_method_start, artJniMonitoredMethodStart, rSELF
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index d6f129be50..bd8149ec11 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -134,16 +134,50 @@
.cfi_adjust_cfa_offset -52
.endm
-.macro RETURN_IF_RESULT_IS_ZERO
- cbnz r0, 1f @ result non-zero branch over
- bx lr @ return
+.macro RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ ldr r1, [rSELF, # THREAD_EXCEPTION_OFFSET] // Get exception field.
+ cbnz r1, 1f
+ DEOPT_OR_RETURN r1 // Check if deopt is required
1:
+ DELIVER_PENDING_EXCEPTION
.endm
-.macro RETURN_IF_RESULT_IS_NON_ZERO
- cbz r0, 1f @ result zero branch over
- bx lr @ return
-1:
+.macro DEOPT_OR_RETURN temp, is_ref = 0
+ ldr \temp, [rSELF, #THREAD_DEOPT_CHECK_REQUIRED_OFFSET]
+ cbnz \temp, 2f
+ bx lr
+2:
+ SETUP_SAVE_EVERYTHING_FRAME \temp
+ mov r2, \is_ref // pass if result is a reference
+ mov r1, r0 // pass the result
+ mov r0, rSELF // Thread::Current
+ bl artDeoptimizeIfNeeded
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME
+ REFRESH_MARKING_REGISTER
+ bx lr
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+.endm
+
+.macro DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_R0 temp, is_ref
+ ldr \temp, [rSELF, #THREAD_DEOPT_CHECK_REQUIRED_OFFSET]
+ cbnz \temp, 2f
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
+ REFRESH_MARKING_REGISTER
+ bx lr
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+2:
+ str r0, [sp, SAVE_EVERYTHING_FRAME_R0_OFFSET] // update result in the frame
+ mov r2, \is_ref // pass if result is a reference
+ mov r1, r0 // pass the result
+ mov r0, rSELF // Thread::Current
+ bl artDeoptimizeIfNeeded
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME
+ REFRESH_MARKING_REGISTER
+ bx lr
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
.endm
.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
@@ -183,12 +217,16 @@ END \c_name
.endm
.macro RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
- RETURN_IF_RESULT_IS_ZERO
+ cbnz r0, 1f @ result non-zero branch over
+ DEOPT_OR_RETURN r1
+1:
DELIVER_PENDING_EXCEPTION
.endm
-.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
- RETURN_IF_RESULT_IS_NON_ZERO
+.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
+ cbz r0, 1f @ result zero branch over
+ DEOPT_OR_RETURN r1, /*is_ref=*/1
+1:
DELIVER_PENDING_EXCEPTION
.endm
@@ -517,8 +555,7 @@ ENTRY art_quick_lock_object_no_inline
bl artLockObjectFromCode @ (Object* obj, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+ RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
END art_quick_lock_object_no_inline
/*
@@ -548,8 +585,7 @@ ENTRY art_quick_unlock_object_no_inline
bl artUnlockObjectFromCode @ (Object* obj, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+ RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
END art_quick_unlock_object_no_inline
/*
@@ -601,13 +637,33 @@ END art_quick_check_instance_of
.cfi_rel_offset \rReg, \offset
.endm
- /*
- * Macro to insert read barrier, only used in art_quick_aput_obj.
- * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
- * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
- */
-.macro READ_BARRIER rDest, rObj, offset
+ // Helper macros for `art_quick_aput_obj`.
#ifdef USE_READ_BARRIER
+#ifdef USE_BAKER_READ_BARRIER
+.macro BAKER_RB_CHECK_GRAY_BIT_AND_LOAD rDest, rObj, offset, gray_slow_path_label
+ ldr ip, [\rObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ tst ip, #LOCK_WORD_READ_BARRIER_STATE_MASK_SHIFTED
+ bne \gray_slow_path_label
+ // False dependency to avoid needing load/load fence.
+ add \rObj, \rObj, ip, lsr #32
+ ldr \rDest, [\rObj, #\offset]
+ UNPOISON_HEAP_REF \rDest
+.endm
+
+.macro BAKER_RB_LOAD_AND_MARK rDest, rObj, offset, mark_function
+ ldr \rDest, [\rObj, #\offset]
+ UNPOISON_HEAP_REF \rDest
+ str lr, [sp, #-8]! @ Save LR with correct stack alignment.
+ .cfi_rel_offset lr, 0
+ .cfi_adjust_cfa_offset 8
+ bl \mark_function
+ ldr lr, [sp], #8 @ Restore LR.
+ .cfi_restore lr
+ .cfi_adjust_cfa_offset -8
+.endm
+#else // USE_BAKER_READ_BARRIER
+ .extern artReadBarrierSlow
+.macro READ_BARRIER_SLOW rDest, rObj, offset
push {r0-r3, ip, lr} @ 6 words for saved registers (used in art_quick_aput_obj)
.cfi_adjust_cfa_offset 24
.cfi_rel_offset r0, 0
@@ -640,30 +696,36 @@ END art_quick_check_instance_of
pop {lr} @ restore lr
.cfi_adjust_cfa_offset -4
.cfi_restore lr
-#else
- ldr \rDest, [\rObj, #\offset]
- UNPOISON_HEAP_REF \rDest
-#endif // USE_READ_BARRIER
.endm
+#endif // USE_BAKER_READ_BARRIER
+#endif // USE_READ_BARRIER
-#ifdef USE_READ_BARRIER
- .extern artReadBarrierSlow
-#endif
.hidden art_quick_aput_obj
ENTRY art_quick_aput_obj
-#ifdef USE_READ_BARRIER
- @ The offset to .Ldo_aput_null is too large to use cbz due to expansion from READ_BARRIER macro.
+#if defined(USE_READ_BARRIER) && !defined(USE_BAKER_READ_BARRIER)
+ @ The offset to .Ldo_aput_null is too large to use cbz due to expansion from `READ_BARRIER_SLOW`.
tst r2, r2
- beq .Ldo_aput_null
-#else
- cbz r2, .Ldo_aput_null
+ beq .Laput_obj_null
+ READ_BARRIER_SLOW r3, r0, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER_SLOW r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
+ READ_BARRIER_SLOW r4, r2, MIRROR_OBJECT_CLASS_OFFSET
+#else // !defined(USE_READ_BARRIER) || defined(USE_BAKER_READ_BARRIER)
+ cbz r2, .Laput_obj_null
+#ifdef USE_READ_BARRIER
+ cmp rMR, #0
+ bne .Laput_obj_gc_marking
#endif // USE_READ_BARRIER
- READ_BARRIER r3, r0, MIRROR_OBJECT_CLASS_OFFSET
- READ_BARRIER ip, r2, MIRROR_OBJECT_CLASS_OFFSET
- READ_BARRIER r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
- cmp r3, ip @ value's type == array's component type - trivial assignability
- bne .Lcheck_assignability
-.Ldo_aput:
+ ldr r3, [r0, #MIRROR_OBJECT_CLASS_OFFSET]
+ UNPOISON_HEAP_REF r3
+ // R4 is a scratch register in managed ARM ABI.
+ ldr r4, [r2, #MIRROR_OBJECT_CLASS_OFFSET]
+ UNPOISON_HEAP_REF r4
+ ldr r3, [r3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]
+ UNPOISON_HEAP_REF r3
+#endif // !defined(USE_READ_BARRIER) || defined(USE_BAKER_READ_BARRIER)
+ cmp r3, r4 @ value's type == array's component type - trivial assignability
+ bne .Laput_obj_check_assignability
+.Laput_obj_store:
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
POISON_HEAP_REF r2
str r2, [r3, r1, lsl #2]
@@ -671,26 +733,22 @@ ENTRY art_quick_aput_obj
lsr r0, r0, #CARD_TABLE_CARD_SHIFT
strb r3, [r3, r0]
blx lr
-.Ldo_aput_null:
+
+.Laput_obj_null:
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
str r2, [r3, r1, lsl #2]
blx lr
-.Lcheck_assignability:
+
+.Laput_obj_check_assignability:
push {r0-r2, lr} @ save arguments
.cfi_adjust_cfa_offset 16
- .cfi_rel_offset r0, 0
- .cfi_rel_offset r1, 4
- .cfi_rel_offset r2, 8
.cfi_rel_offset lr, 12
- mov r1, ip
+ mov r1, r4
mov r0, r3
bl artIsAssignableFromCode
cbz r0, .Lthrow_array_store_exception
.cfi_remember_state
pop {r0-r2, lr}
- .cfi_restore r0
- .cfi_restore r1
- .cfi_restore r2
.cfi_restore lr
.cfi_adjust_cfa_offset -16
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
@@ -700,19 +758,52 @@ ENTRY art_quick_aput_obj
lsr r0, r0, #CARD_TABLE_CARD_SHIFT
strb r3, [r3, r0]
blx lr
+
.Lthrow_array_store_exception:
CFI_RESTORE_STATE_AND_DEF_CFA sp, 16
pop {r0-r2, lr}
- .cfi_restore r0
- .cfi_restore r1
- .cfi_restore r2
.cfi_restore lr
.cfi_adjust_cfa_offset -16
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+ .cfi_remember_state
+#endif // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r3
mov r1, r2
- mov r2, rSELF @ pass Thread::Current
+ mov r2, rSELF @ Pass Thread::Current.
bl artThrowArrayStoreException @ (Class*, Class*, Thread*)
- bkpt @ unreached
+ bkpt @ Unreachable.
+
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
+.Laput_obj_gc_marking:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ r3, r0, MIRROR_OBJECT_CLASS_OFFSET, .Laput_obj_mark_array_class
+.Laput_obj_mark_array_class_continue:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, .Laput_obj_mark_array_element
+.Laput_obj_mark_array_element_continue:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ r4, r2, MIRROR_OBJECT_CLASS_OFFSET, .Laput_obj_mark_object_class
+.Laput_obj_mark_object_class_continue:
+
+ cmp r3, r4 @ value's type == array's component type - trivial assignability
+ // All registers are set up correctly for `.Laput_obj_check_assignability`.
+ bne .Laput_obj_check_assignability
+ b .Laput_obj_store
+
+.Laput_obj_mark_array_class:
+ BAKER_RB_LOAD_AND_MARK r3, r0, MIRROR_OBJECT_CLASS_OFFSET, art_quick_read_barrier_mark_reg03
+ b .Laput_obj_mark_array_class_continue
+
+.Laput_obj_mark_array_element:
+ BAKER_RB_LOAD_AND_MARK \
+ r3, r3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, art_quick_read_barrier_mark_reg03
+ b .Laput_obj_mark_array_element_continue
+
+.Laput_obj_mark_object_class:
+ BAKER_RB_LOAD_AND_MARK r4, r2, MIRROR_OBJECT_CLASS_OFFSET, art_quick_read_barrier_mark_reg04
+ b .Laput_obj_mark_object_class_continue
+#endif // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
END art_quick_aput_obj
// Macro to facilitate adding new allocation entrypoints.
@@ -782,11 +873,8 @@ ENTRY \name
mov r1, rSELF @ pass Thread::Current
bl \entrypoint @ (uint32_t index, Thread*)
cbz r0, 1f @ If result is null, deliver the OOME.
- .cfi_remember_state
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_R0
- REFRESH_MARKING_REGISTER
- bx lr
- CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ str r0, [sp, #136] @ store result in the frame
+ DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_R0 r1, /* is_ref= */ 1
1:
DELIVER_PENDING_EXCEPTION_FRAME_READY
END \name
@@ -809,12 +897,12 @@ ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromC
/*
* Called by managed code to resolve a static field and load a non-wide value.
*/
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
/*
* Called by managed code to resolve a static field and load a 64-bit primitive value.
*/
@@ -827,7 +915,7 @@ ENTRY art_quick_get64_static
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
cbnz r2, 1f @ success if no exception pending
- bx lr @ return on success
+ DEOPT_OR_RETURN r2 @ check if deopt is required or return
1:
DELIVER_PENDING_EXCEPTION
END art_quick_get64_static
@@ -835,12 +923,12 @@ END art_quick_get64_static
/*
* Called by managed code to resolve an instance field and load a non-wide value.
*/
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
/*
* Called by managed code to resolve an instance field and load a 64-bit primitive value.
*/
@@ -853,7 +941,7 @@ ENTRY art_quick_get64_instance
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
cbnz r2, 1f @ success if no exception pending
- bx lr @ return on success
+ DEOPT_OR_RETURN r2 @ check if deopt is required or return
1:
DELIVER_PENDING_EXCEPTION
END art_quick_get64_instance
@@ -888,8 +976,7 @@ ENTRY art_quick_set64_instance
.cfi_adjust_cfa_offset -16
RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+ RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
END art_quick_set64_instance
.extern artSet64StaticFromCompiledCode
@@ -903,8 +990,7 @@ ENTRY art_quick_set64_static
.cfi_adjust_cfa_offset -16
RESTORE_SAVE_REFS_ONLY_FRAME @ TODO: we can clearly save an add here
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_ZERO
- DELIVER_PENDING_EXCEPTION
+ RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
END art_quick_set64_static
// Generate the allocation entrypoints for each allocator.
@@ -1037,7 +1123,7 @@ ENTRY \c_name
bl \cxx_name @ (mirror::Class* cls, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \c_name
.endm
@@ -1096,10 +1182,6 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
str r1, [rSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
POISON_HEAP_REF r0
str r0, [r2, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
- // Fence. This is "ish" not "ishst" so
- // that the code after this allocation
- // site will see the right values in
- // the fields of the class.
mov r0, r2
// No barrier. The class is already observably initialized (otherwise the fast
// path size check above would fail) and new-instance allocations are protected
@@ -1122,7 +1204,7 @@ ENTRY \name
bl \entrypoint // (mirror::Class* klass, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \name
.endm
@@ -1162,10 +1244,6 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
POISON_HEAP_REF r0
str r0, [r3, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
str r1, [r3, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
- // Fence. This is "ish" not "ishst" so
- // that the code after this allocation
- // site will see the right values in
- // the fields of the class.
mov r0, r3
// new-array is special. The class is loaded and immediately goes to the Initialized state
// before it is published. Therefore the only fence needed is for the publication of the object.
@@ -1195,7 +1273,7 @@ ENTRY \name
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \name
.endm
@@ -1448,16 +1526,26 @@ ENTRY art_quick_generic_jni_trampoline
.cfi_remember_state
.cfi_def_cfa_register sp
+ // store into fpr, for when it's a fpr return...
+ vmov d0, r0, r1
+
+ LOAD_RUNTIME_INSTANCE r2
+ ldr r2, [r2, #INSTRUMENTATION_STUBS_INSTALLED_OFFSET_FROM_RUNTIME_INSTANCE]
+ cbnz r2, .Lcall_method_exit_hook
+.Lcall_method_exit_hook_done:
+
// Tear down the callee-save frame. Skip arg registers.
add sp, #FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY
.cfi_adjust_cfa_offset -(FRAME_SIZE_SAVE_REFS_AND_ARGS-FRAME_SIZE_SAVE_REFS_ONLY)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- // store into fpr, for when it's a fpr return...
- vmov d0, r0, r1
bx lr // ret
+.Lcall_method_exit_hook:
+ bl art_quick_method_exit_hook
+ b .Lcall_method_exit_hook_done
+
// Undo the unwinding information from above since it doesn't apply below.
CFI_RESTORE_STATE_AND_DEF_CFA r10, FRAME_SIZE_SAVE_REFS_AND_ARGS
.Lexception_in_native:
@@ -1883,7 +1971,7 @@ ENTRY art_quick_string_builder_append
bl artStringBuilderAppend @ (uint32_t, const uint32_t*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END art_quick_string_builder_append
/*
@@ -1895,13 +1983,9 @@ END art_quick_string_builder_append
*
* If `reg` is different from `r0`, the generated function follows a
* non-standard runtime calling convention:
- * - register `reg` is used to pass the (sole) argument of this
- * function (instead of R0);
- * - register `reg` is used to return the result of this function
- * (instead of R0);
- * - R0 is treated like a normal (non-argument) caller-save register;
- * - everything else is the same as in the standard runtime calling
- * convention (e.g. standard callee-save registers are preserved).
+ * - register `reg` (which may be different from R0) is used to pass the (sole) argument,
+ * - register `reg` (which may be different from R0) is used to return the result,
+ * - all other registers are callee-save (the values they hold are preserved).
*/
.macro READ_BARRIER_MARK_REG name, reg
ENTRY \name
@@ -2496,14 +2580,8 @@ ENTRY art_quick_method_exit_hook
mov r0, rSELF @ pass Thread::Current
blx artMethodExitHook @ (Thread*, ArtMethod*, gpr_res*, fpr_res*)
- .cfi_remember_state
- cbnz r0, .Ldo_deliver_instrumentation_exception_exit @ Deliver exception
-
// Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
blx lr
-.Ldo_deliver_instrumentation_exception_exit:
- CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
- DELIVER_PENDING_EXCEPTION_FRAME_READY
END art_quick_method_exit_hook
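The recurring change in this file (mirrored for arm64 below and, per the diffstat, for x86 and x86-64) is that every entrypoint return path now also consults a per-thread "deopt check required" flag. A self-contained C++ analogue of the control flow that `RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION` and `DEOPT_OR_RETURN` encode — the `Thread` fields and helper bodies below are toys; only the `artDeoptimizeIfNeeded(Thread*, result, is_ref)` call shape is taken from the diff:

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Toy stand-ins for the two fields the assembly reads via
// THREAD_EXCEPTION_OFFSET and THREAD_DEOPT_CHECK_REQUIRED_OFFSET.
struct Thread {
  const void* exception = nullptr;
  bool deopt_check_required = false;
};

[[noreturn]] void DeliverPendingException(Thread*) {
  std::puts("unwind to the pending exception");
  std::exit(1);
}

void artDeoptimizeIfNeeded(Thread*, uintptr_t result, bool is_ref) {
  std::printf("deopt check: result=%lu is_ref=%d\n",
              static_cast<unsigned long>(result), is_ref);
}

// Analogue of RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION.
uintptr_t ReturnOrDeoptOrDeliver(Thread* self, uintptr_t result, bool is_ref) {
  if (self->exception != nullptr) {
    DeliverPendingException(self);                // slow path 1: pending exception
  }
  if (self->deopt_check_required) {
    artDeoptimizeIfNeeded(self, result, is_ref);  // slow path 2: maybe deoptimize
  }
  return result;                                  // fast path: plain return
}

int main() {
  Thread t;
  t.deopt_check_required = true;
  ReturnOrDeoptOrDeliver(&t, 42, /*is_ref=*/false);
}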
diff --git a/runtime/arch/arm/quick_entrypoints_cc_arm.cc b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
index 987b4590b7..d7fef6f72e 100644
--- a/runtime/arch/arm/quick_entrypoints_cc_arm.cc
+++ b/runtime/arch/arm/quick_entrypoints_cc_arm.cc
@@ -25,6 +25,7 @@ extern "C" void art_quick_invoke_stub_internal(ArtMethod*, uint32_t*, uint32_t,
uint32_t*);
template <bool kIsStatic>
+NO_STACK_PROTECTOR
static void quick_invoke_reg_setup(ArtMethod* method, uint32_t* args, uint32_t args_size,
Thread* self, JValue* result, const char* shorty) {
// Note: We do not follow aapcs ABI in quick code for both softfp and hardfp.
@@ -96,6 +97,7 @@ static void quick_invoke_reg_setup(ArtMethod* method, uint32_t* args, uint32_t a
// Called by art::ArtMethod::Invoke to do entry into a non-static method.
// TODO: migrate into an assembly implementation as with ARM64.
+NO_STACK_PROTECTOR
extern "C" void art_quick_invoke_stub(ArtMethod* method, uint32_t* args, uint32_t args_size,
Thread* self, JValue* result, const char* shorty) {
quick_invoke_reg_setup<false>(method, args, args_size, self, result, shorty);
@@ -103,6 +105,7 @@ extern "C" void art_quick_invoke_stub(ArtMethod* method, uint32_t* args, uint32_
// Called by art::ArtMethod::Invoke to do entry into a static method.
// TODO: migrate into an assembly implementation as with ARM64.
+NO_STACK_PROTECTOR
extern "C" void art_quick_invoke_static_stub(ArtMethod* method, uint32_t* args,
uint32_t args_size, Thread* self, JValue* result,
const char* shorty) {
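`NO_STACK_PROTECTOR` here annotates the stubs that hand-build a managed frame and transfer control into managed code, where a compiler-inserted canary check could misfire. Its definition is not part of this diff; it presumably expands to the attribute that both Clang and modern GCC accept:

// Assumed shape of the macro (not shown in this diff):
#if defined(__clang__) || defined(__GNUC__)
#define NO_STACK_PROTECTOR __attribute__((no_stack_protector))
#else
#define NO_STACK_PROTECTOR
#endif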
diff --git a/runtime/arch/arm64/asm_support_arm64.h b/runtime/arch/arm64/asm_support_arm64.h
index 887ee0259c..983fe3a06f 100644
--- a/runtime/arch/arm64/asm_support_arm64.h
+++ b/runtime/arch/arm64/asm_support_arm64.h
@@ -19,6 +19,7 @@
#include "asm_support.h"
+// TODO(mythria): Change these to use constants from callee_save_frame_arm64.h
#define CALLEE_SAVES_SIZE (12 * 8 + 8 * 8)
// +8 for the ArtMethod, +8 for alignment.
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES (CALLEE_SAVES_SIZE + 16)
@@ -27,6 +28,8 @@
#define FRAME_SIZE_SAVE_EVERYTHING 512
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
+#define SAVE_EVERYTHING_FRAME_X0_OFFSET \
+ (FRAME_SIZE_SAVE_EVERYTHING - CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS * POINTER_SIZE)
// The offset from art_quick_read_barrier_mark_introspection to the array switch cases,
// i.e. art_quick_read_barrier_mark_introspection_arrays.
@@ -37,17 +40,17 @@
// The offset of the reference load LDR from the return address in LR for field loads.
#ifdef USE_HEAP_POISONING
-#define BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET -8
+#define BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET (-8)
#else
-#define BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET -4
+#define BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET (-4)
#endif
// The offset of the reference load LDR from the return address in LR for array loads.
#ifdef USE_HEAP_POISONING
-#define BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET -8
+#define BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET (-8)
#else
-#define BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET -4
+#define BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET (-4)
#endif
// The offset of the reference load LDR from the return address in LR for GC root loads.
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET -8
+#define BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET (-8)
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_H_
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index ad082aed1f..93400d9c7c 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -171,6 +171,18 @@ Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromVariant(
has_sve));
}
+Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::IntersectWithHwcap() const {
+ Arm64FeaturesUniquePtr hwcaps = Arm64InstructionSetFeatures::FromHwcap();
+ return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(
+ fix_cortex_a53_835769_,
+ fix_cortex_a53_843419_,
+ has_crc_ && hwcaps->has_crc_,
+ has_lse_ && hwcaps->has_lse_,
+ has_fp16_ && hwcaps->has_fp16_,
+ has_dotprod_ && hwcaps->has_dotprod_,
+ has_sve_ && hwcaps->has_sve_));
+}
+
Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
bool is_a53 = (bitmap & kA53Bitfield) != 0;
bool has_crc = (bitmap & kCRCBitField) != 0;
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index eb98c01633..8f0013ac86 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -53,6 +53,10 @@ class Arm64InstructionSetFeatures final : public InstructionSetFeatures {
// Use external cpu_features library.
static Arm64FeaturesUniquePtr FromCpuFeatures();
+ // Return a new set of instruction set features, intersecting `this` features
+ // with hardware capabilities.
+ Arm64FeaturesUniquePtr IntersectWithHwcap() const;
+
bool Equals(const InstructionSetFeatures* other) const override;
// Note that newer CPUs do not have a53 erratum 835769 and 843419,
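A hypothetical call site for the new method (`FromVariant` and `FromHwcap` both appear in this change; the surrounding code is an assumption): start from the features a named CPU variant implies, then AND away anything the kernel's hwcaps do not confirm, so compiled code never relies on an instruction the running core lacks.

// Hypothetical usage sketch, not taken from the patch; assumes the headers above.
void ConfigureArm64Features() {
  std::string error_msg;
  Arm64FeaturesUniquePtr variant_features =
      Arm64InstructionSetFeatures::FromVariant("cortex-a76", &error_msg);
  if (variant_features != nullptr) {
    // Each feature is AND-ed with its hwcap-detected bit, as shown in the
    // IntersectWithHwcap() body in instruction_set_features_arm64.cc above.
    Arm64FeaturesUniquePtr usable = variant_features->IntersectWithHwcap();
    (void)usable;
  }
}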
diff --git a/runtime/arch/arm64/jni_entrypoints_arm64.S b/runtime/arch/arm64/jni_entrypoints_arm64.S
index 463767c846..9612a7b54f 100644
--- a/runtime/arch/arm64/jni_entrypoints_arm64.S
+++ b/runtime/arch/arm64/jni_entrypoints_arm64.S
@@ -103,7 +103,7 @@ ENTRY art_jni_dlsym_lookup_stub
// Call artFindNativeMethod() for normal native and artFindNativeMethodRunnable()
// for @FastNative or @CriticalNative.
ldr xIP0, [x0, #THREAD_TOP_QUICK_FRAME_OFFSET] // uintptr_t tagged_quick_frame
- bic xIP0, xIP0, #1 // ArtMethod** sp
+ bic xIP0, xIP0, #TAGGED_JNI_SP_MASK // ArtMethod** sp
ldr xIP0, [xIP0] // ArtMethod* method
ldr xIP0, [xIP0, #ART_METHOD_ACCESS_FLAGS_OFFSET] // uint32_t access_flags
mov xIP1, #(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE)
@@ -366,6 +366,11 @@ JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_read_barrier, artJniReadBarrier
JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_method_start, artJniMethodStart, xSELF
/*
+ * Trampoline to `artJniMethodEntryHook` that preserves all managed arguments.
+ */
+JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_method_entry_hook, artJniMethodEntryHook, xSELF
+
+ /*
* Trampoline to `artJniMonitoredMethodStart()` that preserves all managed arguments.
*/
JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_monitored_method_start, artJniMonitoredMethodStart, xSELF
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index d8c91e11b9..a35206fef8 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -194,26 +194,56 @@
RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
.endm
-.macro RETURN_IF_RESULT_IS_ZERO
- cbnz x0, 1f // result non-zero branch over
- ret // return
-1:
+.macro RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ ldr x1, [xSELF, # THREAD_EXCEPTION_OFFSET] // Get exception field.
+ cbnz x1, 1f
+ DEOPT_OR_RETURN x1 // Check if deopt is required
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
.endm
-.macro RETURN_IF_RESULT_IS_NON_ZERO
- cbz x0, 1f // result zero branch over
- ret // return
-1:
+.macro DEOPT_OR_RETURN temp, is_ref = 0
+ ldr \temp, [xSELF, #THREAD_DEOPT_CHECK_REQUIRED_OFFSET]
+ cbnz \temp, 2f
+ ret
+2:
+ SETUP_SAVE_EVERYTHING_FRAME
+ mov x2, \is_ref // pass if result is a reference
+ mov x1, x0 // pass the result
+ mov x0, xSELF // Thread::Current
+ bl artDeoptimizeIfNeeded
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME
+ REFRESH_MARKING_REGISTER
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
.endm
-// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
-.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
- RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
+.macro DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_X0 temp, is_ref
+ ldr \temp, [xSELF, #THREAD_DEOPT_CHECK_REQUIRED_OFFSET]
+ cbnz \temp, 2f
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
+ REFRESH_MARKING_REGISTER
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+2:
+ str x0, [sp, #SAVE_EVERYTHING_FRAME_X0_OFFSET] // update result in the frame
+ mov x2, \is_ref // pass if result is a reference
+ mov x1, x0 // pass the result
+ mov x0, xSELF // Thread::Current
+ bl artDeoptimizeIfNeeded
+ .cfi_remember_state
+ RESTORE_SAVE_EVERYTHING_FRAME
+ REFRESH_MARKING_REGISTER
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
.endm
+
.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
cbnz w0, 1f // result non-zero branch over
- ret // return
+ DEOPT_OR_RETURN x1
1:
DELIVER_PENDING_EXCEPTION
.endm
@@ -999,25 +1029,30 @@ END art_quick_check_instance_of
.cfi_restore \xReg2
.endm
- /*
- * Macro to insert read barrier, only used in art_quick_aput_obj.
- * xDest, wDest and xObj are registers, offset is a defined literal such as
- * MIRROR_OBJECT_CLASS_OFFSET. Dest needs both x and w versions of the same register to handle
- * name mismatch between instructions. This macro uses the lower 32b of register when possible.
- * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
- */
-.macro READ_BARRIER xDest, wDest, xObj, xTemp, wTemp, offset, number
+ // Helper macros for `art_quick_aput_obj`.
#ifdef USE_READ_BARRIER
-# ifdef USE_BAKER_READ_BARRIER
- ldr \wTemp, [\xObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
- tbnz \wTemp, #LOCK_WORD_READ_BARRIER_STATE_SHIFT, .Lrb_slowpath\number
+#ifdef USE_BAKER_READ_BARRIER
+.macro BAKER_RB_CHECK_GRAY_BIT_AND_LOAD wDest, xObj, offset, gray_slow_path_label
+ ldr wIP0, [\xObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
+ tbnz wIP0, #LOCK_WORD_READ_BARRIER_STATE_SHIFT, \gray_slow_path_label
// False dependency to avoid needing load/load fence.
- add \xObj, \xObj, \xTemp, lsr #32
- ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest.
+ add \xObj, \xObj, xIP0, lsr #32
+ ldr \wDest, [\xObj, #\offset] // Heap reference = 32b; zero-extends to xN.
+ UNPOISON_HEAP_REF \wDest
+.endm
+
+.macro BAKER_RB_LOAD_AND_MARK wDest, xObj, offset, mark_function
+ ldr \wDest, [\xObj, #\offset] // Heap reference = 32b; zero-extends to xN.
UNPOISON_HEAP_REF \wDest
- b .Lrb_exit\number
-# endif // USE_BAKER_READ_BARRIER
-.Lrb_slowpath\number:
+ // Save LR in a register preserved by `art_quick_read_barrier_mark_regNN`
+ // and unused by `art_quick_aput_obj`.
+ mov x5, lr
+ bl \mark_function
+ mov lr, x5 // Restore LR.
+.endm
+#else // USE_BAKER_READ_BARRIER
+ .extern artReadBarrierSlow
+.macro READ_BARRIER_SLOW xDest, wDest, xObj, offset
// Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 48
SAVE_TWO_REGS x2, x3, 16
@@ -1042,41 +1077,44 @@ END art_quick_check_instance_of
POP_REG_NE x4, 32, \xDest
RESTORE_REG xLR, 40
DECREASE_FRAME 48
-.Lrb_exit\number:
-#else
- ldr \wDest, [\xObj, #\offset] // Heap reference = 32b. This also zero-extends to \xDest.
- UNPOISON_HEAP_REF \wDest
-#endif // USE_READ_BARRIER
.endm
+#endif // USE_BAKER_READ_BARRIER
+#endif // USE_READ_BARRIER
-#ifdef USE_READ_BARRIER
- .extern artReadBarrierSlow
-#endif
ENTRY art_quick_aput_obj
- cbz x2, .Ldo_aput_null
- READ_BARRIER x3, w3, x0, x3, w3, MIRROR_OBJECT_CLASS_OFFSET, 0 // Heap reference = 32b
- // This also zero-extends to x3
- READ_BARRIER x3, w3, x3, x4, w4, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, 1 // Heap reference = 32b
- // This also zero-extends to x3
- READ_BARRIER x4, w4, x2, x4, w4, MIRROR_OBJECT_CLASS_OFFSET, 2 // Heap reference = 32b
- // This also zero-extends to x4
+ cbz x2, .Laput_obj_null
+#if defined(USE_READ_BARRIER) && !defined(USE_BAKER_READ_BARRIER)
+ READ_BARRIER_SLOW x3, w3, x0, MIRROR_OBJECT_CLASS_OFFSET
+ READ_BARRIER_SLOW x3, w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
+ READ_BARRIER_SLOW x4, w4, x2, MIRROR_OBJECT_CLASS_OFFSET
+#else // !defined(USE_READ_BARRIER) || defined(USE_BAKER_READ_BARRIER)
+#ifdef USE_READ_BARRIER
+ cbnz wMR, .Laput_obj_gc_marking
+#endif // USE_READ_BARRIER
+ ldr w3, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b; zero-extends to x3.
+ UNPOISON_HEAP_REF w3
+ ldr w3, [x3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Heap reference = 32b; zero-extends to x3.
+ UNPOISON_HEAP_REF w3
+ ldr w4, [x2, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b; zero-extends to x4.
+ UNPOISON_HEAP_REF w4
+#endif // !defined(USE_READ_BARRIER) || defined(USE_BAKER_READ_BARRIER)
cmp w3, w4 // value's type == array's component type - trivial assignability
- bne .Lcheck_assignability
-.Ldo_aput:
+ bne .Laput_obj_check_assignability
+.Laput_obj_store:
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
- // "Compress" = do nothing
POISON_HEAP_REF w2
- str w2, [x3, x1, lsl #2] // Heap reference = 32b
+ str w2, [x3, x1, lsl #2] // Heap reference = 32b.
ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
lsr x0, x0, #CARD_TABLE_CARD_SHIFT
strb w3, [x3, x0]
ret
-.Ldo_aput_null:
+
+.Laput_obj_null:
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
- // "Compress" = do nothing
- str w2, [x3, x1, lsl #2] // Heap reference = 32b
+ str w2, [x3, x1, lsl #2] // Heap reference = 32b.
ret
-.Lcheck_assignability:
+
+.Laput_obj_check_assignability:
// Store arguments and link register
SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
SAVE_TWO_REGS x2, xLR, 16
@@ -1087,7 +1125,7 @@ ENTRY art_quick_aput_obj
bl artIsAssignableFromCode
// Check for exception
- cbz x0, .Lthrow_array_store_exception
+ cbz x0, .Laput_obj_throw_array_store_exception
// Restore
.cfi_remember_state
@@ -1095,23 +1133,56 @@ ENTRY art_quick_aput_obj
RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
- // "Compress" = do nothing
POISON_HEAP_REF w2
- str w2, [x3, x1, lsl #2] // Heap reference = 32b
+ str w2, [x3, x1, lsl #2] // Heap reference = 32b.
ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
lsr x0, x0, #CARD_TABLE_CARD_SHIFT
strb w3, [x3, x0]
ret
CFI_RESTORE_STATE_AND_DEF_CFA sp, 32
-.Lthrow_array_store_exception:
+
+.Laput_obj_throw_array_store_exception:
RESTORE_TWO_REGS x2, xLR, 16
RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+ .cfi_remember_state
+#endif // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
mov x1, x2 // Pass value.
mov x2, xSELF // Pass Thread::Current.
bl artThrowArrayStoreException // (Object*, Object*, Thread*).
- brk 0 // Unreached.
+ brk 0 // Unreachable.
+
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, 0
+.Laput_obj_gc_marking:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ w3, x0, MIRROR_OBJECT_CLASS_OFFSET, .Laput_obj_mark_array_class
+.Laput_obj_mark_array_class_continue:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, .Laput_obj_mark_array_element
+.Laput_obj_mark_array_element_continue:
+ BAKER_RB_CHECK_GRAY_BIT_AND_LOAD \
+ w4, x2, MIRROR_OBJECT_CLASS_OFFSET, .Laput_obj_mark_object_class
+.Laput_obj_mark_object_class_continue:
+ cmp w3, w4 // value's type == array's component type - trivial assignability
+ bne .Laput_obj_check_assignability
+ b .Laput_obj_store
+
+.Laput_obj_mark_array_class:
+ BAKER_RB_LOAD_AND_MARK w3, x0, MIRROR_OBJECT_CLASS_OFFSET, art_quick_read_barrier_mark_reg03
+ b .Laput_obj_mark_array_class_continue
+
+.Laput_obj_mark_array_element:
+ BAKER_RB_LOAD_AND_MARK \
+ w3, x3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, art_quick_read_barrier_mark_reg03
+ b .Laput_obj_mark_array_element_continue
+
+.Laput_obj_mark_object_class:
+ BAKER_RB_LOAD_AND_MARK w4, x2, MIRROR_OBJECT_CLASS_OFFSET, art_quick_read_barrier_mark_reg04
+ b .Laput_obj_mark_object_class_continue
+#endif // defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
END art_quick_aput_obj
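The rewritten `art_quick_aput_obj` paths above are easier to follow against a C++-level sketch. This is a toy model, not runtime code: `mirror::Object`, the card table and the assignability rule are reduced to minimal stand-ins, and the read-barrier marking of `.Laput_obj_gc_marking` is elided.

    #include <cstddef>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    struct Class {};                          // stand-in for mirror::Class
    struct Object { const Class* klass; };    // stand-in for mirror::Object

    // Stand-in for artIsAssignableFromCode(); the runtime walks the hierarchy.
    static bool IsAssignableFrom(const Class* dest, const Class* src) {
      return dest == src;  // toy rule, enough to show the control flow
    }

    // Stand-in for the card-table store; shift and dirty value are illustrative.
    static void MarkCard(std::vector<uint8_t>& card_table, uintptr_t addr) {
      card_table.at(addr >> 10) = 0x70;
    }

    static void AputObj(std::vector<Object*>& array, const Class* component_type,
                        size_t index, Object* value,
                        std::vector<uint8_t>& card_table) {
      if (value == nullptr) {                  // .Laput_obj_null:
        array[index] = nullptr;                // a null store needs no card mark.
        return;
      }
      if (value->klass != component_type &&    // trivial assignability failed, so
          !IsAssignableFrom(component_type, value->klass)) {   // ask the runtime
        throw std::runtime_error("ArrayStoreException");  // artThrowArrayStoreException
      }
      array[index] = value;                    // .Laput_obj_store: store, then
      MarkCard(card_table,                     // dirty the card so the GC rescans
               reinterpret_cast<uintptr_t>(&array[index]));  // this array.
    }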
// Macro to facilitate adding new allocation entrypoints.
@@ -1214,11 +1285,7 @@ ENTRY \name
mov x1, xSELF // pass Thread::Current
bl \entrypoint // (int32_t index, Thread* self)
cbz w0, 1f // If result is null, deliver the OOME.
- .cfi_remember_state
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
- REFRESH_MARKING_REGISTER
- ret // return
- CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_X0 x1, /* is_ref= */ 1
1:
DELIVER_PENDING_EXCEPTION_FRAME_READY
END \name
@@ -1228,13 +1295,14 @@ END \name
ONE_ARG_SAVE_EVERYTHING_DOWNCALL \name, \entrypoint, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
.endm
-.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
- cbz w0, 1f // result zero branch over
- ret // return
+.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
+ cbz w0, 1f // result zero branch over
+ DEOPT_OR_RETURN x1, /*is_ref=*/1 // check for deopt or return
1:
DELIVER_PENDING_EXCEPTION
.endm
+
/*
* Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
* failure.
@@ -1256,21 +1324,21 @@ ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromC
// Note: Functions `art{Get,Set}<Kind>{Static,Instance}FromCompiledCode` are
// defined with a macro in runtime/entrypoints/quick/quick_field_entrypoints.cc.
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
@@ -1410,7 +1478,7 @@ ENTRY \c_name
bl \cxx_name
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \c_name
.endm
@@ -1439,10 +1507,6 @@ ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, art
str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
POISON_HEAP_REF w0
str w0, [x4, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
- // Fence. This is "ish" not "ishst" so
- // that the code after this allocation
- // site will see the right values in
- // the fields of the class.
mov x0, x4
// No barrier. The class is already observably initialized (otherwise the fast
// path size check above would fail) and new-instance allocations are protected
@@ -1465,7 +1529,7 @@ ENTRY \name
bl \entrypoint // (mirror::Class*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \name
.endm
@@ -1510,7 +1574,6 @@ GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, art
POISON_HEAP_REF \wClass
str \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Store the class pointer.
str \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET] // Store the array length.
- // Fence.
// new-array is special. The class is loaded and immediately goes to the Initialized state
// before it is published. Therefore the only fence needed is for the publication of the object.
// See ClassLinker::CreateArrayClass() for more details.
@@ -1539,7 +1602,7 @@ ENTRY \name
bl \entrypoint
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END \name
.endm
@@ -1837,6 +1900,11 @@ ENTRY art_quick_generic_jni_trampoline
.cfi_remember_state
.cfi_def_cfa_register sp
+ LOAD_RUNTIME_INSTANCE x1
+ ldrb w1, [x1, #INSTRUMENTATION_STUBS_INSTALLED_OFFSET_FROM_RUNTIME_INSTANCE]
+ cbnz w1, .Lcall_method_exit_hook
+.Lcall_method_exit_hook_done:
+
// Tear down the callee-save frame.
RESTORE_SAVE_REFS_AND_ARGS_FRAME
REFRESH_MARKING_REGISTER
@@ -1845,6 +1913,11 @@ ENTRY art_quick_generic_jni_trampoline
fmov d0, x0
ret
+.Lcall_method_exit_hook:
+ fmov d0, x0
+ bl art_quick_method_exit_hook
+ b .Lcall_method_exit_hook_done
+
// Undo the unwinding information from above since it doesn't apply below.
CFI_RESTORE_STATE_AND_DEF_CFA x28, FRAME_SIZE_SAVE_REFS_AND_ARGS
.Lexception_in_native:
@@ -2102,7 +2175,7 @@ ENTRY art_quick_string_builder_append
bl artStringBuilderAppend // (uint32_t, const uint32_t*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME
REFRESH_MARKING_REGISTER
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
END art_quick_string_builder_append
/*
@@ -2111,15 +2184,10 @@ END art_quick_string_builder_append
* `wreg` (corresponding to X register `xreg`), saving and restoring
* all caller-save registers.
*
- * If `wreg` is different from `w0`, the generated function follows a
- * non-standard runtime calling convention:
- * - register `wreg` is used to pass the (sole) argument of this
- * function (instead of W0);
- * - register `wreg` is used to return the result of this function
- * (instead of W0);
- * - W0 is treated like a normal (non-argument) caller-save register;
- * - everything else is the same as in the standard runtime calling
- * convention (e.g. standard callee-save registers are preserved).
+ * The generated function follows a non-standard runtime calling convention:
+ * - register `wreg` (which may be different from W0) is used to pass the (sole) argument,
+ * - register `wreg` (which may be different from W0) is used to return the result,
+ * - all other registers are callee-save (the values they hold are preserved).
*/
.macro READ_BARRIER_MARK_REG name, wreg, xreg
ENTRY \name
@@ -2615,15 +2683,9 @@ ENTRY art_quick_method_exit_hook
mov x0, xSELF // Thread::Current
bl artMethodExitHook // (Thread*, ArtMethod*, gpr_res*, fpr_res*)
- .cfi_remember_state
- cbnz x0, .Ldo_deliver_instrumentation_exception_exit // Handle exception
-
// Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
REFRESH_MARKING_REGISTER
ret
-.Ldo_deliver_instrumentation_exception_exit:
- CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
- DELIVER_PENDING_EXCEPTION_FRAME_READY
END art_quick_method_exit_hook
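Every `DEOPT_OR_*` macro introduced in this patch, on all three architectures, implements the same contract: return as before when no deopt check is pending, otherwise keep the result alive in a save-everything frame and let the runtime decide. A toy C++ model follows; the three-argument shape of `artDeoptimizeIfNeeded` is inferred from the rdi/rsi/rdx setup in the x86-64 macros later in this patch, so treat it as an assumption rather than the runtime's actual declaration.

    #include <cstdint>

    struct Thread {
      bool deopt_check_required = false;  // what THREAD_DEOPT_CHECK_REQUIRED_OFFSET reads
    };

    // Assumed shape, inferred from the argument registers set up in this patch.
    void artDeoptimizeIfNeeded(Thread* self, uintptr_t result, bool is_ref) {
      (void)self; (void)result; (void)is_ref;  // runtime work elided in this sketch
    }

    // What every rewritten downcall epilogue now does before returning the result.
    void ReturnFromRuntimeCall(Thread* self, uintptr_t result, bool is_ref) {
      if (!self->deopt_check_required) {
        return;  // fast path: identical to the old plain `ret`
      }
      // Slow path: a SAVE_EVERYTHING frame is built so the runtime may
      // deoptimize; is_ref tells it whether `result` must be treated as a
      // GC reference while that happens.
      artDeoptimizeIfNeeded(self, result, is_ref);
    }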
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index ec1e340245..7a1e6b05ad 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -53,6 +53,33 @@ std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromVarian
UNREACHABLE();
}
+std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromVariantAndHwcap(
+ InstructionSet isa, const std::string& variant, std::string* error_msg) {
+ auto variant_features = FromVariant(isa, variant, error_msg);
+ if (variant_features == nullptr) {
+ return nullptr;
+ }
+ // Pixel3a wrongly reports itself as cortex-a75, so validate the features
+ // against hwcaps.
+ // Note that when cross-compiling on device (using dex2oat32 to compile
+ // arm64), the hwcaps report that no feature is supported. This is
+ // currently our best approach for staying safe/correct. Using the
+ // cpu_features library might fix this issue.
+ if (isa == InstructionSet::kArm64) {
+ auto new_features = down_cast<const Arm64InstructionSetFeatures*>(variant_features.get())
+ ->IntersectWithHwcap();
+ if (!variant_features->Equals(new_features.get())) {
+ LOG(WARNING) << "Mismatch between instruction set variant of device ("
+ << *variant_features
+ << ") and features returned by the hardware (" << *new_features << ")";
+ }
+ return new_features;
+ } else {
+ // TODO: Implement this validation on all architectures.
+ return variant_features;
+ }
+}
+
std::unique_ptr<const InstructionSetFeatures> InstructionSetFeatures::FromBitmap(InstructionSet isa,
uint32_t bitmap) {
std::unique_ptr<const InstructionSetFeatures> result;
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index b80d36f153..cee8c5d42f 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -39,6 +39,12 @@ class InstructionSetFeatures {
const std::string& variant,
std::string* error_msg);
+ // Process a CPU variant string for the given ISA and make sure the features advertised
+ // are supported by the hardware. This is needed for the Pixel3a, which
+ // wrongly reports itself as cortex-a75.
+ static std::unique_ptr<const InstructionSetFeatures> FromVariantAndHwcap(
+ InstructionSet isa, const std::string& variant, std::string* error_msg);
+
// Parse a bitmap for the given isa and create an InstructionSetFeatures.
static std::unique_ptr<const InstructionSetFeatures> FromBitmap(InstructionSet isa,
uint32_t bitmap);
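A sketch of how a caller might use the new entry point. Only `FromVariantAndHwcap` itself comes from this patch; the `DetectArm64Features` wrapper and its error handling are illustrative and assume ART's headers and `LOG` macros are available.

    #include <memory>
    #include <string>

    std::unique_ptr<const InstructionSetFeatures> DetectArm64Features(
        const std::string& variant) {
      std::string error_msg;
      std::unique_ptr<const InstructionSetFeatures> features =
          InstructionSetFeatures::FromVariantAndHwcap(
              InstructionSet::kArm64, variant, &error_msg);
      if (features == nullptr) {
        LOG(ERROR) << "Unknown variant '" << variant << "': " << error_msg;
        return nullptr;
      }
      // On arm64 the result is the variant's features intersected with the
      // hwcaps the kernel actually reports, so a misreporting device cannot
      // silently enable unsupported instructions.
      return features;
    }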
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index 32888edf7b..5d4b24bc6b 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -16,27 +16,27 @@
.macro GENERATE_ALLOC_ENTRYPOINTS c_suffix, cxx_suffix
// Called by managed code to allocate an object of a resolved class.
-ONE_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_resolved\c_suffix, artAllocObjectFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate an object of an initialized class.
-ONE_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFromCodeInitialized\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate an object when the caller doesn't know whether it has access
// to the created type.
-ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks\c_suffix, artAllocObjectFromCodeWithChecks\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks\c_suffix, artAllocObjectFromCodeWithChecks\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate a string if it could not be removed by any optimizations
-ONE_ARG_DOWNCALL art_quick_alloc_string_object\c_suffix, artAllocStringObject\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_string_object\c_suffix, artAllocStringObject\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate an array of a resolved class.
-TWO_ARG_DOWNCALL art_quick_alloc_array_resolved\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate a string from bytes
-FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes\c_suffix, artAllocStringFromBytesFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes\c_suffix, artAllocStringFromBytesFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate a string from chars
-THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars\c_suffix, artAllocStringFromCharsFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars\c_suffix, artAllocStringFromCharsFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
// Called by managed code to allocate a string from string
-ONE_ARG_DOWNCALL art_quick_alloc_string_from_string\c_suffix, artAllocStringFromStringFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ONE_ARG_DOWNCALL art_quick_alloc_string_from_string\c_suffix, artAllocStringFromStringFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_alloc_array_resolved8\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_alloc_array_resolved16\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_alloc_array_resolved32\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-TWO_ARG_DOWNCALL art_quick_alloc_array_resolved64\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved8\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved16\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved32\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
+TWO_ARG_DOWNCALL art_quick_alloc_array_resolved64\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
.endm
.macro GENERATE_ALL_ALLOC_ENTRYPOINTS
@@ -58,29 +58,29 @@ GENERATE_ALLOC_ENTRYPOINTS _region_tlab_instrumented, RegionTLABInstrumented
// GENERATE_ALL_ALLOC_ENTRYPOINTS for selectively implementing allocation fast paths in
// hand-written assembly.
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_resolved ## c_suffix, artAllocObjectFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_initialized ## c_suffix, artAllocObjectFromCodeInitialized ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks ## c_suffix, artAllocObjectFromCodeWithChecks ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_object_with_checks ## c_suffix, artAllocObjectFromCodeWithChecks ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_string_object ## c_suffix, artAllocStringObject ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_string_object ## c_suffix, artAllocStringObject ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(c_suffix, cxx_suffix) \
- FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ FOUR_ARG_DOWNCALL art_quick_alloc_string_from_bytes ## c_suffix, artAllocStringFromBytesFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(c_suffix, cxx_suffix) \
- THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ THREE_ARG_DOWNCALL art_quick_alloc_string_from_chars ## c_suffix, artAllocStringFromCharsFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(c_suffix, cxx_suffix) \
- ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ ONE_ARG_DOWNCALL art_quick_alloc_string_from_string ## c_suffix, artAllocStringFromStringFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved8 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved8 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved16 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved16 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved32 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved32 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(c_suffix, cxx_suffix) \
- TWO_ARG_DOWNCALL art_quick_alloc_array_resolved64 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+ TWO_ARG_DOWNCALL art_quick_alloc_array_resolved64 ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER
.macro GENERATE_ALLOC_ENTRYPOINTS_FOR_REGION_TLAB_ALLOCATOR
// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
diff --git a/runtime/arch/x86/asm_support_x86.h b/runtime/arch/x86/asm_support_x86.h
index 737d736f01..f6889334df 100644
--- a/runtime/arch/x86/asm_support_x86.h
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -25,5 +25,7 @@
#define FRAME_SIZE_SAVE_EVERYTHING (48 + 64)
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
+#define SAVE_EVERYTHING_FRAME_EAX_OFFSET \
+ (FRAME_SIZE_SAVE_EVERYTHING - CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS * POINTER_SIZE)
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
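The new constant locates the spilled EAX at the low end of the core-register spill area of the save-everything frame. A standalone sanity check of the arithmetic follows; the spill count of 7 and the 4-byte pointer size are assumed values for illustration (the real ones come from the generated asm_support constants, and the x86-64 RAX twin later in this patch follows the same formula with 8-byte slots):

    // Compiles with any C++11 compiler; values are illustrative assumptions.
    constexpr int kFrameSizeSaveEverything = 48 + 64;  // FRAME_SIZE_SAVE_EVERYTHING
    constexpr int kAssumedCoreSpills = 7;              // assumed spill count
    constexpr int kPointerSize = 4;                    // 32-bit x86
    constexpr int kEaxOffset =
        kFrameSizeSaveEverything - kAssumedCoreSpills * kPointerSize;
    static_assert(kEaxOffset == 84,
                  "EAX sits at the lowest-addressed core spill slot");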
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
index d82750973d..c7cf856e60 100644
--- a/runtime/arch/x86/jni_entrypoints_x86.S
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -98,7 +98,7 @@ DEFINE_FUNCTION art_jni_dlsym_lookup_stub
// for @FastNative or @CriticalNative.
movl (%esp), %eax // Thread* self
movl THREAD_TOP_QUICK_FRAME_OFFSET(%eax), %eax // uintptr_t tagged_quick_frame
- andl LITERAL(0xfffffffe), %eax // ArtMethod** sp
+ andl LITERAL(TAGGED_JNI_SP_MASK_TOGGLED32), %eax // ArtMethod** sp
movl (%eax), %eax // ArtMethod* method
testl LITERAL(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE), \
ART_METHOD_ACCESS_FLAGS_OFFSET(%eax)
@@ -286,6 +286,12 @@ JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_read_barrier, artJniReadBarrier, eax
JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_method_start, artJniMethodStart, fs:THREAD_SELF_OFFSET
/*
+ * Trampoline to `artJniMethodEntryHook()` that preserves all managed arguments.
+ */
+JNI_SAVE_MANAGED_ARGS_TRAMPOLINE \
+ art_jni_method_entry_hook, artJniMethodEntryHook, fs:THREAD_SELF_OFFSET
+
+ /*
* Trampoline to `artJniMonitoredMethodStart()` that preserves all managed arguments.
*/
JNI_SAVE_MANAGED_ARGS_TRAMPOLINE \
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 7f1311c01e..bc61be58d2 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -794,12 +794,9 @@ MACRO3(ONE_ARG_SAVE_EVERYTHING_DOWNCALL, c_name, cxx_name, runtime_method_offset
call CALLVAR(cxx_name) // cxx_name(arg1, Thread*)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- testl %eax, %eax // If result is null, deliver the OOME.
+ testl %eax, %eax // If result is null, deliver pending exception.
jz 1f
- CFI_REMEMBER_STATE
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX // restore frame up to return address
- ret // return
- CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+ DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_EAX ebx, /* is_ref= */1 // Check for deopt
1:
DELIVER_PENDING_EXCEPTION_FRAME_READY
END_FUNCTION VAR(c_name)
@@ -809,18 +806,72 @@ MACRO2(ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT, c_name, cxx_name)
ONE_ARG_SAVE_EVERYTHING_DOWNCALL \c_name, \cxx_name, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
END_MACRO
-MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
- testl %eax, %eax // eax == 0 ?
- jz 1f // if eax == 0 goto 1
- ret // return
-1: // deliver exception on current thread
+MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER)
+ testl %eax, %eax // eax == 0 ?
+ jz 1f // if eax == 0 goto 1
+ DEOPT_OR_RETURN ebx, /*is_ref=*/1 // check if deopt is required
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
+
+MACRO0(RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION)
+ cmpl MACRO_LITERAL(0), %fs:THREAD_EXCEPTION_OFFSET // exception field == 0 ?
+ jne 1f // if exception field != 0 goto 1
+ DEOPT_OR_RETURN ebx // check if deopt is required
+1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
+MACRO2(DEOPT_OR_RETURN, temp, is_ref = 0)
+ cmpl LITERAL(0), %fs:THREAD_DEOPT_CHECK_REQUIRED_OFFSET
+ jne 2f
+ ret
+2:
+ SETUP_SAVE_EVERYTHING_FRAME \temp
+ subl MACRO_LITERAL(4), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(4)
+ pushl MACRO_LITERAL(\is_ref) // is_ref
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH_ARG eax // result
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current
+ CFI_ADJUST_CFA_OFFSET(4)
+ call SYMBOL(artDeoptimizeIfNeeded)
+ addl LITERAL(16), %esp // pop arguments
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+END_MACRO
+
+MACRO2(DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_EAX, temp, is_ref = 0)
+ cmpl LITERAL(0), %fs:THREAD_DEOPT_CHECK_REQUIRED_OFFSET
+ jne 2f
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+2:
+ movl %eax, SAVE_EVERYTHING_FRAME_EAX_OFFSET(%esp) // update eax in the frame
+ subl MACRO_LITERAL(4), %esp // alignment padding
+ CFI_ADJUST_CFA_OFFSET(4)
+ pushl MACRO_LITERAL(\is_ref) // is_ref
+ CFI_ADJUST_CFA_OFFSET(4)
+ PUSH_ARG eax // result
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current
+ CFI_ADJUST_CFA_OFFSET(4)
+ call SYMBOL(artDeoptimizeIfNeeded)
+ addl LITERAL(16), %esp // pop arguments
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
+END_MACRO
+
+
MACRO0(RETURN_IF_EAX_ZERO)
testl %eax, %eax // eax == 0 ?
jnz 1f // if eax != 0 goto 1
- ret // return
+ DEOPT_OR_RETURN ebx // check if deopt is needed
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
@@ -927,7 +978,7 @@ MACRO2(ART_QUICK_ALLOC_OBJECT_ROSALLOC, c_name, cxx_name)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
@@ -974,7 +1025,7 @@ MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_SLOW_PATH, cxx_name)
addl LITERAL(16), %esp
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_MACRO
MACRO2(ART_QUICK_ALLOC_OBJECT_TLAB, c_name, cxx_name)
@@ -1107,7 +1158,7 @@ MACRO3(GENERATE_ALLOC_ARRAY_TLAB, c_entrypoint, cxx_name, size_setup)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_FUNCTION VAR(c_entrypoint)
END_MACRO
@@ -1254,126 +1305,102 @@ MACRO2(POP_REG_NE, reg, exclude_reg)
.endif
END_MACRO
- /*
- * Macro to insert read barrier, only used in art_quick_aput_obj.
- * obj_reg and dest_reg are registers, offset is a defined literal such as
- * MIRROR_OBJECT_CLASS_OFFSET.
- * pop_eax is a boolean flag, indicating if eax is popped after the call.
- * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
- */
-MACRO4(READ_BARRIER, obj_reg, offset, dest_reg, pop_eax)
-#ifdef USE_READ_BARRIER
- PUSH eax // save registers used in art_quick_aput_obj
- PUSH ebx
- PUSH edx
- PUSH ecx
- // Outgoing argument set up
- pushl MACRO_LITERAL((RAW_VAR(offset))) // pass offset, double parentheses are necessary
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH RAW_VAR(obj_reg) // pass obj_reg
- PUSH eax // pass ref, just pass eax for now since parameter ref is unused
- call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset)
- // No need to unpoison return value in eax, artReadBarrierSlow() would do the unpoisoning.
- .ifnc RAW_VAR(dest_reg), eax
- movl %eax, REG_VAR(dest_reg) // save loaded ref in dest_reg
- .endif
- addl MACRO_LITERAL(12), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-12)
- POP_REG_NE ecx, RAW_VAR(dest_reg) // Restore args except dest_reg
- POP_REG_NE edx, RAW_VAR(dest_reg)
- POP_REG_NE ebx, RAW_VAR(dest_reg)
- .ifc RAW_VAR(pop_eax), true
- POP_REG_NE eax, RAW_VAR(dest_reg)
- .endif
-#else
- movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg)
- UNPOISON_HEAP_REF RAW_VAR(dest_reg)
-#endif // USE_READ_BARRIER
-END_MACRO
-
DEFINE_FUNCTION art_quick_aput_obj
test %edx, %edx // store of null
- jz .Ldo_aput_null
- READ_BARRIER eax, MIRROR_OBJECT_CLASS_OFFSET, ebx, true
- READ_BARRIER ebx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ebx, true
- // value's type == array's component type - trivial assignability
-#if defined(USE_READ_BARRIER)
- READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, false
- cmpl %eax, %ebx
- POP eax // restore eax from the push in the beginning of READ_BARRIER macro
- // This asymmetric push/pop saves a push of eax and maintains stack alignment.
-#elif defined(USE_HEAP_POISONING)
- PUSH eax // save eax
- movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
- UNPOISON_HEAP_REF eax
- cmpl %eax, %ebx
- POP eax // restore eax
-#else
- cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx
-#endif
- jne .Lcheck_assignability
-.Ldo_aput:
+ jz .Laput_obj_null
+ movl MIRROR_OBJECT_CLASS_OFFSET(%eax), %ebx
+ UNPOISON_HEAP_REF ebx
+#ifdef USE_READ_BARRIER
+ cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
+ jnz .Laput_obj_gc_marking
+#endif // USE_READ_BARRIER
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx
+ cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx // Both poisoned if heap poisoning is enabled.
+ jne .Laput_obj_check_assignability
+.Laput_obj_store:
POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
movl %fs:THREAD_CARD_TABLE_OFFSET, %edx
shrl LITERAL(CARD_TABLE_CARD_SHIFT), %eax
movb %dl, (%edx, %eax)
ret
-.Ldo_aput_null:
+
+.Laput_obj_null:
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
ret
-.Lcheck_assignability:
- PUSH eax // save arguments
- PUSH ecx
- PUSH edx
-#if defined(USE_READ_BARRIER)
- subl LITERAL(4), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(4)
- READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, true
- subl LITERAL(4), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH eax // pass arg2 - type of the value to be stored
-#elif defined(USE_HEAP_POISONING)
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
+
+.Laput_obj_check_assignability:
+ UNPOISON_HEAP_REF ebx // Unpoison array component type if poisoning is enabled.
+ PUSH_ARG eax // Save `art_quick_aput_obj()` arguments.
+ PUSH_ARG ecx
+ PUSH_ARG edx
+ INCREASE_FRAME 8 // Alignment padding.
+ // Pass arg2 - type of the value to be stored.
+#if defined(USE_HEAP_POISONING)
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
UNPOISON_HEAP_REF eax
- PUSH eax // pass arg2 - type of the value to be stored
+ PUSH_ARG eax
#else
- subl LITERAL(8), %esp // alignment padding
- CFI_ADJUST_CFA_OFFSET(8)
- pushl MIRROR_OBJECT_CLASS_OFFSET(%edx) // pass arg2 - type of the value to be stored
+ pushl MIRROR_OBJECT_CLASS_OFFSET(%edx)
CFI_ADJUST_CFA_OFFSET(4)
#endif
- PUSH ebx // pass arg1 - component type of the array
+.Laput_obj_check_assignability_call:
+ PUSH_ARG ebx // Pass arg1 - component type of the array.
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
- addl LITERAL(16), %esp // pop arguments
- CFI_ADJUST_CFA_OFFSET(-16)
+ DECREASE_FRAME 16 // Pop `artIsAssignableFromCode()` arguments
testl %eax, %eax
+ POP_ARG edx // Pop `art_quick_aput_obj()` arguments; flags unaffected.
+ POP_ARG ecx
+ POP_ARG eax
jz .Lthrow_array_store_exception
- POP edx
- POP ecx
- POP eax
POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4) // do the aput
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4) // Do the aput.
movl %fs:THREAD_CARD_TABLE_OFFSET, %edx
shrl LITERAL(CARD_TABLE_CARD_SHIFT), %eax
movb %dl, (%edx, %eax)
ret
- CFI_ADJUST_CFA_OFFSET(12) // 3 POP after the jz for unwinding.
+
.Lthrow_array_store_exception:
- POP edx
- POP ecx
- POP eax
- SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx // save all registers as basis for long jump context
- // Outgoing argument set up
- PUSH eax // alignment padding
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- CFI_ADJUST_CFA_OFFSET(4)
- PUSH edx // pass arg2 - value
- PUSH eax // pass arg1 - array
+#ifdef USE_READ_BARRIER
+ CFI_REMEMBER_STATE
+#endif // USE_READ_BARRIER
+ SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx // Save all registers as basis for long jump context.
+ // Outgoing argument set up.
+ PUSH_ARG eax // Alignment padding.
+ PUSH_ARG fs:THREAD_SELF_OFFSET // Pass Thread::Current()
+ PUSH_ARG edx // Pass arg2 - value.
+ PUSH_ARG eax // Pass arg1 - array.
call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
UNREACHABLE
+
+#ifdef USE_READ_BARRIER
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, 4
+.Laput_obj_gc_marking:
+ PUSH_ARG eax // Save `art_quick_aput_obj()` arguments.
+ PUSH_ARG ecx // We need to align stack for `art_quick_read_barrier_mark_regNN`
+ PUSH_ARG edx // and use a register (EAX) as a temporary for the object class.
+ call SYMBOL(art_quick_read_barrier_mark_reg03) // Mark EBX.
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx
+ UNPOISON_HEAP_REF ebx
+ call SYMBOL(art_quick_read_barrier_mark_reg03) // Mark EBX.
+ movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
+ UNPOISON_HEAP_REF eax
+ call SYMBOL(art_quick_read_barrier_mark_reg00) // Mark EAX.
+ cmpl %eax, %ebx
+ jne .Laput_obj_check_assignability_gc_marking
+ POP_ARG edx // Restore `art_quick_aput_obj()` arguments.
+ POP_ARG ecx
+ POP_ARG eax
+ jmp .Laput_obj_store
+
+.Laput_obj_check_assignability_gc_marking:
+ // Prepare arguments in line with `.Laput_obj_check_assignability_call` and jump there.
+ // (EAX, ECX and EDX were already saved in the right stack slots.)
+ INCREASE_FRAME 8 // Alignment padding.
+ PUSH_ARG eax // Pass arg2 - type of the value to be stored.
+ // The arg1 shall be pushed at `.Laput_obj_check_assignability_call`.
+ jmp .Laput_obj_check_assignability_call
+#endif // USE_READ_BARRIER
END_FUNCTION art_quick_aput_obj
DEFINE_FUNCTION art_quick_memcpy
@@ -1501,21 +1528,21 @@ END_FUNCTION art_quick_lushr
// Note: Functions `art{Get,Set}<Kind>{Static,Instance}FromCompiledCode` are
// defined with a macro in runtime/entrypoints/quick/quick_field_entrypoints.cc.
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_EAX_ZERO
@@ -1616,7 +1643,7 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
movl %eax, %edi // remember code pointer in EDI
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
test %eax, %eax // if code pointer is null goto deliver pending exception
jz 1f
RESTORE_SAVE_REFS_AND_ARGS_FRAME_AND_JUMP
1:
@@ -1686,6 +1713,16 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
CFI_REMEMBER_STATE
CFI_DEF_CFA_REGISTER(esp)
+ // Quick expects the return value to be in xmm0.
+ movd %eax, %xmm0
+ movd %edx, %xmm1
+ punpckldq %xmm1, %xmm0
+
+ LOAD_RUNTIME_INSTANCE ebx
+ cmpb MACRO_LITERAL(0), INSTRUMENTATION_STUBS_INSTALLED_OFFSET_FROM_RUNTIME_INSTANCE(%ebx)
+ jne .Lcall_method_exit_hook
+.Lcall_method_exit_hook_done:
+
// Tear down the callee-save frame.
// Remove space for FPR args and EAX
addl LITERAL(4 + 4 * 8), %esp
@@ -1698,12 +1735,12 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
POP ebp // Restore callee saves
POP esi
POP edi
- // Quick expects the return value to be in xmm0.
- movd %eax, %xmm0
- movd %edx, %xmm1
- punpckldq %xmm1, %xmm0
ret
+.Lcall_method_exit_hook:
+ call SYMBOL(art_quick_method_exit_hook)
+ jmp .Lcall_method_exit_hook_done
+
// Undo the unwinding information from above since it doesn't apply below.
CFI_RESTORE_STATE_AND_DEF_CFA ebp, 64
.Lexception_in_native:
@@ -1974,34 +2011,29 @@ DEFINE_FUNCTION art_quick_string_builder_append
SETUP_SAVE_REFS_ONLY_FRAME ebx // save ref containing registers for GC
// Outgoing argument set up
leal FRAME_SIZE_SAVE_REFS_ONLY + __SIZEOF_POINTER__(%esp), %edi // prepare args
- push %eax // push padding
+ push %eax // push padding
CFI_ADJUST_CFA_OFFSET(4)
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- push %edi // pass args
+ push %edi // pass args
CFI_ADJUST_CFA_OFFSET(4)
- push %eax // pass format
+ push %eax // pass format
CFI_ADJUST_CFA_OFFSET(4)
- call SYMBOL(artStringBuilderAppend) // (uint32_t, const unit32_t*, Thread*)
- addl MACRO_LITERAL(16), %esp // pop arguments
+ call SYMBOL(artStringBuilderAppend) // (uint32_t, const uint32_t*, Thread*)
+ addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
- RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_FUNCTION art_quick_string_builder_append
// Create a function `name` calling the ReadBarrier::Mark routine,
// getting its argument and returning its result through register
// `reg`, saving and restoring all caller-save registers.
//
-// If `reg` is different from `eax`, the generated function follows a
-// non-standard runtime calling convention:
-// - register `reg` is used to pass the (sole) argument of this function
-// (instead of EAX);
-// - register `reg` is used to return the result of this function
-// (instead of EAX);
-// - EAX is treated like a normal (non-argument) caller-save register;
-// - everything else is the same as in the standard runtime calling
-// convention (e.g. standard callee-save registers are preserved).
+// The generated function follows a non-standard runtime calling convention:
+// - register `reg` (which may differ from EAX) is used to pass the (sole) argument,
+// - register `reg` (which may differ from EAX) is used to return the result,
+// - all other registers are callee-save (the values they hold are preserved).
MACRO2(READ_BARRIER_MARK_REG, name, reg)
DEFINE_FUNCTION VAR(name)
// Null check so that we can load the lock word.
@@ -2354,16 +2386,9 @@ DEFINE_FUNCTION art_quick_method_exit_hook
addl LITERAL(32), %esp // Pop arguments and gpr_result.
CFI_ADJUST_CFA_OFFSET(-32)
- cmpl LITERAL(1), %eax // Check if we returned error.
- CFI_REMEMBER_STATE
- je .Ldo_deliver_instrumentation_exception_exit
-
// Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
ret
-.Ldo_deliver_instrumentation_exception_exit:
- CFI_RESTORE_STATE_AND_DEF_CFA esp, FRAME_SIZE_SAVE_EVERYTHING
- DELIVER_PENDING_EXCEPTION_FRAME_READY
END_FUNCTION art_quick_method_exit_hook
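Both the arm64 and x86 generic JNI trampolines now gate the method exit hook on the same runtime flag, keeping the native return value live in d0/xmm0 across the call. A toy model of the epilogue follows; the `stubs_installed` field name is an assumption read off the `INSTRUMENTATION_STUBS_INSTALLED_OFFSET_FROM_RUNTIME_INSTANCE` constant.

    struct Instrumentation { bool stubs_installed = false; };

    struct Runtime {
      Instrumentation instrumentation;
      static Runtime* Current() {  // LOAD_RUNTIME_INSTANCE
        static Runtime instance;
        return &instance;
      }
    };

    void MethodExitHook() { /* art_quick_method_exit_hook: notify exit listeners */ }

    void GenericJniEpilogue() {
      // Checked after the native call returns, before tearing down the frame.
      if (Runtime::Current()->instrumentation.stubs_installed) {
        MethodExitHook();  // .Lcall_method_exit_hook, then back to ..._done
      }
      // .Lcall_method_exit_hook_done: restore the callee-save frame and return.
    }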
diff --git a/runtime/arch/x86_64/asm_support_x86_64.h b/runtime/arch/x86_64/asm_support_x86_64.h
index 51befbe7b8..e389c781e5 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.h
+++ b/runtime/arch/x86_64/asm_support_x86_64.h
@@ -25,5 +25,7 @@
#define FRAME_SIZE_SAVE_EVERYTHING (144 + 16*8)
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
+#define SAVE_EVERYTHING_FRAME_RAX_OFFSET \
+ (FRAME_SIZE_SAVE_EVERYTHING - CALLEE_SAVE_EVERYTHING_NUM_CORE_SPILLS * POINTER_SIZE)
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_H_
diff --git a/runtime/arch/x86_64/jni_entrypoints_x86_64.S b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
index 0d5fa3f3e0..55f01b78fa 100644
--- a/runtime/arch/x86_64/jni_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
@@ -118,7 +118,7 @@ DEFINE_FUNCTION art_jni_dlsym_lookup_stub
// Call artFindNativeMethod() for normal native and artFindNativeMethodRunnable()
// for @FastNative or @CriticalNative.
movq THREAD_TOP_QUICK_FRAME_OFFSET(%rdi), %rax // uintptr_t tagged_quick_frame
- andq LITERAL(0xfffffffffffffffe), %rax // ArtMethod** sp
+ andq LITERAL(TAGGED_JNI_SP_MASK_TOGGLED64), %rax // ArtMethod** sp
movq (%rax), %rax // ArtMethod* method
testl LITERAL(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE), \
ART_METHOD_ACCESS_FLAGS_OFFSET(%rax)
@@ -400,6 +400,12 @@ JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_read_barrier, artJniReadBarrier, none
JNI_SAVE_MANAGED_ARGS_TRAMPOLINE art_jni_method_start, artJniMethodStart, gs:THREAD_SELF_OFFSET
/*
+ * Trampoline to `artJniMethodEntryHook()` that preserves all managed arguments.
+ */
+JNI_SAVE_MANAGED_ARGS_TRAMPOLINE \
+ art_jni_method_entry_hook, artJniMethodEntryHook, gs:THREAD_SELF_OFFSET
+
+ /*
* Trampoline to `artJniMonitoredMethodStart()` that preserves all managed arguments.
*/
JNI_SAVE_MANAGED_ARGS_TRAMPOLINE \
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 673696c714..0d79d00dca 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -731,12 +731,9 @@ MACRO3(ONE_ARG_SAVE_EVERYTHING_DOWNCALL, c_name, cxx_name, runtime_method_offset
movl %eax, %edi // pass the index of the constant as arg0
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
- testl %eax, %eax // If result is null, deliver the OOME.
+ testl %eax, %eax // If result is null, deliver pending exception.
jz 1f
- CFI_REMEMBER_STATE
- RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX // restore frame up to return address
- ret
- CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+ DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_RAX /*is_ref=*/1
1:
DELIVER_PENDING_EXCEPTION_FRAME_READY
END_FUNCTION VAR(c_name)
@@ -746,18 +743,65 @@ MACRO2(ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT, c_name, cxx_name)
ONE_ARG_SAVE_EVERYTHING_DOWNCALL \c_name, \cxx_name, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
END_MACRO
-MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER)
+MACRO0(RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER)
testq %rax, %rax // rax == 0 ?
jz 1f // if rax == 0 goto 1
- ret // return
+ DEOPT_OR_RETURN /*is_ref=*/1 // Check if deopt is required
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
+
+MACRO0(RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION)
+ movq %gs:THREAD_EXCEPTION_OFFSET, %rcx // get exception field
+ testq %rcx, %rcx // rcx == 0 ?
+ jnz 1f // if rcx != 0 goto 1
+ DEOPT_OR_RETURN // Check if deopt is required
+1: // deliver exception on current thread
+ DELIVER_PENDING_EXCEPTION
+END_MACRO
+
+MACRO1(DEOPT_OR_RETURN, is_ref = 0)
+ cmpl LITERAL(0), %gs:THREAD_DEOPT_CHECK_REQUIRED_OFFSET
+ jne 2f
+ ret
+2:
+ SETUP_SAVE_EVERYTHING_FRAME
+ movq LITERAL(\is_ref), %rdx // pass whether result is a reference
+ movq %rax, %rsi // pass the result
+ movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current
+ call SYMBOL(artDeoptimizeIfNeeded)
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+END_MACRO
+
+MACRO1(DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_RAX, is_ref = 0)
+ cmpl LITERAL(0), %gs:THREAD_DEOPT_CHECK_REQUIRED_OFFSET
+ jne 2f
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+2:
+ movq %rax, SAVE_EVERYTHING_FRAME_RAX_OFFSET(%rsp) // update result in the frame
+ movq LITERAL(\is_ref), %rdx // pass whether result is a reference
+ movq %rax, %rsi // pass the result
+ movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current
+ call SYMBOL(artDeoptimizeIfNeeded)
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
+END_MACRO
+
+
+
MACRO0(RETURN_IF_EAX_ZERO)
testl %eax, %eax // eax == 0 ?
jnz 1f // if eax != 0 goto 1
- ret // return
+ DEOPT_OR_RETURN // Check if we need a deopt
1: // deliver exception on current thread
DELIVER_PENDING_EXCEPTION
END_MACRO
@@ -859,7 +903,7 @@ MACRO2(ART_QUICK_ALLOC_OBJECT_ROSALLOC, c_name, cxx_name)
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_FUNCTION VAR(c_name)
END_MACRO
@@ -931,7 +975,7 @@ MACRO1(ALLOC_OBJECT_TLAB_SLOW_PATH, cxx_name)
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_MACRO
// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB). May be
@@ -1019,7 +1063,7 @@ MACRO3(GENERATE_ALLOC_ARRAY_TLAB, c_entrypoint, cxx_name, size_setup)
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call CALLVAR(cxx_name) // cxx_name(arg0, arg1, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deliver exception
END_FUNCTION VAR(c_entrypoint)
END_MACRO
@@ -1163,134 +1207,89 @@ MACRO2(POP_REG_NE, reg, exclude_reg)
.endif
END_MACRO
- /*
- * Macro to insert read barrier, used in art_quick_aput_obj.
- * obj_reg and dest_reg{32|64} are registers, offset is a defined literal such as
- * MIRROR_OBJECT_CLASS_OFFSET. dest_reg needs two versions to handle the mismatch between
- * 64b PUSH/POP and 32b argument.
- * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
- *
- * As with art_quick_aput_obj function, the 64b versions are in comments.
- */
-MACRO4(READ_BARRIER, obj_reg, offset, dest_reg32, dest_reg64)
+DEFINE_FUNCTION art_quick_aput_obj
+ test %edx, %edx // store of null
+ jz .Laput_obj_null
+ movl MIRROR_OBJECT_CLASS_OFFSET(%rdi), %ecx
+ UNPOISON_HEAP_REF ecx
#ifdef USE_READ_BARRIER
- PUSH rax // save registers that might be used
- PUSH rdi
- PUSH rsi
- PUSH rdx
- PUSH rcx
- SETUP_FP_CALLEE_SAVE_FRAME
- // Outgoing argument set up
- // movl REG_VAR(ref_reg32), %edi // pass ref, no-op for now since parameter ref is unused
- // // movq REG_VAR(ref_reg64), %rdi
- movl REG_VAR(obj_reg), %esi // pass obj_reg
- // movq REG_VAR(obj_reg), %rsi
- movl MACRO_LITERAL((RAW_VAR(offset))), %edx // pass offset, double parentheses are necessary
- // movq MACRO_LITERAL((RAW_VAR(offset))), %rdx
- call SYMBOL(artReadBarrierSlow) // artReadBarrierSlow(ref, obj_reg, offset)
- // No need to unpoison return value in rax, artReadBarrierSlow() would do the unpoisoning.
- .ifnc RAW_VAR(dest_reg32), eax
- // .ifnc RAW_VAR(dest_reg64), rax
- movl %eax, REG_VAR(dest_reg32) // save loaded ref in dest_reg
- // movq %rax, REG_VAR(dest_reg64)
- .endif
- RESTORE_FP_CALLEE_SAVE_FRAME
- POP_REG_NE rcx, RAW_VAR(dest_reg64) // Restore registers except dest_reg
- POP_REG_NE rdx, RAW_VAR(dest_reg64)
- POP_REG_NE rsi, RAW_VAR(dest_reg64)
- POP_REG_NE rdi, RAW_VAR(dest_reg64)
- POP_REG_NE rax, RAW_VAR(dest_reg64)
-#else
- movl RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg32)
- // movq RAW_VAR(offset)(REG_VAR(obj_reg)), REG_VAR(dest_reg64)
- UNPOISON_HEAP_REF RAW_VAR(dest_reg32) // UNPOISON_HEAP_REF only takes a 32b register
+ cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
+ jnz .Laput_obj_gc_marking
#endif // USE_READ_BARRIER
-END_MACRO
-
-DEFINE_FUNCTION art_quick_aput_obj
- testl %edx, %edx // store of null
-// test %rdx, %rdx
- jz .Ldo_aput_null
- READ_BARRIER edi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx
- // READ_BARRIER rdi, MIRROR_OBJECT_CLASS_OFFSET, ecx, rcx
- READ_BARRIER ecx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx
- // READ_BARRIER rcx, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, ecx, rcx
-#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER)
- READ_BARRIER edx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax // rax is free.
- // READ_BARRIER rdx, MIRROR_OBJECT_CLASS_OFFSET, eax, rax
- cmpl %eax, %ecx // value's type == array's component type - trivial assignability
-#else
- cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ecx // value's type == array's component type - trivial assignability
-// cmpq MIRROR_CLASS_OFFSET(%rdx), %rcx
-#endif
- jne .Lcheck_assignability
-.Ldo_aput:
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rcx), %ecx
+ cmpl MIRROR_OBJECT_CLASS_OFFSET(%rdx), %ecx // Both poisoned if heap poisoning is enabled.
+ jne .Laput_obj_check_assignability
+.Laput_obj_store:
POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
-// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
movq %gs:THREAD_CARD_TABLE_OFFSET, %rdx
shrl LITERAL(CARD_TABLE_CARD_SHIFT), %edi
-// shrl LITERAL(CARD_TABLE_CARD_SHIFT), %rdi
- movb %dl, (%rdx, %rdi) // Note: this assumes that top 32b of %rdi are zero
+ movb %dl, (%rdx, %rdi)
ret
-.Ldo_aput_null:
- movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
-// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
- ret
-.Lcheck_assignability:
- // Save arguments.
- PUSH rdi
- PUSH rsi
- PUSH rdx
- SETUP_FP_CALLEE_SAVE_FRAME
-#if defined(USE_HEAP_POISONING) || defined(USE_READ_BARRIER)
- // The load of MIRROR_OBJECT_CLASS_OFFSET(%edx) is redundant, eax still holds the value.
- movl %eax, %esi // Pass arg2 = value's class.
- // movq %rax, %rsi
-#else
- // "Uncompress" = do nothing, as already zero-extended on load.
- movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
-#endif
- movq %rcx, %rdi // Pass arg1 = array's component type.
+.Laput_obj_null:
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
+ ret
+.Laput_obj_check_assignability:
+ UNPOISON_HEAP_REF ecx // Unpoison array component type if poisoning is enabled.
+ PUSH_ARG rdi // Save arguments.
+ PUSH_ARG rsi
+ PUSH_ARG rdx
+ movl MIRROR_OBJECT_CLASS_OFFSET(%rdx), %esi // Pass arg2 = value's class.
+ UNPOISON_HEAP_REF esi
+.Laput_obj_check_assignability_call:
+ movl %ecx, %edi // Pass arg1 = array's component type.
+ SETUP_FP_CALLEE_SAVE_FRAME
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
-
- // Exception?
- testq %rax, %rax
- jz .Lthrow_array_store_exception
-
- RESTORE_FP_CALLEE_SAVE_FRAME
- // Restore arguments.
- POP rdx
- POP rsi
- POP rdi
-
+ RESTORE_FP_CALLEE_SAVE_FRAME // Restore FP registers.
+ POP_ARG rdx // Restore arguments.
+ POP_ARG rsi
+ POP_ARG rdi
+ testq %rax, %rax // Check for exception.
+ jz .Laput_obj_throw_array_store_exception
POISON_HEAP_REF edx
- movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
-// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
+ movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
movq %gs:THREAD_CARD_TABLE_OFFSET, %rdx
shrl LITERAL(CARD_TABLE_CARD_SHIFT), %edi
-// shrl LITERAL(CARD_TABLE_CARD_SHIFT), %rdi
- movb %dl, (%rdx, %rdi) // Note: this assumes that top 32b of %rdi are zero
-// movb %dl, (%rdx, %rdi)
+ movb %dl, (%rdx, %rdi)
ret
- CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds.
-.Lthrow_array_store_exception:
- RESTORE_FP_CALLEE_SAVE_FRAME
- // Restore arguments.
- POP rdx
- POP rsi
- POP rdi
+.Laput_obj_throw_array_store_exception:
+#ifdef USE_READ_BARRIER
+ CFI_REMEMBER_STATE
+#endif // USE_READ_BARRIER
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // Save all registers as basis for long jump context.
-
// Outgoing argument set up.
movq %rdx, %rsi // Pass arg 2 = value.
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current().
// Pass arg 1 = array.
call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*)
UNREACHABLE
+
+#ifdef USE_READ_BARRIER
+ CFI_RESTORE_STATE_AND_DEF_CFA esp, 4
+.Laput_obj_gc_marking:
+ // We need to align the stack for `art_quick_read_barrier_mark_regNN`.
+ INCREASE_FRAME 8 // Stack alignment.
+ call SYMBOL(art_quick_read_barrier_mark_reg01) // Mark ECX
+ movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rcx), %ecx
+ UNPOISON_HEAP_REF ecx
+ call SYMBOL(art_quick_read_barrier_mark_reg01) // Mark ECX
+ movl MIRROR_OBJECT_CLASS_OFFSET(%rdx), %eax
+ UNPOISON_HEAP_REF eax
+ call SYMBOL(art_quick_read_barrier_mark_reg00) // Mark EAX
+ DECREASE_FRAME 8 // Remove stack alignment.
+ cmpl %eax, %ecx
+ je .Laput_obj_store
+ // Prepare arguments in line with `.Laput_obj_check_assignability_call` and jump there.
+ PUSH_ARG rdi // Save arguments.
+ PUSH_ARG rsi
+ PUSH_ARG rdx
+ movl %eax, %esi // Pass arg2 = value's class.
+ // Arg1 (in ECX) is moved to EDI at `.Laput_obj_check_assignability_call`.
+ jmp .Laput_obj_check_assignability_call
+#endif // USE_READ_BARRIER
END_FUNCTION art_quick_aput_obj
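
For reference, a minimal C++ sketch of the logic `art_quick_aput_obj` implements, assuming simplified helpers (`SetWithoutChecks` and `MarkCard` stand in for the real store and card-marking paths):

    void AputObj(mirror::ObjectArray<mirror::Object>* array,
                 int32_t index,
                 mirror::Object* value) REQUIRES_SHARED(Locks::mutator_lock_) {
      if (value == nullptr) {
        array->SetWithoutChecks(index, nullptr);  // Null stores need no type check.
        return;
      }
      mirror::Class* component_type = array->GetClass()->GetComponentType();
      // Fast path: the value's class is exactly the array's component type.
      if (component_type != value->GetClass() &&
          !artIsAssignableFromCode(component_type, value->GetClass())) {
        artThrowArrayStoreException(array, value, Thread::Current());  // Does not return.
      }
      array->SetWithoutChecks(index, value);
      // Dirty the card for `array` so the GC rescans it for the new reference.
      Runtime::Current()->GetHeap()->GetCardTable()->MarkCard(array);
    }
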
// TODO: This is quite silly on X86_64 now.
@@ -1324,27 +1323,27 @@ THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCod
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_EAX_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_EAX_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_EAX_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_EAX_ZERO
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DEOPT_OR_DELIVER_PENDING_EXCEPTION
DEFINE_FUNCTION art_quick_proxy_invoke_handler
SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_RDI
@@ -1575,6 +1574,14 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
CFI_REMEMBER_STATE
CFI_DEF_CFA_REGISTER(rsp)
+ // Store the result also into XMM0, for when it's an FPR return.
+ movq %rax, %xmm0
+
+ LOAD_RUNTIME_INSTANCE rcx
+ cmpb MACRO_LITERAL(0), INSTRUMENTATION_STUBS_INSTALLED_OFFSET_FROM_RUNTIME_INSTANCE(%rcx)
+ jne .Lcall_method_exit_hook
+.Lcall_method_exit_hook_done:
+
// Tear down the callee-save frame.
// Load FPRs.
// movq %xmm0, 16(%rsp) // doesn't make sense!!!
@@ -1604,10 +1611,12 @@ DEFINE_FUNCTION art_quick_generic_jni_trampoline
POP r13 // Callee save.
POP r14 // Callee save.
POP r15 // Callee save.
- // store into fpr, for when it's a fpr return...
- movq %rax, %xmm0
ret
+.Lcall_method_exit_hook:
+ call art_quick_method_exit_hook
+ jmp .Lcall_method_exit_hook_done
+
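
In C++ terms, the added fast path boils down to the check below; `AreExitStubsInstalled()` is an assumed accessor here, while the assembly reads the flag directly off the `Runtime` instance:

    // Sketch only: the trampoline performs this check in assembly after the native call.
    if (Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()) {
      art_quick_method_exit_hook();  // Slow path, taken only while instrumentation is active.
    }
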
// Undo the unwinding information from above since it doesn't apply below.
CFI_RESTORE_STATE_AND_DEF_CFA rbp, 208
.Lexception_in_native:
@@ -1846,7 +1855,7 @@ DEFINE_FUNCTION art_quick_string_builder_append
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call artStringBuilderAppend // (uint32_t, const uint32_t*, Thread*)
RESTORE_SAVE_REFS_ONLY_FRAME // restore frame up to return address
- RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER // return or deliver exception
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DEOPT_OR_DELIVER // return or deopt or deliver exception
END_FUNCTION art_quick_string_builder_append
// Create a function `name` calling the ReadBarrier::Mark routine,
@@ -1855,16 +1864,9 @@ END_FUNCTION art_quick_string_builder_append
//
// The generated function follows a non-standard runtime calling
// convention:
-// - register `reg` (which may be different from RDI) is used to pass
-// the (sole) argument of this function;
-// - register `reg` (which may be different from RAX) is used to return
-// the result of this function (instead of RAX);
-// - if `reg` is different from `rdi`, RDI is treated like a normal
-// (non-argument) caller-save register;
-// - if `reg` is different from `rax`, RAX is treated like a normal
-// (non-result) caller-save register;
-// - everything else is the same as in the standard runtime calling
-// convention (e.g. standard callee-save registers are preserved).
+// - register `reg` (which may be different from RDI) is used to pass the (sole) argument,
+// - register `reg` (which may be different from RAX) is used to return the result,
+// - all other registers are callee-save (the values they hold are preserved).
MACRO2(READ_BARRIER_MARK_REG, name, reg)
DEFINE_FUNCTION VAR(name)
// Null check so that we can load the lock word.
@@ -2172,7 +2174,6 @@ DEFINE_FUNCTION art_quick_method_entry_hook
END_FUNCTION art_quick_method_entry_hook
// On entry, method is at the bottom of the stack.
-// and r8 has should_deopt_frame value.
DEFINE_FUNCTION art_quick_method_exit_hook
SETUP_SAVE_EVERYTHING_FRAME
@@ -2183,14 +2184,7 @@ DEFINE_FUNCTION art_quick_method_exit_hook
movq %gs:THREAD_SELF_OFFSET, %rdi // Thread::Current
call SYMBOL(artMethodExitHook) // (Thread*, SP, gpr_res*, fpr_res*)
- cmpq LITERAL(1), %rax
- CFI_REMEMBER_STATE
- je .Ldo_deliver_instrumentation_exception_exit
-
// Normal return.
RESTORE_SAVE_EVERYTHING_FRAME
ret
-.Ldo_deliver_instrumentation_exception_exit:
- CFI_RESTORE_STATE_AND_DEF_CFA rsp, FRAME_SIZE_SAVE_EVERYTHING
- DELIVER_PENDING_EXCEPTION_FRAME_READY
END_FUNCTION art_quick_method_exit_hook
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 844a0ffa9b..b071714382 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -388,17 +388,19 @@ inline bool ArtMethod::HasSingleImplementation() {
return (GetAccessFlags() & kAccSingleImplementation) != 0;
}
-template<ReadBarrierOption kReadBarrierOption, typename RootVisitorType>
+template<ReadBarrierOption kReadBarrierOption, bool kVisitProxyMethod, typename RootVisitorType>
void ArtMethod::VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) {
if (LIKELY(!declaring_class_.IsNull())) {
visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
- ObjPtr<mirror::Class> klass = declaring_class_.Read<kReadBarrierOption>();
- if (UNLIKELY(klass->IsProxyClass())) {
- // For normal methods, dex cache shortcuts will be visited through the declaring class.
- // However, for proxies we need to keep the interface method alive, so we visit its roots.
- ArtMethod* interface_method = GetInterfaceMethodForProxyUnchecked(pointer_size);
- DCHECK(interface_method != nullptr);
- interface_method->VisitRoots<kReadBarrierOption>(visitor, pointer_size);
+ if (kVisitProxyMethod) {
+ ObjPtr<mirror::Class> klass = declaring_class_.Read<kReadBarrierOption>();
+ if (UNLIKELY(klass->IsProxyClass())) {
+ // For normal methods, dex cache shortcuts will be visited through the declaring class.
+ // However, for proxies we need to keep the interface method alive, so we visit its roots.
+ ArtMethod* interface_method = GetInterfaceMethodForProxyUnchecked(pointer_size);
+ DCHECK(interface_method != nullptr);
+ interface_method->VisitRoots<kReadBarrierOption, kVisitProxyMethod>(visitor, pointer_size);
+ }
}
}
}
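
A hypothetical caller illustrating the new `kVisitProxyMethod` parameter; the counting visitor is made up for the example:

    class CountingRootVisitor {
     public:
      void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        ++count_;
      }
      size_t count_ = 0;
    };

    size_t CountDeclaringClassRoot(ArtMethod* method, PointerSize pointer_size)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      CountingRootVisitor visitor;
      // Visit only the declaring class root, skipping the proxy interface-method walk.
      method->VisitRoots<kWithoutReadBarrier, /*kVisitProxyMethod=*/ false>(visitor, pointer_size);
      return visitor.count_;
    }
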
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index f6f8b5f545..71f08e7b4d 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -150,10 +150,34 @@ uint16_t ArtMethod::FindObsoleteDexClassDefIndex() {
return dex_file->GetIndexForClassDef(*class_def);
}
-void ArtMethod::ThrowInvocationTimeError() {
+void ArtMethod::ThrowInvocationTimeError(ObjPtr<mirror::Object> receiver) {
DCHECK(!IsInvokable());
if (IsDefaultConflicting()) {
ThrowIncompatibleClassChangeErrorForMethodConflict(this);
+ } else if (GetDeclaringClass()->IsInterface() && receiver != nullptr) {
+ // If this was an interface call, check whether there is a method in the
+ // superclass chain that isn't public. In this situation, we should throw an
+ // IllegalAccessError.
+ DCHECK(IsAbstract());
+ ObjPtr<mirror::Class> current = receiver->GetClass();
+ while (current != nullptr) {
+ for (ArtMethod& method : current->GetDeclaredMethodsSlice(kRuntimePointerSize)) {
+ ArtMethod* np_method = method.GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ if (!np_method->IsStatic() &&
+ np_method->GetNameView() == GetNameView() &&
+ np_method->GetSignature() == GetSignature()) {
+ if (!np_method->IsPublic()) {
+ ThrowIllegalAccessErrorForImplementingMethod(receiver->GetClass(), np_method, this);
+ return;
+ } else if (np_method->IsAbstract()) {
+ ThrowAbstractMethodError(this);
+ return;
+ }
+ }
+ }
+ current = current->GetSuperClass();
+ }
+ ThrowAbstractMethodError(this);
} else {
DCHECK(IsAbstract());
ThrowAbstractMethodError(this);
@@ -310,6 +334,7 @@ uint32_t ArtMethod::FindCatchBlock(Handle<mirror::Class> exception_type,
return found_dex_pc;
}
+NO_STACK_PROTECTOR
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) {
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
@@ -551,6 +576,12 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
return nullptr;
}
+ // We should not reach here with a pc of 0. pc can be 0 for downcalls when walking the stack.
+ // For native methods this case is handled by the caller by checking the quick frame tag. See
+ // StackVisitor::WalkStack for more details. For non-native methods, pc can be 0 only for runtime
+ // methods or proxy invoke handlers, which are handled earlier.
+ DCHECK_NE(pc, 0u) << "PC 0 for " << PrettyMethod();
+
// Check whether the current entry point contains this pc.
if (!class_linker->IsQuickGenericJniStub(existing_entry_point) &&
!class_linker->IsQuickResolutionStub(existing_entry_point) &&
@@ -592,21 +623,17 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
OatFile::OatMethod oat_method =
FindOatMethodFor(this, class_linker->GetImagePointerSize(), &found);
if (!found) {
- if (IsNative()) {
- // We are running the GenericJNI stub. The entrypoint may point
- // to different entrypoints or to a JIT-compiled JNI stub.
- DCHECK(class_linker->IsQuickGenericJniStub(existing_entry_point) ||
- class_linker->IsQuickResolutionStub(existing_entry_point) ||
- existing_entry_point == GetQuickInstrumentationEntryPoint() ||
- (jit != nullptr && jit->GetCodeCache()->ContainsPc(existing_entry_point)))
- << " entrypoint: " << existing_entry_point
- << " size: " << OatQuickMethodHeader::FromEntryPoint(existing_entry_point)->GetCodeSize()
- << " pc: " << reinterpret_cast<const void*>(pc);
- return nullptr;
- }
- // Only for unit tests.
- // TODO(ngeoffray): Update these tests to pass the right pc?
- return OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
+ CHECK(IsNative());
+ // We are running the GenericJNI stub. The entrypoint may point
+ // to different entrypoints or to a JIT-compiled JNI stub.
+ DCHECK(class_linker->IsQuickGenericJniStub(existing_entry_point) ||
+ class_linker->IsQuickResolutionStub(existing_entry_point) ||
+ existing_entry_point == GetQuickInstrumentationEntryPoint() ||
+ (jit != nullptr && jit->GetCodeCache()->ContainsPc(existing_entry_point)))
+ << " entrypoint: " << existing_entry_point
+ << " size: " << OatQuickMethodHeader::FromEntryPoint(existing_entry_point)->GetCodeSize()
+ << " pc: " << reinterpret_cast<const void*>(pc);
+ return nullptr;
}
const void* oat_entry_point = oat_method.GetQuickCode();
if (oat_entry_point == nullptr || class_linker->IsQuickGenericJniStub(oat_entry_point)) {
@@ -615,10 +642,13 @@ const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
}
OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromEntryPoint(oat_entry_point);
- if (pc == 0) {
- // This is a downcall, it can only happen for a native method.
- DCHECK(IsNative());
- return method_header;
+ // We could have existing Oat code for native methods, but we may not use it if the runtime is
+ // Java-debuggable or when profiling the boot class path. There is no easy way to check if the
+ // pc corresponds to QuickGenericJniStub. Since we have eliminated all the other cases, if the
+ // pc doesn't correspond to the AOT code then we must be running QuickGenericJniStub.
+ if (IsNative() && !method_header->Contains(pc)) {
+ DCHECK_NE(pc, 0u) << "PC 0 for " << PrettyMethod();
+ return nullptr;
}
DCHECK(method_header->Contains(pc))
@@ -728,16 +758,16 @@ void ArtMethod::CopyFrom(ArtMethod* src, PointerSize image_pointer_size) {
// the entry point to the JIT code, but this would require taking the JIT code cache
// lock to notify it, which we do not want at this level.
Runtime* runtime = Runtime::Current();
+ const void* entry_point = GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size);
if (runtime->UseJitCompilation()) {
- if (runtime->GetJit()->GetCodeCache()->ContainsPc(GetEntryPointFromQuickCompiledCode())) {
+ if (runtime->GetJit()->GetCodeCache()->ContainsPc(entry_point)) {
SetEntryPointFromQuickCompiledCodePtrSize(
src->IsNative() ? GetQuickGenericJniStub() : GetQuickToInterpreterBridge(),
image_pointer_size);
}
}
- if (interpreter::IsNterpSupported() &&
- (GetEntryPointFromQuickCompiledCodePtrSize(image_pointer_size) ==
- interpreter::GetNterpEntryPoint())) {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ if (interpreter::IsNterpSupported() && class_linker->IsNterpEntryPoint(entry_point)) {
// If the entrypoint is nterp, it's too early to check if the new method
// will support it. So for simplicity, use the interpreter bridge.
SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size);
diff --git a/runtime/art_method.h b/runtime/art_method.h
index c2de71829e..a07d696f25 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -259,9 +259,7 @@ class ArtMethod final {
}
void SetMemorySharedMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
- // Disable until we make sure critical code is AOTed.
- static constexpr bool kEnabledMemorySharedMethod = false;
- if (kEnabledMemorySharedMethod && !IsIntrinsic() && !IsAbstract()) {
+ if (!IsIntrinsic() && !IsAbstract()) {
AddAccessFlags(kAccMemorySharedMethod);
SetHotCounter();
}
@@ -424,8 +422,10 @@ class ArtMethod final {
bool CheckIncompatibleClassChange(InvokeType type) REQUIRES_SHARED(Locks::mutator_lock_);
// Throws the error that would result from trying to invoke this method (i.e.
- // IncompatibleClassChangeError or AbstractMethodError). Only call if !IsInvokable();
- void ThrowInvocationTimeError() REQUIRES_SHARED(Locks::mutator_lock_);
+ // IncompatibleClassChangeError, AbstractMethodError, or IllegalAccessError).
+ // Only call if !IsInvokable().
+ void ThrowInvocationTimeError(ObjPtr<mirror::Object> receiver)
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint16_t GetMethodIndex() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -635,7 +635,9 @@ class ArtMethod final {
REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename RootVisitorType>
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ bool kVisitProxyMethod = true,
+ typename RootVisitorType>
void VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS;
const DexFile* GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_standalone_runtime_tests.xml b/runtime/art_standalone_runtime_tests.xml
index 520052962e..8f89d841ce 100644
--- a/runtime/art_standalone_runtime_tests.xml
+++ b/runtime/art_standalone_runtime_tests.xml
@@ -93,7 +93,7 @@
<option name="exclude-filter" value="HiddenApiTest.DexDomain_SystemSystemExtFrameworkDir" />
<option name="exclude-filter" value="HiddenApiTest.DexDomain_SystemSystemExtFrameworkDir_MultiDex" />
<option name="exclude-filter" value="JniInternalTest.CallVarArgMethodBadPrimitive" />
- <option name="exclude-filter" value="OatFileAssistantTest.SystemFrameworkDir" />
+ <option name="exclude-filter" value="OatFileAssistantBaseTest.SystemFrameworkDir" />
<option name="exclude-filter" value="StubTest.Fields16" />
<option name="exclude-filter" value="StubTest.Fields32" />
<option name="exclude-filter" value="StubTest.Fields64" />
@@ -107,10 +107,10 @@
them fail to dynamically link to the expected (64-bit) libraries.
TODO(b/204649079): Investigate these failures and re-enable these tests. -->
- <option name="exclude-filter" value="ExecUtilsTest.EnvSnapshotDeletionsAreNotVisible" />
- <option name="exclude-filter" value="ExecUtilsTest.ExecNoTimeout" />
- <option name="exclude-filter" value="ExecUtilsTest.ExecSuccess" />
- <option name="exclude-filter" value="ExecUtilsTest.ExecTimeout" />
+ <option name="exclude-filter" value="*ExecUtilsTest.EnvSnapshotDeletionsAreNotVisible*" />
+ <option name="exclude-filter" value="*ExecUtilsTest.ExecNoTimeout*" />
+ <option name="exclude-filter" value="*ExecUtilsTest.ExecSuccess*" />
+ <option name="exclude-filter" value="*ExecUtilsTest.ExecTimeout*" />
</test>
<!-- When this test is run in a Mainline context (e.g. with `mts-tradefed`), only enable it if
diff --git a/runtime/backtrace_helper.h b/runtime/backtrace_helper.h
index a74d0e0354..9be2550b92 100644
--- a/runtime/backtrace_helper.h
+++ b/runtime/backtrace_helper.h
@@ -26,7 +26,7 @@ class Unwinder;
namespace art {
-// Using libbacktrace
+// Using libunwindstack
class BacktraceCollector {
public:
BacktraceCollector(uintptr_t* out_frames, size_t max_depth, size_t skip_count)
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
index 829adff8ee..c15e5dee71 100644
--- a/runtime/base/locks.h
+++ b/runtime/base/locks.h
@@ -68,12 +68,12 @@ enum LockLevel : uint8_t {
// Can be held while GC related work is done, and thus must be above kMarkSweepMarkStackLock
kThreadWaitLock,
kCHALock,
- kJitCodeCacheLock,
kRosAllocGlobalLock,
kRosAllocBracketLock,
kRosAllocBulkFreeLock,
kAllocSpaceLock,
kTaggingLockLevel,
+ kJitCodeCacheLock,
kTransactionLogLock,
kCustomTlsLock,
kJniFunctionTableLock,
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 5709333756..01d7e73774 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -59,18 +59,19 @@ struct DumpStackLastTimeTLSData : public art::TLSData {
};
#if ART_USE_FUTEXES
+// Compute a relative timespec as *result_ts = lhs - rhs.
+// Return false (and produce an invalid *result_ts) if lhs < rhs.
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
const int32_t one_sec = 1000 * 1000 * 1000; // one second in nanoseconds.
+ static_assert(std::is_signed<decltype(result_ts->tv_sec)>::value); // Signed on Linux.
result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
if (result_ts->tv_nsec < 0) {
result_ts->tv_sec--;
result_ts->tv_nsec += one_sec;
- } else if (result_ts->tv_nsec > one_sec) {
- result_ts->tv_sec++;
- result_ts->tv_nsec -= one_sec;
}
- return result_ts->tv_sec < 0;
+ DCHECK(result_ts->tv_nsec >= 0 && result_ts->tv_nsec < one_sec);
+ return result_ts->tv_sec >= 0;
}
#endif
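
A worked example of the borrow case, following the semantics documented above:

    // lhs = {2s, 100ns}, rhs = {1s, 800ns}: tv_nsec goes negative, so one second
    // is borrowed and *result_ts = {0s, 999999300ns}; the function returns true.
    timespec lhs = {2, 100};
    timespec rhs = {1, 800};
    timespec rel;
    bool valid = ComputeRelativeTimeSpec(&rel, lhs, rhs);  // valid == true.
    // With lhs = {1, 0} and rhs = {2, 0} it returns false, and the caller
    // treats the wait as already timed out.
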
@@ -852,7 +853,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
timespec now_abs_ts;
InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts);
timespec rel_ts;
- if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
+ if (!ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) {
return false; // Timed out.
}
ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
@@ -869,6 +870,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32
// EAGAIN and EINTR both indicate a spurious failure,
// recompute the relative time out from now and try again.
// We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts;
+ num_contenders_.fetch_sub(1); // Unlikely to matter, since we are about to abort.
PLOG(FATAL) << "timed futex wait failed for " << name_;
}
}
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index a7c3e45ae7..d03139b555 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -91,7 +91,7 @@ class CheckReferenceMapVisitor : public StackVisitor {
CodeItemDataAccessor accessor(m->DexInstructionData());
uint16_t number_of_dex_registers = accessor.RegistersSize();
- if (!Runtime::Current()->IsAsyncDeoptimizeable(GetCurrentQuickFramePc())) {
+ if (!Runtime::Current()->IsAsyncDeoptimizeable(GetOuterMethod(), GetCurrentQuickFramePc())) {
// We can only guarantee dex register info presence for debuggable methods.
return;
}
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 02b2778f4f..b79f3f5685 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -24,6 +24,7 @@
#include "art_method-inl.h"
#include "base/mutex.h"
#include "class_linker.h"
+#include "class_table-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_structs.h"
#include "gc_root-inl.h"
@@ -592,6 +593,11 @@ inline ArtField* ClassLinker::ResolveField(uint32_t field_idx,
return resolved;
}
+template <typename Visitor>
+inline void ClassLinker::VisitBootClasses(Visitor* visitor) {
+ boot_class_table_->Visit(*visitor);
+}
+
template <class Visitor>
inline void ClassLinker::VisitClassTables(const Visitor& visitor) {
Thread* const self = Thread::Current();
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index c8dbc75e61..dc67dcab0b 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -37,6 +37,7 @@
#include "art_method-inl.h"
#include "barrier.h"
#include "base/arena_allocator.h"
+#include "base/arena_bit_vector.h"
#include "base/casts.h"
#include "base/file_utils.h"
#include "base/hash_map.h"
@@ -2115,7 +2116,7 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
const bool tracing_enabled = Trace::IsTracingEnabled();
Thread* const self = Thread::Current();
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// We do not track new roots for CC.
DCHECK_EQ(0, flags & (kVisitRootFlagNewRoots |
kVisitRootFlagClearRootLog |
@@ -2151,7 +2152,7 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
root.VisitRoot(visitor, RootInfo(kRootVMInternal));
}
}
- } else if (!kUseReadBarrier && (flags & kVisitRootFlagNewRoots) != 0) {
+ } else if (!gUseReadBarrier && (flags & kVisitRootFlagNewRoots) != 0) {
for (auto& root : new_class_roots_) {
ObjPtr<mirror::Class> old_ref = root.Read<kWithoutReadBarrier>();
root.VisitRoot(visitor, RootInfo(kRootStickyClass));
@@ -2172,13 +2173,13 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
}
}
}
- if (!kUseReadBarrier && (flags & kVisitRootFlagClearRootLog) != 0) {
+ if (!gUseReadBarrier && (flags & kVisitRootFlagClearRootLog) != 0) {
new_class_roots_.clear();
new_bss_roots_boot_oat_files_.clear();
}
- if (!kUseReadBarrier && (flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
+ if (!gUseReadBarrier && (flags & kVisitRootFlagStartLoggingNewRoots) != 0) {
log_new_roots_ = true;
- } else if (!kUseReadBarrier && (flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
+ } else if (!gUseReadBarrier && (flags & kVisitRootFlagStopLoggingNewRoots) != 0) {
log_new_roots_ = false;
}
// We deliberately ignore the class roots in the image since we
@@ -3389,11 +3390,9 @@ void ClassLinker::FixupStaticTrampolines(Thread* self, ObjPtr<mirror::Class> kla
}
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
- // Link the code of methods skipped by LinkCode.
for (size_t method_index = 0; method_index < num_direct_methods; ++method_index) {
ArtMethod* method = klass->GetDirectMethod(method_index, pointer_size);
- if (!method->IsStatic()) {
- // Only update static methods.
+ if (!NeedsClinitCheckBeforeCall(method)) {
continue;
}
instrumentation->UpdateMethodsCode(method, instrumentation->GetCodeForInvoke(method));
@@ -4144,10 +4143,11 @@ ObjPtr<mirror::Class> ClassLinker::CreateArrayClass(Thread* self,
class_loader)));
if (component_type == nullptr) {
DCHECK(self->IsExceptionPending());
- // We need to accept erroneous classes as component types.
+ // We need to accept erroneous classes as component types. Under AOT, we
+ // don't accept them as we cannot encode the erroneous class in an image.
const size_t component_hash = ComputeModifiedUtf8Hash(descriptor + 1);
component_type.Assign(LookupClass(self, descriptor + 1, component_hash, class_loader.Get()));
- if (component_type == nullptr) {
+ if (component_type == nullptr || Runtime::Current()->IsAotCompiler()) {
DCHECK(self->IsExceptionPending());
return nullptr;
} else {
@@ -5088,11 +5088,19 @@ void ClassLinker::CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) cons
CHECK_EQ(prototype, method->GetInterfaceMethodIfProxy(image_pointer_size_));
}
-bool ClassLinker::CanWeInitializeClass(ObjPtr<mirror::Class> klass, bool can_init_statics,
+bool ClassLinker::CanWeInitializeClass(ObjPtr<mirror::Class> klass,
+ bool can_init_statics,
bool can_init_parents) {
if (can_init_statics && can_init_parents) {
return true;
}
+ DCHECK(Runtime::Current()->IsAotCompiler());
+
+ // At AOT time, we currently don't support initializing classes that need
+ // access checks.
+ if (klass->IsVerifiedNeedsAccessChecks()) {
+ return false;
+ }
if (!can_init_statics) {
// Check if there's a class initializer.
ArtMethod* clinit = klass->FindClassInitializer(image_pointer_size_);
@@ -7859,20 +7867,6 @@ bool ClassLinker::LinkMethodsHelper<kPointerSize>::FinalizeIfTable(
return true;
}
-NO_INLINE
-static void ThrowIllegalAccessErrorForImplementingMethod(ObjPtr<mirror::Class> klass,
- ArtMethod* vtable_method,
- ArtMethod* interface_method)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(!vtable_method->IsAbstract());
- DCHECK(!vtable_method->IsPublic());
- ThrowIllegalAccessError(
- klass,
- "Method '%s' implementing interface method '%s' is not public",
- vtable_method->PrettyMethod().c_str(),
- interface_method->PrettyMethod().c_str());
-}
-
template <PointerSize kPointerSize>
ObjPtr<mirror::PointerArray> ClassLinker::LinkMethodsHelper<kPointerSize>::AllocPointerArray(
Thread* self, size_t length) {
@@ -7961,55 +7955,17 @@ size_t ClassLinker::LinkMethodsHelper<kPointerSize>::AssignVTableIndexes(
static constexpr double kMinLoadFactor = 0.3;
static constexpr double kMaxLoadFactor = 0.5;
static constexpr size_t kMaxStackBuferSize = 256;
- const size_t super_vtable_buffer_size = super_vtable_length * 3;
const size_t declared_virtuals_buffer_size = num_virtual_methods * 3;
- const size_t total_buffer_size = super_vtable_buffer_size + declared_virtuals_buffer_size;
- uint32_t* super_vtable_buffer_ptr = (total_buffer_size <= kMaxStackBuferSize)
- ? reinterpret_cast<uint32_t*>(alloca(total_buffer_size * sizeof(uint32_t)))
- : allocator_.AllocArray<uint32_t>(total_buffer_size);
- uint32_t* declared_virtuals_buffer_ptr = super_vtable_buffer_ptr + super_vtable_buffer_size;
- VTableSignatureSet super_vtable_signatures(
- kMinLoadFactor,
- kMaxLoadFactor,
- VTableSignatureHash(super_vtable_accessor),
- VTableSignatureEqual(super_vtable_accessor),
- super_vtable_buffer_ptr,
- super_vtable_buffer_size,
- allocator_.Adapter());
- ArrayRef<uint32_t> same_signature_vtable_lists;
- // Insert the first `mirror::Object::kVTableLength` indexes with pre-calculated hashes.
- DCHECK_GE(super_vtable_length, mirror::Object::kVTableLength);
- for (uint32_t i = 0; i != mirror::Object::kVTableLength; ++i) {
- size_t hash = class_linker_->object_virtual_method_hashes_[i];
- // There are no duplicate signatures in `java.lang.Object`, so use `HashSet<>::PutWithHash()`.
- // This avoids equality comparison for the three `java.lang.Object.wait()` overloads.
- super_vtable_signatures.PutWithHash(i, hash);
- }
- // Insert the remaining indexes, check for duplicate signatures.
- if (super_vtable_length > mirror::Object::kVTableLength) {
- for (size_t i = mirror::Object::kVTableLength; i < super_vtable_length; ++i) {
- // Use `super_vtable_accessor` for getting the method for hash calculation.
- // Letting `HashSet<>::insert()` use the internal accessor copy in the hash
- // function prevents the compiler from optimizing this properly because the
- // compiler cannot prove that the accessor copy is immutable.
- size_t hash = ComputeMethodHash(super_vtable_accessor.GetVTableEntry(i));
- auto [it, inserted] = super_vtable_signatures.InsertWithHash(i, hash);
- if (UNLIKELY(!inserted)) {
- if (same_signature_vtable_lists.empty()) {
- same_signature_vtable_lists = ArrayRef<uint32_t>(
- allocator_.AllocArray<uint32_t>(super_vtable_length), super_vtable_length);
- std::fill_n(same_signature_vtable_lists.data(), super_vtable_length, dex::kDexNoIndex);
- same_signature_vtable_lists_ = same_signature_vtable_lists;
- }
- DCHECK_LT(*it, i);
- same_signature_vtable_lists[i] = *it;
- *it = i;
- }
- }
- }
+ const size_t super_vtable_buffer_size = super_vtable_length * 3;
+ const size_t bit_vector_size = BitVector::BitsToWords(num_virtual_methods);
+ const size_t total_size =
+ declared_virtuals_buffer_size + super_vtable_buffer_size + bit_vector_size;
+
+ uint32_t* declared_virtuals_buffer_ptr = (total_size <= kMaxStackBuferSize)
+ ? reinterpret_cast<uint32_t*>(alloca(total_size * sizeof(uint32_t)))
+ : allocator_.AllocArray<uint32_t>(total_size);
+ uint32_t* bit_vector_buffer_ptr = declared_virtuals_buffer_ptr + declared_virtuals_buffer_size;
- // For each declared virtual method, look for a superclass virtual method
- // to override and assign a new vtable index if no method was overridden.
DeclaredVirtualSignatureSet declared_virtual_signatures(
kMinLoadFactor,
kMaxLoadFactor,
@@ -8018,8 +7974,24 @@ size_t ClassLinker::LinkMethodsHelper<kPointerSize>::AssignVTableIndexes(
declared_virtuals_buffer_ptr,
declared_virtuals_buffer_size,
allocator_.Adapter());
+
+ ArrayRef<uint32_t> same_signature_vtable_lists;
const bool is_proxy_class = klass->IsProxyClass();
size_t vtable_length = super_vtable_length;
+
+ // Record which declared methods are overriding a super method.
+ BitVector initialized_methods(/* expandable= */ false,
+ Allocator::GetNoopAllocator(),
+ bit_vector_size,
+ bit_vector_buffer_ptr);
+
+ // Note: our sets hash on the method name, and therefore we pay a high
+ // performance price when a class has many overloads.
+ //
+ // We populate a set of declared signatures instead of signatures from the
+ // super vtable (which is only lazily populated in the case of interface
+ // overriding, see below). This makes sure that we pay the performance price
+ // only on that class, and not on its subclasses (except in the case of interface overriding).
for (size_t i = 0; i != num_virtual_methods; ++i) {
ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(i, kPointerSize);
DCHECK(!virtual_method->IsStatic()) << virtual_method->PrettyMethod();
@@ -8028,59 +8000,79 @@ size_t ClassLinker::LinkMethodsHelper<kPointerSize>::AssignVTableIndexes(
: virtual_method;
size_t hash = ComputeMethodHash(signature_method);
declared_virtual_signatures.PutWithHash(i, hash);
- auto it = super_vtable_signatures.FindWithHash(signature_method, hash);
- if (it != super_vtable_signatures.end()) {
- size_t super_index = *it;
- DCHECK_LT(super_index, super_vtable_length);
- ArtMethod* super_method = super_vtable_accessor.GetVTableEntry(super_index);
- // Historical note: Before Android 4.1, an inaccessible package-private
- // superclass method would have been incorrectly overridden.
- bool overrides = klass->CanAccessMember(super_method->GetDeclaringClass(),
- super_method->GetAccessFlags());
- if (overrides && super_method->IsFinal()) {
- sants.reset();
- ThrowLinkageError(klass, "Method %s overrides final method in class %s",
- virtual_method->PrettyMethod().c_str(),
- super_method->GetDeclaringClassDescriptor());
- return 0u;
- }
- if (UNLIKELY(!same_signature_vtable_lists.empty())) {
- // We may override more than one method according to JLS, see b/211854716 .
- // We record the highest overridden vtable index here so that we can walk
- // the list to find other overridden methods when constructing the vtable.
- // However, we walk all the methods to check for final method overriding.
- size_t current_index = super_index;
- while (same_signature_vtable_lists[current_index] != dex::kDexNoIndex) {
- DCHECK_LT(same_signature_vtable_lists[current_index], current_index);
- current_index = same_signature_vtable_lists[current_index];
- ArtMethod* current_method = super_vtable_accessor.GetVTableEntry(current_index);
- if (klass->CanAccessMember(current_method->GetDeclaringClass(),
- current_method->GetAccessFlags())) {
- if (current_method->IsFinal()) {
- sants.reset();
- ThrowLinkageError(klass, "Method %s overrides final method in class %s",
- virtual_method->PrettyMethod().c_str(),
- current_method->GetDeclaringClassDescriptor());
- return 0u;
- }
- if (!overrides) {
- overrides = true;
- super_index = current_index;
- super_method = current_method;
- }
- }
- }
- }
- if (overrides) {
- virtual_method->SetMethodIndex(super_index);
- continue;
- }
+ }
+
+ // Loop through each super vtable method and see if they are overridden by a method we added to
+ // the hash table.
+ for (size_t j = 0; j < super_vtable_length; ++j) {
+ // Search the hash table to see if this super method is overridden by a declared method.
+ ArtMethod* super_method = super_vtable_accessor.GetVTableEntry(j);
+ if (!klass->CanAccessMember(super_method->GetDeclaringClass(),
+ super_method->GetAccessFlags())) {
+ // Continue on to the next method since this one is package-private and cannot be overridden.
+ // Historical note: before Android 4.1, the package-private method super_method would have
+ // been incorrectly overridden.
+ continue;
+ }
+ size_t hash = (j < mirror::Object::kVTableLength)
+ ? class_linker_->object_virtual_method_hashes_[j]
+ : ComputeMethodHash(super_method);
+ auto it = declared_virtual_signatures.FindWithHash(super_method, hash);
+ if (it == declared_virtual_signatures.end()) {
+ continue;
+ }
+ ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking(*it, kPointerSize);
+ if (super_method->IsFinal()) {
+ sants.reset();
+ ThrowLinkageError(klass, "Method %s overrides final method in class %s",
+ virtual_method->PrettyMethod().c_str(),
+ super_method->GetDeclaringClassDescriptor());
+ return 0u;
+ }
+ if (initialized_methods.IsBitSet(*it)) {
+ // The method overrides more than one super vtable method.
+ // We record that in a linked list so that the method can later be stored in all
+ // overridden vtable slots, not just the slot recorded as its method index.
+ if (same_signature_vtable_lists.empty()) {
+ same_signature_vtable_lists = ArrayRef<uint32_t>(
+ allocator_.AllocArray<uint32_t>(super_vtable_length), super_vtable_length);
+ std::fill_n(same_signature_vtable_lists.data(), super_vtable_length, dex::kDexNoIndex);
+ same_signature_vtable_lists_ = same_signature_vtable_lists;
+ }
+ same_signature_vtable_lists[j] = virtual_method->GetMethodIndexDuringLinking();
+ } else {
+ initialized_methods.SetBit(*it);
+ }
+
+ // We arbitrarily set the method index to the largest overridden vtable index;
+ // the iteration over `same_signature_vtable_lists_` relies on this.
+ virtual_method->SetMethodIndex(j);
+ }
+
+ // Add the non-overridden methods at the end.
+ for (size_t i = 0; i < num_virtual_methods; ++i) {
+ if (!initialized_methods.IsBitSet(i)) {
+ ArtMethod* local_method = klass->GetVirtualMethodDuringLinking(i, kPointerSize);
+ local_method->SetMethodIndex(vtable_length);
+ vtable_length++;
}
- // The method does not override any method from superclass, so it needs a new vtable index.
- virtual_method->SetMethodIndex(vtable_length);
- ++vtable_length;
}
+ // A lazily constructed super vtable set, which we only populate in the less
+ // common situation of a superclass implementing a method declared in an
+ // interface this class inherits.
+ // We still try to allocate the set on the stack, as using the arena would
+ // have a larger cost.
+ uint32_t* super_vtable_buffer_ptr = bit_vector_buffer_ptr + bit_vector_size;
+ VTableSignatureSet super_vtable_signatures(
+ kMinLoadFactor,
+ kMaxLoadFactor,
+ VTableSignatureHash(super_vtable_accessor),
+ VTableSignatureEqual(super_vtable_accessor),
+ super_vtable_buffer_ptr,
+ super_vtable_buffer_size,
+ allocator_.Adapter());
+
// Assign vtable indexes for interface methods in new interfaces and store them
// in implementation method arrays. These shall be replaced by actual method
// pointers later. We do not need to do this for superclass interfaces as we can
@@ -8099,42 +8091,39 @@ size_t ClassLinker::LinkMethodsHelper<kPointerSize>::AssignVTableIndexes(
ArtMethod* interface_method = iface->GetVirtualMethod(j, kPointerSize);
size_t hash = ComputeMethodHash(interface_method);
ArtMethod* vtable_method = nullptr;
- bool found = false;
auto it1 = declared_virtual_signatures.FindWithHash(interface_method, hash);
if (it1 != declared_virtual_signatures.end()) {
- vtable_method = klass->GetVirtualMethodDuringLinking(*it1, kPointerSize);
- found = true;
+ ArtMethod* found_method = klass->GetVirtualMethodDuringLinking(*it1, kPointerSize);
+ // For interface overriding, we only look at public methods.
+ if (found_method->IsPublic()) {
+ vtable_method = found_method;
+ }
} else {
+ // This situation should be rare (a superclass implements a method
+ // declared in an interface this class is inheriting). Only in this case
+ // do we lazily populate the super_vtable_signatures.
+ if (super_vtable_signatures.empty()) {
+ for (size_t k = 0; k < super_vtable_length; ++k) {
+ ArtMethod* super_method = super_vtable_accessor.GetVTableEntry(k);
+ if (!super_method->IsPublic()) {
+ // For interface overriding, we only look at public methods.
+ continue;
+ }
+ size_t super_hash = (k < mirror::Object::kVTableLength)
+ ? class_linker_->object_virtual_method_hashes_[k]
+ : ComputeMethodHash(super_method);
+ auto [it, inserted] = super_vtable_signatures.InsertWithHash(k, super_hash);
+ DCHECK(inserted || super_vtable_accessor.GetVTableEntry(*it) == super_method);
+ }
+ }
auto it2 = super_vtable_signatures.FindWithHash(interface_method, hash);
if (it2 != super_vtable_signatures.end()) {
- // If there are multiple vtable methods with the same signature, the one with
- // the highest vtable index is not nessarily the one in most-derived class.
- // Find the most-derived method. See b/211854716 .
vtable_method = super_vtable_accessor.GetVTableEntry(*it2);
- if (UNLIKELY(!same_signature_vtable_lists.empty())) {
- size_t current_index = *it2;
- while (same_signature_vtable_lists[current_index] != dex::kDexNoIndex) {
- DCHECK_LT(same_signature_vtable_lists[current_index], current_index);
- current_index = same_signature_vtable_lists[current_index];
- ArtMethod* current_method = super_vtable_accessor.GetVTableEntry(current_index);
- ObjPtr<mirror::Class> current_class = current_method->GetDeclaringClass();
- if (current_class->IsSubClass(vtable_method->GetDeclaringClass())) {
- vtable_method = current_method;
- }
- }
- }
- found = true;
}
}
+
uint32_t vtable_index = vtable_length;
- if (found) {
- DCHECK(vtable_method != nullptr);
- if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
- // FIXME: Delay the exception until we actually try to call the method. b/211854716
- sants.reset();
- ThrowIllegalAccessErrorForImplementingMethod(klass, vtable_method, interface_method);
- return 0u;
- }
+ if (vtable_method != nullptr) {
vtable_index = vtable_method->GetMethodIndexDuringLinking();
if (!vtable_method->IsOverridableByDefaultMethod()) {
method_array->SetElementPtrSize(j, vtable_index, kPointerSize);
@@ -8144,7 +8133,7 @@ size_t ClassLinker::LinkMethodsHelper<kPointerSize>::AssignVTableIndexes(
auto [it, inserted] = copied_method_records_.InsertWithHash(
CopiedMethodRecord(interface_method, vtable_index), hash);
- if (found) {
+ if (vtable_method != nullptr) {
DCHECK_EQ(vtable_index, it->GetMethodIndex());
} else if (inserted) {
DCHECK_EQ(vtable_index, it->GetMethodIndex());
@@ -8484,17 +8473,16 @@ bool ClassLinker::LinkMethodsHelper<kPointerSize>::LinkMethods(
uint32_t vtable_index = virtual_method.GetMethodIndexDuringLinking();
vtable->SetElementPtrSize(vtable_index, &virtual_method, kPointerSize);
if (UNLIKELY(vtable_index < same_signature_vtable_lists.size())) {
- // We may override more than one method according to JLS, see b/211854716 .
- // If we do, arbitrarily update the method index to the lowest overridden vtable index.
+ // We may override more than one method according to JLS, see b/211854716.
while (same_signature_vtable_lists[vtable_index] != dex::kDexNoIndex) {
DCHECK_LT(same_signature_vtable_lists[vtable_index], vtable_index);
vtable_index = same_signature_vtable_lists[vtable_index];
- ArtMethod* current_method = super_class->GetVTableEntry(vtable_index, kPointerSize);
- if (klass->CanAccessMember(current_method->GetDeclaringClass(),
- current_method->GetAccessFlags())) {
+ vtable->SetElementPtrSize(vtable_index, &virtual_method, kPointerSize);
+ if (kIsDebugBuild) {
+ ArtMethod* current_method = super_class->GetVTableEntry(vtable_index, kPointerSize);
+ DCHECK(klass->CanAccessMember(current_method->GetDeclaringClass(),
+ current_method->GetAccessFlags()));
DCHECK(!current_method->IsFinal());
- vtable->SetElementPtrSize(vtable_index, &virtual_method, kPointerSize);
- virtual_method.SetMethodIndex(vtable_index);
}
}
}
@@ -8606,7 +8594,8 @@ class ClassLinker::LinkFieldsHelper {
};
// We use the following order of field types for assigning offsets.
-// Some fields can be shuffled forward to fill gaps, see `ClassLinker::LinkFields()`.
+// Some fields can be shuffled forward to fill gaps, see
+// `ClassLinker::LinkFieldsHelper::LinkFields()`.
enum class ClassLinker::LinkFieldsHelper::FieldTypeOrder : uint16_t {
kReference = 0u,
kLong,
@@ -10196,6 +10185,18 @@ void ClassLinker::VisitClassLoaders(ClassLoaderVisitor* visitor) const {
}
}
+void ClassLinker::VisitDexCaches(DexCacheVisitor* visitor) const {
+ Thread* const self = Thread::Current();
+ for (const auto& it : dex_caches_) {
+ // Need to use DecodeJObject so that we get null for cleared JNI weak globals.
+ ObjPtr<mirror::DexCache> dex_cache = ObjPtr<mirror::DexCache>::DownCast(
+ self->DecodeJObject(it.second.weak_root));
+ if (dex_cache != nullptr) {
+ visitor->Visit(dex_cache);
+ }
+ }
+}
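
A hypothetical visitor for the new hook; collecting dex file locations is purely illustrative:

    class CollectDexLocationsVisitor : public DexCacheVisitor {
     public:
      void Visit(ObjPtr<mirror::DexCache> dex_cache)
          REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_) override {
        // Record the location of every live (non-cleared) dex cache.
        locations_.push_back(dex_cache->GetDexFile()->GetLocation());
      }
      const std::vector<std::string>& GetLocations() const { return locations_; }

     private:
      std::vector<std::string> locations_;
    };
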
+
void ClassLinker::VisitAllocators(AllocatorVisitor* visitor) const {
for (const ClassLoaderData& data : class_loaders_) {
LinearAlloc* alloc = data.allocator;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 3b20c75b64..1ac47562b2 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -36,6 +36,7 @@
#include "dex/dex_file_types.h"
#include "gc_root.h"
#include "handle.h"
+#include "interpreter/mterp/nterp.h"
#include "jni.h"
#include "mirror/class.h"
#include "mirror/object.h"
@@ -127,6 +128,13 @@ class ClassLoaderVisitor {
REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) = 0;
};
+class DexCacheVisitor {
+ public:
+ virtual ~DexCacheVisitor() {}
+ virtual void Visit(ObjPtr<mirror::DexCache> dex_cache)
+ REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_) = 0;
+};
+
template <typename Func>
class ClassLoaderFuncVisitor final : public ClassLoaderVisitor {
public:
@@ -478,6 +486,11 @@ class ClassLinker {
REQUIRES(!Locks::classlinker_classes_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Visits only the classes in the boot class path.
+ template <typename Visitor>
+ inline void VisitBootClasses(Visitor* visitor)
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Less efficient variant of VisitClasses that copies the class_table_ into secondary storage
// so that it can visit individual classes without holding the
// Locks::classlinker_classes_lock_. As the Locks::classlinker_classes_lock_ isn't held this code
@@ -608,6 +621,11 @@ class ClassLinker {
return nterp_trampoline_ == entry_point;
}
+ bool IsNterpEntryPoint(const void* entry_point) const {
+ return entry_point == interpreter::GetNterpEntryPoint() ||
+ entry_point == interpreter::GetNterpWithClinitEntryPoint();
+ }
+
const void* GetQuickToInterpreterBridgeTrampoline() const {
return quick_to_interpreter_bridge_trampoline_;
}
@@ -774,6 +792,10 @@ class ClassLinker {
void VisitClassLoaders(ClassLoaderVisitor* visitor) const
REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ // Visit all of the dex caches in the class linker.
+ void VisitDexCaches(DexCacheVisitor* visitor) const
+ REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_);
+
// Checks that a class and its superclass from another class loader have the same virtual methods.
bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 010b384498..666f86eca6 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -52,6 +52,7 @@
#include "mirror/object_array-inl.h"
#include "mirror/proxy.h"
#include "mirror/reference.h"
+#include "mirror/stack_frame_info.h"
#include "mirror/stack_trace_element.h"
#include "mirror/string-inl.h"
#include "mirror/var_handle.h"
@@ -598,6 +599,7 @@ struct ClassOffsets : public CheckOffsets<mirror::Class> {
struct ClassExtOffsets : public CheckOffsets<mirror::ClassExt> {
ClassExtOffsets() : CheckOffsets<mirror::ClassExt>(false, "Ldalvik/system/ClassExt;") {
+ addOffset(OFFSETOF_MEMBER(mirror::ClassExt, class_value_map_), "classValueMap");
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, erroneous_state_error_), "erroneousStateError");
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, instance_jfield_ids_), "instanceJfieldIDs");
addOffset(OFFSETOF_MEMBER(mirror::ClassExt, jmethod_ids_), "jmethodIDs");
@@ -640,6 +642,20 @@ struct StackTraceElementOffsets : public CheckOffsets<mirror::StackTraceElement>
}
};
+struct StackFrameInfoOffsets : public CheckOffsets<mirror::StackFrameInfo> {
+ StackFrameInfoOffsets() : CheckOffsets<mirror::StackFrameInfo>(
+ false, "Ljava/lang/StackFrameInfo;") {
+ addOffset(OFFSETOF_MEMBER(mirror::StackFrameInfo, bci_), "bci");
+ addOffset(OFFSETOF_MEMBER(mirror::StackFrameInfo, declaring_class_), "declaringClass");
+ addOffset(OFFSETOF_MEMBER(mirror::StackFrameInfo, file_name_), "fileName");
+ addOffset(OFFSETOF_MEMBER(mirror::StackFrameInfo, line_number_), "lineNumber");
+ addOffset(OFFSETOF_MEMBER(mirror::StackFrameInfo, method_name_), "methodName");
+ addOffset(OFFSETOF_MEMBER(mirror::StackFrameInfo, method_type_), "methodType");
+ addOffset(OFFSETOF_MEMBER(mirror::StackFrameInfo, retain_class_ref_), "retainClassRef");
+ addOffset(OFFSETOF_MEMBER(mirror::StackFrameInfo, ste_), "ste");
+ }
+};
+
struct ClassLoaderOffsets : public CheckOffsets<mirror::ClassLoader> {
ClassLoaderOffsets() : CheckOffsets<mirror::ClassLoader>(false, "Ljava/lang/ClassLoader;") {
addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, allocator_), "allocator");
@@ -830,7 +846,7 @@ struct ByteBufferViewVarHandleOffsets : public CheckOffsets<mirror::ByteBufferVi
// C++ fields must exactly match the fields in the Java classes. If this fails,
// reorder the fields in the C++ class. Managed class fields are ordered by
-// ClassLinker::LinkFields.
+// ClassLinker::LinkFieldsHelper::LinkFields.
TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) {
ScopedObjectAccess soa(Thread::Current());
EXPECT_TRUE(ObjectOffsets().Check());
@@ -858,6 +874,7 @@ TEST_F(ClassLinkerTest, ValidateFieldOrderOfJavaCppUnionClasses) {
EXPECT_TRUE(ArrayElementVarHandleOffsets().Check());
EXPECT_TRUE(ByteArrayViewVarHandleOffsets().Check());
EXPECT_TRUE(ByteBufferViewVarHandleOffsets().Check());
+ EXPECT_TRUE(StackFrameInfoOffsets().Check());
}
TEST_F(ClassLinkerTest, FindClassNonexistent) {
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 2419b7b720..31c310e1ec 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -950,12 +950,13 @@ std::vector<const DexFile*> ClassLoaderContext::FlattenOpenedDexFiles() const {
return result;
}
-std::string ClassLoaderContext::FlattenDexPaths() const {
+std::vector<std::string> ClassLoaderContext::FlattenDexPaths() const {
+ std::vector<std::string> result;
+
if (class_loader_chain_ == nullptr) {
- return "";
+ return result;
}
- std::vector<std::string> result;
std::vector<ClassLoaderInfo*> work_list;
work_list.push_back(class_loader_chain_.get());
while (!work_list.empty()) {
@@ -966,7 +967,7 @@ std::string ClassLoaderContext::FlattenDexPaths() const {
}
AddToWorkList(info, work_list);
}
- return FlattenClasspath(result);
+ return result;
}
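
Callers that still need the old colon-separated form can rebuild it from the returned vector; a sketch, assuming `android::base::Join` is available in this codebase:

    std::string JoinedDexPaths(const ClassLoaderContext& context) {
      std::vector<std::string> paths = context.FlattenDexPaths();
      return android::base::Join(paths, ':');  // e.g. "a.dex:b.dex".
    }
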
const char* ClassLoaderContext::GetClassLoaderTypeName(ClassLoaderType type) {
diff --git a/runtime/class_loader_context.h b/runtime/class_loader_context.h
index eceea00177..ccc5c731de 100644
--- a/runtime/class_loader_context.h
+++ b/runtime/class_loader_context.h
@@ -155,9 +155,8 @@ class ClassLoaderContext {
// Should only be called if OpenDexFiles() returned true.
std::vector<const DexFile*> FlattenOpenedDexFiles() const;
- // Return a colon-separated list of dex file locations from this class loader
- // context after flattening.
- std::string FlattenDexPaths() const;
+ // Return a list of dex file locations from this class loader context after flattening.
+ std::vector<std::string> FlattenDexPaths() const;
// Verifies that the current context is identical to the context encoded as `context_spec`.
// Identical means:
diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h
index 934c92b630..c7773709bf 100644
--- a/runtime/class_loader_utils.h
+++ b/runtime/class_loader_utils.h
@@ -177,7 +177,7 @@ inline void VisitClassLoaderDexFiles(ScopedObjectAccessAlreadyRunnable& soa,
VisitClassLoaderDexFiles<decltype(helper), void*>(soa,
class_loader,
helper,
- /* default= */ nullptr);
+ /* defaultReturn= */ nullptr);
}
} // namespace art
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index 071376cd77..67eeb553a4 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -104,6 +104,43 @@ void ClassTable::VisitRoots(const Visitor& visitor) {
}
}
+template <typename Visitor>
+class ClassTable::TableSlot::ClassAndRootVisitor {
+ public:
+ explicit ClassAndRootVisitor(Visitor& visitor) : visitor_(visitor) {}
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* klass) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(!klass->IsNull());
+ // Visit roots in the klass object
+ visitor_(klass->AsMirrorPtr());
+ // Visit the GC-root holding klass' reference
+ visitor_.VisitRoot(klass);
+ }
+
+ private:
+ Visitor& visitor_;
+};
+
+template <typename Visitor>
+void ClassTable::VisitClassesAndRoots(Visitor& visitor) {
+ TableSlot::ClassAndRootVisitor class_visitor(visitor);
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ for (ClassSet& class_set : classes_) {
+ for (TableSlot& table_slot : class_set) {
+ table_slot.VisitRoot(class_visitor);
+ }
+ }
+ for (GcRoot<mirror::Object>& root : strong_roots_) {
+ visitor.VisitRoot(root.AddressWithoutBarrier());
+ }
+ for (const OatFile* oat_file : oat_files_) {
+ for (GcRoot<mirror::Object>& root : oat_file->GetBssGcRoots()) {
+ visitor.VisitRootIfNonNull(root.AddressWithoutBarrier());
+ }
+ }
+}
+
template <ReadBarrierOption kReadBarrierOption, typename Visitor>
bool ClassTable::Visit(Visitor& visitor) {
ReaderMutexLock mu(Thread::Current(), lock_);
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 212a7d6631..123c069f0e 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -85,6 +85,9 @@ class ClassTable {
template<typename Visitor>
void VisitRoot(const Visitor& visitor) const NO_THREAD_SAFETY_ANALYSIS;
+ template<typename Visitor>
+ class ClassAndRootVisitor;
+
private:
// Extract a raw pointer from an address.
static ObjPtr<mirror::Class> ExtractPtr(uint32_t data)
@@ -185,6 +188,12 @@ class ClassTable {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ template<class Visitor>
+ void VisitClassesAndRoots(Visitor& visitor)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Stops visit if the visitor returns false.
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
bool Visit(Visitor& visitor)
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index a48d860f0a..cd3968610b 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -166,9 +166,6 @@ void CommonRuntimeTestImpl::FinalizeSetup() {
WellKnownClasses::Init(Thread::Current()->GetJniEnv());
InitializeIntrinsics();
- // Create the heap thread pool so that the GC runs in parallel for tests. Normally, the thread
- // pool is created by the runtime.
- runtime_->GetHeap()->CreateThreadPool();
runtime_->GetHeap()->VerifyHeap(); // Check for heap corruption before the test
// Reduce timinig-dependent flakiness in OOME behavior (eg StubTest.AllocObject).
runtime_->GetHeap()->SetMinIntervalHomogeneousSpaceCompactionByOom(0U);
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 9fa9c5d7e5..e1360730d6 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -305,7 +305,7 @@ class CheckJniAbortCatcher {
}
#define TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS() \
- if (!kEmitCompilerReadBarrier || !kUseBakerReadBarrier) { \
+ if (!gUseReadBarrier || !kUseBakerReadBarrier) { \
printf("WARNING: TEST DISABLED FOR GC WITHOUT BAKER READ BARRIER\n"); \
return; \
}
@@ -317,7 +317,7 @@ class CheckJniAbortCatcher {
}
#define TEST_DISABLED_FOR_MEMORY_TOOL_WITH_HEAP_POISONING_WITHOUT_READ_BARRIERS() \
- if (kRunningOnMemoryTool && kPoisonHeapReferences && !kEmitCompilerReadBarrier) { \
+ if (kRunningOnMemoryTool && kPoisonHeapReferences && !gUseReadBarrier) { \
printf("WARNING: TEST DISABLED FOR MEMORY TOOL WITH HEAP POISONING WITHOUT READ BARRIERS\n"); \
return; \
}
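These two macro changes follow a pattern repeated throughout this diff: the compile-time constant kEmitCompilerReadBarrier is replaced by gUseReadBarrier, which (judging by the name and usage) is a global resolved at runtime, so one build can serve both GC configurations. A minimal model of the difference, with stand-in definitions:

```cpp
// Stand-ins; the real declarations live in ART's read barrier headers.
constexpr bool kUseBakerReadBarrier = true;   // still a build-time choice
extern bool gUseReadBarrier;                  // now read at runtime (assumed)

// Before: `if (!kEmitCompilerReadBarrier || ...)` folded away at compile
// time. After: the same guard becomes an ordinary runtime branch.
bool BakerReadBarriersActive() {
  return gUseReadBarrier && kUseBakerReadBarrier;
}
```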
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index 17a0a8a714..03fd4232c8 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -152,7 +152,6 @@ void ThrowWrappedBootstrapMethodError(const char* fmt, ...) {
// ClassCastException
void ThrowClassCastException(ObjPtr<mirror::Class> dest_type, ObjPtr<mirror::Class> src_type) {
- DumpB77342775DebugData(dest_type, src_type);
ThrowException("Ljava/lang/ClassCastException;", nullptr,
StringPrintf("%s cannot be cast to %s",
mirror::Class::PrettyDescriptor(src_type).c_str(),
@@ -238,6 +237,19 @@ void ThrowIllegalAccessError(ObjPtr<mirror::Class> referrer, const char* fmt, ..
va_end(args);
}
+void ThrowIllegalAccessErrorForImplementingMethod(ObjPtr<mirror::Class> klass,
+ ArtMethod* implementation_method,
+ ArtMethod* interface_method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(!implementation_method->IsAbstract());
+ DCHECK(!implementation_method->IsPublic());
+ ThrowIllegalAccessError(
+ klass,
+ "Method '%s' implementing interface method '%s' is not public",
+ implementation_method->PrettyMethod().c_str(),
+ interface_method->PrettyMethod().c_str());
+}
+
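A sketch of the intended call site for the new helper, assuming it is invoked when class linking finds a non-public concrete method implementing an interface method; the surrounding function here is hypothetical:

```cpp
// Hypothetical linker-side check; only the helper call is from this diff.
void CheckInterfaceImplementation(ObjPtr<mirror::Class> klass,
                                  ArtMethod* implementation_method,
                                  ArtMethod* interface_method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (!implementation_method->IsAbstract() &&
      !implementation_method->IsPublic()) {
    // Matches the DCHECKs in the helper: concrete but not public.
    ThrowIllegalAccessErrorForImplementingMethod(
        klass, implementation_method, interface_method);
  }
}
```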
// IllegalAccessException
void ThrowIllegalAccessException(const char* msg) {
@@ -281,7 +293,6 @@ void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* inter
<< "' does not implement interface '"
<< mirror::Class::PrettyDescriptor(interface_method->GetDeclaringClass())
<< "' in call to '" << ArtMethod::PrettyMethod(interface_method) << "'";
- DumpB77342775DebugData(interface_method->GetDeclaringClass(), this_object->GetClass());
ThrowException("Ljava/lang/IncompatibleClassChangeError;",
referrer != nullptr ? referrer->GetDeclaringClass() : nullptr,
msg.str().c_str());
@@ -437,7 +448,7 @@ void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method, InvokeType type
}
static bool IsValidReadBarrierImplicitCheck(uintptr_t addr) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
uint32_t monitor_offset = mirror::Object::MonitorOffset().Uint32Value();
if (kUseBakerReadBarrier &&
(kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64)) {
@@ -472,7 +483,7 @@ static bool IsValidImplicitCheck(uintptr_t addr, const Instruction& instr)
}
case Instruction::IGET_OBJECT:
- if (kEmitCompilerReadBarrier && IsValidReadBarrierImplicitCheck(addr)) {
+ if (gUseReadBarrier && IsValidReadBarrierImplicitCheck(addr)) {
return true;
}
FALLTHROUGH_INTENDED;
@@ -496,7 +507,7 @@ static bool IsValidImplicitCheck(uintptr_t addr, const Instruction& instr)
}
case Instruction::AGET_OBJECT:
- if (kEmitCompilerReadBarrier && IsValidReadBarrierImplicitCheck(addr)) {
+ if (gUseReadBarrier && IsValidReadBarrierImplicitCheck(addr)) {
return true;
}
FALLTHROUGH_INTENDED;
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index 843c455878..d9620df0b9 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -111,6 +111,11 @@ void ThrowIllegalAccessError(ObjPtr<mirror::Class> referrer, const char* fmt, ..
__attribute__((__format__(__printf__, 2, 3)))
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+void ThrowIllegalAccessErrorForImplementingMethod(ObjPtr<mirror::Class> klass,
+ ArtMethod* implementation_method,
+ ArtMethod* interface_method)
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+
// IllegalAccessException
void ThrowIllegalAccessException(const char* msg)
diff --git a/runtime/debug_print.cc b/runtime/debug_print.cc
index cde4d868cb..9c38cce77a 100644
--- a/runtime/debug_print.cc
+++ b/runtime/debug_print.cc
@@ -129,60 +129,4 @@ std::string DescribeLoaders(ObjPtr<mirror::ClassLoader> loader, const char* clas
return oss.str();
}
-void DumpB77342775DebugData(ObjPtr<mirror::Class> target_class, ObjPtr<mirror::Class> src_class) {
- std::string target_descriptor_storage;
- const char* target_descriptor = target_class->GetDescriptor(&target_descriptor_storage);
- const char kCheckedPrefix[] = "Lorg/apache/http/";
- // Avoid spam for other packages. (That spam would break some ART run-tests for example.)
- if (strncmp(target_descriptor, kCheckedPrefix, sizeof(kCheckedPrefix) - 1) != 0) {
- return;
- }
- auto matcher = [target_descriptor, target_class](ObjPtr<mirror::Class> klass)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (klass->DescriptorEquals(target_descriptor)) {
- LOG(ERROR) << " descriptor match in "
- << DescribeLoaders(klass->GetClassLoader(), target_descriptor)
- << " match? " << std::boolalpha << (klass == target_class);
- }
- };
-
- std::string source_descriptor_storage;
- const char* source_descriptor = src_class->GetDescriptor(&source_descriptor_storage);
-
- LOG(ERROR) << "Maybe bug 77342775, looking for " << target_descriptor
- << " " << target_class.Ptr() << "[" << DescribeSpace(target_class) << "]"
- << " defined in " << target_class->GetDexFile().GetLocation()
- << "/" << static_cast<const void*>(&target_class->GetDexFile())
- << "\n with loader: " << DescribeLoaders(target_class->GetClassLoader(), target_descriptor);
- if (target_class->IsInterface()) {
- ObjPtr<mirror::IfTable> iftable = src_class->GetIfTable();
- CHECK(iftable != nullptr);
- size_t ifcount = iftable->Count();
- LOG(ERROR) << " in interface table for " << source_descriptor
- << " " << src_class.Ptr() << "[" << DescribeSpace(src_class) << "]"
- << " defined in " << src_class->GetDexFile().GetLocation()
- << "/" << static_cast<const void*>(&src_class->GetDexFile())
- << " ifcount=" << ifcount
- << "\n with loader " << DescribeLoaders(src_class->GetClassLoader(), source_descriptor);
- for (size_t i = 0; i != ifcount; ++i) {
- ObjPtr<mirror::Class> iface = iftable->GetInterface(i);
- CHECK(iface != nullptr);
- LOG(ERROR) << " iface #" << i << ": " << iface->PrettyDescriptor();
- matcher(iface);
- }
- } else {
- LOG(ERROR) << " in superclass chain for " << source_descriptor
- << " " << src_class.Ptr() << "[" << DescribeSpace(src_class) << "]"
- << " defined in " << src_class->GetDexFile().GetLocation()
- << "/" << static_cast<const void*>(&src_class->GetDexFile())
- << "\n with loader " << DescribeLoaders(src_class->GetClassLoader(), source_descriptor);
- for (ObjPtr<mirror::Class> klass = src_class;
- klass != nullptr;
- klass = klass->GetSuperClass()) {
- LOG(ERROR) << " - " << klass->PrettyDescriptor();
- matcher(klass);
- }
- }
-}
-
} // namespace art
diff --git a/runtime/debug_print.h b/runtime/debug_print.h
index e2990d4c2d..7c6840284d 100644
--- a/runtime/debug_print.h
+++ b/runtime/debug_print.h
@@ -29,9 +29,6 @@ std::string DescribeSpace(ObjPtr<mirror::Class> klass)
std::string DescribeLoaders(ObjPtr<mirror::ClassLoader> loader, const char* class_descriptor)
REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-void DumpB77342775DebugData(ObjPtr<mirror::Class> target_class, ObjPtr<mirror::Class> src_class)
- REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
-
} // namespace art
#endif // ART_RUNTIME_DEBUG_PRINT_H_
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index 5a409f0a4b..3dd8ae19df 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -841,6 +841,38 @@ ObjPtr<mirror::Object> GetAnnotationValue(const ClassData& klass,
return annotation_value.value_.GetL();
}
+template<typename T>
+static inline ObjPtr<mirror::ObjectArray<T>> GetAnnotationArrayValue(
+ Handle<mirror::Class> klass,
+ const char* annotation_name,
+ const char* value_name)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ClassData data(klass);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ const AnnotationItem* annotation_item =
+ SearchAnnotationSet(data.GetDexFile(), annotation_set, annotation_name,
+ DexFile::kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ StackHandleScope<1> hs(Thread::Current());
+ Handle<mirror::Class> class_array_class =
+ hs.NewHandle(GetClassRoot<mirror::ObjectArray<T>>());
+ DCHECK(class_array_class != nullptr);
+ ObjPtr<mirror::Object> obj = GetAnnotationValue(data,
+ annotation_item,
+ value_name,
+ class_array_class,
+ DexFile::kDexAnnotationArray);
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ return obj->AsObjectArray<T>();
+}
+
static ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureValue(
const ClassData& klass,
const AnnotationSetItem* annotation_set)
@@ -1478,28 +1510,9 @@ ObjPtr<mirror::ObjectArray<mirror::Object>> GetAnnotationsForClass(Handle<mirror
}
ObjPtr<mirror::ObjectArray<mirror::Class>> GetDeclaredClasses(Handle<mirror::Class> klass) {
- ClassData data(klass);
- const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
- if (annotation_set == nullptr) {
- return nullptr;
- }
- const AnnotationItem* annotation_item =
- SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/MemberClasses;",
- DexFile::kDexVisibilitySystem);
- if (annotation_item == nullptr) {
- return nullptr;
- }
- StackHandleScope<1> hs(Thread::Current());
- Handle<mirror::Class> class_array_class =
- hs.NewHandle(GetClassRoot<mirror::ObjectArray<mirror::Class>>());
- DCHECK(class_array_class != nullptr);
- ObjPtr<mirror::Object> obj =
- GetAnnotationValue(data, annotation_item, "value", class_array_class,
- DexFile::kDexAnnotationArray);
- if (obj == nullptr) {
- return nullptr;
- }
- return obj->AsObjectArray<mirror::Class>();
+ return GetAnnotationArrayValue<mirror::Class>(klass,
+ "Ldalvik/annotation/MemberClasses;",
+ "value");
}
ObjPtr<mirror::Class> GetDeclaringClass(Handle<mirror::Class> klass) {
@@ -1714,6 +1727,46 @@ const char* GetSourceDebugExtension(Handle<mirror::Class> klass) {
return data.GetDexFile().StringDataByIdx(index);
}
+ObjPtr<mirror::Class> GetNestHost(Handle<mirror::Class> klass) {
+ ClassData data(klass);
+ const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
+ if (annotation_set == nullptr) {
+ return nullptr;
+ }
+ const AnnotationItem* annotation_item =
+ SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/NestHost;",
+ DexFile::kDexVisibilitySystem);
+ if (annotation_item == nullptr) {
+ return nullptr;
+ }
+ ObjPtr<mirror::Object> obj = GetAnnotationValue(data,
+ annotation_item,
+ "host",
+ ScopedNullHandle<mirror::Class>(),
+ DexFile::kDexAnnotationType);
+ if (obj == nullptr) {
+ return nullptr;
+ }
+ if (!obj->IsClass()) {
+ // TypeNotPresentException, throw the NoClassDefFoundError.
+ Thread::Current()->SetException(obj->AsThrowable()->GetCause());
+ return nullptr;
+ }
+ return obj->AsClass();
+}
+
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetNestMembers(Handle<mirror::Class> klass) {
+ return GetAnnotationArrayValue<mirror::Class>(klass,
+ "Ldalvik/annotation/NestMembers;",
+ "classes");
+}
+
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetPermittedSubclasses(Handle<mirror::Class> klass) {
+ return GetAnnotationArrayValue<mirror::Class>(klass,
+ "Ldalvik/annotation/PermittedSubclasses;",
+ "value");
+}
+
bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class) {
ClassData data(klass);
const AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data);
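With GetAnnotationArrayValue factored out, MemberClasses, NestMembers, and PermittedSubclasses all reduce to one-line accessors. Any future system annotation carrying a class array could follow the same shape; a purely hypothetical example:

```cpp
// Hypothetical accessor; "Ldalvik/annotation/Example;" is not a real
// annotation and only illustrates reuse of the new helper.
ObjPtr<mirror::ObjectArray<mirror::Class>> GetExampleClasses(
    Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
  return GetAnnotationArrayValue<mirror::Class>(klass,
                                                "Ldalvik/annotation/Example;",
                                                "value");
}
```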
diff --git a/runtime/dex/dex_file_annotations.h b/runtime/dex/dex_file_annotations.h
index 3ef67e5661..75951a7cef 100644
--- a/runtime/dex/dex_file_annotations.h
+++ b/runtime/dex/dex_file_annotations.h
@@ -136,6 +136,12 @@ ObjPtr<mirror::ObjectArray<mirror::String>> GetSignatureAnnotationForClass(
Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
const char* GetSourceDebugExtension(Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
+ObjPtr<mirror::Class> GetNestHost(Handle<mirror::Class> klass)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetNestMembers(Handle<mirror::Class> klass)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ObjPtr<mirror::ObjectArray<mirror::Class>> GetPermittedSubclasses(Handle<mirror::Class> klass)
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsClassAnnotationPresent(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index 964b7f3fff..166464f28d 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -21,8 +21,6 @@
#include <string>
#include <vector>
-#include <gtest/gtest.h>
-
#include "base/file_utils.h"
#include "base/os.h"
#include "base/stl_util.h"
@@ -34,8 +32,10 @@
#include "exec_utils.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
+#include "gtest/gtest.h"
#include "oat_file_assistant.h"
#include "runtime.h"
+#include "ziparchive/zip_writer.h"
namespace art {
@@ -233,6 +233,23 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
return res.status_code;
}
+ void CreateDexMetadata(const std::string& vdex, const std::string& out_dm) {
+ // Read the vdex bytes.
+ std::unique_ptr<File> vdex_file(OS::OpenFileForReading(vdex.c_str()));
+ std::vector<uint8_t> data(vdex_file->GetLength());
+ ASSERT_TRUE(vdex_file->ReadFully(data.data(), data.size()));
+
+ // Zip the content.
+ FILE* file = fopen(out_dm.c_str(), "wb");
+ ZipWriter writer(file);
+ writer.StartEntry("primary.vdex", ZipWriter::kAlign32);
+ writer.WriteBytes(data.data(), data.size());
+ writer.FinishEntry();
+ writer.Finish();
+ fflush(file);
+ fclose(file);
+ }
+
private:
std::string scratch_dir_;
std::string odex_oat_dir_;
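CreateDexMetadata drives libziparchive's ZipWriter. A standalone sketch of the same sequence with the error handling a non-test caller would want (ZipWriter methods return 0 on success); the function name and signature are placeholders:

```cpp
#include <cstdio>
#include <vector>

#include "ziparchive/zip_writer.h"

// Writes `data` as the single 32-bit-aligned "primary.vdex" entry of a .dm
// archive. Returns false on any failure; the test helper above ASSERTs.
bool WriteDexMetadata(const char* out_dm, const std::vector<uint8_t>& data) {
  FILE* file = fopen(out_dm, "wb");
  if (file == nullptr) {
    return false;
  }
  ZipWriter writer(file);
  bool ok = writer.StartEntry("primary.vdex", ZipWriter::kAlign32) == 0 &&
            writer.WriteBytes(data.data(), data.size()) == 0 &&
            writer.FinishEntry() == 0 &&
            writer.Finish() == 0;
  return (fclose(file) == 0) && ok;
}
```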
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index eb5b149039..1f44a67500 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -14,14 +14,17 @@
* limitations under the License.
*/
-#include <string>
-#include <vector>
+#include "dexopt_test.h"
#include <gtest/gtest.h>
#include <procinfo/process_map.h>
+#include <string>
+#include <vector>
+
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
+#include "arch/instruction_set.h"
#include "base/file_utils.h"
#include "base/mem_map.h"
#include "common_runtime_test.h"
@@ -29,10 +32,10 @@
#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "dex2oat_environment_test.h"
-#include "dexopt_test.h"
#include "gc/space/image_space.h"
#include "hidden_api.h"
#include "oat.h"
+#include "oat_file_assistant.h"
#include "profile/profile_compilation_info.h"
namespace art {
@@ -163,22 +166,13 @@ void DexoptTest::GenerateOatForTest(const std::string& dex_location,
EXPECT_EQ(filter, odex_file->GetCompilerFilter());
if (CompilerFilter::DependsOnImageChecksum(filter)) {
- const OatHeader& oat_header = odex_file->GetOatHeader();
- const char* oat_bcp = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
- ASSERT_TRUE(oat_bcp != nullptr);
- ASSERT_EQ(oat_bcp, android::base::Join(Runtime::Current()->GetBootClassPathLocations(), ':'));
- const char* checksums = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
- ASSERT_TRUE(checksums != nullptr);
-
- bool match = gc::space::ImageSpace::VerifyBootClassPathChecksums(
- checksums,
- oat_bcp,
- ArrayRef<const std::string>(&image_location, 1),
- ArrayRef<const std::string>(Runtime::Current()->GetBootClassPathLocations()),
- ArrayRef<const std::string>(Runtime::Current()->GetBootClassPath()),
- ArrayRef<const int>(Runtime::Current()->GetBootClassPathFds()),
- kRuntimeISA,
- &error_msg);
+ std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(/*spec=*/"");
+ OatFileAssistant oat_file_assistant(dex_location.c_str(),
+ kRuntimeISA,
+ context.get(),
+ /*load_executable=*/false);
+
+ bool match = oat_file_assistant.ValidateBootClassPathChecksums(*odex_file);
ASSERT_EQ(!with_alternate_image, match) << error_msg;
}
}
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 4ee1013816..91c266392c 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -486,6 +486,123 @@ EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(StaticPrimitiveWrite);
#undef EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL
#undef EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL
+static inline bool IsStringInit(const DexFile* dex_file, uint32_t method_idx)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const dex::MethodId& method_id = dex_file->GetMethodId(method_idx);
+ const char* class_name = dex_file->StringByTypeIdx(method_id.class_idx_);
+ const char* method_name = dex_file->GetMethodName(method_id);
+  // Instead of calling ResolveMethod(), which has a suspend point and can trigger
+  // GC, look up the method symbolically.
+ // Compare method's class name and method name against string init.
+ // It's ok since it's not allowed to create your own java/lang/String.
+ // TODO: verify that assumption.
+ if ((strcmp(class_name, "Ljava/lang/String;") == 0) &&
+ (strcmp(method_name, "<init>") == 0)) {
+ return true;
+ }
+ return false;
+}
+
+static inline bool IsStringInit(const Instruction& instr, ArtMethod* caller)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (instr.Opcode() == Instruction::INVOKE_DIRECT ||
+ instr.Opcode() == Instruction::INVOKE_DIRECT_RANGE) {
+ uint16_t callee_method_idx = (instr.Opcode() == Instruction::INVOKE_DIRECT_RANGE) ?
+ instr.VRegB_3rc() : instr.VRegB_35c();
+ return IsStringInit(caller->GetDexFile(), callee_method_idx);
+ }
+ return false;
+}
+
+extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, const uint16_t* dex_pc_ptr);
+
+template <InvokeType type>
+ArtMethod* FindMethodToCall(Thread* self,
+ ArtMethod* caller,
+ ObjPtr<mirror::Object>* this_object,
+ const Instruction& inst,
+ /*out*/ bool* string_init)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+
+ // Try to find the method in thread-local cache.
+ size_t tls_value = 0u;
+ if (!self->GetInterpreterCache()->Get(self, &inst, &tls_value)) {
+ DCHECK(!self->IsExceptionPending());
+ // NterpGetMethod can suspend, so save this_object.
+ StackHandleScope<1> hs(self);
+ HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
+ tls_value = NterpGetMethod(self, caller, reinterpret_cast<const uint16_t*>(&inst));
+ if (self->IsExceptionPending()) {
+ return nullptr;
+ }
+ }
+
+ if (type != kStatic && UNLIKELY((*this_object) == nullptr)) {
+ if (UNLIKELY(IsStringInit(inst, caller))) {
+ // Hack for String init:
+ //
+ // We assume that the input of String.<init> in verified code is always
+ // an uninitialized reference. If it is a null constant, it must have been
+ // optimized out by the compiler and we arrive here after deoptimization.
+ // Do not throw NullPointerException.
+ } else {
+ // Maintain interpreter-like semantics where NullPointerException is thrown
+ // after potential NoSuchMethodError from class linker.
+ const uint32_t method_idx = inst.VRegB();
+ ThrowNullPointerExceptionForMethodAccess(method_idx, type);
+ return nullptr;
+ }
+ }
+
+ static constexpr size_t kStringInitMethodFlag = 0b1;
+ static constexpr size_t kInvokeInterfaceOnObjectMethodFlag = 0b1;
+ static constexpr size_t kMethodMask = ~0b11;
+
+ ArtMethod* called_method = nullptr;
+ switch (type) {
+ case kDirect:
+ case kSuper:
+ case kStatic:
+ // Note: for the interpreter, the String.<init> special casing for invocation is handled
+ // in DoCallCommon.
+ *string_init = ((tls_value & kStringInitMethodFlag) != 0);
+ DCHECK_EQ(*string_init, IsStringInit(inst, caller));
+ called_method = reinterpret_cast<ArtMethod*>(tls_value & kMethodMask);
+ break;
+ case kInterface:
+ if ((tls_value & kInvokeInterfaceOnObjectMethodFlag) != 0) {
+ // invokeinterface on a j.l.Object method.
+ uint16_t method_index = tls_value >> 16;
+ called_method = (*this_object)->GetClass()->GetVTableEntry(method_index, pointer_size);
+ } else {
+ ArtMethod* interface_method = reinterpret_cast<ArtMethod*>(tls_value & kMethodMask);
+ called_method = (*this_object)->GetClass()->GetImt(pointer_size)->Get(
+ interface_method->GetImtIndex(), pointer_size);
+ if (called_method->IsRuntimeMethod()) {
+ called_method = (*this_object)->GetClass()->FindVirtualMethodForInterface(
+ interface_method, pointer_size);
+ if (UNLIKELY(called_method == nullptr)) {
+ ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(
+ interface_method, *this_object, caller);
+ return nullptr;
+ }
+ }
+ }
+ break;
+ case kVirtual:
+ called_method = (*this_object)->GetClass()->GetVTableEntry(tls_value, pointer_size);
+ break;
+ }
+
+ if (UNLIKELY(!called_method->IsInvokable())) {
+ called_method->ThrowInvocationTimeError((type == kStatic) ? nullptr : *this_object);
+ return nullptr;
+ }
+ DCHECK(!called_method->IsRuntimeMethod()) << called_method->PrettyMethod();
+ return called_method;
+}
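The low bits of the cached tls_value double as tags, which works because ArtMethod pointers are at least 4-byte aligned. The encoding side lives in NterpGetMethod (not shown in this diff), so the model below is inferred from the decode logic above; treat it as an assumption:

```cpp
#include <cstdint>

// Mirrors of the constants in FindMethodToCall.
constexpr size_t kStringInitMethodFlag = 0b1;
constexpr size_t kInvokeInterfaceOnObjectMethodFlag = 0b1;
constexpr size_t kMethodMask = ~static_cast<size_t>(0b11);

// String.<init>: pointer plus tag; decoded with `tls_value & kMethodMask`.
size_t EncodeStringInitMethod(uintptr_t method_ptr) {
  return method_ptr | kStringInitMethodFlag;
}

// invokeinterface on a j.l.Object method: no pointer is cached at all; the
// vtable index rides in the high bits and is decoded with `tls_value >> 16`.
size_t EncodeObjectInterfaceMethod(uint16_t vtable_index) {
  return (static_cast<size_t>(vtable_index) << 16) |
         kInvokeInterfaceOnObjectMethodFlag;
}
```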
+
template<bool access_check>
ALWAYS_INLINE ArtMethod* FindSuperMethodToCall(uint32_t method_idx,
ArtMethod* resolved_method,
@@ -546,130 +663,6 @@ ALWAYS_INLINE ArtMethod* FindSuperMethodToCall(uint32_t method_idx,
return super_class->GetVTableEntry(vtable_index, linker->GetImagePointerSize());
}
-// Follow virtual/interface indirections if applicable.
-// Will throw null-pointer exception the if the object is null.
-template<InvokeType type, bool access_check>
-ALWAYS_INLINE ArtMethod* FindMethodToCall(uint32_t method_idx,
- ArtMethod* resolved_method,
- ObjPtr<mirror::Object>* this_object,
- ArtMethod* referrer,
- Thread* self)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- // Null pointer check.
- if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
- if (UNLIKELY(resolved_method->GetDeclaringClass()->IsStringClass() &&
- resolved_method->IsConstructor())) {
- // Hack for String init:
- //
- // We assume that the input of String.<init> in verified code is always
- // an unitialized reference. If it is a null constant, it must have been
- // optimized out by the compiler. Do not throw NullPointerException.
- } else {
- // Maintain interpreter-like semantics where NullPointerException is thrown
- // after potential NoSuchMethodError from class linker.
- ThrowNullPointerExceptionForMethodAccess(method_idx, type);
- return nullptr; // Failure.
- }
- }
- switch (type) {
- case kStatic:
- case kDirect:
- return resolved_method;
- case kVirtual: {
- ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
- uint16_t vtable_index = resolved_method->GetMethodIndex();
- if (access_check &&
- (!klass->HasVTable() ||
- vtable_index >= static_cast<uint32_t>(klass->GetVTableLength()))) {
- // Behavior to agree with that of the verifier.
- ThrowNoSuchMethodError(type, resolved_method->GetDeclaringClass(),
- resolved_method->GetName(), resolved_method->GetSignature());
- return nullptr; // Failure.
- }
- DCHECK(klass->HasVTable()) << klass->PrettyClass();
- return klass->GetVTableEntry(vtable_index, class_linker->GetImagePointerSize());
- }
- case kSuper: {
- return FindSuperMethodToCall<access_check>(method_idx, resolved_method, referrer, self);
- }
- case kInterface: {
- size_t imt_index = resolved_method->GetImtIndex();
- PointerSize pointer_size = class_linker->GetImagePointerSize();
- ObjPtr<mirror::Class> klass = (*this_object)->GetClass();
- ArtMethod* imt_method = klass->GetImt(pointer_size)->Get(imt_index, pointer_size);
- if (!imt_method->IsRuntimeMethod()) {
- if (kIsDebugBuild) {
- ArtMethod* method = klass->FindVirtualMethodForInterface(
- resolved_method, class_linker->GetImagePointerSize());
- CHECK_EQ(imt_method, method) << ArtMethod::PrettyMethod(resolved_method) << " / "
- << imt_method->PrettyMethod() << " / "
- << ArtMethod::PrettyMethod(method) << " / "
- << klass->PrettyClass();
- }
- return imt_method;
- } else {
- ArtMethod* interface_method = klass->FindVirtualMethodForInterface(
- resolved_method, class_linker->GetImagePointerSize());
- if (UNLIKELY(interface_method == nullptr)) {
- ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(resolved_method,
- *this_object, referrer);
- return nullptr; // Failure.
- }
- return interface_method;
- }
- }
- default:
- LOG(FATAL) << "Unknown invoke type " << type;
- return nullptr; // Failure.
- }
-}
-
-template<InvokeType type, bool access_check>
-inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
- ObjPtr<mirror::Object>* this_object,
- ArtMethod* referrer,
- Thread* self) {
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- constexpr ClassLinker::ResolveMode resolve_mode =
- access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
- : ClassLinker::ResolveMode::kNoChecks;
- ArtMethod* resolved_method;
- if (type == kStatic) {
- resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
- } else {
- StackHandleScope<1> hs(self);
- HandleWrapperObjPtr<mirror::Object> h_this(hs.NewHandleWrapper(this_object));
- resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, referrer, type);
- }
- if (UNLIKELY(resolved_method == nullptr)) {
- DCHECK(self->IsExceptionPending()); // Throw exception and unwind.
- return nullptr; // Failure.
- }
- return FindMethodToCall<type, access_check>(
- method_idx, resolved_method, this_object, referrer, self);
-}
-
-// Explicit template declarations of FindMethodFromCode for all invoke types.
-#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
- template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
- ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
- ObjPtr<mirror::Object>* this_object, \
- ArtMethod* referrer, \
- Thread* self)
-#define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
- EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, false); \
- EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, true)
-
-EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kStatic);
-EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kDirect);
-EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kVirtual);
-EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kSuper);
-EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(kInterface);
-
-#undef EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL
-#undef EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL
-
inline ObjPtr<mirror::Class> ResolveVerifyAndClinit(dex::TypeIndex type_idx,
ArtMethod* referrer,
Thread* self,
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 8b6fc69bea..ae5687506a 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -143,11 +143,12 @@ inline ArtField* FindFieldFromCode(uint32_t field_idx,
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
-template<InvokeType type, bool access_check>
-inline ArtMethod* FindMethodFromCode(uint32_t method_idx,
- ObjPtr<mirror::Object>* this_object,
- ArtMethod* referrer,
- Thread* self)
+template<InvokeType type>
+inline ArtMethod* FindMethodToCall(Thread* self,
+ ArtMethod* referrer,
+ ObjPtr<mirror::Object>* this_object,
+ const Instruction& inst,
+ /*out*/ bool* string_init)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index c78b604b43..80eb89f4e3 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -87,11 +87,11 @@ extern "C" const void* artFindNativeMethodRunnable(Thread* self)
}
// Replace the runtime method on the stack with the target method.
- DCHECK(!self->GetManagedStack()->GetTopQuickFrameTag());
+ DCHECK(!self->GetManagedStack()->GetTopQuickFrameGenericJniTag());
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrameKnownNotTagged();
DCHECK(*sp == Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
*sp = target_method;
- self->SetTopOfStackTagged(sp); // Fake GenericJNI frame.
+ self->SetTopOfStackGenericJniTagged(sp); // Fake GenericJNI frame.
// Continue with the target method.
method = target_method;
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index f8856d82b9..cb3caac9ab 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -122,6 +122,7 @@ extern "C" void art_jni_method_start();
extern "C" void art_jni_monitored_method_start();
extern "C" void art_jni_method_end();
extern "C" void art_jni_monitored_method_end();
+extern "C" void art_jni_method_entry_hook();
// JNI lock/unlock entrypoints. Note: Custom calling convention.
extern "C" void art_jni_lock_object(art::mirror::Object*);
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index 939feeebcc..ea077889ee 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -79,6 +79,7 @@ static void DefaultInitEntryPoints(JniEntryPoints* jpoints,
qpoints->SetQuickGenericJniTrampoline(art_quick_generic_jni_trampoline);
qpoints->SetJniDecodeReferenceResult(JniDecodeReferenceResult);
qpoints->SetJniReadBarrier(art_jni_read_barrier);
+ qpoints->SetJniMethodEntryHook(art_jni_method_entry_hook);
// Locks
if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 60a5875c5e..76bee2152a 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -23,6 +23,7 @@
#include "dex/dex_file_types.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "gc/heap.h"
+#include "jvalue-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 7af1a0b14e..0e73c63828 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -67,6 +67,7 @@ extern "C" void artJniUnlockObject(mirror::Object* locked, Thread* self)
// JNI entrypoints when monitoring entry/exit.
extern "C" void artJniMonitoredMethodStart(Thread* self) UNLOCK_FUNCTION(Locks::mutator_lock_);
extern "C" void artJniMonitoredMethodEnd(Thread* self) SHARED_LOCK_FUNCTION(Locks::mutator_lock_);
+extern "C" void artJniMethodEntryHook(Thread* self);
// StringAppend pattern entrypoint.
extern "C" mirror::String* artStringBuilderAppend(uint32_t format,
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index dffaa4bb25..4534bba8ef 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -78,6 +78,7 @@
V(JniLockObject, void, mirror::Object*) \
V(JniUnlockObject, void, mirror::Object*) \
V(QuickGenericJniTrampoline, void, ArtMethod*) \
+ V(JniMethodEntryHook, void) \
\
V(LockObject, void, mirror::Object*) \
V(UnlockObject, void, mirror::Object*) \
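For readers unfamiliar with this header: the V(...) lines form an X-macro list, so adding JniMethodEntryHook here automatically threads the new entrypoint through every expansion site (struct fields, offsets, the order test below). A simplified model of the technique, with illustrative names:

```cpp
// Illustrative X-macro: each expansion site redefines V to emit what it needs.
#define QUICK_ENTRYPOINT_LIST(V) \
  V(QuickGenericJniTrampoline)   \
  V(JniMethodEntryHook)          \
  V(LockObject)

struct EntryPoints {
#define DEFINE_SLOT(name) void* p##name;
  QUICK_ENTRYPOINT_LIST(DEFINE_SLOT)  // -> pQuickGenericJniTrampoline; ...
#undef DEFINE_SLOT
};
```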
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index d32aa39996..81a9e21ab0 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -175,7 +175,7 @@ static ArtMethod* GetReferrer(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_
return -1; \
} \
} \
- if (!referrer->SkipAccessChecks() && IsObject && new_value != 0) { \
+ if (!referrer->SkipAccessChecks() && (IsObject) && new_value != 0) { \
StackArtFieldHandleScope<1> rhs(self); \
ReflectiveHandle<ArtField> field_handle(rhs.NewHandle(field)); \
if (field->ResolveType().IsNull()) { \
@@ -223,7 +223,7 @@ static ArtMethod* GetReferrer(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_
return -1; \
} \
} \
- if (!referrer->SkipAccessChecks() && IsObject && new_value != 0) { \
+ if (!referrer->SkipAccessChecks() && (IsObject) && new_value != 0) { \
StackArtFieldHandleScope<1> rhs(self); \
ReflectiveHandle<ArtField> field_handle(rhs.NewHandle(field)); \
if (field->ResolveType().IsNull()) { \
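The added parentheses around IsObject in the two macro bodies are standard macro hygiene: without them, an argument containing || would rebind against the surrounding &&. A self-contained illustration (macro names hypothetical):

```cpp
#include <cassert>

bool flag = false;
#define UNSAFE(cond) (flag && cond)
#define SAFE(cond) (flag && (cond))

int main() {
  bool a = false;
  bool b = true;
  // UNSAFE(a || b) expands to (flag && a || b); since && binds tighter than
  // ||, that parses as ((flag && a) || b) == true -- the guard is bypassed.
  assert(UNSAFE(a || b) == true);
  // SAFE(a || b) expands to (flag && (a || b)) == false, as intended.
  assert(SAFE(a || b) == false);
  return 0;
}
```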
@@ -435,7 +435,7 @@ extern "C" int artSet16InstanceFromCode(uint32_t field_idx,
}
extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
return ReadBarrier::Mark(obj);
}
@@ -443,14 +443,12 @@ extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUS
mirror::Object* obj,
uint32_t offset) {
// Used only in connection with non-volatile loads.
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
uint8_t* raw_addr = reinterpret_cast<uint8_t*>(obj) + offset;
mirror::HeapReference<mirror::Object>* ref_addr =
reinterpret_cast<mirror::HeapReference<mirror::Object>*>(raw_addr);
- constexpr ReadBarrierOption kReadBarrierOption =
- kUseReadBarrier ? kWithReadBarrier : kWithoutReadBarrier;
mirror::Object* result =
- ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kReadBarrierOption>(
+ ReadBarrier::Barrier<mirror::Object, /* kIsVolatile= */ false, kWithReadBarrier>(
obj,
MemberOffset(offset),
ref_addr);
@@ -458,7 +456,7 @@ extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref ATTRIBUTE_UNUS
}
extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root) {
- DCHECK(kEmitCompilerReadBarrier);
+ DCHECK(gUseReadBarrier);
return root->Read();
}
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index ab13bd95b1..fafa3c702b 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -38,11 +38,16 @@
namespace art {
+extern "C" int artMethodExitHook(Thread* self,
+ ArtMethod* method,
+ uint64_t* gpr_result,
+ uint64_t* fpr_result);
+
static_assert(sizeof(IRTSegmentState) == sizeof(uint32_t), "IRTSegmentState size unexpected");
static_assert(std::is_trivial<IRTSegmentState>::value, "IRTSegmentState not trivial");
extern "C" void artJniReadBarrier(ArtMethod* method) {
- DCHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
mirror::CompressedReference<mirror::Object>* declaring_class =
method->GetDeclaringClassAddressWithoutBarrier();
if (kUseBakerReadBarrier) {
@@ -174,11 +179,11 @@ extern uint64_t GenericJniMethodEnd(Thread* self,
artJniUnlockObject(lock.Ptr(), self);
}
char return_shorty_char = called->GetShorty()[0];
+ uint64_t ret;
if (return_shorty_char == 'L') {
- uint64_t ret = reinterpret_cast<uint64_t>(
+ ret = reinterpret_cast<uint64_t>(
UNLIKELY(self->IsExceptionPending()) ? nullptr : JniDecodeReferenceResult(result.l, self));
PopLocalReferences(saved_local_ref_cookie, self);
- return ret;
} else {
if (LIKELY(!critical_native)) {
PopLocalReferences(saved_local_ref_cookie, self);
@@ -188,32 +193,43 @@ extern uint64_t GenericJniMethodEnd(Thread* self,
if (kRuntimeISA == InstructionSet::kX86) {
// Convert back the result to float.
double d = bit_cast<double, uint64_t>(result_f);
- return bit_cast<uint32_t, float>(static_cast<float>(d));
+ ret = bit_cast<uint32_t, float>(static_cast<float>(d));
} else {
- return result_f;
+ ret = result_f;
}
}
+ break;
case 'D':
- return result_f;
+ ret = result_f;
+ break;
case 'Z':
- return result.z;
+ ret = result.z;
+ break;
case 'B':
- return result.b;
+ ret = result.b;
+ break;
case 'C':
- return result.c;
+ ret = result.c;
+ break;
case 'S':
- return result.s;
+ ret = result.s;
+ break;
case 'I':
- return result.i;
+ ret = result.i;
+ break;
case 'J':
- return result.j;
+ ret = result.j;
+ break;
case 'V':
- return 0;
+ ret = 0;
+ break;
default:
LOG(FATAL) << "Unexpected return shorty character " << return_shorty_char;
UNREACHABLE();
}
}
+
+ return ret;
}
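In the x86 'F' case above, a float result arrives as a double and must be narrowed back to its 32-bit representation before being returned in the low half of the 64-bit slot. A self-contained model of that conversion, with art::bit_cast replaced by a local memcpy-based stand-in:

```cpp
#include <cstdint>
#include <cstring>

// Simplified stand-in for art::bit_cast<Dst, Src>.
template <typename Dst, typename Src>
Dst BitCast(Src src) {
  static_assert(sizeof(Dst) == sizeof(Src), "size mismatch");
  Dst dst;
  std::memcpy(&dst, &src, sizeof(dst));
  return dst;
}

// result_f carries the raw bits of a double; narrow to float and return the
// float's bit pattern zero-extended to 64 bits, as the trampoline does.
uint64_t PackFloatResult(uint64_t result_f) {
  double d = BitCast<double>(result_f);
  return BitCast<uint32_t>(static_cast<float>(d));
}
```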
extern "C" void artJniMonitoredMethodStart(Thread* self) {
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 93422cf056..5dca58ab04 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -21,16 +21,46 @@
namespace art {
+extern "C" void artDeoptimizeIfNeeded(Thread* self, uintptr_t result, bool is_ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ DCHECK(!self->IsExceptionPending());
+
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ DCHECK(sp != nullptr && (*sp)->IsRuntimeMethod());
+
+ DeoptimizationMethodType type = instr->GetDeoptimizationMethodType(*sp);
+ JValue jvalue;
+ jvalue.SetJ(result);
+ instr->DeoptimizeIfNeeded(self, sp, type, jvalue, is_ref);
+}
+
extern "C" void artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when there is a pending checkpoint or suspend request.
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend();
+
+  // Other dex instructions may be mapped to the same dex pc as the suspend check, and they
+  // still need to execute. So we should resume execution from the current dex pc.
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ JValue result;
+ result.SetJ(0);
+ Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
+ self, sp, DeoptimizationMethodType::kKeepDexPc, result, /* is_ref= */ false);
}
extern "C" void artImplicitSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when there is a pending checkpoint or suspend request.
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend(/*implicit=*/ true);
+
+  // Other dex instructions may be mapped to the same dex pc as the suspend check, and they
+  // still need to execute. So we should resume execution from the current dex pc.
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ JValue result;
+ result.SetJ(0);
+ Runtime::Current()->GetInstrumentation()->DeoptimizeIfNeeded(
+ self, sp, DeoptimizationMethodType::kKeepDexPc, result, /* is_ref= */ false);
}
extern "C" void artCompileOptimized(ArtMethod* method, Thread* self)
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index b6ece4a86e..9e21007387 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -224,13 +224,8 @@ class QuickArgumentVisitor {
#endif
public:
- // Special handling for proxy methods. Proxy methods are instance methods so the
- // 'this' object is the 1st argument. They also have the same frame layout as the
- // kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
- // 1st GPR.
- static StackReference<mirror::Object>* GetProxyThisObjectReference(ArtMethod** sp)
+ static StackReference<mirror::Object>* GetThisObjectReference(ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
- CHECK((*sp)->IsProxyMethod());
CHECK_GT(kNumQuickGprArgs, 0u);
constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
@@ -529,7 +524,8 @@ class QuickArgumentVisitor {
// allows to use the QuickArgumentVisitor constants without moving all the code in its own module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return QuickArgumentVisitor::GetProxyThisObjectReference(sp)->AsMirrorPtr();
+ DCHECK((*sp)->IsProxyMethod());
+ return QuickArgumentVisitor::GetThisObjectReference(sp)->AsMirrorPtr();
}
// Visits arguments on the stack placing them into the shadow frame.
@@ -647,6 +643,7 @@ static void HandleDeoptimization(JValue* result,
method_type);
}
+NO_STACK_PROTECTOR
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
REQUIRES_SHARED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
@@ -654,7 +651,10 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(!method->IsInvokable())) {
- method->ThrowInvocationTimeError();
+ method->ThrowInvocationTimeError(
+ method->IsStatic()
+ ? nullptr
+ : QuickArgumentVisitor::GetThisObjectReference(sp)->AsMirrorPtr());
return 0;
}
@@ -713,41 +713,34 @@ extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self,
// Pop transition.
self->PopManagedStackFragment(fragment);
- // Request a stack deoptimization if needed
- ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
- uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
+ // Check if caller needs to be deoptimized for instrumentation reasons.
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
// If caller_pc is the instrumentation exit stub, the stub will check to see if deoptimization
// should be done and it knows the real return pc. NB If the upcall is null we don't need to do
// anything. This can happen during shutdown or early startup.
- if (UNLIKELY(
- caller != nullptr &&
- caller_pc != reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) &&
- (self->IsForceInterpreter() || Dbg::IsForcedInterpreterNeededForUpcall(self, caller)))) {
- if (!Runtime::Current()->IsAsyncDeoptimizeable(caller_pc)) {
- LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
- << caller->PrettyMethod();
- } else {
- VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod()
- << " to " << caller->PrettyMethod()
- << (force_frame_pop ? " for frame-pop" : "");
- DCHECK_IMPLIES(force_frame_pop, result.GetJ() == 0)
- << "Force frame pop should have no result.";
- if (force_frame_pop && self->GetException() != nullptr) {
- LOG(WARNING) << "Suppressing exception for instruction-retry: "
- << self->GetException()->Dump();
- }
- // Push the context of the deoptimization stack so we can restore the return value and the
- // exception before executing the deoptimized frames.
- self->PushDeoptimizationContext(
- result,
- shorty[0] == 'L' || shorty[0] == '[', /* class or array */
- force_frame_pop ? nullptr : self->GetException(),
- /* from_code= */ false,
- DeoptimizationMethodType::kDefault);
-
- // Set special exception to cause deoptimization.
- self->SetException(Thread::GetDeoptimizationException());
+ if (UNLIKELY(instr->ShouldDeoptimizeCaller(self, sp))) {
+ ArtMethod* caller = QuickArgumentVisitor::GetOuterMethod(sp);
+ uintptr_t caller_pc = QuickArgumentVisitor::GetCallingPc(sp);
+ DCHECK(Runtime::Current()->IsAsyncDeoptimizeable(caller, caller_pc));
+ DCHECK(caller != nullptr);
+ VLOG(deopt) << "Forcing deoptimization on return from method " << method->PrettyMethod()
+ << " to " << caller->PrettyMethod() << (force_frame_pop ? " for frame-pop" : "");
+ DCHECK(!force_frame_pop || result.GetJ() == 0) << "Force frame pop should have no result.";
+ if (force_frame_pop && self->GetException() != nullptr) {
+ LOG(WARNING) << "Suppressing exception for instruction-retry: "
+ << self->GetException()->Dump();
}
+ DCHECK(self->GetException() != Thread::GetDeoptimizationException());
+ // Push the context of the deoptimization stack so we can restore the return value and the
+ // exception before executing the deoptimized frames.
+ self->PushDeoptimizationContext(result,
+ shorty[0] == 'L' || shorty[0] == '[', /* class or array */
+ force_frame_pop ? nullptr : self->GetException(),
+ /* from_code= */ false,
+ DeoptimizationMethodType::kDefault);
+
+ // Set special exception to cause deoptimization.
+ self->SetException(Thread::GetDeoptimizationException());
}
// No need to restore the args since the method has already been run by the interpreter.
@@ -862,7 +855,6 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
instr->MethodEnterEvent(soa.Self(), proxy_method);
if (soa.Self()->IsExceptionPending()) {
instr->MethodUnwindEvent(self,
- soa.Decode<mirror::Object>(rcvr_jobj),
proxy_method,
0);
return 0;
@@ -872,7 +864,6 @@ extern "C" uint64_t artQuickProxyInvokeHandler(
if (soa.Self()->IsExceptionPending()) {
if (instr->HasMethodUnwindListeners()) {
instr->MethodUnwindEvent(self,
- soa.Decode<mirror::Object>(rcvr_jobj),
proxy_method,
0);
}
@@ -1037,7 +1028,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method,
<< "Proxy method " << method->PrettyMethod()
<< " (declaring class: " << method->GetDeclaringClass()->PrettyClass() << ")"
<< " should not hit instrumentation entrypoint.";
- DCHECK(!instrumentation->IsDeoptimized(method));
+ DCHECK(!instrumentation->IsDeoptimized(method)) << method->PrettyMethod();
// This will get the entry point either from the oat file, the JIT or the appropriate bridge
// method if none of those can be found.
result = instrumentation->GetCodeForInvoke(method);
@@ -1065,6 +1056,7 @@ extern "C" const void* artInstrumentationMethodEntryFromCode(ArtMethod* method,
}
}
+ DCHECK(!method->IsRuntimeMethod());
instrumentation->PushInstrumentationStackFrame(self,
is_static ? nullptr : h_object.Get(),
method,
@@ -1379,6 +1371,11 @@ extern "C" const void* artQuickResolutionTrampoline(
success = linker->EnsureInitialized(soa.Self(), h_called_class, true, true);
}
if (success) {
+    // When the clinit check is at the entry of AOT/nterp code, we do the clinit check
+    // before doing the suspend check. To ensure the code sees the latest
+    // version of the class (the code doesn't do a read barrier, to reduce
+    // code size), do a suspend check now.
+ self->CheckSuspend();
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
// Check if we need instrumented code here. Since resolution stubs could suspend, it is
// possible that we instrumented the entry points after we started executing the resolution
@@ -1944,7 +1941,7 @@ class BuildGenericJniFrameVisitor final : public QuickArgumentVisitor {
// The declaring class must be marked.
auto* declaring_class = reinterpret_cast<mirror::CompressedReference<mirror::Class>*>(
method->GetDeclaringClassAddressWithoutBarrier());
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
artJniReadBarrier(method);
}
sm_.AdvancePointer(declaring_class);
@@ -2091,7 +2088,7 @@ extern "C" const void* artQuickGenericJniTrampoline(Thread* self,
}
// Fix up managed-stack things in Thread. After this we can walk the stack.
- self->SetTopOfStackTagged(managed_sp);
+ self->SetTopOfStackGenericJniTagged(managed_sp);
self->VerifyStack();
@@ -2117,6 +2114,14 @@ extern "C" const void* artQuickGenericJniTrampoline(Thread* self,
}
}
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instr->AreExitStubsInstalled() && Runtime::Current()->IsJavaDebuggable())) {
+ instr->MethodEnterEvent(self, called);
+ if (self->IsExceptionPending()) {
+ return nullptr;
+ }
+ }
+
// Skip calling `artJniMethodStart()` for @CriticalNative and @FastNative.
if (LIKELY(normal_native)) {
// Start JNI.
@@ -2185,7 +2190,7 @@ extern "C" uint64_t artQuickGenericJniEndTrampoline(Thread* self,
// anything that requires a mutator lock before that would cause problems as GC may have the
// exclusive mutator lock and may be moving objects, etc.
ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
- DCHECK(self->GetManagedStack()->GetTopQuickFrameTag());
+ DCHECK(self->GetManagedStack()->GetTopQuickFrameGenericJniTag());
uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
ArtMethod* called = *sp;
uint32_t cookie = *(sp32 - 1);
@@ -2274,12 +2279,18 @@ static TwoWordReturn artInvokeCommon(uint32_t method_idx,
uint32_t shorty_len;
const char* shorty = dex_file->GetMethodShorty(dex_file->GetMethodId(method_idx), &shorty_len);
{
- // Remember the args in case a GC happens in FindMethodFromCode.
+ // Remember the args in case a GC happens in FindMethodToCall.
ScopedObjectAccessUnchecked soa(self->GetJniEnv());
RememberForGcArgumentVisitor visitor(sp, type == kStatic, shorty, shorty_len, &soa);
visitor.VisitArguments();
- method = FindMethodFromCode<type, /*access_check=*/true>(
- method_idx, &this_object, caller_method, self);
+
+ uint32_t dex_pc = QuickArgumentVisitor::GetCallingDexPc(sp);
+ CodeItemInstructionAccessor accessor(caller_method->DexInstructions());
+ CHECK_LT(dex_pc, accessor.InsnsSizeInCodeUnits());
+ const Instruction& instr = accessor.InstructionAt(dex_pc);
+ bool string_init = false;
+ method = FindMethodToCall<type>(self, caller_method, &this_object, instr, &string_init);
+
visitor.FixupReferences();
}
@@ -2589,6 +2600,10 @@ extern "C" uint64_t artInvokePolymorphic(mirror::Object* raw_receiver, Thread* s
// Pop transition record.
self->PopManagedStackFragment(fragment);
+ bool is_ref = (shorty[0] == 'L');
+ Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
+ self, DeoptimizationMethodType::kDefault, is_ref, result);
+
return result.GetJ();
}
@@ -2647,9 +2662,20 @@ extern "C" uint64_t artInvokeCustom(uint32_t call_site_idx, Thread* self, ArtMet
// Pop transition record.
self->PopManagedStackFragment(fragment);
+ bool is_ref = (shorty[0] == 'L');
+ Runtime::Current()->GetInstrumentation()->PushDeoptContextIfNeeded(
+ self, DeoptimizationMethodType::kDefault, is_ref, result);
+
return result.GetJ();
}
+extern "C" void artJniMethodEntryHook(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ ArtMethod* method = *self->GetManagedStack()->GetTopQuickFrame();
+ instr->MethodEnterEvent(self, method);
+}
+
extern "C" void artMethodEntryHook(ArtMethod* method, Thread* self, ArtMethod** sp ATTRIBUTE_UNUSED)
REQUIRES_SHARED(Locks::mutator_lock_) {
instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
@@ -2662,11 +2688,19 @@ extern "C" void artMethodEntryHook(ArtMethod* method, Thread* self, ArtMethod**
}
}
-extern "C" int artMethodExitHook(Thread* self,
+extern "C" void artMethodExitHook(Thread* self,
ArtMethod* method,
uint64_t* gpr_result,
uint64_t* fpr_result)
REQUIRES_SHARED(Locks::mutator_lock_) {
+  // For GenericJniTrampolines we call artMethodExitHook even for non-debuggable runtimes where we
+  // still install instrumentation stubs. So just return early here so we don't process the method
+  // exit twice. In all other cases (JITed JNI stubs / JITed code) we only call this for debuggable
+  // runtimes.
+ if (!Runtime::Current()->IsJavaDebuggable()) {
+ return;
+ }
+
DCHECK_EQ(reinterpret_cast<uintptr_t>(self), reinterpret_cast<uintptr_t>(Thread::Current()));
CHECK(gpr_result != nullptr);
CHECK(fpr_result != nullptr);
@@ -2677,7 +2711,7 @@ extern "C" int artMethodExitHook(Thread* self,
instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
DCHECK(instr->AreExitStubsInstalled());
bool is_ref;
- JValue return_value = instr->GetReturnValue(self, method, &is_ref, gpr_result, fpr_result);
+ JValue return_value = instr->GetReturnValue(method, &is_ref, gpr_result, fpr_result);
bool deoptimize = false;
{
StackHandleScope<1> hs(self);
@@ -2692,11 +2726,12 @@ extern "C" int artMethodExitHook(Thread* self,
// back to an upcall.
NthCallerVisitor visitor(self, 1, /*include_runtime_and_upcalls=*/false);
visitor.WalkStack(true);
- deoptimize = instr->ShouldDeoptimizeMethod(self, visitor);
+ deoptimize = instr->ShouldDeoptimizeCaller(self, visitor);
// If we need a deoptimization MethodExitEvent will be called by the interpreter when it
- // re-executes the return instruction.
- if (!deoptimize) {
+ // re-executes the return instruction. For native methods we have to process method exit
+ // events here since deoptimization just removes the native frame.
+ if (!deoptimize || method->IsNative()) {
instr->MethodExitEvent(self,
method,
/* frame= */ {},
@@ -2711,17 +2746,22 @@ extern "C" int artMethodExitHook(Thread* self,
}
if (self->IsExceptionPending() || self->ObserveAsyncException()) {
- return 1;
+ // The exception was thrown from the method exit callback. We should not call method unwind
+ // callbacks for this case.
+ self->QuickDeliverException(/* is_method_exit_exception= */ true);
+ UNREACHABLE();
}
if (deoptimize) {
DeoptimizationMethodType deopt_method_type = instr->GetDeoptimizationMethodType(method);
- self->PushDeoptimizationContext(return_value, is_ref, nullptr, false, deopt_method_type);
+ self->PushDeoptimizationContext(return_value,
+ is_ref,
+ self->GetException(),
+ false,
+ deopt_method_type);
artDeoptimize(self);
UNREACHABLE();
}
-
- return 0;
}
} // namespace art
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 240ecbd216..2cd58dbf1b 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -225,7 +225,9 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
pJniUnlockObject, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniUnlockObject,
pQuickGenericJniTrampoline, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickGenericJniTrampoline, pLockObject, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pQuickGenericJniTrampoline,
+ pJniMethodEntryHook, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pJniMethodEntryHook, pLockObject, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pLockObject, pUnlockObject, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pUnlockObject, pCmpgDouble, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCmpgDouble, pCmpgFloat, sizeof(void*));
diff --git a/runtime/exec_utils.cc b/runtime/exec_utils.cc
index 463d4580cf..722f70ac06 100644
--- a/runtime/exec_utils.cc
+++ b/runtime/exec_utils.cc
@@ -16,22 +16,38 @@
#include "exec_utils.h"
+#include <poll.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <unistd.h>
+
+#ifdef __BIONIC__
+#include <sys/pidfd.h>
+#endif
+
+#include <chrono>
+#include <climits>
+#include <condition_variable>
+#include <cstdint>
+#include <mutex>
#include <string>
+#include <thread>
#include <vector>
+#include "android-base/scopeguard.h"
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
-
+#include "android-base/unique_fd.h"
+#include "base/macros.h"
#include "runtime.h"
namespace art {
-using android::base::StringPrintf;
-
namespace {
+using ::android::base::StringPrintf;
+using ::android::base::unique_fd;
+
std::string ToCommandLine(const std::vector<std::string>& args) {
return android::base::Join(args, ' ');
}
@@ -40,7 +56,7 @@ std::string ToCommandLine(const std::vector<std::string>& args) {
// If there is a runtime (Runtime::Current != nullptr) then the subprocess is created with the
// same environment that existed when the runtime was started.
// Returns the process id of the child process on success, -1 otherwise.
-pid_t ExecWithoutWait(std::vector<std::string>& arg_vector) {
+pid_t ExecWithoutWait(const std::vector<std::string>& arg_vector, std::string* error_msg) {
// Convert the args to char pointers.
const char* program = arg_vector[0].c_str();
std::vector<char*> args;
@@ -65,113 +81,195 @@ pid_t ExecWithoutWait(std::vector<std::string>& arg_vector) {
} else {
execve(program, &args[0], envp);
}
- PLOG(ERROR) << "Failed to execve(" << ToCommandLine(arg_vector) << ")";
- // _exit to avoid atexit handlers in child.
- _exit(1);
+ // This should be regarded as a crash rather than a normal return.
+ PLOG(FATAL) << "Failed to execute (" << ToCommandLine(arg_vector) << ")";
+ UNREACHABLE();
+ } else if (pid == -1) {
+ *error_msg = StringPrintf("Failed to execute (%s) because fork failed: %s",
+ ToCommandLine(arg_vector).c_str(),
+ strerror(errno));
+ return -1;
} else {
return pid;
}
}
-} // namespace
-
-int ExecAndReturnCode(std::vector<std::string>& arg_vector, std::string* error_msg) {
- pid_t pid = ExecWithoutWait(arg_vector);
- if (pid == -1) {
- *error_msg = StringPrintf("Failed to execv(%s) because fork failed: %s",
- ToCommandLine(arg_vector).c_str(), strerror(errno));
+int WaitChild(pid_t pid,
+ const std::vector<std::string>& arg_vector,
+ bool no_wait,
+ std::string* error_msg) {
+ siginfo_t info;
+ // WNOWAIT leaves the child in a waitable state. The call is still blocking.
+ int options = WEXITED | (no_wait ? WNOWAIT : 0);
+ if (TEMP_FAILURE_RETRY(waitid(P_PID, pid, &info, options)) != 0) {
+ *error_msg = StringPrintf("Failed to execute (%s) because waitid failed for pid %d: %s",
+ ToCommandLine(arg_vector).c_str(),
+ pid,
+ strerror(errno));
return -1;
}
-
- // wait for subprocess to finish
- int status = -1;
- pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
- if (got_pid != pid) {
- *error_msg = StringPrintf("Failed after fork for execv(%s) because waitpid failed: "
- "wanted %d, got %d: %s",
- ToCommandLine(arg_vector).c_str(), pid, got_pid, strerror(errno));
+ if (info.si_pid != pid) {
+ *error_msg = StringPrintf("Failed to execute (%s) because waitid failed: wanted %d, got %d: %s",
+ ToCommandLine(arg_vector).c_str(),
+ pid,
+ info.si_pid,
+ strerror(errno));
return -1;
}
- if (WIFEXITED(status)) {
- return WEXITSTATUS(status);
+ if (info.si_code != CLD_EXITED) {
+ *error_msg =
+ StringPrintf("Failed to execute (%s) because the child process is terminated by signal %d",
+ ToCommandLine(arg_vector).c_str(),
+ info.si_status);
+ return -1;
}
- return -1;
+ return info.si_status;
}
-int ExecAndReturnCode(std::vector<std::string>& arg_vector,
- time_t timeout_secs,
- bool* timed_out,
- std::string* error_msg) {
- *timed_out = false;
+int WaitChild(pid_t pid, const std::vector<std::string>& arg_vector, std::string* error_msg) {
+ return WaitChild(pid, arg_vector, /*no_wait=*/false, error_msg);
+}
- // Start subprocess.
- pid_t pid = ExecWithoutWait(arg_vector);
- if (pid == -1) {
- *error_msg = StringPrintf("Failed to execv(%s) because fork failed: %s",
- ToCommandLine(arg_vector).c_str(), strerror(errno));
- return -1;
+// A fallback implementation of `WaitChildWithTimeout` that uses a watchdog thread with a timed
+// condition-variable wait instead of relying on `pidfd_open`.
+int WaitChildWithTimeoutFallback(pid_t pid,
+ const std::vector<std::string>& arg_vector,
+ int timeout_ms,
+ bool* timed_out,
+ std::string* error_msg) {
+ bool child_exited = false;
+ std::condition_variable cv;
+ std::mutex m;
+
+ std::thread wait_thread([&]() {
+ std::unique_lock<std::mutex> lock(m);
+ if (!cv.wait_for(lock, std::chrono::milliseconds(timeout_ms), [&] { return child_exited; })) {
+ *timed_out = true;
+ *error_msg =
+ StringPrintf("Child process %d timed out after %dms. Killing it", pid, timeout_ms);
+ kill(pid, SIGKILL);
+ }
+ });
+
+ // Leave the child in a waitable state just in case `wait_thread` sends a `SIGKILL` after the
+ // child exits.
+ std::string ignored_error_msg;
+ WaitChild(pid, arg_vector, /*no_wait=*/true, &ignored_error_msg);
+
+ {
+ std::unique_lock<std::mutex> lock(m);
+ child_exited = true;
}
+ cv.notify_all();
+ wait_thread.join();
- // Add SIGCHLD to the signal set.
- sigset_t child_mask, original_mask;
- sigemptyset(&child_mask);
- sigaddset(&child_mask, SIGCHLD);
- if (sigprocmask(SIG_BLOCK, &child_mask, &original_mask) == -1) {
- *error_msg = StringPrintf("Failed to set sigprocmask(): %s", strerror(errno));
+ if (*timed_out) {
+ WaitChild(pid, arg_vector, &ignored_error_msg);
return -1;
}
+ return WaitChild(pid, arg_vector, error_msg);
+}
- // Wait for a SIGCHLD notification.
- errno = 0;
- timespec ts = {timeout_secs, 0};
- int wait_result = TEMP_FAILURE_RETRY(sigtimedwait(&child_mask, nullptr, &ts));
- int wait_errno = errno;
-
- // Restore the original signal set.
- if (sigprocmask(SIG_SETMASK, &original_mask, nullptr) == -1) {
- *error_msg = StringPrintf("Fail to restore sigprocmask(): %s", strerror(errno));
- if (wait_result == 0) {
- return -1;
- }
+int WaitChildWithTimeout(pid_t pid,
+ unique_fd pidfd,
+ const std::vector<std::string>& arg_vector,
+ int timeout_ms,
+ bool* timed_out,
+ std::string* error_msg) {
+ auto cleanup = android::base::make_scope_guard([&]() {
+ kill(pid, SIGKILL);
+ std::string ignored_error_msg;
+ WaitChild(pid, arg_vector, &ignored_error_msg);
+ });
+
+ struct pollfd pfd;
+ pfd.fd = pidfd.get();
+ pfd.events = POLLIN;
+ int poll_ret = TEMP_FAILURE_RETRY(poll(&pfd, /*nfds=*/1, timeout_ms));
+
+ pidfd.reset();
+
+ if (poll_ret < 0) {
+ *error_msg = StringPrintf("poll failed for pid %d: %s", pid, strerror(errno));
+ return -1;
+ }
+ if (poll_ret == 0) {
+ *timed_out = true;
+ *error_msg = StringPrintf("Child process %d timed out after %dms. Killing it", pid, timeout_ms);
+ return -1;
}
- // Having restored the signal set, see if we need to terminate the subprocess.
- if (wait_result == -1) {
- if (wait_errno == EAGAIN) {
- *error_msg = "Timed out.";
- *timed_out = true;
- } else {
- *error_msg = StringPrintf("Failed to sigtimedwait(): %s", strerror(errno));
- }
- if (kill(pid, SIGKILL) != 0) {
- PLOG(ERROR) << "Failed to kill() subprocess: ";
- }
+ cleanup.Disable();
+ return WaitChild(pid, arg_vector, error_msg);
+}
+
+} // namespace
+
+int ExecUtils::ExecAndReturnCode(const std::vector<std::string>& arg_vector,
+ std::string* error_msg) const {
+ bool ignored_timed_out;
+ return ExecAndReturnCode(arg_vector, /*timeout_sec=*/-1, &ignored_timed_out, error_msg);
+}
+
+int ExecUtils::ExecAndReturnCode(const std::vector<std::string>& arg_vector,
+ int timeout_sec,
+ bool* timed_out,
+ std::string* error_msg) const {
+ *timed_out = false;
+
+ if (timeout_sec > INT_MAX / 1000) {
+ *error_msg = "Timeout too large";
+ return -1;
}
- // Wait for subprocess to finish.
- int status = -1;
- pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
- if (got_pid != pid) {
- *error_msg = StringPrintf("Failed after fork for execv(%s) because waitpid failed: "
- "wanted %d, got %d: %s",
- ToCommandLine(arg_vector).c_str(), pid, got_pid, strerror(errno));
+ // Start subprocess.
+ pid_t pid = ExecWithoutWait(arg_vector, error_msg);
+ if (pid == -1) {
return -1;
}
- if (WIFEXITED(status)) {
- return WEXITSTATUS(status);
+
+ // Wait for subprocess to finish.
+ if (timeout_sec >= 0) {
+ unique_fd pidfd = PidfdOpen(pid);
+ if (pidfd.get() >= 0) {
+ return WaitChildWithTimeout(
+ pid, std::move(pidfd), arg_vector, timeout_sec * 1000, timed_out, error_msg);
+ } else {
+ LOG(DEBUG) << StringPrintf(
+ "pidfd_open failed for pid %d: %s, falling back", pid, strerror(errno));
+ return WaitChildWithTimeoutFallback(
+ pid, arg_vector, timeout_sec * 1000, timed_out, error_msg);
+ }
+ } else {
+ return WaitChild(pid, arg_vector, error_msg);
}
- return -1;
}
-
-bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
+bool ExecUtils::Exec(const std::vector<std::string>& arg_vector, std::string* error_msg) const {
int status = ExecAndReturnCode(arg_vector, error_msg);
- if (status != 0) {
- *error_msg = StringPrintf("Failed execv(%s) because non-0 exit status",
- ToCommandLine(arg_vector).c_str());
+ if (status < 0) {
+ // Internal error. The error message is already set.
+ return false;
+ }
+ if (status > 0) {
+ *error_msg =
+ StringPrintf("Failed to execute (%s) because the child process returns non-zero exit code",
+ ToCommandLine(arg_vector).c_str());
return false;
}
return true;
}
+unique_fd ExecUtils::PidfdOpen(pid_t pid) const {
+#ifdef __BIONIC__
+ return unique_fd(pidfd_open(pid, /*flags=*/0));
+#else
+ // There is no glibc wrapper for pidfd_open.
+#ifndef SYS_pidfd_open
+ constexpr int SYS_pidfd_open = 434;
+#endif
+ return unique_fd(syscall(SYS_pidfd_open, pid, /*flags=*/0));
+#endif
+}
+
} // namespace art
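The timeout path above layers poll(2) on top of a pidfd: the descriptor returned by pidfd_open(2) becomes readable when the process exits, so a plain poll with a timeout doubles as a bounded wait. A free-standing sketch of that pattern, assuming a Linux 5.3+ kernel and a libc that defines SYS_pidfd_open:

    #include <poll.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    // Wait for `pid` up to `timeout_ms`. Returns the exit status, or -1 on
    // timeout or error. Error handling is trimmed for brevity.
    int WaitWithPidfd(pid_t pid, int timeout_ms) {
      int pidfd = static_cast<int>(syscall(SYS_pidfd_open, pid, 0));
      if (pidfd < 0) return -1;
      struct pollfd pfd = {pidfd, POLLIN, 0};
      int ret = poll(&pfd, 1, timeout_ms);  // POLLIN fires once the child exits.
      close(pidfd);
      if (ret <= 0) return -1;  // 0 means timed out; <0 means poll failed.
      siginfo_t info;
      if (waitid(P_PID, pid, &info, WEXITED) != 0) return -1;
      return info.si_code == CLD_EXITED ? info.si_status : -1;
    }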
diff --git a/runtime/exec_utils.h b/runtime/exec_utils.h
index 7ce0a9c20a..79a12d770a 100644
--- a/runtime/exec_utils.h
+++ b/runtime/exec_utils.h
@@ -22,45 +22,46 @@
#include <string>
#include <vector>
+#include "android-base/unique_fd.h"
+
namespace art {
// Wrapper on fork/execv to run a command in a subprocess.
// These spawn child processes using the environment as it was set when the single instance
// of the runtime (Runtime::Current()) was started. If no instance of the runtime was started, it
// will use the current environment settings.
-
-bool Exec(std::vector<std::string>& arg_vector, /*out*/ std::string* error_msg);
-int ExecAndReturnCode(std::vector<std::string>& arg_vector, /*out*/ std::string* error_msg);
-
-// Execute the command specified in `argv_vector` in a subprocess with a timeout.
-// Returns the process exit code on success, -1 otherwise.
-int ExecAndReturnCode(std::vector<std::string>& arg_vector,
- time_t timeout_secs,
- /*out*/ bool* timed_out,
- /*out*/ std::string* error_msg);
-
-// A wrapper class to make the functions above mockable.
class ExecUtils {
public:
virtual ~ExecUtils() = default;
- virtual bool Exec(std::vector<std::string>& arg_vector, /*out*/ std::string* error_msg) const {
- return art::Exec(arg_vector, error_msg);
- }
+ virtual bool Exec(const std::vector<std::string>& arg_vector,
+ /*out*/ std::string* error_msg) const;
- virtual int ExecAndReturnCode(std::vector<std::string>& arg_vector,
- /*out*/ std::string* error_msg) const {
- return art::ExecAndReturnCode(arg_vector, error_msg);
- }
+ virtual int ExecAndReturnCode(const std::vector<std::string>& arg_vector,
+ /*out*/ std::string* error_msg) const;
- virtual int ExecAndReturnCode(std::vector<std::string>& arg_vector,
- time_t timeout_secs,
+ // Executes the command specified in `arg_vector` in a subprocess with a timeout.
+ // If `timeout_sec` is negative, blocks until the subprocess exits.
+ // Returns the process exit code on success, -1 otherwise.
+ // Sets `timed_out` to true if the process times out, or false otherwise.
+ virtual int ExecAndReturnCode(const std::vector<std::string>& arg_vector,
+ int timeout_sec,
/*out*/ bool* timed_out,
- /*out*/ std::string* error_msg) const {
- return art::ExecAndReturnCode(arg_vector, timeout_secs, timed_out, error_msg);
- }
+ /*out*/ std::string* error_msg) const;
+
+ protected:
+ virtual android::base::unique_fd PidfdOpen(pid_t pid) const;
};
+inline bool Exec(const std::vector<std::string>& arg_vector, /*out*/ std::string* error_msg) {
+ return ExecUtils().Exec(arg_vector, error_msg);
+}
+
+inline int ExecAndReturnCode(const std::vector<std::string>& arg_vector,
+ /*out*/ std::string* error_msg) {
+ return ExecUtils().ExecAndReturnCode(arg_vector, error_msg);
+}
+
} // namespace art
#endif // ART_RUNTIME_EXEC_UTILS_H_
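A usage sketch of the reworked interface; the binary path and timeout below are illustrative:

    #include <string>
    #include <vector>
    #include "exec_utils.h"

    int RunWithBudget() {
      art::ExecUtils exec_utils;
      std::vector<std::string> args{"/bin/sleep", "2"};
      bool timed_out = false;
      std::string error_msg;
      // Give the child a 5-second budget; a negative timeout would block forever.
      int exit_code = exec_utils.ExecAndReturnCode(args, /*timeout_sec=*/5,
                                                   &timed_out, &error_msg);
      if (exit_code < 0) {
        // Internal error or, when timed_out is set, the child was killed after
        // exceeding the budget; error_msg describes the failure either way.
        return -1;
      }
      return exit_code;  // The child's own exit status.
    }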
diff --git a/runtime/exec_utils_test.cc b/runtime/exec_utils_test.cc
index dc789aa292..c10b84e223 100644
--- a/runtime/exec_utils_test.cc
+++ b/runtime/exec_utils_test.cc
@@ -16,68 +16,114 @@
#include "exec_utils.h"
+#include <sys/utsname.h>
+
+#include <cstring>
+#include <filesystem>
+#include <memory>
+#include <tuple>
+
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "common_runtime_test.h"
+#include "gtest/gtest.h"
namespace art {
std::string PrettyArguments(const char* signature);
std::string PrettyReturnType(const char* signature);
-class ExecUtilsTest : public CommonRuntimeTest {};
-
-TEST_F(ExecUtilsTest, ExecSuccess) {
- std::vector<std::string> command;
+std::string GetBin(const std::string& name) {
if (kIsTargetBuild) {
std::string android_root(GetAndroidRoot());
- command.push_back(android_root + "/bin/id");
+ return android_root + "/bin/" + name;
+ } else if (std::filesystem::exists("/usr/bin/" + name)) {
+ return "/usr/bin/" + name;
} else {
- command.push_back("/usr/bin/id");
+ return "/bin/" + name;
+ }
+}
+
+std::tuple<int, int> GetKernelVersion() {
+ std::tuple<int, int> version;
+ utsname uts;
+ CHECK_EQ(uname(&uts), 0);
+ CHECK_EQ(sscanf(uts.release, "%d.%d", &std::get<0>(version), &std::get<1>(version)), 2);
+ return version;
+}
+
+class AlwaysFallbackExecUtils : public ExecUtils {
+ protected:
+ android::base::unique_fd PidfdOpen(pid_t) const override { return android::base::unique_fd(-1); }
+};
+
+class NeverFallbackExecUtils : public ExecUtils {
+ protected:
+ android::base::unique_fd PidfdOpen(pid_t pid) const override {
+ android::base::unique_fd pidfd = ExecUtils::PidfdOpen(pid);
+ CHECK_GE(pidfd.get(), 0) << strerror(errno);
+ return pidfd;
}
+};
+
+class ExecUtilsTest : public CommonRuntimeTest, public testing::WithParamInterface<bool> {
+ protected:
+ void SetUp() override {
+ CommonRuntimeTest::SetUp();
+ bool always_fallback = GetParam();
+ if (always_fallback) {
+ exec_utils_ = std::make_unique<AlwaysFallbackExecUtils>();
+ } else {
+ if (GetKernelVersion() >= std::make_tuple(5, 4)) {
+ exec_utils_ = std::make_unique<NeverFallbackExecUtils>();
+ } else {
+ GTEST_SKIP() << "Kernel version older than 5.4";
+ }
+ }
+ }
+
+ std::unique_ptr<ExecUtils> exec_utils_;
+};
+
+TEST_P(ExecUtilsTest, ExecSuccess) {
+ std::vector<std::string> command;
+ command.push_back(GetBin("id"));
std::string error_msg;
// Historical note: Running on Valgrind failed due to some memory
// that leaks in thread alternate signal stacks.
- EXPECT_TRUE(Exec(command, &error_msg));
+ EXPECT_TRUE(exec_utils_->Exec(command, &error_msg));
EXPECT_EQ(0U, error_msg.size()) << error_msg;
}
-TEST_F(ExecUtilsTest, ExecError) {
- // This will lead to error messages in the log.
- ScopedLogSeverity sls(LogSeverity::FATAL);
-
+TEST_P(ExecUtilsTest, ExecError) {
std::vector<std::string> command;
command.push_back("bogus");
std::string error_msg;
// Historical note: Running on Valgrind failed due to some memory
// that leaks in thread alternate signal stacks.
- EXPECT_FALSE(Exec(command, &error_msg));
+ EXPECT_FALSE(exec_utils_->Exec(command, &error_msg));
EXPECT_FALSE(error_msg.empty());
}
-TEST_F(ExecUtilsTest, EnvSnapshotAdditionsAreNotVisible) {
+TEST_P(ExecUtilsTest, EnvSnapshotAdditionsAreNotVisible) {
static constexpr const char* kModifiedVariable = "EXEC_SHOULD_NOT_EXPORT_THIS";
static constexpr int kOverwrite = 1;
// Set a variable in the current environment.
EXPECT_EQ(setenv(kModifiedVariable, "NEVER", kOverwrite), 0);
// Test that it is not exported.
std::vector<std::string> command;
- if (kIsTargetBuild) {
- std::string android_root(GetAndroidRoot());
- command.push_back(android_root + "/bin/printenv");
- } else {
- command.push_back("/usr/bin/printenv");
- }
+ command.push_back(GetBin("printenv"));
command.push_back(kModifiedVariable);
std::string error_msg;
// Historical note: Running on Valgrind failed due to some memory
// that leaks in thread alternate signal stacks.
- EXPECT_FALSE(Exec(command, &error_msg));
+ EXPECT_FALSE(exec_utils_->Exec(command, &error_msg));
EXPECT_NE(0U, error_msg.size()) << error_msg;
}
-TEST_F(ExecUtilsTest, EnvSnapshotDeletionsAreNotVisible) {
+TEST_P(ExecUtilsTest, EnvSnapshotDeletionsAreNotVisible) {
static constexpr const char* kDeletedVariable = "PATH";
static constexpr int kOverwrite = 1;
// Save the variable's value.
@@ -87,17 +133,12 @@ TEST_F(ExecUtilsTest, EnvSnapshotDeletionsAreNotVisible) {
EXPECT_EQ(unsetenv(kDeletedVariable), 0);
// Test that it is not exported.
std::vector<std::string> command;
- if (kIsTargetBuild) {
- std::string android_root(GetAndroidRoot());
- command.push_back(android_root + "/bin/printenv");
- } else {
- command.push_back("/usr/bin/printenv");
- }
+ command.push_back(GetBin("printenv"));
command.push_back(kDeletedVariable);
std::string error_msg;
// Historical note: Running on Valgrind failed due to some memory
// that leaks in thread alternate signal stacks.
- EXPECT_TRUE(Exec(command, &error_msg));
+ EXPECT_TRUE(exec_utils_->Exec(command, &error_msg));
EXPECT_EQ(0U, error_msg.size()) << error_msg;
// Restore the variable's value.
EXPECT_EQ(setenv(kDeletedVariable, save_value, kOverwrite), 0);
@@ -105,33 +146,32 @@ TEST_F(ExecUtilsTest, EnvSnapshotDeletionsAreNotVisible) {
static std::vector<std::string> SleepCommand(int sleep_seconds) {
std::vector<std::string> command;
- if (kIsTargetBuild) {
- command.push_back(GetAndroidRoot() + "/bin/sleep");
- } else {
- command.push_back("/bin/sleep");
- }
+ command.push_back(GetBin("sleep"));
command.push_back(android::base::StringPrintf("%d", sleep_seconds));
return command;
}
-TEST_F(ExecUtilsTest, ExecTimeout) {
+TEST_P(ExecUtilsTest, ExecTimeout) {
static constexpr int kSleepSeconds = 5;
static constexpr int kWaitSeconds = 1;
std::vector<std::string> command = SleepCommand(kSleepSeconds);
std::string error_msg;
bool timed_out;
- ASSERT_EQ(ExecAndReturnCode(command, kWaitSeconds, &timed_out, &error_msg), -1);
- EXPECT_TRUE(timed_out);
+ ASSERT_EQ(exec_utils_->ExecAndReturnCode(command, kWaitSeconds, &timed_out, &error_msg), -1);
+ EXPECT_TRUE(timed_out) << error_msg;
}
-TEST_F(ExecUtilsTest, ExecNoTimeout) {
+TEST_P(ExecUtilsTest, ExecNoTimeout) {
static constexpr int kSleepSeconds = 1;
static constexpr int kWaitSeconds = 5;
std::vector<std::string> command = SleepCommand(kSleepSeconds);
std::string error_msg;
bool timed_out;
- ASSERT_EQ(ExecAndReturnCode(command, kWaitSeconds, &timed_out, &error_msg), 0);
+ ASSERT_EQ(exec_utils_->ExecAndReturnCode(command, kWaitSeconds, &timed_out, &error_msg), 0)
+ << error_msg;
EXPECT_FALSE(timed_out);
}
+INSTANTIATE_TEST_SUITE_P(AlwaysOrNeverFallback, ExecUtilsTest, testing::Values(true, false));
+
} // namespace art
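One detail worth calling out: GetKernelVersion() above leans on std::tuple's lexicographic comparison, so a single operator>= correctly orders (major, minor) pairs. A quick standalone illustration:

    #include <cassert>
    #include <tuple>

    int main() {
      assert(std::make_tuple(5, 10) >= std::make_tuple(5, 4));     // Same major, newer minor.
      assert(std::make_tuple(6, 0) >= std::make_tuple(5, 4));      // Newer major wins.
      assert(!(std::make_tuple(4, 19) >= std::make_tuple(5, 4)));  // Older major loses.
      return 0;
    }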
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 5e6bd88d73..a90a31963b 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -130,6 +130,35 @@ class AtomicStack {
}
}
+ // Bump the back index by the given number of slots. Returns false if this
+ // operation would overflow the stack. New elements should be written
+ // to [*start_address, *end_address).
+ bool BumpBack(size_t num_slots,
+ StackReference<T>** start_address,
+ StackReference<T>** end_address)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kIsDebugBuild) {
+ debug_is_sorted_ = false;
+ }
+ const int32_t index = back_index_.load(std::memory_order_relaxed);
+ const int32_t new_index = index + num_slots;
+ if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) {
+ // Stack overflow.
+ return false;
+ }
+ back_index_.store(new_index, std::memory_order_relaxed);
+ *start_address = begin_ + index;
+ *end_address = begin_ + new_index;
+ if (kIsDebugBuild) {
+ // Check the memory is zero.
+ for (int32_t i = index; i < new_index; i++) {
+ DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr))
+ << "i=" << i << " index=" << index << " new_index=" << new_index;
+ }
+ }
+ return true;
+ }
+
void PushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
debug_is_sorted_ = false;
@@ -144,8 +173,16 @@ class AtomicStack {
DCHECK_GT(back_index_.load(std::memory_order_relaxed),
front_index_.load(std::memory_order_relaxed));
// Decrement the back index non atomically.
- back_index_.store(back_index_.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed);
- return begin_[back_index_.load(std::memory_order_relaxed)].AsMirrorPtr();
+ const int32_t index = back_index_.load(std::memory_order_relaxed) - 1;
+ back_index_.store(index, std::memory_order_relaxed);
+ T* ret = begin_[index].AsMirrorPtr();
+ // In debug builds we expect unused stack slots to be null. Clear the
+ // popped slot so that this holds even when the stack is reused without
+ // being reset in between.
+ if (kIsDebugBuild) {
+ begin_[index].Clear();
+ }
+ return ret;
}
// Take an item from the front of the stack.
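A sketch of how a caller might use the new BumpBack() to reserve a batch of slots and fill them in place, paying one index bump instead of one per PushBack(); the helper below and its inputs are illustrative, not ART code:

    // Assumes `stack` is an AtomicStack<mirror::Object>* and `refs` holds the
    // objects to push; returns false when the batch would overflow the stack.
    bool PushAll(AtomicStack<mirror::Object>* stack,
                 const std::vector<mirror::Object*>& refs) {
      StackReference<mirror::Object>* start;
      StackReference<mirror::Object>* end;
      if (!stack->BumpBack(refs.size(), &start, &end)) {
        return false;  // Caller must resize the stack or push one by one.
      }
      size_t i = 0;
      for (StackReference<mirror::Object>* it = start; it != end; ++it) {
        it->Assign(refs[i++]);  // Write directly into the reserved window.
      }
      return true;
    }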
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index 37646b3728..bd10958496 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -21,6 +21,7 @@
#include "base/bit_utils.h"
#include "base/mem_map.h"
#include "card_table.h"
+#include "gc/collector/mark_compact.h"
#include "jit/jit_memory_region.h"
namespace art {
@@ -98,6 +99,7 @@ MemoryRangeBitmap<kAlignment>* MemoryRangeBitmap<kAlignment>::CreateFromMemMap(
template class MemoryRangeBitmap<CardTable::kCardSize>;
template class MemoryRangeBitmap<jit::kJitCodeAccountingBytes>;
+template class MemoryRangeBitmap<collector::MarkCompact::kAlignment>;
} // namespace accounting
} // namespace gc
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index 68f2d049d0..b050442a3e 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -81,7 +81,7 @@ class Bitmap {
void CopyFrom(Bitmap* source_bitmap);
// Starting address of our internal storage.
- uintptr_t* Begin() {
+ uintptr_t* Begin() const {
return bitmap_begin_;
}
@@ -98,7 +98,7 @@ class Bitmap {
std::string Dump() const;
protected:
- static constexpr size_t kBitsPerBitmapWord = sizeof(uintptr_t) * kBitsPerByte;
+ static constexpr size_t kBitsPerBitmapWord = kBitsPerIntPtrT;
Bitmap(MemMap&& mem_map, size_t bitmap_size);
~Bitmap();
@@ -109,7 +109,9 @@ class Bitmap {
template<bool kSetBit>
ALWAYS_INLINE bool ModifyBit(uintptr_t bit_index);
- // Backing storage for bitmap.
+ // Backing storage for the bitmap, interpreted as an array of
+ // kBitsPerBitmapWord-sized integers, with bits within each word assigned
+ // starting from the least significant bit.
MemMap mem_map_;
// This bitmap itself, word sized for efficiency in scanning.
@@ -122,7 +124,7 @@ class Bitmap {
DISALLOW_IMPLICIT_CONSTRUCTORS(Bitmap);
};
-// One bit per kAlignment in range (start, end]
+// One bit per kAlignment in range [start, end)
template<size_t kAlignment>
class MemoryRangeBitmap : public Bitmap {
public:
@@ -138,7 +140,7 @@ class MemoryRangeBitmap : public Bitmap {
// End of the memory range that the bitmap covers.
ALWAYS_INLINE uintptr_t CoverEnd() const {
- return cover_end_;
+ return cover_begin_ + kAlignment * BitmapSize();
}
// Return the address associated with a bit index.
@@ -148,41 +150,53 @@ class MemoryRangeBitmap : public Bitmap {
return addr;
}
+ ALWAYS_INLINE uintptr_t BitIndexFromAddrUnchecked(uintptr_t addr) const {
+ return (addr - CoverBegin()) / kAlignment;
+ }
+
// Return the bit index associated with an address.
ALWAYS_INLINE uintptr_t BitIndexFromAddr(uintptr_t addr) const {
- DCHECK(HasAddress(addr)) << CoverBegin() << " <= " << addr << " < " << CoverEnd();
- return (addr - CoverBegin()) / kAlignment;
+ uintptr_t result = BitIndexFromAddrUnchecked(addr);
+ DCHECK(result < BitmapSize()) << CoverBegin() << " <= " << addr << " < " << CoverEnd();
+ return result;
}
ALWAYS_INLINE bool HasAddress(const uintptr_t addr) const {
- return cover_begin_ <= addr && addr < cover_end_;
+ // Don't use BitIndexFromAddr() here as the addr passed to this function
+ // could be outside the range. If addr < cover_begin_, the unsigned
+ // subtraction wraps to a very large value past the end of the bitmap,
+ // so the single comparison below rejects it.
+ bool ret = (addr - CoverBegin()) / kAlignment < BitmapSize();
+ if (ret) {
+ DCHECK(CoverBegin() <= addr && addr < CoverEnd())
+ << CoverBegin() << " <= " << addr << " < " << CoverEnd();
+ }
+ return ret;
}
ALWAYS_INLINE bool Set(uintptr_t addr) {
return SetBit(BitIndexFromAddr(addr));
}
- ALWAYS_INLINE bool Clear(size_t addr) {
+ ALWAYS_INLINE bool Clear(uintptr_t addr) {
return ClearBit(BitIndexFromAddr(addr));
}
- ALWAYS_INLINE bool Test(size_t addr) const {
+ ALWAYS_INLINE bool Test(uintptr_t addr) const {
return TestBit(BitIndexFromAddr(addr));
}
// Returns true if the object was previously set.
- ALWAYS_INLINE bool AtomicTestAndSet(size_t addr) {
+ ALWAYS_INLINE bool AtomicTestAndSet(uintptr_t addr) {
return AtomicTestAndSetBit(BitIndexFromAddr(addr));
}
private:
MemoryRangeBitmap(MemMap&& mem_map, uintptr_t begin, size_t num_bits)
: Bitmap(std::move(mem_map), num_bits),
- cover_begin_(begin),
- cover_end_(begin + kAlignment * num_bits) {}
+ cover_begin_(begin) {}
uintptr_t const cover_begin_;
- uintptr_t const cover_end_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryRangeBitmap);
};
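The rewritten HasAddress() folds a two-sided range check into one comparison: the subtraction is unsigned, so an address below cover_begin_ wraps around to a huge value and fails the single < BitmapSize() test. A standalone illustration with made-up bitmap parameters:

    #include <cassert>
    #include <cstdint>

    // Illustrative bitmap covering [0x1000, 0x2000) at 8-byte granularity.
    constexpr uintptr_t kAlignment = 8;
    constexpr uintptr_t kCoverBegin = 0x1000;
    constexpr uintptr_t kNumBits = 512;

    // One-comparison range check, as in MemoryRangeBitmap::HasAddress().
    bool HasAddress(uintptr_t addr) {
      return (addr - kCoverBegin) / kAlignment < kNumBits;
    }

    int main() {
      assert(HasAddress(0x1000));   // First covered address.
      assert(HasAddress(0x1ff8));   // Last covered slot.
      assert(!HasAddress(0x2000));  // One past the end.
      assert(!HasAddress(0x0ff8));  // Below begin: the subtraction wraps around.
      return 0;
    }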
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index b4026fc3f3..4a84799431 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -388,6 +388,11 @@ void ModUnionTableReferenceCache::Dump(std::ostream& os) {
void ModUnionTableReferenceCache::VisitObjects(ObjectCallback callback, void* arg) {
CardTable* const card_table = heap_->GetCardTable();
ContinuousSpaceBitmap* live_bitmap = space_->GetLiveBitmap();
+ // Use an unordered_set for constant-time card lookup in the second loop.
+ // We keep cleared_cards_ itself ordered so that traversals stay sequential
+ // in address order.
+ // TODO: Optimize this.
+ std::unordered_set<const uint8_t*> card_lookup_map;
for (uint8_t* card : cleared_cards_) {
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
uintptr_t end = start + CardTable::kCardSize;
@@ -396,10 +401,13 @@ void ModUnionTableReferenceCache::VisitObjects(ObjectCallback callback, void* ar
[callback, arg](mirror::Object* obj) {
callback(obj, arg);
});
+ card_lookup_map.insert(card);
}
- // This may visit the same card twice, TODO avoid this.
for (const auto& pair : references_) {
const uint8_t* card = pair.first;
+ if (card_lookup_map.find(card) != card_lookup_map.end()) {
+ continue;
+ }
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
uintptr_t end = start + CardTable::kCardSize;
live_bitmap->VisitMarkedRange(start,
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index d460e00075..e7825e6953 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -64,7 +64,44 @@ inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const {
}
template<size_t kAlignment>
-template<typename Visitor>
+inline mirror::Object* SpaceBitmap<kAlignment>::FindPrecedingObject(uintptr_t visit_begin,
+ uintptr_t visit_end) const {
+ // Covers [visit_end, visit_begin].
+ visit_end = std::max(heap_begin_, visit_end);
+ DCHECK_LE(visit_end, visit_begin);
+ DCHECK_LT(visit_begin, HeapLimit());
+
+ const uintptr_t offset_start = visit_begin - heap_begin_;
+ const uintptr_t offset_end = visit_end - heap_begin_;
+ uintptr_t index_start = OffsetToIndex(offset_start);
+ const uintptr_t index_end = OffsetToIndex(offset_end);
+
+ // Start with the right edge
+ uintptr_t word = bitmap_begin_[index_start].load(std::memory_order_relaxed);
+ // visit_begin could be the first word of the object we are looking for.
+ const uintptr_t right_edge_mask = OffsetToMask(offset_start);
+ word &= right_edge_mask | (right_edge_mask - 1);
+ while (index_start > index_end) {
+ if (word != 0) {
+ const uintptr_t ptr_base = IndexToOffset(index_start) + heap_begin_;
+ size_t pos_leading_set_bit = kBitsPerIntPtrT - CLZ(word) - 1;
+ return reinterpret_cast<mirror::Object*>(ptr_base + pos_leading_set_bit * kAlignment);
+ }
+ word = bitmap_begin_[--index_start].load(std::memory_order_relaxed);
+ }
+
+ word &= ~(OffsetToMask(offset_end) - 1);
+ if (word != 0) {
+ const uintptr_t ptr_base = IndexToOffset(index_end) + heap_begin_;
+ size_t pos_leading_set_bit = kBitsPerIntPtrT - CLZ(word) - 1;
+ return reinterpret_cast<mirror::Object*>(ptr_base + pos_leading_set_bit * kAlignment);
+ } else {
+ return nullptr;
+ }
+}
+
+template<size_t kAlignment>
+template<bool kVisitOnce, typename Visitor>
inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
uintptr_t visit_end,
Visitor&& visitor) const {
@@ -114,6 +151,9 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
const size_t shift = CTZ(left_edge);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
visitor(obj);
+ if (kVisitOnce) {
+ return;
+ }
left_edge ^= (static_cast<uintptr_t>(1)) << shift;
} while (left_edge != 0);
}
@@ -128,6 +168,9 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
const size_t shift = CTZ(w);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
visitor(obj);
+ if (kVisitOnce) {
+ return;
+ }
w ^= (static_cast<uintptr_t>(1)) << shift;
} while (w != 0);
}
@@ -155,6 +198,9 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
const size_t shift = CTZ(right_edge);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
visitor(obj);
+ if (kVisitOnce) {
+ return;
+ }
right_edge ^= (static_cast<uintptr_t>(1)) << shift;
} while (right_edge != 0);
}
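FindPrecedingObject() above scans words from high addresses to low and, on the first non-zero word, recovers the highest set bit with kBitsPerIntPtrT - CLZ(word) - 1. A compact demonstration of that bit arithmetic, using the GCC/Clang intrinsic that typically backs CLZ() for 64-bit words:

    #include <cassert>
    #include <cstdint>

    int main() {
      constexpr unsigned kBitsPerWord = 64;
      uint64_t word = (uint64_t{1} << 42) | (uint64_t{1} << 7);  // Bits 42 and 7 set.
      // Highest set bit = index of the closest preceding live object in the word.
      unsigned msb = kBitsPerWord - __builtin_clzll(word) - 1;
      assert(msb == 42);
      return 0;
    }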
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 3c5688d5bd..a0458d2ae1 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -16,6 +16,9 @@
#include "space_bitmap-inl.h"
+#include <iomanip>
+#include <sstream>
+
#include "android-base/stringprintf.h"
#include "art_field-inl.h"
@@ -113,6 +116,37 @@ std::string SpaceBitmap<kAlignment>::Dump() const {
reinterpret_cast<void*>(HeapLimit()));
}
+template <size_t kAlignment>
+std::string SpaceBitmap<kAlignment>::DumpMemAround(mirror::Object* obj) const {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
+ DCHECK_GE(addr, heap_begin_);
+ DCHECK(HasAddress(obj)) << obj;
+ const uintptr_t offset = addr - heap_begin_;
+ const size_t index = OffsetToIndex(offset);
+ const uintptr_t mask = OffsetToMask(offset);
+ size_t num_entries = bitmap_size_ / sizeof(uintptr_t);
+ DCHECK_LT(index, num_entries) << " bitmap_size_ = " << bitmap_size_;
+ Atomic<uintptr_t>* atomic_entry = &bitmap_begin_[index];
+ uintptr_t prev = 0;
+ uintptr_t next = 0;
+ if (index > 0) {
+ prev = (atomic_entry - 1)->load(std::memory_order_relaxed);
+ }
+ uintptr_t curr = atomic_entry->load(std::memory_order_relaxed);
+ if (index < num_entries - 1) {
+ next = (atomic_entry + 1)->load(std::memory_order_relaxed);
+ }
+ std::ostringstream oss;
+ oss << " offset: " << offset
+ << " index: " << index
+ << " mask: " << std::hex << std::setfill('0') << std::setw(16) << mask
+ << " words {" << std::hex << std::setfill('0') << std::setw(16) << prev
+ << ", " << std::hex << std::setfill('0') << std::setw(16) << curr
+ << ", " << std::hex <<std::setfill('0') << std::setw(16) << next
+ << "}";
+ return oss.str();
+}
+
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::Clear() {
if (bitmap_begin_ != nullptr) {
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 0d8ffa0d67..c87b31e962 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -40,8 +40,8 @@ namespace accounting {
template<size_t kAlignment>
class SpaceBitmap {
public:
- typedef void ScanCallback(mirror::Object* obj, void* finger, void* arg);
- typedef void SweepCallback(size_t ptr_count, mirror::Object** ptrs, void* arg);
+ using ScanCallback = void(mirror::Object* obj, void* finger, void* arg);
+ using SweepCallback = void(size_t ptr_count, mirror::Object** ptrs, void* arg);
// Initialize a space bitmap so that it points to a bitmap large enough to cover a heap at
// heap_begin of heap_capacity bytes, where objects are guaranteed to be kAlignment-aligned.
@@ -131,10 +131,15 @@ class SpaceBitmap {
}
}
- // Visit the live objects in the range [visit_begin, visit_end).
+ // Find the first live object while scanning the bitmap backwards from
+ // visit_begin to visit_end. Covers the [visit_end, visit_begin] range.
+ mirror::Object* FindPrecedingObject(uintptr_t visit_begin, uintptr_t visit_end = 0) const;
+
+ // Visit the live objects in the range [visit_begin, visit_end). If kVisitOnce
+ // is true, then only the first live object will be visited.
// TODO: Use lock annotations when clang is fixed.
// REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
- template <typename Visitor>
+ template <bool kVisitOnce = false, typename Visitor>
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, Visitor&& visitor) const
NO_THREAD_SAFETY_ANALYSIS;
@@ -202,6 +207,9 @@ class SpaceBitmap {
std::string Dump() const;
+ // Dump three bitmap words around obj.
+ std::string DumpMemAround(mirror::Object* obj) const;
+
// Helper function for computing bitmap size based on a 64 bit capacity.
static size_t ComputeBitmapSize(uint64_t capacity);
static size_t ComputeHeapSize(uint64_t bitmap_bytes);
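A usage sketch of the new kVisitOnce template parameter: with kVisitOnce = true the traversal returns after the first live object, which is cheaper than walking the whole range when only the first object (or mere existence) matters. The bitmap pointer and range below are illustrative:

    // Assumes `bitmap` is a ContinuousSpaceBitmap* and [begin, end) is a valid
    // heap range covered by it.
    mirror::Object* first = nullptr;
    bitmap->VisitMarkedRange</*kVisitOnce=*/true>(begin, end,
                                                  [&](mirror::Object* obj) {
                                                    first = obj;  // Called at most once.
                                                  });
    if (first != nullptr) {
      // There is at least one live object in [begin, end); `first` is the
      // lowest-addressed one.
    }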
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 7bcf375b16..9586e9d70a 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -59,6 +59,13 @@ AllocRecordObjectMap::~AllocRecordObjectMap() {
}
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ // When we are compacting in userfaultfd GC, the class GC-roots are already
+ // updated in SweepAllocationRecords()->SweepClassObject().
+ if (heap->CurrentCollectorType() == gc::CollectorType::kCollectorTypeCMC
+ && heap->MarkCompactCollector()->IsCompacting(Thread::Current())) {
+ return;
+ }
CHECK_LE(recent_record_max_, alloc_record_max_);
BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
size_t count = recent_record_max_;
@@ -92,7 +99,10 @@ static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visito
mirror::Object* new_object = visitor->IsMarked(old_object);
DCHECK(new_object != nullptr);
if (UNLIKELY(old_object != new_object)) {
- klass = GcRoot<mirror::Class>(new_object->AsClass());
+ // We can't use AsClass() as it uses IsClass in a DCHECK, which expects
+ // the class' contents to be there. This is not the case in userfaultfd
+ // GC.
+ klass = GcRoot<mirror::Class>(ObjPtr<mirror::Class>::DownCast(new_object));
}
}
}
@@ -131,13 +141,13 @@ void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
}
void AllocRecordObjectMap::AllowNewAllocationRecords() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
allow_new_record_ = true;
new_record_condition_.Broadcast(Thread::Current());
}
void AllocRecordObjectMap::DisallowNewAllocationRecords() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
allow_new_record_ = false;
}
@@ -230,8 +240,8 @@ void AllocRecordObjectMap::RecordAllocation(Thread* self,
// Since nobody seemed to really notice or care it might not be worth the trouble.
// Wait for GC's sweeping to complete and allow new records.
- while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ while (UNLIKELY((!gUseReadBarrier && !allow_new_record_) ||
+ (gUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/art-dlmalloc.cc
index 79d4fbfb5a..de0c85a407 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/art-dlmalloc.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "dlmalloc.h"
+#include "art-dlmalloc.h"
#include <android-base/logging.h>
@@ -39,8 +39,8 @@ static void art_heap_usage_error(const char* function, void* p);
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
#pragma GCC diagnostic ignored "-Wexpansion-to-defined"
-#include "../../../external/dlmalloc/malloc.c"
-// Note: malloc.c uses a DEBUG define to drive debug code. This interferes with the DEBUG severity
+#include "dlmalloc.c" // NOLINT
+// Note: dlmalloc.c uses a DEBUG define to drive debug code. This interferes with the DEBUG severity
// of libbase, so undefine it now.
#undef DEBUG
#pragma GCC diagnostic pop
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/art-dlmalloc.h
index b12691ad0e..296de72c70 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/art-dlmalloc.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
-#define ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
+#ifndef ART_RUNTIME_GC_ALLOCATOR_ART_DLMALLOC_H_
+#define ART_RUNTIME_GC_ALLOCATOR_ART_DLMALLOC_H_
#include <cstdint>
@@ -33,7 +33,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wredundant-decls"
#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
-#include "../../external/dlmalloc/malloc.h"
+#include "dlmalloc.h"
#pragma GCC diagnostic pop
// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
@@ -58,4 +58,4 @@ void* ArtDlMallocMoreCore(void* mspace, intptr_t increment);
} // namespace gc
} // namespace art
-#endif // ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_
+#endif // ART_RUNTIME_GC_ALLOCATOR_ART_DLMALLOC_H_
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 0de62fef47..f3c61e3ef4 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -164,6 +164,9 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
gc_tracing_throughput_hist_ = metrics->YoungGcTracingThroughput();
gc_throughput_avg_ = metrics->YoungGcThroughputAvg();
gc_tracing_throughput_avg_ = metrics->YoungGcTracingThroughputAvg();
+ gc_scanned_bytes_ = metrics->YoungGcScannedBytes();
+ gc_freed_bytes_ = metrics->YoungGcFreedBytes();
+ gc_duration_ = metrics->YoungGcDuration();
} else {
gc_time_histogram_ = metrics->FullGcCollectionTime();
metrics_gc_count_ = metrics->FullGcCount();
@@ -171,6 +174,9 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
gc_tracing_throughput_hist_ = metrics->FullGcTracingThroughput();
gc_throughput_avg_ = metrics->FullGcThroughputAvg();
gc_tracing_throughput_avg_ = metrics->FullGcTracingThroughputAvg();
+ gc_scanned_bytes_ = metrics->FullGcScannedBytes();
+ gc_freed_bytes_ = metrics->FullGcFreedBytes();
+ gc_duration_ = metrics->FullGcDuration();
}
}
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 80b39824ec..4efe48c318 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -76,6 +76,9 @@ GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
gc_tracing_throughput_hist_(nullptr),
gc_throughput_avg_(nullptr),
gc_tracing_throughput_avg_(nullptr),
+ gc_scanned_bytes_(nullptr),
+ gc_freed_bytes_(nullptr),
+ gc_duration_(nullptr),
cumulative_timings_(name),
pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true),
is_transaction_active_(false),
@@ -189,15 +192,18 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
RegisterPause(duration_ns);
}
total_time_ns_ += duration_ns;
- uint64_t total_pause_time = 0;
+ uint64_t total_pause_time_ns = 0;
for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
MutexLock mu(self, pause_histogram_lock_);
pause_histogram_.AdjustAndAddValue(pause_time);
- total_pause_time += pause_time;
+ total_pause_time_ns += pause_time;
}
metrics::ArtMetrics* metrics = runtime->GetMetrics();
// Report STW pause time in microseconds.
- metrics->WorldStopTimeDuringGCAvg()->Add(total_pause_time / 1'000);
+ const uint64_t total_pause_time_us = total_pause_time_ns / 1'000;
+ metrics->WorldStopTimeDuringGCAvg()->Add(total_pause_time_us);
+ metrics->GcWorldStopTime()->Add(total_pause_time_us);
+ metrics->GcWorldStopCount()->AddOne();
// Report total collection time of all GCs put together.
metrics->TotalGcCollectionTime()->Add(NsToMs(duration_ns));
if (are_metrics_initialized_) {
@@ -216,6 +222,10 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
throughput = current_iteration->GetEstimatedThroughput() / MB;
gc_throughput_histogram_->Add(throughput);
gc_throughput_avg_->Add(throughput);
+
+ gc_scanned_bytes_->Add(current_iteration->GetScannedBytes());
+ gc_freed_bytes_->Add(current_iteration->GetFreedBytes());
+ gc_duration_->Add(NsToMs(current_iteration->GetDurationNs()));
}
is_transaction_active_ = false;
}
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index d439914621..d11aea36c9 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -166,6 +166,9 @@ class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public Mark
metrics::MetricsBase<int64_t>* gc_tracing_throughput_hist_;
metrics::MetricsBase<uint64_t>* gc_throughput_avg_;
metrics::MetricsBase<uint64_t>* gc_tracing_throughput_avg_;
+ metrics::MetricsBase<uint64_t>* gc_scanned_bytes_;
+ metrics::MetricsBase<uint64_t>* gc_freed_bytes_;
+ metrics::MetricsBase<uint64_t>* gc_duration_;
uint64_t total_thread_cpu_time_ns_;
uint64_t total_time_ns_;
uint64_t total_freed_objects_;
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index a0ea60d4c5..3d27a93c4b 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -46,7 +46,7 @@ class FakeImageSpace : public space::ImageSpace {
MemMap&& oat_map)
: ImageSpace("FakeImageSpace",
/*image_location=*/"",
- /*profile_file=*/{},
+ /*profile_files=*/{},
std::move(map),
std::move(live_bitmap),
map.End()),
diff --git a/runtime/gc/collector/mark_compact-inl.h b/runtime/gc/collector/mark_compact-inl.h
new file mode 100644
index 0000000000..3db51bf732
--- /dev/null
+++ b/runtime/gc/collector/mark_compact-inl.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_INL_H_
+#define ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_INL_H_
+
+#include "mark_compact.h"
+
+#include "mirror/object-inl.h"
+
+namespace art {
+namespace gc {
+namespace collector {
+
+template <size_t kAlignment>
+inline uintptr_t MarkCompact::LiveWordsBitmap<kAlignment>::SetLiveWords(uintptr_t begin,
+ size_t size) {
+ const uintptr_t begin_bit_idx = MemRangeBitmap::BitIndexFromAddr(begin);
+ DCHECK(!Bitmap::TestBit(begin_bit_idx));
+ const uintptr_t end_bit_idx = MemRangeBitmap::BitIndexFromAddr(begin + size);
+ uintptr_t* bm_address = Bitmap::Begin() + Bitmap::BitIndexToWordIndex(begin_bit_idx);
+ uintptr_t* const end_bm_address = Bitmap::Begin() + Bitmap::BitIndexToWordIndex(end_bit_idx);
+ uintptr_t mask = Bitmap::BitIndexToMask(begin_bit_idx);
+ // Bits that need to be set in the first word, if it is not also the last word.
+ mask = ~(mask - 1);
+ // Loop over all the words except the last one.
+ // TODO: optimize by using memset. Sometimes this function may get called with
+ // large ranges.
+ for (; bm_address < end_bm_address; bm_address++) {
+ *bm_address |= mask;
+ // This needs to be set only once as we are setting all bits in the
+ // subsequent iterations. Hopefully, the compiler will optimize it.
+ mask = ~0;
+ }
+ // Take care of the last word. If we had only one word, then mask != ~0.
+ const uintptr_t end_mask = Bitmap::BitIndexToMask(end_bit_idx);
+ *bm_address |= mask & (end_mask - 1);
+ return begin_bit_idx;
+}
+
+template <size_t kAlignment> template <typename Visitor>
+inline void MarkCompact::LiveWordsBitmap<kAlignment>::VisitLiveStrides(uintptr_t begin_bit_idx,
+ uint8_t* end,
+ const size_t bytes,
+ Visitor&& visitor) const {
+ DCHECK(IsAligned<kAlignment>(end));
+ // We have to use the unchecked version of BitIndexFromAddr() as 'end' could
+ // be outside the range, so do an explicit check here.
+ DCHECK_LE(reinterpret_cast<uintptr_t>(end), MemRangeBitmap::CoverEnd());
+ const uintptr_t end_bit_idx =
+ MemRangeBitmap::BitIndexFromAddrUnchecked(reinterpret_cast<uintptr_t>(end));
+ DCHECK_LT(begin_bit_idx, end_bit_idx);
+ uintptr_t begin_word_idx = Bitmap::BitIndexToWordIndex(begin_bit_idx);
+ const uintptr_t end_word_idx = Bitmap::BitIndexToWordIndex(end_bit_idx);
+ DCHECK(Bitmap::TestBit(begin_bit_idx));
+ size_t stride_size = 0;
+ size_t idx_in_word = 0;
+ size_t num_heap_words = bytes / kAlignment;
+ uintptr_t live_stride_start_idx;
+ uintptr_t word = Bitmap::Begin()[begin_word_idx];
+
+ // Set up the first word.
+ word &= ~(Bitmap::BitIndexToMask(begin_bit_idx) - 1);
+ begin_bit_idx = RoundDown(begin_bit_idx, Bitmap::kBitsPerBitmapWord);
+
+ do {
+ if (UNLIKELY(begin_word_idx == end_word_idx)) {
+ word &= Bitmap::BitIndexToMask(end_bit_idx) - 1;
+ }
+ if (~word == 0) {
+ // All bits in the word are marked.
+ if (stride_size == 0) {
+ live_stride_start_idx = begin_bit_idx;
+ }
+ stride_size += Bitmap::kBitsPerBitmapWord;
+ if (num_heap_words <= stride_size) {
+ break;
+ }
+ } else {
+ while (word != 0) {
+ // discard 0s
+ size_t shift = CTZ(word);
+ idx_in_word += shift;
+ word >>= shift;
+ if (stride_size > 0) {
+ if (shift > 0) {
+ if (num_heap_words <= stride_size) {
+ break;
+ }
+ visitor(live_stride_start_idx, stride_size, /*is_last*/ false);
+ num_heap_words -= stride_size;
+ live_stride_start_idx = begin_bit_idx + idx_in_word;
+ stride_size = 0;
+ }
+ } else {
+ live_stride_start_idx = begin_bit_idx + idx_in_word;
+ }
+ // consume 1s
+ shift = CTZ(~word);
+ DCHECK_NE(shift, 0u);
+ word >>= shift;
+ idx_in_word += shift;
+ stride_size += shift;
+ }
+ // If the whole word is 0 or the higher bits are 0s, we exit the above
+ // loop without completely consuming the word, so call the visitor if
+ // needed.
+ if (idx_in_word < Bitmap::kBitsPerBitmapWord && stride_size > 0) {
+ if (num_heap_words <= stride_size) {
+ break;
+ }
+ visitor(live_stride_start_idx, stride_size, /*is_last*/ false);
+ num_heap_words -= stride_size;
+ stride_size = 0;
+ }
+ idx_in_word = 0;
+ }
+ begin_bit_idx += Bitmap::kBitsPerBitmapWord;
+ begin_word_idx++;
+ if (UNLIKELY(begin_word_idx > end_word_idx)) {
+ num_heap_words = std::min(stride_size, num_heap_words);
+ break;
+ }
+ word = Bitmap::Begin()[begin_word_idx];
+ } while (true);
+
+ if (stride_size > 0) {
+ visitor(live_stride_start_idx, num_heap_words, /*is_last*/ true);
+ }
+}
+
+template <size_t kAlignment>
+inline
+uint32_t MarkCompact::LiveWordsBitmap<kAlignment>::FindNthLiveWordOffset(size_t chunk_idx,
+ uint32_t n) const {
+ DCHECK_LT(n, kBitsPerVectorWord);
+ const size_t index = chunk_idx * kBitmapWordsPerVectorWord;
+ for (uint32_t i = 0; i < kBitmapWordsPerVectorWord; i++) {
+ uintptr_t word = Bitmap::Begin()[index + i];
+ if (~word == 0) {
+ if (n < Bitmap::kBitsPerBitmapWord) {
+ return i * Bitmap::kBitsPerBitmapWord + n;
+ }
+ n -= Bitmap::kBitsPerBitmapWord;
+ } else {
+ uint32_t j = 0;
+ while (word != 0) {
+ // count contiguous 0s
+ uint32_t shift = CTZ(word);
+ word >>= shift;
+ j += shift;
+ // count contiguous 1s
+ shift = CTZ(~word);
+ DCHECK_NE(shift, 0u);
+ if (shift > n) {
+ return i * Bitmap::kBitsPerBitmapWord + j + n;
+ }
+ n -= shift;
+ word >>= shift;
+ j += shift;
+ }
+ }
+ }
+ UNREACHABLE();
+}
+
+inline void MarkCompact::UpdateRef(mirror::Object* obj, MemberOffset offset) {
+ mirror::Object* old_ref = obj->GetFieldObject<
+ mirror::Object, kVerifyNone, kWithoutReadBarrier, /*kIsVolatile*/false>(offset);
+ if (kIsDebugBuild) {
+ if (live_words_bitmap_->HasAddress(old_ref)
+ && reinterpret_cast<uint8_t*>(old_ref) < black_allocations_begin_
+ && !moving_space_bitmap_->Test(old_ref)) {
+ mirror::Object* from_ref = GetFromSpaceAddr(old_ref);
+ std::ostringstream oss;
+ heap_->DumpSpaces(oss);
+ MemMap::DumpMaps(oss, /* terse= */ true);
+ LOG(FATAL) << "Not marked in the bitmap ref=" << old_ref
+ << " from_ref=" << from_ref
+ << " offset=" << offset
+ << " obj=" << obj
+ << " obj-validity=" << IsValidObject(obj)
+ << " from-space=" << static_cast<void*>(from_space_begin_)
+ << " bitmap= " << moving_space_bitmap_->DumpMemAround(old_ref)
+ << " from_ref "
+ << heap_->GetVerification()->DumpRAMAroundAddress(
+ reinterpret_cast<uintptr_t>(from_ref), 128)
+ << " obj "
+ << heap_->GetVerification()->DumpRAMAroundAddress(
+ reinterpret_cast<uintptr_t>(obj), 128)
+ << " old_ref " << heap_->GetVerification()->DumpRAMAroundAddress(
+ reinterpret_cast<uintptr_t>(old_ref), 128)
+ << " maps\n" << oss.str();
+ }
+ }
+ mirror::Object* new_ref = PostCompactAddress(old_ref);
+ if (new_ref != old_ref) {
+ obj->SetFieldObjectWithoutWriteBarrier<
+ /*kTransactionActive*/false, /*kCheckTransaction*/false, kVerifyNone, /*kIsVolatile*/false>(
+ offset,
+ new_ref);
+ }
+}
+
+inline bool MarkCompact::VerifyRootSingleUpdate(void* root,
+ mirror::Object* old_ref,
+ const RootInfo& info) {
+ void* stack_end = stack_end_;
+ void* stack_addr = stack_addr_;
+ if (!live_words_bitmap_->HasAddress(old_ref)) {
+ return false;
+ }
+ if (UNLIKELY(stack_end == nullptr)) {
+ pthread_attr_t attr;
+ size_t stack_size;
+ pthread_getattr_np(pthread_self(), &attr);
+ pthread_attr_getstack(&attr, &stack_addr, &stack_size);
+ pthread_attr_destroy(&attr);
+ stack_end = reinterpret_cast<char*>(stack_addr) + stack_size;
+ }
+ if (root < stack_addr || root > stack_end) {
+ auto ret = updated_roots_.insert(root);
+ DCHECK(ret.second) << "root=" << root
+ << " old_ref=" << old_ref
+ << " stack_addr=" << stack_addr
+ << " stack_end=" << stack_end;
+ }
+ DCHECK(reinterpret_cast<uint8_t*>(old_ref) >= black_allocations_begin_
+ || live_words_bitmap_->Test(old_ref))
+ << "ref=" << old_ref
+ << " <" << mirror::Object::PrettyTypeOf(old_ref)
+ << "> RootInfo [" << info << "]";
+ return true;
+}
+
+inline void MarkCompact::UpdateRoot(mirror::CompressedReference<mirror::Object>* root,
+ const RootInfo& info) {
+ DCHECK(!root->IsNull());
+ mirror::Object* old_ref = root->AsMirrorPtr();
+ if (!kIsDebugBuild || VerifyRootSingleUpdate(root, old_ref, info)) {
+ mirror::Object* new_ref = PostCompactAddress(old_ref);
+ if (old_ref != new_ref) {
+ root->Assign(new_ref);
+ }
+ }
+}
+
+inline void MarkCompact::UpdateRoot(mirror::Object** root, const RootInfo& info) {
+ mirror::Object* old_ref = *root;
+ if (!kIsDebugBuild || VerifyRootSingleUpdate(root, old_ref, info)) {
+ mirror::Object* new_ref = PostCompactAddress(old_ref);
+ if (old_ref != new_ref) {
+ *root = new_ref;
+ }
+ }
+}
+
+template <size_t kAlignment>
+inline size_t MarkCompact::LiveWordsBitmap<kAlignment>::CountLiveWordsUpto(size_t bit_idx) const {
+ const size_t word_offset = Bitmap::BitIndexToWordIndex(bit_idx);
+ uintptr_t word;
+ size_t ret = 0;
+ // This is needed only if we decide to make chunks 128-bit but still
+ // choose to use a 64-bit word for the bitmap. Ideally we would use
+ // 128-bit SIMD instructions to compute the popcount.
+ if (kBitmapWordsPerVectorWord > 1) {
+ for (size_t i = RoundDown(word_offset, kBitmapWordsPerVectorWord); i < word_offset; i++) {
+ word = Bitmap::Begin()[i];
+ ret += POPCOUNT(word);
+ }
+ }
+ word = Bitmap::Begin()[word_offset];
+ const uintptr_t mask = Bitmap::BitIndexToMask(bit_idx);
+ DCHECK_NE(word & mask, 0u)
+ << " word_offset:" << word_offset
+ << " bit_idx:" << bit_idx
+ << " bit_idx_in_word:" << (bit_idx % Bitmap::kBitsPerBitmapWord)
+ << std::hex << " word: 0x" << word
+ << " mask: 0x" << mask << std::dec;
+ ret += POPCOUNT(word & (mask - 1));
+ return ret;
+}
+
+inline mirror::Object* MarkCompact::PostCompactBlackObjAddr(mirror::Object* old_ref) const {
+ return reinterpret_cast<mirror::Object*>(reinterpret_cast<uint8_t*>(old_ref)
+ - black_objs_slide_diff_);
+}
+
+inline mirror::Object* MarkCompact::PostCompactOldObjAddr(mirror::Object* old_ref) const {
+ const uintptr_t begin = live_words_bitmap_->Begin();
+ const uintptr_t addr_offset = reinterpret_cast<uintptr_t>(old_ref) - begin;
+ const size_t vec_idx = addr_offset / kOffsetChunkSize;
+ const size_t live_bytes_in_bitmap_word =
+ live_words_bitmap_->CountLiveWordsUpto(addr_offset / kAlignment) * kAlignment;
+ return reinterpret_cast<mirror::Object*>(begin
+ + chunk_info_vec_[vec_idx]
+ + live_bytes_in_bitmap_word);
+}
+
+inline mirror::Object* MarkCompact::PostCompactAddressUnchecked(mirror::Object* old_ref) const {
+ if (reinterpret_cast<uint8_t*>(old_ref) >= black_allocations_begin_) {
+ return PostCompactBlackObjAddr(old_ref);
+ }
+ if (kIsDebugBuild) {
+ mirror::Object* from_ref = GetFromSpaceAddr(old_ref);
+ DCHECK(live_words_bitmap_->Test(old_ref))
+ << "ref=" << old_ref;
+ if (!moving_space_bitmap_->Test(old_ref)) {
+ std::ostringstream oss;
+ Runtime::Current()->GetHeap()->DumpSpaces(oss);
+ MemMap::DumpMaps(oss, /* terse= */ true);
+ LOG(FATAL) << "ref=" << old_ref
+ << " from_ref=" << from_ref
+ << " from-space=" << static_cast<void*>(from_space_begin_)
+ << " bitmap= " << moving_space_bitmap_->DumpMemAround(old_ref)
+ << heap_->GetVerification()->DumpRAMAroundAddress(
+ reinterpret_cast<uintptr_t>(from_ref), 128)
+ << " maps\n" << oss.str();
+ }
+ }
+ return PostCompactOldObjAddr(old_ref);
+}
+
+inline mirror::Object* MarkCompact::PostCompactAddress(mirror::Object* old_ref) const {
+ // TODO: To further speed up the check, consider caching the heap
+ // start/end in this object.
+ if (LIKELY(live_words_bitmap_->HasAddress(old_ref))) {
+ return PostCompactAddressUnchecked(old_ref);
+ }
+ return old_ref;
+}
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_INL_H_
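PostCompactOldObjAddr() above is a two-level prefix sum: chunk_info_vec_ (after the prefix-sum pass during compaction setup) gives the live bytes preceding the object's chunk, and a popcount over the live-words bitmap gives the live bytes within the chunk that precede the object. A small worked example with made-up numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      constexpr uintptr_t kAlignment = 8;
      // Say chunk_info_vec_[vec_idx] reports 300 live bytes before the chunk,
      // and CountLiveWordsUpto() finds 4 live words of kAlignment bytes each
      // before the object within the chunk; both values are illustrative.
      uintptr_t live_before_chunk = 300;
      uintptr_t live_in_chunk = 4 * kAlignment;
      // The post-compact offset is the total number of live bytes preceding
      // the object, exactly as computed in PostCompactOldObjAddr().
      uintptr_t new_offset = live_before_chunk + live_in_chunk;
      assert(new_offset == 332);
      return 0;
    }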
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
new file mode 100644
index 0000000000..0b8d9016ab
--- /dev/null
+++ b/runtime/gc/collector/mark_compact.cc
@@ -0,0 +1,2588 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mark_compact-inl.h"
+
+#include "base/quasi_atomic.h"
+#include "base/systrace.h"
+#include "base/utils.h"
+#include "gc/accounting/mod_union_table-inl.h"
+#include "gc/reference_processor.h"
+#include "gc/space/bump_pointer_space.h"
+#include "gc/task_processor.h"
+#include "gc/verification-inl.h"
+#include "jit/jit_code_cache.h"
+#include "mirror/object-refvisitor-inl.h"
+#include "read_barrier_config.h"
+#include "scoped_thread_state_change-inl.h"
+#include "sigchain.h"
+#include "thread_list.h"
+
+#include <linux/userfaultfd.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <fstream>
+#include <numeric>
+
+#ifndef __BIONIC__
+#ifndef MREMAP_DONTUNMAP
+#define MREMAP_DONTUNMAP 4
+#endif
+#ifndef __NR_userfaultfd
+#if defined(__x86_64__)
+#define __NR_userfaultfd 323
+#elif defined(__i386__)
+#define __NR_userfaultfd 374
+#elif defined(__aarch64__)
+#define __NR_userfaultfd 282
+#elif defined(__arm__)
+#define __NR_userfaultfd 388
+#else
+#error "__NR_userfaultfd undefined"
+#endif
+#endif // __NR_userfaultfd
+#endif // __BIONIC__
+
+namespace art {
+
+// We require the MREMAP_DONTUNMAP functionality of the mremap syscall, which
+// was introduced in kernel version 5.13. Check for that on host. Checking on
+// target is not required as MREMAP_DONTUNMAP and userfaultfd were enabled
+// together.
+// Concurrent compaction termination logic depends on the kernel having
+// the fault-retry feature (allowing repeated faults on the same page), which
+// was introduced in kernel version 5.7. On target this feature is backported
+// to all kernels where userfaultfd is enabled.
+#ifdef ART_TARGET
+static constexpr bool gHaveMremapDontunmap = true;
+static constexpr bool gKernelHasFaultRetry = true;
+#else
+static const bool gHaveMremapDontunmap = IsKernelVersionAtLeast(5, 13);
+static const bool gKernelHasFaultRetry = IsKernelVersionAtLeast(5, 7);
+#endif
+
+#ifndef ART_FORCE_USE_READ_BARRIER
+static bool ShouldUseUserfaultfd() {
+#if !defined(__linux__)
+ return false;
+#endif
+ int fd = syscall(__NR_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY);
+#ifndef ART_TARGET
+ // On host we may not have the kernel patches that restrict userfaultfd to
+ // user mode. But that is not a security concern as we are on host.
+ // Therefore, attempt one more time without UFFD_USER_MODE_ONLY.
+ if (fd == -1 && errno == EINVAL) {
+ fd = syscall(__NR_userfaultfd, O_CLOEXEC);
+ }
+#endif
+ if (fd >= 0) {
+ close(fd);
+ return true;
+ } else {
+ return false;
+ }
+}
+#endif
+
+#ifdef ART_FORCE_USE_READ_BARRIER
+const bool gUseReadBarrier = kUseBakerReadBarrier || kUseTableLookupReadBarrier;
+#else
+const bool gUseReadBarrier = (kUseBakerReadBarrier || kUseTableLookupReadBarrier) &&
+                             !ShouldUseUserfaultfd();
+#endif
+const bool gUseUserfaultfd = !gUseReadBarrier;
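+// Note: gUseUserfaultfd is the exact complement of gUseReadBarrier, so at any
+// point the runtime is using exactly one of the read-barrier (CC) collector
+// and this userfaultfd-based compaction.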
+
+namespace gc {
+namespace collector {
+
+// Turn off kCheckLocks when profiling the GC as it slows down the GC
+// significantly.
+static constexpr bool kCheckLocks = kDebugLocking;
+static constexpr bool kVerifyRootsMarked = kIsDebugBuild;
+
+bool MarkCompact::CreateUserfaultfd(bool post_fork) {
+ if (post_fork || uffd_ == -1) {
+ // Don't use O_NONBLOCK as we rely on read waiting on uffd_ if there isn't
+ // any read event available. We don't use poll.
+ if (gKernelHasFaultRetry) {
+ uffd_ = syscall(__NR_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY);
+#ifndef ART_TARGET
+ // On host we may not have the kernel patches that restrict userfaultfd to
+ // user mode. But that is not a security concern as we are on host.
+ // Therefore, attempt one more time without UFFD_USER_MODE_ONLY.
+ if (UNLIKELY(uffd_ == -1 && errno == EINVAL)) {
+ uffd_ = syscall(__NR_userfaultfd, O_CLOEXEC);
+ }
+#endif
+ if (UNLIKELY(uffd_ == -1)) {
+ uffd_ = kFallbackMode;
+ LOG(WARNING) << "Userfaultfd isn't supported (reason: " << strerror(errno)
+ << ") and therefore falling back to stop-the-world compaction.";
+ } else {
+ DCHECK_GE(uffd_, 0);
+ // Get/update the features that we want in userfaultfd
+ struct uffdio_api api = {.api = UFFD_API, .features = 0};
+ CHECK_EQ(ioctl(uffd_, UFFDIO_API, &api), 0)
+ << "ioctl_userfaultfd: API: " << strerror(errno);
+ }
+ } else {
+ // Without fault-retry feature in the kernel we can't terminate concurrent
+ // compaction. So fallback to stop-the-world compaction.
+ uffd_ = kFallbackMode;
+ }
+ }
+ uffd_initialized_ = !post_fork || uffd_ == kFallbackMode;
+ return uffd_ >= 0;
+}
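+// Expected usage (inferred, not spelled out in this file): CreateUserfaultfd()
+// is presumably invoked again with post_fork=true in a forked child (e.g.
+// zygote children), since userfaultfd registrations don't carry over to the
+// child's address space; the lazy post_fork=false path is taken from
+// PrepareForCompaction() below.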
+
+template <size_t kAlignment>
+MarkCompact::LiveWordsBitmap<kAlignment>* MarkCompact::LiveWordsBitmap<kAlignment>::Create(
+ uintptr_t begin, uintptr_t end) {
+ return static_cast<LiveWordsBitmap<kAlignment>*>(
+ MemRangeBitmap::Create("Concurrent Mark Compact live words bitmap", begin, end));
+}
+
+MarkCompact::MarkCompact(Heap* heap)
+ : GarbageCollector(heap, "concurrent mark compact"),
+ gc_barrier_(0),
+ mark_stack_lock_("mark compact mark stack lock", kMarkSweepMarkStackLock),
+ bump_pointer_space_(heap->GetBumpPointerSpace()),
+ uffd_(-1),
+ thread_pool_counter_(0),
+ compacting_(false),
+ uffd_initialized_(false) {
+  // TODO: Depending on how the bump-pointer space move is implemented, if we
+  // switch between two virtual memories each time, then we will have to
+  // initialize live_words_bitmap_ accordingly.
+ live_words_bitmap_.reset(LiveWordsBitmap<kAlignment>::Create(
+ reinterpret_cast<uintptr_t>(bump_pointer_space_->Begin()),
+ reinterpret_cast<uintptr_t>(bump_pointer_space_->Limit())));
+
+ // Create one MemMap for all the data structures
+ size_t chunk_info_vec_size = bump_pointer_space_->Capacity() / kOffsetChunkSize;
+ size_t nr_moving_pages = bump_pointer_space_->Capacity() / kPageSize;
+ size_t nr_non_moving_pages = heap->GetNonMovingSpace()->Capacity() / kPageSize;
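+  // Layout of the single info_map_ allocation, matching the pointer
+  // arithmetic further below:
+  //   [chunk-info vector | non-moving first-objs | moving first-objs |
+  //    pre-compact offsets]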
+
+ std::string err_msg;
+ info_map_ = MemMap::MapAnonymous("Concurrent mark-compact chunk-info vector",
+ chunk_info_vec_size * sizeof(uint32_t)
+ + nr_non_moving_pages * sizeof(ObjReference)
+ + nr_moving_pages * sizeof(ObjReference)
+ + nr_moving_pages * sizeof(uint32_t),
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ false,
+ &err_msg);
+ if (UNLIKELY(!info_map_.IsValid())) {
+ LOG(ERROR) << "Failed to allocate concurrent mark-compact chunk-info vector: " << err_msg;
+ } else {
+ uint8_t* p = info_map_.Begin();
+ chunk_info_vec_ = reinterpret_cast<uint32_t*>(p);
+ vector_length_ = chunk_info_vec_size;
+
+ p += chunk_info_vec_size * sizeof(uint32_t);
+ first_objs_non_moving_space_ = reinterpret_cast<ObjReference*>(p);
+
+ p += nr_non_moving_pages * sizeof(ObjReference);
+ first_objs_moving_space_ = reinterpret_cast<ObjReference*>(p);
+
+ p += nr_moving_pages * sizeof(ObjReference);
+ pre_compact_offset_moving_space_ = reinterpret_cast<uint32_t*>(p);
+ }
+
+ from_space_map_ = MemMap::MapAnonymous("Concurrent mark-compact from-space",
+ bump_pointer_space_->Capacity(),
+ PROT_NONE,
+ /*low_4gb=*/ kObjPtrPoisoning,
+ &err_msg);
+ if (UNLIKELY(!from_space_map_.IsValid())) {
+ LOG(ERROR) << "Failed to allocate concurrent mark-compact from-space" << err_msg;
+ } else {
+ from_space_begin_ = from_space_map_.Begin();
+ }
+
+  // Pointer poisoning requires 32-bit pointers and therefore compaction
+  // buffers on the stack can't be used. We also use the first page-sized
+  // buffer for the purpose of terminating concurrent compaction.
+ const size_t num_pages = 1 + std::max(heap_->GetParallelGCThreadCount(),
+ heap_->GetConcGCThreadCount());
+ compaction_buffers_map_ = MemMap::MapAnonymous("Concurrent mark-compact compaction buffers",
+ kPageSize * (kObjPtrPoisoning ? num_pages : 1),
+ PROT_READ | PROT_WRITE,
+ /*low_4gb=*/ kObjPtrPoisoning,
+ &err_msg);
+ if (UNLIKELY(!compaction_buffers_map_.IsValid())) {
+ LOG(ERROR) << "Failed to allocate concurrent mark-compact compaction buffers" << err_msg;
+ }
+ conc_compaction_termination_page_ = compaction_buffers_map_.Begin();
+ if (kObjPtrPoisoning) {
+ // Touch the page deliberately to avoid userfaults on it. We madvise it in
+ // CompactionPhase() before using it to terminate concurrent compaction.
+ CHECK_EQ(*conc_compaction_termination_page_, 0);
+ }
+}
+
+void MarkCompact::BindAndResetBitmaps() {
+ // TODO: We need to hold heap_bitmap_lock_ only for populating immune_spaces.
+ // The card-table and mod-union-table processing can be done without it. So
+ // change the logic below. Note that the bitmap clearing would require the
+ // lock.
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ accounting::CardTable* const card_table = heap_->GetCardTable();
+ // Mark all of the spaces we never collect as immune.
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
+ space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+ CHECK(space->IsZygoteSpace() || space->IsImageSpace());
+ immune_spaces_.AddSpace(space);
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ if (table != nullptr) {
+ table->ProcessCards();
+ } else {
+ // Keep cards aged if we don't have a mod-union table since we may need
+ // to scan them in future GCs. This case is for app images.
+ // TODO: We could probably scan the objects right here to avoid doing
+ // another scan through the card-table.
+ card_table->ModifyCardsAtomic(
+ space->Begin(),
+ space->End(),
+ [](uint8_t card) {
+ return (card == gc::accounting::CardTable::kCardClean)
+ ? card
+ : gc::accounting::CardTable::kCardAged;
+ },
+ /* card modified visitor */ VoidFunctor());
+ }
+ } else {
+ CHECK(!space->IsZygoteSpace());
+ CHECK(!space->IsImageSpace());
+ // The card-table corresponding to bump-pointer and non-moving space can
+ // be cleared, because we are going to traverse all the reachable objects
+ // in these spaces. This card-table will eventually be used to track
+ // mutations while concurrent marking is going on.
+ card_table->ClearCardRange(space->Begin(), space->Limit());
+ if (space == bump_pointer_space_) {
+        // It is OK to clear the bitmap with mutators running since the only
+        // place it is read is VisitObjects, which is mutually exclusive with
+        // this GC.
+ moving_space_bitmap_ = bump_pointer_space_->GetMarkBitmap();
+ moving_space_bitmap_->Clear();
+ } else {
+ CHECK(space == heap_->GetNonMovingSpace());
+ non_moving_space_ = space;
+ non_moving_space_bitmap_ = space->GetMarkBitmap();
+ }
+ }
+ }
+}
+
+void MarkCompact::InitializePhase() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ mark_stack_ = heap_->GetMarkStack();
+ CHECK(mark_stack_->IsEmpty());
+ immune_spaces_.Reset();
+ moving_first_objs_count_ = 0;
+ non_moving_first_objs_count_ = 0;
+ black_page_count_ = 0;
+ freed_objects_ = 0;
+ from_space_slide_diff_ = from_space_begin_ - bump_pointer_space_->Begin();
+ black_allocations_begin_ = bump_pointer_space_->Limit();
+ compacting_ = false;
+}
+
+void MarkCompact::RunPhases() {
+ Thread* self = Thread::Current();
+ thread_running_gc_ = self;
+ InitializePhase();
+ GetHeap()->PreGcVerification(this);
+ {
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ MarkingPhase();
+ }
+ {
+ ScopedPause pause(this);
+ MarkingPause();
+ if (kIsDebugBuild) {
+ bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
+ }
+ }
+  // Sleep to increase the likelihood of black allocations. For testing purposes only.
+ if (kIsDebugBuild && heap_->GetTaskProcessor()->GetRunningThread() == thread_running_gc_) {
+ sleep(3);
+ }
+ {
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ ReclaimPhase();
+ PrepareForCompaction();
+ }
+ if (uffd_ != kFallbackMode) {
+ heap_->GetThreadPool()->WaitForWorkersToBeCreated();
+ }
+ {
+ heap_->ThreadFlipBegin(self);
+ {
+ ScopedPause pause(this);
+ PreCompactionPhase();
+ }
+ heap_->ThreadFlipEnd(self);
+ }
+
+ if (uffd_ >= 0) {
+ ReaderMutexLock mu(self, *Locks::mutator_lock_);
+ CompactionPhase();
+ }
+
+ FinishPhase();
+ thread_running_gc_ = nullptr;
+ GetHeap()->PostGcVerification(this);
+}
+
+void MarkCompact::InitMovingSpaceFirstObjects(const size_t vec_len) {
+ // Find the first live word first.
+ size_t to_space_page_idx = 0;
+ uint32_t offset_in_chunk_word;
+ uint32_t offset;
+ mirror::Object* obj;
+ const uintptr_t heap_begin = moving_space_bitmap_->HeapBegin();
+
+ size_t chunk_idx;
+ // Find the first live word in the space
+ for (chunk_idx = 0; chunk_info_vec_[chunk_idx] == 0; chunk_idx++) {
+ if (chunk_idx > vec_len) {
+      // We don't have any live data in the moving-space.
+ return;
+ }
+ }
+ // Use live-words bitmap to find the first word
+ offset_in_chunk_word = live_words_bitmap_->FindNthLiveWordOffset(chunk_idx, /*n*/ 0);
+ offset = chunk_idx * kBitsPerVectorWord + offset_in_chunk_word;
+ DCHECK(live_words_bitmap_->Test(offset)) << "offset=" << offset
+ << " chunk_idx=" << chunk_idx
+ << " N=0"
+ << " offset_in_word=" << offset_in_chunk_word
+ << " word=" << std::hex
+ << live_words_bitmap_->GetWord(chunk_idx);
+ // The first object doesn't require using FindPrecedingObject().
+ obj = reinterpret_cast<mirror::Object*>(heap_begin + offset * kAlignment);
+ // TODO: add a check to validate the object.
+
+ pre_compact_offset_moving_space_[to_space_page_idx] = offset;
+ first_objs_moving_space_[to_space_page_idx].Assign(obj);
+ to_space_page_idx++;
+
+ uint32_t page_live_bytes = 0;
+ while (true) {
+ for (; page_live_bytes <= kPageSize; chunk_idx++) {
+ if (chunk_idx > vec_len) {
+ moving_first_objs_count_ = to_space_page_idx;
+ return;
+ }
+ page_live_bytes += chunk_info_vec_[chunk_idx];
+ }
+ chunk_idx--;
+ page_live_bytes -= kPageSize;
+ DCHECK_LE(page_live_bytes, kOffsetChunkSize);
+ DCHECK_LE(page_live_bytes, chunk_info_vec_[chunk_idx])
+ << " chunk_idx=" << chunk_idx
+ << " to_space_page_idx=" << to_space_page_idx
+ << " vec_len=" << vec_len;
+ DCHECK(IsAligned<kAlignment>(chunk_info_vec_[chunk_idx] - page_live_bytes));
+ offset_in_chunk_word =
+ live_words_bitmap_->FindNthLiveWordOffset(
+ chunk_idx, (chunk_info_vec_[chunk_idx] - page_live_bytes) / kAlignment);
+ offset = chunk_idx * kBitsPerVectorWord + offset_in_chunk_word;
+ DCHECK(live_words_bitmap_->Test(offset))
+ << "offset=" << offset
+ << " chunk_idx=" << chunk_idx
+ << " N=" << ((chunk_info_vec_[chunk_idx] - page_live_bytes) / kAlignment)
+ << " offset_in_word=" << offset_in_chunk_word
+ << " word=" << std::hex << live_words_bitmap_->GetWord(chunk_idx);
+ // TODO: Can we optimize this for large objects? If we are continuing a
+ // large object that spans multiple pages, then we may be able to do without
+ // calling FindPrecedingObject().
+ //
+ // Find the object which encapsulates offset in it, which could be
+ // starting at offset itself.
+ obj = moving_space_bitmap_->FindPrecedingObject(heap_begin + offset * kAlignment);
+ // TODO: add a check to validate the object.
+ pre_compact_offset_moving_space_[to_space_page_idx] = offset;
+ first_objs_moving_space_[to_space_page_idx].Assign(obj);
+ to_space_page_idx++;
+ chunk_idx++;
+ }
+}
+
+void MarkCompact::InitNonMovingSpaceFirstObjects() {
+ accounting::ContinuousSpaceBitmap* bitmap = non_moving_space_->GetLiveBitmap();
+ uintptr_t begin = reinterpret_cast<uintptr_t>(non_moving_space_->Begin());
+ const uintptr_t end = reinterpret_cast<uintptr_t>(non_moving_space_->End());
+ mirror::Object* prev_obj;
+ size_t page_idx;
+ {
+ // Find first live object
+ mirror::Object* obj = nullptr;
+ bitmap->VisitMarkedRange</*kVisitOnce*/ true>(begin,
+ end,
+ [&obj] (mirror::Object* o) {
+ obj = o;
+ });
+ if (obj == nullptr) {
+ // There are no live objects in the non-moving space
+ return;
+ }
+ page_idx = (reinterpret_cast<uintptr_t>(obj) - begin) / kPageSize;
+ first_objs_non_moving_space_[page_idx++].Assign(obj);
+ prev_obj = obj;
+ }
+ // TODO: check obj is valid
+ uintptr_t prev_obj_end = reinterpret_cast<uintptr_t>(prev_obj)
+ + RoundUp(prev_obj->SizeOf<kDefaultVerifyFlags>(), kAlignment);
+ // For every page find the object starting from which we need to call
+ // VisitReferences. It could either be an object that started on some
+ // preceding page, or some object starting within this page.
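+  // For example (illustrative), an object spanning pages 2-4 becomes the
+  // first-objs entry for pages 3 and 4 as well, since those pages begin
+  // inside it.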
+ begin = RoundDown(reinterpret_cast<uintptr_t>(prev_obj) + kPageSize, kPageSize);
+ while (begin < end) {
+    // Utilize the large object, if any, that started in some preceding page
+    // and overlaps with this page as well.
+ if (prev_obj != nullptr && prev_obj_end > begin) {
+ DCHECK_LT(prev_obj, reinterpret_cast<mirror::Object*>(begin));
+ first_objs_non_moving_space_[page_idx].Assign(prev_obj);
+ mirror::Class* klass = prev_obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ if (bump_pointer_space_->HasAddress(klass)) {
+ LOG(WARNING) << "found inter-page object " << prev_obj
+ << " in non-moving space with klass " << klass
+ << " in moving space";
+ }
+ } else {
+ prev_obj_end = 0;
+      // It's sufficient to only search for the previous object in the
+      // preceding page. If no live object started in that page, and some
+      // object big enough to overlap with the current page had started in an
+      // even earlier page, then we wouldn't be in this else branch.
+ prev_obj = bitmap->FindPrecedingObject(begin, begin - kPageSize);
+ if (prev_obj != nullptr) {
+ prev_obj_end = reinterpret_cast<uintptr_t>(prev_obj)
+ + RoundUp(prev_obj->SizeOf<kDefaultVerifyFlags>(), kAlignment);
+ }
+ if (prev_obj_end > begin) {
+ mirror::Class* klass = prev_obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ if (bump_pointer_space_->HasAddress(klass)) {
+ LOG(WARNING) << "found inter-page object " << prev_obj
+ << " in non-moving space with klass " << klass
+ << " in moving space";
+ }
+ first_objs_non_moving_space_[page_idx].Assign(prev_obj);
+ } else {
+ // Find the first live object in this page
+ bitmap->VisitMarkedRange</*kVisitOnce*/ true>(
+ begin,
+ begin + kPageSize,
+ [this, page_idx] (mirror::Object* obj) {
+ first_objs_non_moving_space_[page_idx].Assign(obj);
+ });
+ }
+ // An empty entry indicates that the page has no live objects and hence
+ // can be skipped.
+ }
+ begin += kPageSize;
+ page_idx++;
+ }
+ non_moving_first_objs_count_ = page_idx;
+}
+
+class MarkCompact::ConcurrentCompactionGcTask : public SelfDeletingTask {
+ public:
+ explicit ConcurrentCompactionGcTask(MarkCompact* collector, size_t idx)
+ : collector_(collector), index_(idx) {}
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wframe-larger-than="
+ void Run(Thread* self ATTRIBUTE_UNUSED) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    // The page/buf passed to ConcurrentCompaction is used by the thread as a
+    // kPageSize buffer into which objects are compacted and updated, and
+    // which is then handed to the uffd ioctls.
+ if (kObjPtrPoisoning) {
+ uint8_t* page = collector_->compaction_buffers_map_.Begin() + index_ * kPageSize;
+ collector_->ConcurrentCompaction(page);
+ } else {
+ uint8_t buf[kPageSize];
+ collector_->ConcurrentCompaction(buf);
+ }
+ }
+#pragma clang diagnostic pop
+
+ private:
+ MarkCompact* const collector_;
+ size_t index_;
+};
+
+void MarkCompact::PrepareForCompaction() {
+ uint8_t* space_begin = bump_pointer_space_->Begin();
+ size_t vector_len = (black_allocations_begin_ - space_begin) / kOffsetChunkSize;
+ DCHECK_LE(vector_len, vector_length_);
+ for (size_t i = 0; i < vector_len; i++) {
+ DCHECK_LE(chunk_info_vec_[i], kOffsetChunkSize);
+ DCHECK_EQ(chunk_info_vec_[i], live_words_bitmap_->LiveBytesInBitmapWord(i));
+ }
+ InitMovingSpaceFirstObjects(vector_len);
+ InitNonMovingSpaceFirstObjects();
+
+ // TODO: We can do a lot of neat tricks with this offset vector to tune the
+ // compaction as we wish. Originally, the compaction algorithm slides all
+ // live objects towards the beginning of the heap. This is nice because it
+ // keeps the spatial locality of objects intact.
+ // However, sometimes it's desired to compact objects in certain portions
+ // of the heap. For instance, it is expected that, over time,
+ // objects towards the beginning of the heap are long lived and are always
+ // densely packed. In this case, it makes sense to only update references in
+ // there and not try to compact it.
+ // Furthermore, we might have some large objects and may not want to move such
+ // objects.
+ // We can adjust, without too much effort, the values in the chunk_info_vec_ such
+ // that the objects in the dense beginning area aren't moved. OTOH, large
+ // objects, which could be anywhere in the heap, could also be kept from
+ // moving by using a similar trick. The only issue is that by doing this we will
+ // leave an unused hole in the middle of the heap which can't be used for
+ // allocations until we do a *full* compaction.
+ //
+ // At this point every element in the chunk_info_vec_ contains the live-bytes
+ // of the corresponding chunk. For old-to-new address computation we need
+ // every element to reflect total live-bytes till the corresponding chunk.
+
+ // Live-bytes count is required to compute post_compact_end_ below.
+ uint32_t total;
+  // Include the vector element one past the heap usage, as it is required for
+  // computing black-allocated objects' post-compact addresses.
+ if (vector_len < vector_length_) {
+ vector_len++;
+ total = 0;
+ } else {
+ // Fetch the value stored in the last element before it gets overwritten by
+ // std::exclusive_scan().
+ total = chunk_info_vec_[vector_len - 1];
+ }
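+  // Illustrative example: per-chunk live bytes {16, 0, 24, 0} (the last slot
+  // being the extra element) become running totals {0, 16, 16, 40} after the
+  // scan below, and 'total' ends up as 40.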
+ std::exclusive_scan(chunk_info_vec_, chunk_info_vec_ + vector_len, chunk_info_vec_, 0);
+ total += chunk_info_vec_[vector_len - 1];
+
+ for (size_t i = vector_len; i < vector_length_; i++) {
+ DCHECK_EQ(chunk_info_vec_[i], 0u);
+ }
+ post_compact_end_ = AlignUp(space_begin + total, kPageSize);
+ CHECK_EQ(post_compact_end_, space_begin + moving_first_objs_count_ * kPageSize);
+ black_objs_slide_diff_ = black_allocations_begin_ - post_compact_end_;
+  // How do we handle compaction of the heap portion used for allocations
+  // after the marking-pause?
+  // All allocations after the marking-pause are considered black (reachable)
+  // for this GC cycle. However, they need not be allocated contiguously as
+  // different mutators use TLABs. So we will compact the heap up to the point
+  // where allocations took place before the marking-pause. Everything after
+  // that will be slid along with the TLAB holes, and the TLAB info in TLS
+  // will be appropriately updated in the pre-compaction pause.
+  // The chunk-info vector entries for the post-marking-pause allocations will
+  // also be updated in the pre-compaction pause.
+
+ if (!uffd_initialized_ && CreateUserfaultfd(/*post_fork*/false)) {
+ // Register the buffer that we use for terminating concurrent compaction
+ struct uffdio_register uffd_register;
+ uffd_register.range.start = reinterpret_cast<uintptr_t>(conc_compaction_termination_page_);
+ uffd_register.range.len = kPageSize;
+ uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ CHECK_EQ(ioctl(uffd_, UFFDIO_REGISTER, &uffd_register), 0)
+ << "ioctl_userfaultfd: register compaction termination page: " << strerror(errno);
+ }
+  // For zygote we create the thread pool each time before starting compaction
+  // and get rid of it when finished. This is expected to happen rarely, as
+  // zygote spends most of its time in the native fork loop.
+ if (uffd_ != kFallbackMode) {
+ ThreadPool* pool = heap_->GetThreadPool();
+ if (UNLIKELY(pool == nullptr)) {
+ heap_->CreateThreadPool();
+ pool = heap_->GetThreadPool();
+ }
+ const size_t num_threads = pool->GetThreadCount();
+ thread_pool_counter_ = num_threads;
+ for (size_t i = 0; i < num_threads; i++) {
+ pool->AddTask(thread_running_gc_, new ConcurrentCompactionGcTask(this, i + 1));
+ }
+ CHECK_EQ(pool->GetTaskCount(thread_running_gc_), num_threads);
+ }
+}
+
+class MarkCompact::VerifyRootMarkedVisitor : public SingleRootVisitor {
+ public:
+ explicit VerifyRootMarkedVisitor(MarkCompact* collector) : collector_(collector) { }
+
+ void VisitRoot(mirror::Object* root, const RootInfo& info) override
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
+ }
+
+ private:
+ MarkCompact* const collector_;
+};
+
+void MarkCompact::ReMarkRoots(Runtime* runtime) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ DCHECK_EQ(thread_running_gc_, Thread::Current());
+ Locks::mutator_lock_->AssertExclusiveHeld(thread_running_gc_);
+ MarkNonThreadRoots(runtime);
+ MarkConcurrentRoots(static_cast<VisitRootFlags>(kVisitRootFlagNewRoots
+ | kVisitRootFlagStopLoggingNewRoots
+ | kVisitRootFlagClearRootLog),
+ runtime);
+
+ if (kVerifyRootsMarked) {
+ TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
+ VerifyRootMarkedVisitor visitor(this);
+ runtime->VisitRoots(&visitor);
+ }
+}
+
+void MarkCompact::MarkingPause() {
+ TimingLogger::ScopedTiming t("(Paused)MarkingPause", GetTimings());
+ Runtime* runtime = Runtime::Current();
+ Locks::mutator_lock_->AssertExclusiveHeld(thread_running_gc_);
+ {
+ // Handle the dirty objects as we are a concurrent GC
+ WriterMutexLock mu(thread_running_gc_, *Locks::heap_bitmap_lock_);
+ {
+ MutexLock mu2(thread_running_gc_, *Locks::runtime_shutdown_lock_);
+ MutexLock mu3(thread_running_gc_, *Locks::thread_list_lock_);
+ std::list<Thread*> thread_list = runtime->GetThreadList()->GetList();
+ for (Thread* thread : thread_list) {
+ thread->VisitRoots(this, static_cast<VisitRootFlags>(0));
+ // Need to revoke all the thread-local allocation stacks since we will
+ // swap the allocation stacks (below) and don't want anybody to allocate
+ // into the live stack.
+ thread->RevokeThreadLocalAllocationStack();
+ bump_pointer_space_->RevokeThreadLocalBuffers(thread);
+ }
+ }
+ // Re-mark root set. Doesn't include thread-roots as they are already marked
+ // above.
+ ReMarkRoots(runtime);
+ // Scan dirty objects.
+ RecursiveMarkDirtyObjects(/*paused*/ true, accounting::CardTable::kCardDirty);
+ {
+ TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
+ heap_->SwapStacks();
+ live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
+ }
+ }
+ // Fetch only the accumulated objects-allocated count as it is guaranteed to
+ // be up-to-date after the TLAB revocation above.
+ freed_objects_ += bump_pointer_space_->GetAccumulatedObjectsAllocated();
+  // TODO: For PreSweepingGcVerification(), find the correct strategy to
+  // visit/walk objects in bump-pointer space when we have a mark-bitmap to
+  // indicate live objects. At the same time we also need to be able to visit
+  // black allocations, even though they are not marked in the bitmap. Without
+  // both of these we fail pre-sweeping verification. We also leave windows
+  // open wherein a VisitObjects/Walk on the space would either miss some
+  // objects or visit unreachable ones. These windows exist while we are
+  // switching from the shared mutator-lock to the exclusive one and vice
+  // versa, starting from here until the compaction pause.
+ // heap_->PreSweepingGcVerification(this);
+
+ // Disallow new system weaks to prevent a race which occurs when someone adds
+ // a new system weak before we sweep them. Since this new system weak may not
+ // be marked, the GC may incorrectly sweep it. This also fixes a race where
+ // interning may attempt to return a strong reference to a string that is
+ // about to be swept.
+ runtime->DisallowNewSystemWeaks();
+ // Enable the reference processing slow path, needs to be done with mutators
+ // paused since there is no lock in the GetReferent fast path.
+ heap_->GetReferenceProcessor()->EnableSlowPath();
+
+  // Capture 'end' of moving-space at this point. Every allocation beyond this
+  // point will be considered black.
+  // Align-up to page boundary so that black allocations happen from the next
+  // page onwards.
+  black_allocations_begin_ = bump_pointer_space_->AlignEnd(thread_running_gc_, kAlignment);
+  DCHECK(IsAligned<kAlignment>(black_allocations_begin_));
+  black_allocations_begin_ = AlignUp(black_allocations_begin_, kPageSize);
+}
+
+void MarkCompact::SweepSystemWeaks(Thread* self, Runtime* runtime, const bool paused) {
+ TimingLogger::ScopedTiming t(paused ? "(Paused)SweepSystemWeaks" : "SweepSystemWeaks",
+ GetTimings());
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ runtime->SweepSystemWeaks(this);
+}
+
+void MarkCompact::ProcessReferences(Thread* self) {
+ WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ GetHeap()->GetReferenceProcessor()->ProcessReferences(self, GetTimings());
+}
+
+void MarkCompact::Sweep(bool swap_bitmaps) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ // Ensure that nobody inserted objects in the live stack after we swapped the
+ // stacks.
+ CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
+ {
+ TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
+ // Mark everything allocated since the last GC as live so that we can sweep
+ // concurrently, knowing that new allocations won't be marked as live.
+ accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+ heap_->MarkAllocStackAsLive(live_stack);
+ live_stack->Reset();
+ DCHECK(mark_stack_->IsEmpty());
+ }
+ for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+ if (space->IsContinuousMemMapAllocSpace() && space != bump_pointer_space_) {
+ space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
+ TimingLogger::ScopedTiming split(
+ alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace",
+ GetTimings());
+ RecordFree(alloc_space->Sweep(swap_bitmaps));
+ }
+ }
+ SweepLargeObjects(swap_bitmaps);
+}
+
+void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
+ space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
+ if (los != nullptr) {
+ TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
+ RecordFreeLOS(los->Sweep(swap_bitmaps));
+ }
+}
+
+void MarkCompact::ReclaimPhase() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ DCHECK(thread_running_gc_ == Thread::Current());
+ Runtime* const runtime = Runtime::Current();
+ // Process the references concurrently.
+ ProcessReferences(thread_running_gc_);
+ // TODO: Try to merge this system-weak sweeping with the one while updating
+ // references during the compaction pause.
+ SweepSystemWeaks(thread_running_gc_, runtime, /*paused*/ false);
+ runtime->AllowNewSystemWeaks();
+ // Clean up class loaders after system weaks are swept since that is how we know if class
+ // unloading occurred.
+ runtime->GetClassLinker()->CleanupClassLoaders();
+ {
+ WriterMutexLock mu(thread_running_gc_, *Locks::heap_bitmap_lock_);
+ // Reclaim unmarked objects.
+ Sweep(false);
+    // Swap the live and mark bitmaps for each space which we modified. This
+    // is an optimization that enables us to not clear live bits inside of the
+    // sweep. Only swaps unbound bitmaps.
+ SwapBitmaps();
+ // Unbind the live and mark bitmaps.
+ GetHeap()->UnBindBitmaps();
+ }
+}
+
+// We want to avoid checking, for every reference, whether it's within the
+// page or not. This can be done if we know where in the page the holder
+// object lies. If it doesn't overlap either boundary then we can skip the
+// checks.
+template <bool kCheckBegin, bool kCheckEnd>
+class MarkCompact::RefsUpdateVisitor {
+ public:
+ explicit RefsUpdateVisitor(MarkCompact* collector,
+ mirror::Object* obj,
+ uint8_t* begin,
+ uint8_t* end)
+ : collector_(collector), obj_(obj), begin_(begin), end_(end) {
+ DCHECK(!kCheckBegin || begin != nullptr);
+ DCHECK(!kCheckEnd || end != nullptr);
+ }
+
+ void operator()(mirror::Object* old ATTRIBUTE_UNUSED, MemberOffset offset, bool /* is_static */)
+ const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
+ bool update = true;
+ if (kCheckBegin || kCheckEnd) {
+ uint8_t* ref = reinterpret_cast<uint8_t*>(obj_) + offset.Int32Value();
+ update = (!kCheckBegin || ref >= begin_) && (!kCheckEnd || ref < end_);
+ }
+ if (update) {
+ collector_->UpdateRef(obj_, offset);
+ }
+ }
+
+  // For object arrays we don't need to check boundaries here as it's done in
+  // VisitReferences().
+ // TODO: Optimize reference updating using SIMD instructions. Object arrays
+ // are perfect as all references are tightly packed.
+ void operator()(mirror::Object* old ATTRIBUTE_UNUSED,
+ MemberOffset offset,
+ bool /*is_static*/,
+ bool /*is_obj_array*/)
+ const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
+ collector_->UpdateRef(obj_, offset);
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ collector_->UpdateRoot(root);
+ }
+
+ private:
+ MarkCompact* const collector_;
+ mirror::Object* const obj_;
+ uint8_t* const begin_;
+ uint8_t* const end_;
+};
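+// Illustrative mapping of RefsUpdateVisitor's template flags to the
+// call-sites below: an object protruding into a page from the preceding one
+// is visited with kCheckBegin=true, an object crossing the page's end with
+// kCheckEnd=true, and fully interior objects with <false, false>, which skips
+// both boundary comparisons.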
+
+bool MarkCompact::IsValidObject(mirror::Object* obj) const {
+ mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ if (!heap_->GetVerification()->IsValidHeapObjectAddress(klass)) {
+ return false;
+ }
+ return heap_->GetVerification()->IsValidClassUnchecked<kWithFromSpaceBarrier>(
+ obj->GetClass<kVerifyNone, kWithFromSpaceBarrier>());
+}
+
+template <typename Callback>
+void MarkCompact::VerifyObject(mirror::Object* ref, Callback& callback) const {
+ if (kIsDebugBuild) {
+ mirror::Class* klass = ref->GetClass<kVerifyNone, kWithFromSpaceBarrier>();
+ mirror::Class* pre_compact_klass = ref->GetClass<kVerifyNone, kWithoutReadBarrier>();
+ mirror::Class* klass_klass = klass->GetClass<kVerifyNone, kWithFromSpaceBarrier>();
+ mirror::Class* klass_klass_klass = klass_klass->GetClass<kVerifyNone, kWithFromSpaceBarrier>();
+ if (bump_pointer_space_->HasAddress(pre_compact_klass) &&
+ reinterpret_cast<uint8_t*>(pre_compact_klass) < black_allocations_begin_) {
+ CHECK(moving_space_bitmap_->Test(pre_compact_klass))
+ << "ref=" << ref
+ << " post_compact_end=" << static_cast<void*>(post_compact_end_)
+ << " pre_compact_klass=" << pre_compact_klass
+ << " black_allocations_begin=" << static_cast<void*>(black_allocations_begin_);
+ CHECK(live_words_bitmap_->Test(pre_compact_klass));
+ }
+ if (!IsValidObject(ref)) {
+ std::ostringstream oss;
+ oss << "Invalid object: "
+ << "ref=" << ref
+ << " klass=" << klass
+ << " klass_klass=" << klass_klass
+ << " klass_klass_klass=" << klass_klass_klass
+ << " pre_compact_klass=" << pre_compact_klass
+ << " from_space_begin=" << static_cast<void*>(from_space_begin_)
+ << " pre_compact_begin=" << static_cast<void*>(bump_pointer_space_->Begin())
+ << " post_compact_end=" << static_cast<void*>(post_compact_end_)
+ << " black_allocations_begin=" << static_cast<void*>(black_allocations_begin_);
+
+ // Call callback before dumping larger data like RAM and space dumps.
+ callback(oss);
+
+ oss << " \nobject="
+ << heap_->GetVerification()->DumpRAMAroundAddress(reinterpret_cast<uintptr_t>(ref), 128)
+ << " \nklass(from)="
+ << heap_->GetVerification()->DumpRAMAroundAddress(reinterpret_cast<uintptr_t>(klass), 128)
+ << "spaces:\n";
+ heap_->DumpSpaces(oss);
+ LOG(FATAL) << oss.str();
+ }
+ }
+}
+
+void MarkCompact::CompactPage(mirror::Object* obj, uint32_t offset, uint8_t* addr) {
+ DCHECK(moving_space_bitmap_->Test(obj)
+ && live_words_bitmap_->Test(obj));
+ DCHECK(live_words_bitmap_->Test(offset)) << "obj=" << obj
+ << " offset=" << offset
+ << " addr=" << static_cast<void*>(addr)
+ << " black_allocs_begin="
+ << static_cast<void*>(black_allocations_begin_)
+ << " post_compact_addr="
+ << static_cast<void*>(post_compact_end_);
+ uint8_t* const start_addr = addr;
+  // Number of distinct live-strides encountered on this page.
+ size_t stride_count = 0;
+ uint8_t* last_stride = addr;
+ uint32_t last_stride_begin = 0;
+ auto verify_obj_callback = [&] (std::ostream& os) {
+ os << " stride_count=" << stride_count
+ << " last_stride=" << static_cast<void*>(last_stride)
+ << " offset=" << offset
+ << " start_addr=" << static_cast<void*>(start_addr);
+ };
+ obj = GetFromSpaceAddr(obj);
+ live_words_bitmap_->VisitLiveStrides(offset,
+ black_allocations_begin_,
+ kPageSize,
+ [&addr,
+ &last_stride,
+ &stride_count,
+ &last_stride_begin,
+ verify_obj_callback,
+ this] (uint32_t stride_begin,
+ size_t stride_size,
+ bool /*is_last*/)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ const size_t stride_in_bytes = stride_size * kAlignment;
+ DCHECK_LE(stride_in_bytes, kPageSize);
+ last_stride_begin = stride_begin;
+ DCHECK(IsAligned<kAlignment>(addr));
+ memcpy(addr,
+ from_space_begin_ + stride_begin * kAlignment,
+ stride_in_bytes);
+ if (kIsDebugBuild) {
+ uint8_t* space_begin = bump_pointer_space_->Begin();
+                                         // We can interpret the first word of
+                                         // the stride as an obj only from the
+                                         // second stride onwards, as the first
+                                         // stride's first object may have
+                                         // started on a previous page. The
+                                         // only exception is the first page of
+                                         // the moving space.
+ if (stride_count > 0
+ || stride_begin * kAlignment < kPageSize) {
+ mirror::Object* o =
+ reinterpret_cast<mirror::Object*>(space_begin
+ + stride_begin
+ * kAlignment);
+ CHECK(live_words_bitmap_->Test(o)) << "ref=" << o;
+ CHECK(moving_space_bitmap_->Test(o))
+ << "ref=" << o
+ << " bitmap: "
+ << moving_space_bitmap_->DumpMemAround(o);
+ VerifyObject(reinterpret_cast<mirror::Object*>(addr),
+ verify_obj_callback);
+ }
+ }
+ last_stride = addr;
+ addr += stride_in_bytes;
+ stride_count++;
+ });
+ DCHECK_LT(last_stride, start_addr + kPageSize);
+ DCHECK_GT(stride_count, 0u);
+ size_t obj_size = 0;
+ uint32_t offset_within_obj = offset * kAlignment
+ - (reinterpret_cast<uint8_t*>(obj) - from_space_begin_);
+ // First object
+ if (offset_within_obj > 0) {
+ mirror::Object* to_ref = reinterpret_cast<mirror::Object*>(start_addr - offset_within_obj);
+ if (stride_count > 1) {
+ RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/false> visitor(this,
+ to_ref,
+ start_addr,
+ nullptr);
+ obj_size = obj->VisitRefsForCompaction</*kFetchObjSize*/true, /*kVisitNativeRoots*/false>(
+ visitor, MemberOffset(offset_within_obj), MemberOffset(-1));
+ } else {
+ RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/true> visitor(this,
+ to_ref,
+ start_addr,
+ start_addr + kPageSize);
+ obj_size = obj->VisitRefsForCompaction</*kFetchObjSize*/true, /*kVisitNativeRoots*/false>(
+ visitor, MemberOffset(offset_within_obj), MemberOffset(offset_within_obj
+ + kPageSize));
+ }
+ obj_size = RoundUp(obj_size, kAlignment);
+ DCHECK_GT(obj_size, offset_within_obj);
+ obj_size -= offset_within_obj;
+ // If there is only one stride, then adjust last_stride_begin to the
+ // end of the first object.
+ if (stride_count == 1) {
+ last_stride_begin += obj_size / kAlignment;
+ }
+ }
+
+ // Except for the last page being compacted, the pages will have addr ==
+ // start_addr + kPageSize.
+ uint8_t* const end_addr = addr;
+ addr = start_addr;
+ size_t bytes_done = obj_size;
+ // All strides except the last one can be updated without any boundary
+ // checks.
+ DCHECK_LE(addr, last_stride);
+ size_t bytes_to_visit = last_stride - addr;
+ DCHECK_LE(bytes_to_visit, kPageSize);
+ while (bytes_to_visit > bytes_done) {
+ mirror::Object* ref = reinterpret_cast<mirror::Object*>(addr + bytes_done);
+ VerifyObject(ref, verify_obj_callback);
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false>
+ visitor(this, ref, nullptr, nullptr);
+ obj_size = ref->VisitRefsForCompaction(visitor, MemberOffset(0), MemberOffset(-1));
+ obj_size = RoundUp(obj_size, kAlignment);
+ bytes_done += obj_size;
+ }
+  // The last stride may have multiple objects in it and we don't know where
+  // the last object, which crosses the page boundary, starts. Therefore check
+  // the page-end in all of these objects. Also, we need to call
+  // VisitRefsForCompaction() with the from-space object, as we fetch the
+  // object size, which in the case of a klass requires 'class_size_'.
+ uint8_t* from_addr = from_space_begin_ + last_stride_begin * kAlignment;
+ bytes_to_visit = end_addr - addr;
+ DCHECK_LE(bytes_to_visit, kPageSize);
+ while (bytes_to_visit > bytes_done) {
+ mirror::Object* ref = reinterpret_cast<mirror::Object*>(addr + bytes_done);
+ obj = reinterpret_cast<mirror::Object*>(from_addr);
+ VerifyObject(ref, verify_obj_callback);
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/true>
+ visitor(this, ref, nullptr, start_addr + kPageSize);
+ obj_size = obj->VisitRefsForCompaction(visitor,
+ MemberOffset(0),
+ MemberOffset(end_addr - (addr + bytes_done)));
+ obj_size = RoundUp(obj_size, kAlignment);
+ from_addr += obj_size;
+ bytes_done += obj_size;
+ }
+ // The last page that we compact may have some bytes left untouched in the
+ // end, we should zero them as the kernel copies at page granularity.
+ if (UNLIKELY(bytes_done < kPageSize)) {
+ std::memset(addr + bytes_done, 0x0, kPageSize - bytes_done);
+ }
+}
+
+// We store the starting point (pre_compact_page - first_obj) and the first
+// chunk's size. If more TLABs started in this page, then those chunks are
+// identified using the mark bitmap. All this info is prepared in
+// UpdateMovingSpaceBlackAllocations().
+// If we find a set bit in the bitmap, then we copy the remaining page and then
+// use the bitmap to visit each object for updating references.
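+// A black page may therefore look like (sketch, not to scale):
+//   |<-empty->|<-first chunk->|<-TLAB hole->|<-bitmap-marked objects->|
+// where only the first chunk's size is stored; anything past it is recovered
+// from the mark-bitmap.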
+void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
+ const size_t page_idx,
+ uint8_t* const pre_compact_page,
+ uint8_t* dest) {
+ DCHECK(IsAligned<kPageSize>(pre_compact_page));
+ size_t bytes_copied;
+ const uint32_t first_chunk_size = black_alloc_pages_first_chunk_size_[page_idx];
+ mirror::Object* next_page_first_obj = first_objs_moving_space_[page_idx + 1].AsMirrorPtr();
+ uint8_t* src_addr = reinterpret_cast<uint8_t*>(GetFromSpaceAddr(first_obj));
+ uint8_t* pre_compact_addr = reinterpret_cast<uint8_t*>(first_obj);
+ uint8_t* const pre_compact_page_end = pre_compact_page + kPageSize;
+ uint8_t* const dest_page_end = dest + kPageSize;
+
+ auto verify_obj_callback = [&] (std::ostream& os) {
+ os << " first_obj=" << first_obj
+ << " next_page_first_obj=" << next_page_first_obj
+ << " first_chunk_sie=" << first_chunk_size
+ << " dest=" << static_cast<void*>(dest)
+ << " pre_compact_page="
+ << static_cast<void* const>(pre_compact_page);
+ };
+  // We have an empty portion at the beginning of the page. Zero it.
+ if (pre_compact_addr > pre_compact_page) {
+ bytes_copied = pre_compact_addr - pre_compact_page;
+ DCHECK_LT(bytes_copied, kPageSize);
+ std::memset(dest, 0x0, bytes_copied);
+ dest += bytes_copied;
+ } else {
+ bytes_copied = 0;
+ size_t offset = pre_compact_page - pre_compact_addr;
+ pre_compact_addr = pre_compact_page;
+ src_addr += offset;
+ DCHECK(IsAligned<kPageSize>(src_addr));
+ }
+ // Copy the first chunk of live words
+ std::memcpy(dest, src_addr, first_chunk_size);
+ // Update references in the first chunk. Use object size to find next object.
+ {
+ size_t bytes_to_visit = first_chunk_size;
+ size_t obj_size;
+ // The first object started in some previous page. So we need to check the
+ // beginning.
+ DCHECK_LE(reinterpret_cast<uint8_t*>(first_obj), pre_compact_addr);
+ size_t offset = pre_compact_addr - reinterpret_cast<uint8_t*>(first_obj);
+ if (bytes_copied == 0 && offset > 0) {
+ mirror::Object* to_obj = reinterpret_cast<mirror::Object*>(dest - offset);
+ mirror::Object* from_obj = reinterpret_cast<mirror::Object*>(src_addr - offset);
+      // If the next page's first-obj is within this page, or is nullptr, then
+      // we don't need to check the end boundary.
+ if (next_page_first_obj == nullptr
+ || (first_obj != next_page_first_obj
+ && reinterpret_cast<uint8_t*>(next_page_first_obj) <= pre_compact_page_end)) {
+ RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/false> visitor(this,
+ to_obj,
+ dest,
+ nullptr);
+ obj_size = from_obj->VisitRefsForCompaction<
+ /*kFetchObjSize*/true, /*kVisitNativeRoots*/false>(visitor,
+ MemberOffset(offset),
+ MemberOffset(-1));
+ } else {
+ RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/true> visitor(this,
+ to_obj,
+ dest,
+ dest_page_end);
+ from_obj->VisitRefsForCompaction<
+ /*kFetchObjSize*/false, /*kVisitNativeRoots*/false>(visitor,
+ MemberOffset(offset),
+ MemberOffset(offset
+ + kPageSize));
+ return;
+ }
+ obj_size = RoundUp(obj_size, kAlignment);
+ obj_size -= offset;
+ dest += obj_size;
+ bytes_to_visit -= obj_size;
+ }
+ bytes_copied += first_chunk_size;
+    // If the last object in this page is next_page_first_obj, then we need to check the end boundary.
+ bool check_last_obj = false;
+ if (next_page_first_obj != nullptr
+ && reinterpret_cast<uint8_t*>(next_page_first_obj) < pre_compact_page_end
+ && bytes_copied == kPageSize) {
+ size_t diff = pre_compact_page_end - reinterpret_cast<uint8_t*>(next_page_first_obj);
+ DCHECK_LE(diff, kPageSize);
+ DCHECK_LE(diff, bytes_to_visit);
+ bytes_to_visit -= diff;
+ check_last_obj = true;
+ }
+ while (bytes_to_visit > 0) {
+ mirror::Object* dest_obj = reinterpret_cast<mirror::Object*>(dest);
+ VerifyObject(dest_obj, verify_obj_callback);
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false> visitor(this,
+ dest_obj,
+ nullptr,
+ nullptr);
+ obj_size = dest_obj->VisitRefsForCompaction(visitor, MemberOffset(0), MemberOffset(-1));
+ obj_size = RoundUp(obj_size, kAlignment);
+ bytes_to_visit -= obj_size;
+ dest += obj_size;
+ }
+ DCHECK_EQ(bytes_to_visit, 0u);
+ if (check_last_obj) {
+ mirror::Object* dest_obj = reinterpret_cast<mirror::Object*>(dest);
+ VerifyObject(dest_obj, verify_obj_callback);
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/true> visitor(this,
+ dest_obj,
+ nullptr,
+ dest_page_end);
+ mirror::Object* obj = GetFromSpaceAddr(next_page_first_obj);
+ obj->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor,
+ MemberOffset(0),
+ MemberOffset(dest_page_end - dest));
+ return;
+ }
+ }
+
+ // Probably a TLAB finished on this page and/or a new TLAB started as well.
+ if (bytes_copied < kPageSize) {
+ src_addr += first_chunk_size;
+ pre_compact_addr += first_chunk_size;
+    // Use the mark-bitmap to identify where objects are. First call
+    // VisitMarkedRange for only the first marked bit. If found, zero all
+    // bytes until that object and then call memcpy on the rest of the page.
+    // Then call VisitMarkedRange for all marked bits *after* the one found in
+    // this invocation, this time to visit references.
+ uintptr_t start_visit = reinterpret_cast<uintptr_t>(pre_compact_addr);
+ uintptr_t page_end = reinterpret_cast<uintptr_t>(pre_compact_page_end);
+ mirror::Object* found_obj = nullptr;
+ moving_space_bitmap_->VisitMarkedRange</*kVisitOnce*/true>(start_visit,
+ page_end,
+ [&found_obj](mirror::Object* obj) {
+ found_obj = obj;
+ });
+ size_t remaining_bytes = kPageSize - bytes_copied;
+ if (found_obj == nullptr) {
+ // No more black objects in this page. Zero the remaining bytes and return.
+ std::memset(dest, 0x0, remaining_bytes);
+ return;
+ }
+ // Copy everything in this page, which includes any zeroed regions
+ // in-between.
+ std::memcpy(dest, src_addr, remaining_bytes);
+ DCHECK_LT(reinterpret_cast<uintptr_t>(found_obj), page_end);
+ moving_space_bitmap_->VisitMarkedRange(
+ reinterpret_cast<uintptr_t>(found_obj) + mirror::kObjectHeaderSize,
+ page_end,
+ [&found_obj, pre_compact_addr, dest, this, verify_obj_callback] (mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ptrdiff_t diff = reinterpret_cast<uint8_t*>(found_obj) - pre_compact_addr;
+ mirror::Object* ref = reinterpret_cast<mirror::Object*>(dest + diff);
+ VerifyObject(ref, verify_obj_callback);
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false>
+ visitor(this, ref, nullptr, nullptr);
+ ref->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor,
+ MemberOffset(0),
+ MemberOffset(-1));
+ // Remember for next round.
+ found_obj = obj;
+ });
+ // found_obj may have been updated in VisitMarkedRange. Visit the last found
+ // object.
+ DCHECK_GT(reinterpret_cast<uint8_t*>(found_obj), pre_compact_addr);
+ DCHECK_LT(reinterpret_cast<uintptr_t>(found_obj), page_end);
+ ptrdiff_t diff = reinterpret_cast<uint8_t*>(found_obj) - pre_compact_addr;
+ mirror::Object* ref = reinterpret_cast<mirror::Object*>(dest + diff);
+ VerifyObject(ref, verify_obj_callback);
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/true> visitor(this,
+ ref,
+ nullptr,
+ dest_page_end);
+ ref->VisitRefsForCompaction</*kFetchObjSize*/false>(
+ visitor, MemberOffset(0), MemberOffset(page_end -
+ reinterpret_cast<uintptr_t>(found_obj)));
+ }
+}
+
+template <bool kFallback>
+void MarkCompact::CompactMovingSpace(uint8_t* page) {
+ // For every page we have a starting object, which may have started in some
+ // preceding page, and an offset within that object from where we must start
+ // copying.
+  // Consult the live-words bitmap to copy all contiguously live words at a
+  // time. These words may constitute multiple objects. To avoid the need for
+  // consulting the mark-bitmap to find where the next live object starts, we
+  // use the object size returned by VisitRefsForCompaction.
+ //
+ // TODO: Should we do this in reverse? If the probability of accessing an object
+ // is inversely proportional to the object's age, then it may make sense.
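+  //
+  // E.g. (illustrative) live strides |AA..BBB.| picked up from the from-space
+  // are packed back-to-back as |AABBB| into the destination by CompactPage();
+  // only the final page's unused tail is zero-filled.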
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ uint8_t* to_space = bump_pointer_space_->Begin();
+ auto copy_ioctl = [this] (void* dst, void* buffer) {
+ struct uffdio_copy uffd_copy;
+ uffd_copy.src = reinterpret_cast<uintptr_t>(buffer);
+ uffd_copy.dst = reinterpret_cast<uintptr_t>(dst);
+ uffd_copy.len = kPageSize;
+ uffd_copy.mode = 0;
+ CHECK_EQ(ioctl(uffd_, UFFDIO_COPY, &uffd_copy), 0)
+ << "ioctl: copy " << strerror(errno);
+ DCHECK_EQ(uffd_copy.copy, static_cast<ssize_t>(kPageSize));
+ };
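+  // Note: since uffd_copy.mode == 0 (UFFDIO_COPY_MODE_DONTWAKE not set), the
+  // ioctl also wakes up any thread faulting on the destination page, so no
+  // separate UFFDIO_WAKE is needed.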
+ size_t idx = 0;
+ while (idx < moving_first_objs_count_) {
+    // Relaxed memory-order is used as the subsequent ioctl syscall will act as a fence.
+    // In the concurrent case (!kFallback) we need to ensure that the update to
+    // moving_pages_status_[idx] is released before the contents of the page.
+ if (kFallback
+ || moving_pages_status_[idx].exchange(PageState::kCompacting, std::memory_order_relaxed)
+ == PageState::kUncompacted) {
+ CompactPage(first_objs_moving_space_[idx].AsMirrorPtr(),
+ pre_compact_offset_moving_space_[idx],
+ kFallback ? to_space : page);
+ if (!kFallback) {
+ copy_ioctl(to_space, page);
+ }
+ }
+ to_space += kPageSize;
+ idx++;
+ }
+ // Allocated-black pages
+ size_t count = moving_first_objs_count_ + black_page_count_;
+ uint8_t* pre_compact_page = black_allocations_begin_;
+ DCHECK(IsAligned<kPageSize>(pre_compact_page));
+ while (idx < count) {
+ mirror::Object* first_obj = first_objs_moving_space_[idx].AsMirrorPtr();
+ if (first_obj != nullptr
+ && (kFallback
+ || moving_pages_status_[idx].exchange(PageState::kCompacting, std::memory_order_relaxed)
+ == PageState::kUncompacted)) {
+ DCHECK_GT(black_alloc_pages_first_chunk_size_[idx], 0u);
+ SlideBlackPage(first_obj,
+ idx,
+ pre_compact_page,
+ kFallback ? to_space : page);
+ if (!kFallback) {
+ copy_ioctl(to_space, page);
+ }
+ }
+ pre_compact_page += kPageSize;
+ to_space += kPageSize;
+ idx++;
+ }
+}
+
+void MarkCompact::UpdateNonMovingPage(mirror::Object* first, uint8_t* page) {
+ DCHECK_LT(reinterpret_cast<uint8_t*>(first), page + kPageSize);
+  // For every object found in the page, visit the previous object. This
+  // ensures that we can visit without checking the page-end boundary.
+  // Call VisitRefsForCompaction with the from-space read-barrier, as the
+  // klass object and super-class loads require it.
+  // TODO: Set kVisitNativeRoots to false once we implement concurrent
+  // compaction.
+ mirror::Object* curr_obj = first;
+ non_moving_space_bitmap_->VisitMarkedRange(
+ reinterpret_cast<uintptr_t>(first) + mirror::kObjectHeaderSize,
+ reinterpret_cast<uintptr_t>(page + kPageSize),
+ [&](mirror::Object* next_obj) {
+ // TODO: Once non-moving space update becomes concurrent, we'll
+ // require fetching the from-space address of 'curr_obj' and then call
+ // visitor on that.
+ if (reinterpret_cast<uint8_t*>(curr_obj) < page) {
+ RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/false>
+ visitor(this, curr_obj, page, page + kPageSize);
+ MemberOffset begin_offset(page - reinterpret_cast<uint8_t*>(curr_obj));
+ // Native roots shouldn't be visited as they are done when this
+ // object's beginning was visited in the preceding page.
+ curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false, /*kVisitNativeRoots*/false>(
+ visitor, begin_offset, MemberOffset(-1));
+ } else {
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false>
+ visitor(this, curr_obj, page, page + kPageSize);
+ curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor,
+ MemberOffset(0),
+ MemberOffset(-1));
+ }
+ curr_obj = next_obj;
+ });
+
+ MemberOffset end_offset(page + kPageSize - reinterpret_cast<uint8_t*>(curr_obj));
+ if (reinterpret_cast<uint8_t*>(curr_obj) < page) {
+ RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/true>
+ visitor(this, curr_obj, page, page + kPageSize);
+ curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false, /*kVisitNativeRoots*/false>(
+ visitor, MemberOffset(page - reinterpret_cast<uint8_t*>(curr_obj)), end_offset);
+ } else {
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/true>
+ visitor(this, curr_obj, page, page + kPageSize);
+ curr_obj->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor, MemberOffset(0), end_offset);
+ }
+}
+
+void MarkCompact::UpdateNonMovingSpace() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ uint8_t* page = non_moving_space_->Begin();
+ for (size_t i = 0; i < non_moving_first_objs_count_; i++) {
+ mirror::Object* obj = first_objs_non_moving_space_[i].AsMirrorPtr();
+    // null means there are no objects on the page whose references need updating.
+ if (obj != nullptr) {
+ UpdateNonMovingPage(obj, page);
+ }
+ page += kPageSize;
+ }
+}
+
+void MarkCompact::UpdateMovingSpaceBlackAllocations() {
+  // For sliding black pages, we need the first-object, which overlaps with
+  // the first byte of the page. Additionally, we compute the size of the
+  // first chunk of black objects. This will suffice for most black pages.
+  // Unlike compaction pages, here we don't need to pre-compute the offset
+  // within first-obj from where sliding has to start; that can be calculated
+  // using the pre-compact address of the page. Therefore, to save space, we
+  // store the first chunk's size in the black_alloc_pages_first_chunk_size_
+  // array.
+  // For the pages which may have holes after the first chunk, which could
+  // happen if a new TLAB starts in the middle of the page, we mark the
+  // objects in the mark-bitmap. So, if the first-chunk size is smaller than
+  // kPageSize, then we use the mark-bitmap for the remainder of the page.
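+  //
+  // For example (illustrative), if a page holds the tail of one TLAB and the
+  // head of a fresh one, only the tail's size is recorded as the first chunk;
+  // the fresh TLAB's objects get their mark-bits set below so that
+  // SlideBlackPage() can locate them past the hole.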
+ uint8_t* const begin = bump_pointer_space_->Begin();
+ uint8_t* black_allocs = black_allocations_begin_;
+ DCHECK_LE(begin, black_allocs);
+ size_t consumed_blocks_count = 0;
+ size_t first_block_size;
+ // Get the list of all blocks allocated in the bump-pointer space.
+ std::vector<size_t>* block_sizes = bump_pointer_space_->GetBlockSizes(thread_running_gc_,
+ &first_block_size);
+  DCHECK_LE(first_block_size, static_cast<size_t>(black_allocs - begin));
+ if (block_sizes != nullptr) {
+ size_t black_page_idx = moving_first_objs_count_;
+ uint8_t* block_end = begin + first_block_size;
+ uint32_t remaining_chunk_size = 0;
+ uint32_t first_chunk_size = 0;
+ mirror::Object* first_obj = nullptr;
+ for (size_t block_size : *block_sizes) {
+ block_end += block_size;
+ // Skip the blocks that are prior to the black allocations. These will be
+ // merged with the main-block later.
+ if (black_allocs >= block_end) {
+ consumed_blocks_count++;
+ continue;
+ }
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(black_allocs);
+ bool set_mark_bit = remaining_chunk_size > 0;
+      // We don't know how many objects are allocated in the current block.
+      // When we hit a null, assume it's the end. This works as every block is
+      // expected to have objects allocated linearly using the bump-pointer.
+      // BumpPointerSpace::Walk() also works similarly.
+ while (black_allocs < block_end
+ && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+ RememberDexCaches(obj);
+ if (first_obj == nullptr) {
+ first_obj = obj;
+ }
+ // We only need the mark-bitmap in the pages wherein a new TLAB starts in
+ // the middle of the page.
+ if (set_mark_bit) {
+ moving_space_bitmap_->Set(obj);
+ }
+ size_t obj_size = RoundUp(obj->SizeOf(), kAlignment);
+ // Handle objects which cross page boundary, including objects larger
+ // than page size.
+ if (remaining_chunk_size + obj_size >= kPageSize) {
+ set_mark_bit = false;
+ first_chunk_size += kPageSize - remaining_chunk_size;
+ remaining_chunk_size += obj_size;
+ // We should not store first-object and remaining_chunk_size if there were
+ // unused bytes before this TLAB, in which case we must have already
+ // stored the values (below).
+ if (black_alloc_pages_first_chunk_size_[black_page_idx] == 0) {
+ black_alloc_pages_first_chunk_size_[black_page_idx] = first_chunk_size;
+ first_objs_moving_space_[black_page_idx].Assign(first_obj);
+ }
+ black_page_idx++;
+ remaining_chunk_size -= kPageSize;
+ // Consume an object larger than page size.
+ while (remaining_chunk_size >= kPageSize) {
+ black_alloc_pages_first_chunk_size_[black_page_idx] = kPageSize;
+ first_objs_moving_space_[black_page_idx].Assign(obj);
+ black_page_idx++;
+ remaining_chunk_size -= kPageSize;
+ }
+ first_obj = remaining_chunk_size > 0 ? obj : nullptr;
+ first_chunk_size = remaining_chunk_size;
+ } else {
+ DCHECK_LE(first_chunk_size, remaining_chunk_size);
+ first_chunk_size += obj_size;
+ remaining_chunk_size += obj_size;
+ }
+ black_allocs += obj_size;
+ obj = reinterpret_cast<mirror::Object*>(black_allocs);
+ }
+ DCHECK_LE(black_allocs, block_end);
+ DCHECK_LT(remaining_chunk_size, kPageSize);
+ // consume the unallocated portion of the block
+ if (black_allocs < block_end) {
+ // first-chunk of the current page ends here. Store it.
+ if (first_chunk_size > 0) {
+ black_alloc_pages_first_chunk_size_[black_page_idx] = first_chunk_size;
+ first_objs_moving_space_[black_page_idx].Assign(first_obj);
+ first_chunk_size = 0;
+ }
+ first_obj = nullptr;
+ size_t page_remaining = kPageSize - remaining_chunk_size;
+ size_t block_remaining = block_end - black_allocs;
+ if (page_remaining <= block_remaining) {
+ block_remaining -= page_remaining;
+ // current page and the subsequent empty pages in the block
+ black_page_idx += 1 + block_remaining / kPageSize;
+ remaining_chunk_size = block_remaining % kPageSize;
+ } else {
+ remaining_chunk_size += block_remaining;
+ }
+ black_allocs = block_end;
+ }
+ }
+ black_page_count_ = black_page_idx - moving_first_objs_count_;
+ delete block_sizes;
+ }
+ // Update bump-pointer space by consuming all the pre-black blocks into the
+ // main one.
+ bump_pointer_space_->SetBlockSizes(thread_running_gc_,
+ post_compact_end_ - begin,
+ consumed_blocks_count);
+}
+
+void MarkCompact::UpdateNonMovingSpaceBlackAllocations() {
+ accounting::ObjectStack* stack = heap_->GetAllocationStack();
+ const StackReference<mirror::Object>* limit = stack->End();
+ uint8_t* const space_begin = non_moving_space_->Begin();
+ for (StackReference<mirror::Object>* it = stack->Begin(); it != limit; ++it) {
+ mirror::Object* obj = it->AsMirrorPtr();
+ if (obj != nullptr && non_moving_space_bitmap_->HasAddress(obj)) {
+ non_moving_space_bitmap_->Set(obj);
+ // Clear so that we don't try to set the bit again in the next GC-cycle.
+ it->Clear();
+ size_t idx = (reinterpret_cast<uint8_t*>(obj) - space_begin) / kPageSize;
+ uint8_t* page_begin = AlignDown(reinterpret_cast<uint8_t*>(obj), kPageSize);
+ mirror::Object* first_obj = first_objs_non_moving_space_[idx].AsMirrorPtr();
+ if (first_obj == nullptr
+ || (obj < first_obj && reinterpret_cast<uint8_t*>(first_obj) > page_begin)) {
+ first_objs_non_moving_space_[idx].Assign(obj);
+ }
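+      // If the object spills into subsequent page(s), it also becomes the
+      // first-object of each of those pages, unless a page already records an
+      // object starting at or before its beginning.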
+ mirror::Object* next_page_first_obj = first_objs_non_moving_space_[++idx].AsMirrorPtr();
+ uint8_t* next_page_begin = page_begin + kPageSize;
+ if (next_page_first_obj == nullptr
+ || reinterpret_cast<uint8_t*>(next_page_first_obj) > next_page_begin) {
+ size_t obj_size = RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kAlignment);
+ uint8_t* obj_end = reinterpret_cast<uint8_t*>(obj) + obj_size;
+ while (next_page_begin < obj_end) {
+ first_objs_non_moving_space_[idx++].Assign(obj);
+ next_page_begin += kPageSize;
+ }
+ }
+      // Update the first-objs count in case we went past non_moving_first_objs_count_.
+ non_moving_first_objs_count_ = std::max(non_moving_first_objs_count_, idx);
+ }
+ }
+}
+
+class MarkCompact::ImmuneSpaceUpdateObjVisitor {
+ public:
+ explicit ImmuneSpaceUpdateObjVisitor(MarkCompact* collector) : collector_(collector) {}
+
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES(Locks::mutator_lock_) {
+ RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false> visitor(collector_,
+ obj,
+ /*begin_*/nullptr,
+ /*end_*/nullptr);
+ obj->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor,
+ MemberOffset(0),
+ MemberOffset(-1));
+ }
+
+ static void Callback(mirror::Object* obj, void* arg) REQUIRES(Locks::mutator_lock_) {
+ reinterpret_cast<ImmuneSpaceUpdateObjVisitor*>(arg)->operator()(obj);
+ }
+
+ private:
+ MarkCompact* const collector_;
+};
+
+// TODO: JVMTI redefinition leads to situations wherein new class object(s)
+// and the corresponding native roots are set up but are not linked to class
+// tables and therefore are not accessible, leading to memory corruption.
+class MarkCompact::NativeRootsUpdateVisitor : public ClassLoaderVisitor, public DexCacheVisitor {
+ public:
+ explicit NativeRootsUpdateVisitor(MarkCompact* collector, PointerSize pointer_size)
+ : collector_(collector), pointer_size_(pointer_size) {}
+
+ ~NativeRootsUpdateVisitor() {
+ LOG(INFO) << "num_classes: " << classes_visited_.size()
+ << " num_dex_caches: " << dex_caches_visited_.size();
+ }
+
+ void Visit(ObjPtr<mirror::ClassLoader> class_loader) override
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) {
+ ClassTable* const class_table = class_loader->GetClassTable();
+ if (class_table != nullptr) {
+ class_table->VisitClassesAndRoots(*this);
+ }
+ }
+
+ void Visit(ObjPtr<mirror::DexCache> dex_cache) override
+ REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
+ if (!dex_cache.IsNull()) {
+ uint32_t cache = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(dex_cache.Ptr()));
+ if (dex_caches_visited_.insert(cache).second) {
+ dex_cache->VisitNativeRoots<kDefaultVerifyFlags, kWithoutReadBarrier>(*this);
+ collector_->dex_caches_.erase(cache);
+ }
+ }
+ }
+
+ void VisitDexCache(mirror::DexCache* dex_cache)
+ REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
+ dex_cache->VisitNativeRoots<kDefaultVerifyFlags, kWithoutReadBarrier>(*this);
+ }
+
+ void operator()(mirror::Object* obj)
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(obj->IsClass<kDefaultVerifyFlags>());
+ ObjPtr<mirror::Class> klass = obj->AsClass<kDefaultVerifyFlags>();
+ VisitClassRoots(klass);
+ }
+
+ // For ClassTable::Visit()
+ bool operator()(ObjPtr<mirror::Class> klass)
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!klass.IsNull()) {
+ VisitClassRoots(klass);
+ }
+ return true;
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ collector_->UpdateRoot(root);
+ }
+
+ private:
+ void VisitClassRoots(ObjPtr<mirror::Class> klass)
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ mirror::Class* klass_ptr = klass.Ptr();
+ uint32_t k = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(klass_ptr));
+    // No reason to visit native roots of classes in immune spaces.
+ if ((collector_->bump_pointer_space_->HasAddress(klass_ptr)
+ || collector_->non_moving_space_->HasAddress(klass_ptr))
+ && classes_visited_.insert(k).second) {
+ klass->VisitNativeRoots<kWithoutReadBarrier, /*kVisitProxyMethod*/false>(*this,
+ pointer_size_);
+ klass->VisitObsoleteDexCaches<kWithoutReadBarrier>(*this);
+ klass->VisitObsoleteClass<kWithoutReadBarrier>(*this);
+ }
+ }
+
+ std::unordered_set<uint32_t> dex_caches_visited_;
+ std::unordered_set<uint32_t> classes_visited_;
+ MarkCompact* const collector_;
+ PointerSize pointer_size_;
+};
+
+void MarkCompact::PreCompactionPhase() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ Runtime* runtime = Runtime::Current();
+ non_moving_space_bitmap_ = non_moving_space_->GetLiveBitmap();
+ if (kIsDebugBuild) {
+ pthread_attr_t attr;
+ size_t stack_size;
+ void* stack_addr;
+ pthread_getattr_np(pthread_self(), &attr);
+ pthread_attr_getstack(&attr, &stack_addr, &stack_size);
+ pthread_attr_destroy(&attr);
+ stack_addr_ = stack_addr;
+ stack_end_ = reinterpret_cast<char*>(stack_addr) + stack_size;
+ }
+
+ compacting_ = true;
+
+ {
+ TimingLogger::ScopedTiming t2("(Paused)UpdateCompactionDataStructures", GetTimings());
+ ReaderMutexLock rmu(thread_running_gc_, *Locks::heap_bitmap_lock_);
+    // Refresh data-structures to catch up on allocations that may have
+    // happened since the marking-phase pause.
+    // There could be several TLABs that got allocated since the marking
+    // pause. We don't want to compact them and instead update the TLAB info
+    // in TLS and let mutators continue to use the TLABs.
+    // We need to set all the bits in the live-words bitmap corresponding to
+    // allocated objects. Also, we need to find the objects that overlap
+    // page-begin boundaries. Unlike objects allocated before
+    // black_allocations_begin_, which can be identified via the mark-bitmap,
+    // we can get this info only by walking the space past
+    // black_allocations_begin_, which involves fetching each object's size.
+ // TODO: We can reduce the time spent on this in a pause by performing one
+ // round of this concurrently prior to the pause.
+ UpdateMovingSpaceBlackAllocations();
+ // TODO: If we want to avoid this allocation in a pause then we will have to
+ // allocate an array for the entire moving-space size, which can be made
+ // part of info_map_.
+ moving_pages_status_ = new Atomic<PageState>[moving_first_objs_count_ + black_page_count_];
+ if (kIsDebugBuild) {
+ size_t len = moving_first_objs_count_ + black_page_count_;
+ for (size_t i = 0; i < len; i++) {
+ CHECK_EQ(moving_pages_status_[i].load(std::memory_order_relaxed), PageState::kUncompacted);
+ }
+ }
+ // Iterate over the allocation_stack_, for every object in the non-moving
+ // space:
+ // 1. Mark the object in live bitmap
+ // 2. Erase the object from allocation stack
+ // 3. In the corresponding page, if the first-object vector needs updating
+ // then do so.
+ UpdateNonMovingSpaceBlackAllocations();
+
+ heap_->GetReferenceProcessor()->UpdateRoots(this);
+ }
+
+ {
+    // Thread roots must be updated first (before space mremap and native-root
+    // updates) to ensure that pre-update content is accessible.
+ TimingLogger::ScopedTiming t2("(Paused)UpdateThreadRoots", GetTimings());
+ MutexLock mu1(thread_running_gc_, *Locks::runtime_shutdown_lock_);
+ MutexLock mu2(thread_running_gc_, *Locks::thread_list_lock_);
+ std::list<Thread*> thread_list = runtime->GetThreadList()->GetList();
+ for (Thread* thread : thread_list) {
+ thread->VisitRoots(this, kVisitRootFlagAllRoots);
+ thread->AdjustTlab(black_objs_slide_diff_);
+ }
+ }
+
+ {
+    // Native roots must be updated before updating system weaks as the class
+    // linker holds class loaders and dex-caches as weak roots. Also, space
+    // mremap must be done after this step as we need to read
+    // class/dex-cache/class-loader content for updating native roots.
+ TimingLogger::ScopedTiming t2("(Paused)UpdateNativeRoots", GetTimings());
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ NativeRootsUpdateVisitor visitor(this, class_linker->GetImagePointerSize());
+ {
+ ReaderMutexLock rmu(thread_running_gc_, *Locks::classlinker_classes_lock_);
+ class_linker->VisitBootClasses(&visitor);
+ class_linker->VisitClassLoaders(&visitor);
+ }
+ {
+ WriterMutexLock wmu(thread_running_gc_, *Locks::heap_bitmap_lock_);
+ ReaderMutexLock rmu(thread_running_gc_, *Locks::dex_lock_);
+ class_linker->VisitDexCaches(&visitor);
+ for (uint32_t cache : dex_caches_) {
+ visitor.VisitDexCache(reinterpret_cast<mirror::DexCache*>(cache));
+ }
+ }
+ dex_caches_.clear();
+ }
+
+ SweepSystemWeaks(thread_running_gc_, runtime, /*paused*/true);
+ KernelPreparation();
+
+ {
+ TimingLogger::ScopedTiming t2("(Paused)UpdateConcurrentRoots", GetTimings());
+ runtime->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
+ }
+ {
+ // TODO: don't visit the transaction roots if it's not active.
+ TimingLogger::ScopedTiming t2("(Paused)UpdateNonThreadRoots", GetTimings());
+ runtime->VisitNonThreadRoots(this);
+ }
+
+ {
+    // TODO: Immune-space updates have to happen either before or after
+    // remapping pre-compact pages to from-space. Depending on when they are
+    // done, we have to invoke VisitRefsForCompaction() with or without
+    // read-barrier.
+ TimingLogger::ScopedTiming t2("(Paused)UpdateImmuneSpaces", GetTimings());
+ accounting::CardTable* const card_table = heap_->GetCardTable();
+ for (auto& space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ ImmuneSpaceUpdateObjVisitor visitor(this);
+ if (table != nullptr) {
+ table->ProcessCards();
+ table->VisitObjects(ImmuneSpaceUpdateObjVisitor::Callback, &visitor);
+ } else {
+ WriterMutexLock wmu(thread_running_gc_, *Locks::heap_bitmap_lock_);
+ card_table->Scan<false>(
+ live_bitmap,
+ space->Begin(),
+ space->Limit(),
+ visitor,
+ accounting::CardTable::kCardDirty - 1);
+ }
+ }
+ }
+
+ UpdateNonMovingSpace();
+  // Fallback mode.
+ if (uffd_ == kFallbackMode) {
+ CompactMovingSpace</*kFallback*/true>();
+ } else {
+ // We must start worker threads before resuming mutators to avoid deadlocks.
+ heap_->GetThreadPool()->StartWorkers(thread_running_gc_);
+ }
+ stack_end_ = nullptr;
+}
+
+void MarkCompact::KernelPreparation() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+  // TODO: Create mappings at 2MB-aligned addresses to benefit from optimized
+  // mremap.
+ size_t size = bump_pointer_space_->Capacity();
+ uint8_t* begin = bump_pointer_space_->Begin();
+ int flags = MREMAP_MAYMOVE | MREMAP_FIXED;
+ if (gHaveMremapDontunmap) {
+ flags |= MREMAP_DONTUNMAP;
+ }
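+  // With MREMAP_DONTUNMAP the source mapping is retained but left without any
+  // pages, so subsequent accesses to the moving space fault as 'missing'
+  // pages, which is what the userfaultfd registration below relies on.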
+
+ void* ret = mremap(begin, size, size, flags, from_space_begin_);
+ CHECK_EQ(ret, static_cast<void*>(from_space_begin_))
+ << "mremap to move pages from moving space to from-space failed: " << strerror(errno)
+ << ". moving-space-addr=" << reinterpret_cast<void*>(begin)
+ << " size=" << size;
+
+ // Without MREMAP_DONTUNMAP the source mapping is unmapped by mremap. So mmap
+ // the moving space again.
+ if (!gHaveMremapDontunmap) {
+ ret = mmap(begin, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
+ CHECK_EQ(ret, static_cast<void*>(begin)) << "mmap for moving space failed: " << strerror(errno);
+ }
+
+ DCHECK_EQ(mprotect(from_space_begin_, size, PROT_READ), 0)
+ << "mprotect failed: " << strerror(errno);
+
+ if (uffd_ >= 0) {
+ // Userfaultfd registration
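+    // With UFFDIO_REGISTER_MODE_MISSING, faults on missing (unmapped) pages
+    // in this range are delivered to readers of uffd_ (see
+    // ConcurrentCompaction()) rather than being resolved by the kernel.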
+ struct uffdio_register uffd_register;
+ uffd_register.range.start = reinterpret_cast<uintptr_t>(begin);
+ uffd_register.range.len = size;
+ uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ CHECK_EQ(ioctl(uffd_, UFFDIO_REGISTER, &uffd_register), 0)
+ << "ioctl_userfaultfd: register moving-space: " << strerror(errno);
+ }
+}
+
+void MarkCompact::ConcurrentCompaction(uint8_t* page) {
+ struct uffd_msg msg;
+ uint8_t* unused_space_begin = bump_pointer_space_->Begin()
+ + (moving_first_objs_count_ + black_page_count_) * kPageSize;
+ DCHECK(IsAligned<kPageSize>(unused_space_begin));
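+  // Pages at or beyond unused_space_begin have nothing to compact; faults on
+  // them are resolved with zero-pages (see below).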
+ auto zeropage_ioctl = [this] (void* addr, bool tolerate_eexist) {
+ struct uffdio_zeropage uffd_zeropage;
+ DCHECK(IsAligned<kPageSize>(addr));
+ uffd_zeropage.range.start = reinterpret_cast<uintptr_t>(addr);
+ uffd_zeropage.range.len = kPageSize;
+ uffd_zeropage.mode = 0;
+ int ret = ioctl(uffd_, UFFDIO_ZEROPAGE, &uffd_zeropage);
+ CHECK(ret == 0 || (tolerate_eexist && ret == -1 && errno == EEXIST))
+ << "ioctl: zeropage: " << strerror(errno);
+ DCHECK_EQ(uffd_zeropage.zeropage, static_cast<ssize_t>(kPageSize));
+ };
+
+ auto copy_ioctl = [this] (void* fault_page, void* src) {
+ struct uffdio_copy uffd_copy;
+ uffd_copy.src = reinterpret_cast<uintptr_t>(src);
+ uffd_copy.dst = reinterpret_cast<uintptr_t>(fault_page);
+ uffd_copy.len = kPageSize;
+ uffd_copy.mode = 0;
+ CHECK_EQ(ioctl(uffd_, UFFDIO_COPY, &uffd_copy), 0)
+ << "ioctl: copy: " << strerror(errno);
+ DCHECK_EQ(uffd_copy.copy, static_cast<ssize_t>(kPageSize));
+ };
+
+ while (true) {
+ ssize_t nread = read(uffd_, &msg, sizeof(msg));
+ CHECK_GT(nread, 0);
+ CHECK_EQ(msg.event, UFFD_EVENT_PAGEFAULT);
+ DCHECK_EQ(nread, static_cast<ssize_t>(sizeof(msg)));
+ uint8_t* fault_addr = reinterpret_cast<uint8_t*>(msg.arg.pagefault.address);
+ if (fault_addr == conc_compaction_termination_page_) {
+      // The counter doesn't need to be updated atomically as only one thread
+      // at a time wakes up against the gc-thread's load of this fault_addr.
+      // In fact, the threads wake up serially: every exiting thread wakes up
+      // the gc-thread, which retries the load but again finds the page
+      // missing. Also, the value will be flushed to caches due to the ioctl
+      // syscall below.
+ uint8_t ret = thread_pool_counter_--;
+ // Only the last thread should map the zeropage so that the gc-thread can
+ // proceed.
+ if (ret == 1) {
+ zeropage_ioctl(fault_addr, /*tolerate_eexist*/ false);
+ } else {
+ struct uffdio_range uffd_range;
+ uffd_range.start = msg.arg.pagefault.address;
+ uffd_range.len = kPageSize;
+ CHECK_EQ(ioctl(uffd_, UFFDIO_WAKE, &uffd_range), 0)
+ << "ioctl: wake: " << strerror(errno);
+ }
+ break;
+ }
+ DCHECK(bump_pointer_space_->HasAddress(reinterpret_cast<mirror::Object*>(fault_addr)));
+ uint8_t* fault_page = AlignDown(fault_addr, kPageSize);
+ if (fault_addr >= unused_space_begin) {
+ // There is a race which allows more than one thread to install a
+ // zero-page. But we can tolerate that. So absorb the EEXIST returned by
+ // the ioctl and move on.
+ zeropage_ioctl(fault_page, /*tolerate_eexist*/ true);
+ continue;
+ }
+ size_t page_idx = (fault_page - bump_pointer_space_->Begin()) / kPageSize;
+ PageState state = moving_pages_status_[page_idx].load(std::memory_order_relaxed);
+ if (state == PageState::kUncompacted) {
+ // Relaxed memory-order is fine as the subsequent ioctl syscall guarantees
+ // status to be flushed before this thread attempts to copy/zeropage the
+ // fault_page.
+ state = moving_pages_status_[page_idx].exchange(PageState::kCompacting,
+ std::memory_order_relaxed);
+ }
+ if (state == PageState::kCompacting) {
+      // Somebody else took (or is taking) care of the page, so nothing to do.
+ continue;
+ }
+
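+    // This thread now owns the page: the exchange above guarantees that
+    // exactly one worker moves the page out of kUncompacted and performs the
+    // copy/slide below.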
+ if (fault_page < post_compact_end_) {
+ // The page has to be compacted.
+ CompactPage(first_objs_moving_space_[page_idx].AsMirrorPtr(),
+ pre_compact_offset_moving_space_[page_idx],
+ page);
+ copy_ioctl(fault_page, page);
+ } else {
+ // The page either has to be slid, or if it's an empty page then a
+ // zeropage needs to be mapped.
+ mirror::Object* first_obj = first_objs_moving_space_[page_idx].AsMirrorPtr();
+ if (first_obj != nullptr) {
+ DCHECK_GT(pre_compact_offset_moving_space_[page_idx], 0u);
+ uint8_t* pre_compact_page = black_allocations_begin_ + (fault_page - post_compact_end_);
+ DCHECK(IsAligned<kPageSize>(pre_compact_page));
+ SlideBlackPage(first_obj,
+ page_idx,
+ pre_compact_page,
+ page);
+ copy_ioctl(fault_page, page);
+ } else {
+ // We should never have a case where two workers are trying to install a
+ // zeropage in this range as we synchronize using
+ // moving_pages_status_[page_idx].
+ zeropage_ioctl(fault_page, /*tolerate_eexist*/ false);
+ }
+ }
+ }
+}
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wframe-larger-than="
+void MarkCompact::CompactionPhase() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ {
+ int32_t freed_bytes = black_objs_slide_diff_;
+ bump_pointer_space_->RecordFree(freed_objects_, freed_bytes);
+ RecordFree(ObjectBytePair(freed_objects_, freed_bytes));
+ }
+
+ if (kObjPtrPoisoning) {
+ CompactMovingSpace</*kFallback*/false>(compaction_buffers_map_.Begin());
+ // madvise the page so that we can get userfaults on it. We don't need to
+ // do this when not using poisoning as in that case the address location is
+ // untouched during compaction.
+ ZeroAndReleasePages(conc_compaction_termination_page_, kPageSize);
+ } else {
+ uint8_t buf[kPageSize];
+ CompactMovingSpace</*kFallback*/false>(buf);
+ }
+
+  // The following read triggers 'special' userfaults. When received by the
+  // thread-pool workers, they will exit out of the compaction task. This fault
+ // happens because we madvise info_map_ above and it is at least kPageSize in length.
+ DCHECK(IsAligned<kPageSize>(conc_compaction_termination_page_));
+ CHECK_EQ(*reinterpret_cast<volatile uint8_t*>(conc_compaction_termination_page_), 0);
+ DCHECK_EQ(thread_pool_counter_, 0);
+
+ struct uffdio_range unregister_range;
+ unregister_range.start = reinterpret_cast<uintptr_t>(bump_pointer_space_->Begin());
+ unregister_range.len = bump_pointer_space_->Capacity();
+ CHECK_EQ(ioctl(uffd_, UFFDIO_UNREGISTER, &unregister_range), 0)
+ << "ioctl_userfaultfd: unregister moving-space: " << strerror(errno);
+
+  // When poisoning ObjPtr, we are forced to use buffers for page compaction
+  // in the lower 4GB. Now that their use is done, madvise them. But skip the
+  // first page, which is used by the gc-thread for the next iteration;
+  // otherwise we would deadlock on a userfault on it in the next iteration.
+  // This page is not consuming any physical memory because we already
+  // madvised it above and then triggered a read userfault, which maps a
+  // special zero-page.
+ if (kObjPtrPoisoning) {
+ ZeroAndReleasePages(compaction_buffers_map_.Begin() + kPageSize,
+ compaction_buffers_map_.Size() - kPageSize);
+ } else {
+ ZeroAndReleasePages(conc_compaction_termination_page_, kPageSize);
+ }
+ heap_->GetThreadPool()->StopWorkers(thread_running_gc_);
+}
+#pragma clang diagnostic pop
+
+template <size_t kBufferSize>
+class MarkCompact::ThreadRootsVisitor : public RootVisitor {
+ public:
+ explicit ThreadRootsVisitor(MarkCompact* mark_compact, Thread* const self)
+ : mark_compact_(mark_compact), self_(self) {}
+
+ ~ThreadRootsVisitor() {
+ Flush();
+ }
+
+ void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
+ for (size_t i = 0; i < count; i++) {
+ mirror::Object* obj = *roots[i];
+ if (mark_compact_->MarkObjectNonNullNoPush</*kParallel*/true>(obj)) {
+ Push(obj);
+ }
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED)
+ override REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
+ for (size_t i = 0; i < count; i++) {
+ mirror::Object* obj = roots[i]->AsMirrorPtr();
+ if (mark_compact_->MarkObjectNonNullNoPush</*kParallel*/true>(obj)) {
+ Push(obj);
+ }
+ }
+ }
+
+ private:
+ void Flush() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
+ StackReference<mirror::Object>* start;
+ StackReference<mirror::Object>* end;
+ {
+ MutexLock mu(self_, mark_compact_->mark_stack_lock_);
+      // Loop here because even after expanding once the stack may not be able
+      // to accommodate all references. That is highly unlikely, but there is
+      // no harm in handling it this way.
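+      // BumpBack() reserves idx_ slots at the back of the mark-stack and
+      // returns the reserved range in [start, end).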
+ while (!mark_compact_->mark_stack_->BumpBack(idx_, &start, &end)) {
+ mark_compact_->ExpandMarkStack();
+ }
+ }
+ while (idx_ > 0) {
+ *start++ = roots_[--idx_];
+ }
+ DCHECK_EQ(start, end);
+ }
+
+ void Push(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_) {
+ if (UNLIKELY(idx_ >= kBufferSize)) {
+ Flush();
+ }
+ roots_[idx_++].Assign(obj);
+ }
+
+ StackReference<mirror::Object> roots_[kBufferSize];
+ size_t idx_ = 0;
+ MarkCompact* const mark_compact_;
+ Thread* const self_;
+};
+
+class MarkCompact::CheckpointMarkThreadRoots : public Closure {
+ public:
+ explicit CheckpointMarkThreadRoots(MarkCompact* mark_compact) : mark_compact_(mark_compact) {}
+
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
+ ScopedTrace trace("Marking thread roots");
+ // Note: self is not necessarily equal to thread since thread may be
+ // suspended.
+ Thread* const self = Thread::Current();
+ CHECK(thread == self
+ || thread->IsSuspended()
+ || thread->GetState() == ThreadState::kWaitingPerformingGc)
+ << thread->GetState() << " thread " << thread << " self " << self;
+ {
+ ThreadRootsVisitor</*kBufferSize*/ 20> visitor(mark_compact_, self);
+ thread->VisitRoots(&visitor, kVisitRootFlagAllRoots);
+ }
+
+ // If thread is a running mutator, then act on behalf of the garbage
+ // collector. See the code in ThreadList::RunCheckpoint.
+ mark_compact_->GetBarrier().Pass(self);
+ }
+
+ private:
+ MarkCompact* const mark_compact_;
+};
+
+void MarkCompact::MarkRootsCheckpoint(Thread* self, Runtime* runtime) {
+  // We revoke TLABs later, during the paused round of marking.
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ CheckpointMarkThreadRoots check_point(this);
+ ThreadList* thread_list = runtime->GetThreadList();
+ gc_barrier_.Init(self, 0);
+  // Request that the checkpoint be run on all threads; returns the number of
+  // threads that must run through the barrier, including self.
+ size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+  // Release locks, then wait for all mutator threads to pass the barrier. If
+  // there are no threads to wait for, then all the checkpoint functions have
+  // already finished and there is no need to release the locks.
+ if (barrier_count == 0) {
+ return;
+ }
+ Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
+ Locks::mutator_lock_->SharedUnlock(self);
+ {
+ ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun);
+ gc_barrier_.Increment(self, barrier_count);
+ }
+ Locks::mutator_lock_->SharedLock(self);
+ Locks::heap_bitmap_lock_->ExclusiveLock(self);
+}
+
+void MarkCompact::MarkNonThreadRoots(Runtime* runtime) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ runtime->VisitNonThreadRoots(this);
+}
+
+void MarkCompact::MarkConcurrentRoots(VisitRootFlags flags, Runtime* runtime) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ runtime->VisitConcurrentRoots(this, flags);
+}
+
+void MarkCompact::RevokeAllThreadLocalBuffers() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ bump_pointer_space_->RevokeAllThreadLocalBuffers();
+}
+
+class MarkCompact::ScanObjectVisitor {
+ public:
+ explicit ScanObjectVisitor(MarkCompact* const mark_compact) ALWAYS_INLINE
+ : mark_compact_(mark_compact) {}
+
+ void operator()(ObjPtr<mirror::Object> obj) const
+ ALWAYS_INLINE
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ mark_compact_->ScanObject</*kUpdateLiveWords*/ false>(obj.Ptr());
+ }
+
+ private:
+ MarkCompact* const mark_compact_;
+};
+
+void MarkCompact::UpdateAndMarkModUnion() {
+ accounting::CardTable* const card_table = heap_->GetCardTable();
+ for (const auto& space : immune_spaces_.GetSpaces()) {
+ const char* name = space->IsZygoteSpace()
+ ? "UpdateAndMarkZygoteModUnionTable"
+ : "UpdateAndMarkImageModUnionTable";
+ DCHECK(space->IsZygoteSpace() || space->IsImageSpace()) << *space;
+ TimingLogger::ScopedTiming t(name, GetTimings());
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ if (table != nullptr) {
+      // UpdateAndMarkReferences() doesn't visit Reference-type objects. But
+      // that's fine because these objects are immutable enough (the referent
+      // can only be cleared) and hence the only referents they can have are
+      // intra-space.
+ table->UpdateAndMarkReferences(this);
+ } else {
+ // No mod-union table, scan all dirty/aged cards in the corresponding
+ // card-table. This can only occur for app images.
+ card_table->Scan</*kClearCard*/ false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ ScanObjectVisitor(this),
+ gc::accounting::CardTable::kCardAged);
+ }
+ }
+}
+
+void MarkCompact::MarkReachableObjects() {
+ UpdateAndMarkModUnion();
+ // Recursively mark all the non-image bits set in the mark bitmap.
+ ProcessMarkStack();
+}
+
+class MarkCompact::CardModifiedVisitor {
+ public:
+ explicit CardModifiedVisitor(MarkCompact* const mark_compact,
+ accounting::ContinuousSpaceBitmap* const bitmap,
+ accounting::CardTable* const card_table)
+ : visitor_(mark_compact), bitmap_(bitmap), card_table_(card_table) {}
+
+ void operator()(uint8_t* card,
+ uint8_t expected_value,
+ uint8_t new_value ATTRIBUTE_UNUSED) const {
+ if (expected_value == accounting::CardTable::kCardDirty) {
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table_->AddrFromCard(card));
+ bitmap_->VisitMarkedRange(start, start + accounting::CardTable::kCardSize, visitor_);
+ }
+ }
+
+ private:
+ ScanObjectVisitor visitor_;
+ accounting::ContinuousSpaceBitmap* bitmap_;
+ accounting::CardTable* const card_table_;
+};
+
+void MarkCompact::ScanDirtyObjects(bool paused, uint8_t minimum_age) {
+ accounting::CardTable* card_table = heap_->GetCardTable();
+ for (const auto& space : heap_->GetContinuousSpaces()) {
+ const char* name = nullptr;
+ switch (space->GetGcRetentionPolicy()) {
+ case space::kGcRetentionPolicyNeverCollect:
+ name = paused ? "(Paused)ScanGrayImmuneSpaceObjects" : "ScanGrayImmuneSpaceObjects";
+ break;
+ case space::kGcRetentionPolicyFullCollect:
+ name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
+ break;
+ case space::kGcRetentionPolicyAlwaysCollect:
+ name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
+ break;
+ default:
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+ }
+ TimingLogger::ScopedTiming t(name, GetTimings());
+ ScanObjectVisitor visitor(this);
+ const bool is_immune_space = space->IsZygoteSpace() || space->IsImageSpace();
+ if (paused) {
+ DCHECK_EQ(minimum_age, gc::accounting::CardTable::kCardDirty);
+ // We can clear the card-table for any non-immune space.
+ if (is_immune_space) {
+ card_table->Scan</*kClearCard*/false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ minimum_age);
+ } else {
+ card_table->Scan</*kClearCard*/true>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ minimum_age);
+ }
+ } else {
+ DCHECK_EQ(minimum_age, gc::accounting::CardTable::kCardAged);
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ if (table) {
+ table->ProcessCards();
+ card_table->Scan</*kClearCard*/false>(space->GetMarkBitmap(),
+ space->Begin(),
+ space->End(),
+ visitor,
+ minimum_age);
+ } else {
+ CardModifiedVisitor card_modified_visitor(this, space->GetMarkBitmap(), card_table);
+ // For the alloc spaces we should age the dirty cards and clear the rest.
+ // For image and zygote-space without mod-union-table, age the dirty
+ // cards but keep the already aged cards unchanged.
+ // In either case, visit the objects on the cards that were changed from
+ // dirty to aged.
+ if (is_immune_space) {
+ card_table->ModifyCardsAtomic(space->Begin(),
+ space->End(),
+ [](uint8_t card) {
+ return (card == gc::accounting::CardTable::kCardClean)
+ ? card
+ : gc::accounting::CardTable::kCardAged;
+ },
+ card_modified_visitor);
+ } else {
+ card_table->ModifyCardsAtomic(space->Begin(),
+ space->End(),
+ AgeCardVisitor(),
+ card_modified_visitor);
+ }
+ }
+ }
+ }
+}
+
+void MarkCompact::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
+ ScanDirtyObjects(paused, minimum_age);
+ ProcessMarkStack();
+}
+
+void MarkCompact::MarkRoots(VisitRootFlags flags) {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ Runtime* runtime = Runtime::Current();
+  // Make sure that the checkpoint which collects the stack roots is the first
+  // one capturing GC-roots, as it is supposed to establish the cut-off
+  // address: everything allocated after that (during this marking phase)
+  // will be considered 'marked'.
+ MarkRootsCheckpoint(thread_running_gc_, runtime);
+ MarkNonThreadRoots(runtime);
+ MarkConcurrentRoots(flags, runtime);
+}
+
+void MarkCompact::PreCleanCards() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ CHECK(!Locks::mutator_lock_->IsExclusiveHeld(thread_running_gc_));
+ MarkRoots(static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
+ RecursiveMarkDirtyObjects(/*paused*/ false, accounting::CardTable::kCardDirty - 1);
+}
+
+// In a concurrent marking algorithm which, as in this case, uses neither a
+// write nor a read barrier, we need a stop-the-world (STW) round at the end
+// to mark objects which were written into concurrently while concurrent
+// marking was performed.
+// In order to minimize the pause time, we could take one of two approaches:
+// 1. Keep repeating concurrent marking of dirty cards until the time spent
+// goes below a threshold.
+// 2. Do two rounds concurrently and then attempt a paused one. If we find
+// that it's taking too long, then resume mutators and retry.
+//
+// Given the non-trivial fixed overhead of running a round (card table and root
+// scan), it might be better to go with approach 2.
+void MarkCompact::MarkingPhase() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ DCHECK_EQ(thread_running_gc_, Thread::Current());
+ WriterMutexLock mu(thread_running_gc_, *Locks::heap_bitmap_lock_);
+ BindAndResetBitmaps();
+ MarkRoots(
+ static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
+ MarkReachableObjects();
+ // Pre-clean dirtied cards to reduce pauses.
+ PreCleanCards();
+
+  // Set up reference processing and forward soft references once before
+  // enabling the slow path (in MarkingPause).
+ ReferenceProcessor* rp = GetHeap()->GetReferenceProcessor();
+ bool clear_soft_references = GetCurrentIteration()->GetClearSoftReferences();
+ rp->Setup(thread_running_gc_, this, /*concurrent=*/ true, clear_soft_references);
+ if (!clear_soft_references) {
+ // Forward as many SoftReferences as possible before inhibiting reference access.
+ rp->ForwardSoftReferences(GetTimings());
+ }
+}
+
+class MarkCompact::RefFieldsVisitor {
+ public:
+ ALWAYS_INLINE explicit RefFieldsVisitor(MarkCompact* const mark_compact)
+ : mark_compact_(mark_compact) {}
+
+ ALWAYS_INLINE void operator()(mirror::Object* obj,
+ MemberOffset offset,
+ bool is_static ATTRIBUTE_UNUSED) const
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kCheckLocks) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ mark_compact_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
+ }
+
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ mark_compact_->DelayReferenceReferent(klass, ref);
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kCheckLocks) {
+ Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+ Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
+ }
+ mark_compact_->MarkObject(root->AsMirrorPtr());
+ }
+
+ private:
+ MarkCompact* const mark_compact_;
+};
+
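+// For example, with 64-bit bitmap words and kAlignment == 8, a bitmap word
+// with 5 bits set represents 5 live 8-byte words, i.e. 40 live bytes.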
+template <size_t kAlignment>
+size_t MarkCompact::LiveWordsBitmap<kAlignment>::LiveBytesInBitmapWord(size_t chunk_idx) const {
+ const size_t index = chunk_idx * kBitmapWordsPerVectorWord;
+ size_t words = 0;
+ for (uint32_t i = 0; i < kBitmapWordsPerVectorWord; i++) {
+ words += POPCOUNT(Bitmap::Begin()[index + i]);
+ }
+ return words * kAlignment;
+}
+
+void MarkCompact::UpdateLivenessInfo(mirror::Object* obj) {
+ DCHECK(obj != nullptr);
+ uintptr_t obj_begin = reinterpret_cast<uintptr_t>(obj);
+ size_t size = RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kAlignment);
+ uintptr_t bit_index = live_words_bitmap_->SetLiveWords(obj_begin, size);
+ size_t chunk_idx = (obj_begin - live_words_bitmap_->Begin()) / kOffsetChunkSize;
+ // Compute the bit-index within the chunk-info vector word.
+ bit_index %= kBitsPerVectorWord;
+ size_t first_chunk_portion = std::min(size, (kBitsPerVectorWord - bit_index) * kAlignment);
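+  // For example (assuming 64-bit vector words and kAlignment == 8): with
+  // bit_index == 60 and size == 256, the current chunk-info entry absorbs
+  // only (64 - 60) * 8 == 32 bytes; the remaining 224 bytes are credited to
+  // the next entry (larger objects fill intermediate entries via the loop
+  // below).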
+
+ chunk_info_vec_[chunk_idx++] += first_chunk_portion;
+ DCHECK_LE(first_chunk_portion, size);
+ for (size -= first_chunk_portion; size > kOffsetChunkSize; size -= kOffsetChunkSize) {
+ DCHECK_EQ(chunk_info_vec_[chunk_idx], 0u);
+ chunk_info_vec_[chunk_idx++] = kOffsetChunkSize;
+ }
+ chunk_info_vec_[chunk_idx] += size;
+ freed_objects_--;
+}
+
+template <bool kUpdateLiveWords>
+void MarkCompact::ScanObject(mirror::Object* obj) {
+ RefFieldsVisitor visitor(this);
+  DCHECK(IsMarked(obj)) << "Scanning unmarked object " << obj << "\n" << heap_->DumpSpaces();
+ if (kUpdateLiveWords && moving_space_bitmap_->HasAddress(obj)) {
+ UpdateLivenessInfo(obj);
+ }
+ obj->VisitReferences(visitor, visitor);
+ RememberDexCaches(obj);
+}
+
+void MarkCompact::RememberDexCaches(mirror::Object* obj) {
+ if (obj->IsDexCache()) {
+ dex_caches_.insert(
+ mirror::CompressedReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue());
+ }
+}
+
+// Scan anything that's on the mark stack.
+void MarkCompact::ProcessMarkStack() {
+ TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ // TODO: try prefetch like in CMS
+ while (!mark_stack_->IsEmpty()) {
+ mirror::Object* obj = mark_stack_->PopBack();
+ DCHECK(obj != nullptr);
+ ScanObject</*kUpdateLiveWords*/ true>(obj);
+ }
+}
+
+void MarkCompact::ExpandMarkStack() {
+ const size_t new_size = mark_stack_->Capacity() * 2;
+ std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(),
+ mark_stack_->End());
+ mark_stack_->Resize(new_size);
+ for (auto& ref : temp) {
+ mark_stack_->PushBack(ref.AsMirrorPtr());
+ }
+ DCHECK(!mark_stack_->IsFull());
+}
+
+inline void MarkCompact::PushOnMarkStack(mirror::Object* obj) {
+ if (UNLIKELY(mark_stack_->IsFull())) {
+ ExpandMarkStack();
+ }
+ mark_stack_->PushBack(obj);
+}
+
+inline void MarkCompact::MarkObjectNonNull(mirror::Object* obj,
+ mirror::Object* holder,
+ MemberOffset offset) {
+ DCHECK(obj != nullptr);
+ if (MarkObjectNonNullNoPush</*kParallel*/false>(obj, holder, offset)) {
+ PushOnMarkStack(obj);
+ }
+}
+
+template <bool kParallel>
+inline bool MarkCompact::MarkObjectNonNullNoPush(mirror::Object* obj,
+ mirror::Object* holder,
+ MemberOffset offset) {
+  // We expect most of the references to be in the bump-pointer space, so try
+  // that first to keep the cost of this function minimal.
+ if (LIKELY(moving_space_bitmap_->HasAddress(obj))) {
+ return kParallel ? !moving_space_bitmap_->AtomicTestAndSet(obj)
+ : !moving_space_bitmap_->Set(obj);
+ } else if (non_moving_space_bitmap_->HasAddress(obj)) {
+ return kParallel ? !non_moving_space_bitmap_->AtomicTestAndSet(obj)
+ : !non_moving_space_bitmap_->Set(obj);
+ } else if (immune_spaces_.ContainsObject(obj)) {
+ DCHECK(IsMarked(obj) != nullptr);
+ return false;
+ } else {
+    // Must be in the large-object space; otherwise it's a case of heap corruption.
+ if (!IsAligned<kPageSize>(obj)) {
+      // Objects in the large-object space are page aligned. So if we have an
+      // object which doesn't belong to any space and is not page-aligned
+      // either, then it's memory corruption.
+ // TODO: implement protect/unprotect in bump-pointer space.
+ heap_->GetVerification()->LogHeapCorruption(holder, offset, obj, /*fatal*/ true);
+ }
+ DCHECK_NE(heap_->GetLargeObjectsSpace(), nullptr)
+ << "ref=" << obj
+ << " doesn't belong to any of the spaces and large object space doesn't exist";
+ accounting::LargeObjectBitmap* los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+ DCHECK(los_bitmap->HasAddress(obj));
+ return kParallel ? !los_bitmap->AtomicTestAndSet(obj)
+ : !los_bitmap->Set(obj);
+ }
+}
+
+inline void MarkCompact::MarkObject(mirror::Object* obj,
+ mirror::Object* holder,
+ MemberOffset offset) {
+ if (obj != nullptr) {
+ MarkObjectNonNull(obj, holder, offset);
+ }
+}
+
+mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
+ MarkObject(obj, nullptr, MemberOffset(0));
+ return obj;
+}
+
+void MarkCompact::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
+ MarkObject(obj->AsMirrorPtr(), nullptr, MemberOffset(0));
+}
+
+void MarkCompact::VisitRoots(mirror::Object*** roots,
+ size_t count,
+ const RootInfo& info) {
+ if (compacting_) {
+ for (size_t i = 0; i < count; ++i) {
+ UpdateRoot(roots[i], info);
+ }
+ } else {
+ for (size_t i = 0; i < count; ++i) {
+ MarkObjectNonNull(*roots[i]);
+ }
+ }
+}
+
+void MarkCompact::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info) {
+ // TODO: do we need to check if the root is null or not?
+ if (compacting_) {
+ for (size_t i = 0; i < count; ++i) {
+ UpdateRoot(roots[i], info);
+ }
+ } else {
+ for (size_t i = 0; i < count; ++i) {
+ MarkObjectNonNull(roots[i]->AsMirrorPtr());
+ }
+ }
+}
+
+mirror::Object* MarkCompact::IsMarked(mirror::Object* obj) {
+ if (moving_space_bitmap_->HasAddress(obj)) {
+ const bool is_black = reinterpret_cast<uint8_t*>(obj) >= black_allocations_begin_;
+ if (compacting_) {
+ if (is_black) {
+ return PostCompactBlackObjAddr(obj);
+ } else if (live_words_bitmap_->Test(obj)) {
+ return PostCompactOldObjAddr(obj);
+ } else {
+ return nullptr;
+ }
+ }
+ return (is_black || moving_space_bitmap_->Test(obj)) ? obj : nullptr;
+ } else if (non_moving_space_bitmap_->HasAddress(obj)) {
+ return non_moving_space_bitmap_->Test(obj) ? obj : nullptr;
+ } else if (immune_spaces_.ContainsObject(obj)) {
+ return obj;
+ } else {
+ DCHECK(heap_->GetLargeObjectsSpace())
+ << "ref=" << obj
+ << " doesn't belong to any of the spaces and large object space doesn't exist";
+ accounting::LargeObjectBitmap* los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+ if (los_bitmap->HasAddress(obj)) {
+ DCHECK(IsAligned<kPageSize>(obj));
+ return los_bitmap->Test(obj) ? obj : nullptr;
+ } else {
+      // The given obj is not in any of the known spaces, so return null. This
+      // could happen, for instance, in interpreter caches, wherein a
+      // concurrent update to the cache could result in obj being a
+      // non-reference. This is tolerable because SweepInterpreterCaches only
+      // updates the cache if the given object has moved, which can't be the
+      // case for a non-reference.
+ }
+ }
+}
+
+bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update ATTRIBUTE_UNUSED) {
+ mirror::Object* ref = obj->AsMirrorPtr();
+ if (ref == nullptr) {
+ return true;
+ }
+ return IsMarked(ref);
+}
+
+// Process the 'referent' field in a java.lang.ref.Reference. If the referent
+// has not yet been marked, put it on the appropriate list in the heap for later
+// processing.
+void MarkCompact::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> ref) {
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
+}
+
+void MarkCompact::FinishPhase() {
+ info_map_.MadviseDontNeedAndZero();
+ live_words_bitmap_->ClearBitmap();
+ from_space_map_.MadviseDontNeedAndZero();
+ if (UNLIKELY(Runtime::Current()->IsZygote() && uffd_ >= 0)) {
+ heap_->DeleteThreadPool();
+ close(uffd_);
+ uffd_ = -1;
+ uffd_initialized_ = false;
+ }
+ CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty.
+ mark_stack_->Reset();
+ updated_roots_.clear();
+ delete[] moving_pages_status_;
+ DCHECK_EQ(thread_running_gc_, Thread::Current());
+ ReaderMutexLock mu(thread_running_gc_, *Locks::mutator_lock_);
+ WriterMutexLock mu2(thread_running_gc_, *Locks::heap_bitmap_lock_);
+ heap_->ClearMarkedObjects();
+}
+
+} // namespace collector
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
new file mode 100644
index 0000000000..cb7440ceff
--- /dev/null
+++ b/runtime/gc/collector/mark_compact.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
+#define ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
+
+#include <memory>
+#include <unordered_set>
+
+#include "base/atomic.h"
+#include "barrier.h"
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "garbage_collector.h"
+#include "gc/accounting/atomic_stack.h"
+#include "gc/accounting/bitmap-inl.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc_root.h"
+#include "immune_spaces.h"
+#include "offsets.h"
+
+namespace art {
+
+namespace mirror {
+class DexCache;
+}
+
+namespace gc {
+
+class Heap;
+
+namespace space {
+class BumpPointerSpace;
+} // namespace space
+
+namespace collector {
+class MarkCompact : public GarbageCollector {
+ public:
+ static constexpr size_t kAlignment = kObjectAlignment;
+  // Fake file descriptor for fallback mode.
+ static constexpr int kFallbackMode = -2;
+
+ explicit MarkCompact(Heap* heap);
+
+ ~MarkCompact() {}
+
+ void RunPhases() override REQUIRES(!Locks::mutator_lock_);
+
+  // Updated before (or in) the pre-compaction pause and accessed only in the
+  // pause or during concurrent compaction. The flag is reset after compaction
+  // is completed and is never accessed by mutators. Therefore, it is safe to
+  // update without any memory ordering.
+ bool IsCompacting(Thread* self) const {
+ return compacting_ && self == thread_running_gc_;
+ }
+
+ GcType GetGcType() const override {
+ return kGcTypeFull;
+ }
+
+ CollectorType GetCollectorType() const override {
+ return kCollectorTypeCMC;
+ }
+
+ Barrier& GetBarrier() {
+ return gc_barrier_;
+ }
+
+ mirror::Object* MarkObject(mirror::Object* obj) override
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+
+ void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update) override
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+
+ void VisitRoots(mirror::Object*** roots,
+ size_t count,
+ const RootInfo& info) override
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info) override
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+
+ bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
+ bool do_atomic_update) override
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+
+ void RevokeAllThreadLocalBuffers() override;
+
+ void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
+ ObjPtr<mirror::Reference> reference) override
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+ mirror::Object* IsMarked(mirror::Object* obj) override
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  // Perform GC-root updates and heap protection so that during the concurrent
+  // compaction phase we can receive faults and compact the corresponding
+  // pages on the fly. This is performed in a STW pause.
+ void CompactionPause() REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_);
+
+ mirror::Object* GetFromSpaceAddrFromBarrier(mirror::Object* old_ref) {
+ CHECK(compacting_);
+ if (live_words_bitmap_->HasAddress(old_ref)) {
+ return GetFromSpaceAddr(old_ref);
+ }
+ return old_ref;
+ }
+  // Called from Heap::PostForkChildAction() for non-zygote processes and from
+  // PrepareForCompaction() for zygote processes. Returns true if the uffd was
+  // created or already existed.
+ bool CreateUserfaultfd(bool post_fork);
+
+ private:
+ using ObjReference = mirror::ObjectReference</*kPoisonReferences*/ false, mirror::Object>;
+  // Number of bits (live-words) covered by a single chunk-info (below)
+  // entry/word.
+  // TODO: Since popcount is performed using SIMD instructions, we should
+  // consider using 128-bit words in order to halve the chunk-info size.
+ static constexpr uint32_t kBitsPerVectorWord = kBitsPerIntPtrT;
+ static constexpr uint32_t kOffsetChunkSize = kBitsPerVectorWord * kAlignment;
+ static_assert(kOffsetChunkSize < kPageSize);
+  // Bitmap in which a bit is set for every live word. For example, an object
+  // which is 4 words in size will have the corresponding 4 bits set. This is
+  // required for efficient computation of the new address (post-compaction)
+  // from a given old address (pre-compaction).
+ template <size_t kAlignment>
+ class LiveWordsBitmap : private accounting::MemoryRangeBitmap<kAlignment> {
+ using Bitmap = accounting::Bitmap;
+ using MemRangeBitmap = accounting::MemoryRangeBitmap<kAlignment>;
+
+ public:
+ static_assert(IsPowerOfTwo(kBitsPerVectorWord));
+ static_assert(IsPowerOfTwo(Bitmap::kBitsPerBitmapWord));
+ static_assert(kBitsPerVectorWord >= Bitmap::kBitsPerBitmapWord);
+ static constexpr uint32_t kBitmapWordsPerVectorWord =
+ kBitsPerVectorWord / Bitmap::kBitsPerBitmapWord;
+ static_assert(IsPowerOfTwo(kBitmapWordsPerVectorWord));
+ static LiveWordsBitmap* Create(uintptr_t begin, uintptr_t end);
+
+ // Return offset (within the indexed chunk-info) of the nth live word.
+ uint32_t FindNthLiveWordOffset(size_t chunk_idx, uint32_t n) const;
+ // Sets all bits in the bitmap corresponding to the given range. Also
+ // returns the bit-index of the first word.
+ ALWAYS_INLINE uintptr_t SetLiveWords(uintptr_t begin, size_t size);
+    // Count the number of live words up to the given bit-index. This is used
+    // to compute the post-compact address of an old reference.
+ ALWAYS_INLINE size_t CountLiveWordsUpto(size_t bit_idx) const;
+    // Call 'visitor' for every stride of contiguous marked bits in the
+    // live-words bitmap, starting from begin_bit_idx. Visit only 'bytes' live
+    // bytes, or up to 'end', whichever comes first. The visitor is called
+    // with the index of the first marked bit in the stride, the stride size,
+    // and whether it's the last stride in the given range or not.
+ template <typename Visitor>
+ ALWAYS_INLINE void VisitLiveStrides(uintptr_t begin_bit_idx,
+ uint8_t* end,
+ const size_t bytes,
+ Visitor&& visitor) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Count the number of live bytes in the given vector entry.
+ size_t LiveBytesInBitmapWord(size_t chunk_idx) const;
+ void ClearBitmap() { Bitmap::Clear(); }
+ ALWAYS_INLINE uintptr_t Begin() const { return MemRangeBitmap::CoverBegin(); }
+ ALWAYS_INLINE bool HasAddress(mirror::Object* obj) const {
+ return MemRangeBitmap::HasAddress(reinterpret_cast<uintptr_t>(obj));
+ }
+ ALWAYS_INLINE bool Test(uintptr_t bit_index) const {
+ return Bitmap::TestBit(bit_index);
+ }
+ ALWAYS_INLINE bool Test(mirror::Object* obj) const {
+ return MemRangeBitmap::Test(reinterpret_cast<uintptr_t>(obj));
+ }
+ ALWAYS_INLINE uintptr_t GetWord(size_t index) const {
+ static_assert(kBitmapWordsPerVectorWord == 1);
+ return Bitmap::Begin()[index * kBitmapWordsPerVectorWord];
+ }
+ };
+
+ // For a given object address in pre-compact space, return the corresponding
+ // address in the from-space, where heap pages are relocated in the compaction
+ // pause.
+ mirror::Object* GetFromSpaceAddr(mirror::Object* obj) const {
+ DCHECK(live_words_bitmap_->HasAddress(obj)) << " obj=" << obj;
+ return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(obj)
+ + from_space_slide_diff_);
+ }
+
+  // Verifies that the given object reference refers to a valid object.
+  // Otherwise fatally dumps logs, including those from the callback.
+ template <typename Callback>
+ void VerifyObject(mirror::Object* ref, Callback& callback) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+  // Check if the obj is within the heap and has a klass which is likely to be
+  // a valid mirror::Class.
+ bool IsValidObject(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+ void InitializePhase();
+ void FinishPhase() REQUIRES(!Locks::mutator_lock_, !Locks::heap_bitmap_lock_);
+ void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
+ void CompactionPhase() REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void SweepSystemWeaks(Thread* self, Runtime* runtime, const bool paused)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_);
+ // Update the reference at given offset in the given object with post-compact
+ // address.
+ ALWAYS_INLINE void UpdateRef(mirror::Object* obj, MemberOffset offset)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Verify that the gc-root is updated only once. Returns false if the update
+ // shouldn't be done.
+ ALWAYS_INLINE bool VerifyRootSingleUpdate(void* root,
+ mirror::Object* old_ref,
+ const RootInfo& info)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Update the given root with post-compact address.
+ ALWAYS_INLINE void UpdateRoot(mirror::CompressedReference<mirror::Object>* root,
+ const RootInfo& info = RootInfo(RootType::kRootUnknown))
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE void UpdateRoot(mirror::Object** root,
+ const RootInfo& info = RootInfo(RootType::kRootUnknown))
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Given the pre-compact address, the function returns the post-compact
+ // address of the given object.
+ ALWAYS_INLINE mirror::Object* PostCompactAddress(mirror::Object* old_ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Compute post-compact address of an object in moving space. This function
+ // assumes that old_ref is in moving space.
+ ALWAYS_INLINE mirror::Object* PostCompactAddressUnchecked(mirror::Object* old_ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Compute the new address for an object which was allocated prior to starting
+ // this GC cycle.
+ ALWAYS_INLINE mirror::Object* PostCompactOldObjAddr(mirror::Object* old_ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Compute the new address for an object which was black allocated during this
+ // GC cycle.
+ ALWAYS_INLINE mirror::Object* PostCompactBlackObjAddr(mirror::Object* old_ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
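+  // (Sketch: objects allocated before this cycle get their new address from
+  // the count of live words preceding them, whereas black allocations are
+  // slid down as a whole by a constant difference.)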
+ // Identify immune spaces and reset card-table, mod-union-table, and mark
+ // bitmaps.
+ void BindAndResetBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+
+ // Perform one last round of marking, identifying roots from dirty cards
+ // during a stop-the-world (STW) pause.
+ void MarkingPause() REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_);
+ // Perform stop-the-world pause prior to concurrent compaction.
+ // Updates GC-roots and protects heap so that during the concurrent
+ // compaction phase we can receive faults and compact the corresponding pages
+ // on the fly.
+ void PreCompactionPhase() REQUIRES(Locks::mutator_lock_);
+ // Compute offsets (in chunk_info_vec_) and other data structures required
+ // during concurrent compaction.
+ void PrepareForCompaction() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Copy kPageSize live bytes, starting from 'offset' (within the moving
+  // space), which must be within 'obj', into the kPageSize-sized memory
+  // pointed to by 'addr'. Then update the references within the copied
+  // objects. The boundary objects are partially updated such that only the
+  // references that lie in the page are updated. This is necessary to avoid
+  // cascading userfaults.
+ void CompactPage(mirror::Object* obj, uint32_t offset, uint8_t* addr)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+  // Compact the bump-pointer space. Pass the page that should be used as the
+  // buffer for userfaultfd.
+ template <bool kFallback>
+ void CompactMovingSpace(uint8_t* page = nullptr) REQUIRES_SHARED(Locks::mutator_lock_);
+  // Update all the objects in the given non-moving space page. The 'first'
+  // object could have started on some preceding page.
+ void UpdateNonMovingPage(mirror::Object* first, uint8_t* page)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Update all the references in the non-moving space.
+ void UpdateNonMovingSpace() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // For all the pages in the non-moving space, find the first object that
+  // overlaps with each page's start address, and store it in the
+  // first_objs_non_moving_space_ array.
+ // In addition to the first-objects for every post-compact moving space page,
+ // also find offsets within those objects from where the contents should be
+ // copied to the page. The offsets are relative to the moving-space's
+ // beginning. Store the computed first-object and offset in first_objs_moving_space_
+ // and pre_compact_offset_moving_space_ respectively.
+ void InitMovingSpaceFirstObjects(const size_t vec_len) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Gather the info related to black allocations from bump-pointer space to
+ // enable concurrent sliding of these pages.
+ void UpdateMovingSpaceBlackAllocations() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ // Update first-object info from allocation-stack for non-moving space black
+ // allocations.
+ void UpdateNonMovingSpaceBlackAllocations() REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  // Slides (retaining the empty holes, which are usually part of some in-use
+  // TLAB) a black page in the moving space. 'first_obj' is the object that
+  // overlaps with the first byte of the page being slid. pre_compact_page is
+  // the pre-compact address of the page being slid. 'page_idx' is used to
+  // fetch the first allocated chunk's size and the next page's first_obj.
+  // 'dest' is the kPageSize-sized memory where the contents are copied.
+ void SlideBlackPage(mirror::Object* first_obj,
+ const size_t page_idx,
+ uint8_t* const pre_compact_page,
+ uint8_t* dest)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Perform reference processing and the like before sweeping the non-movable
+  // spaces.
+ void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
+
+ // Mark GC-roots (except from immune spaces and thread-stacks) during a STW pause.
+ void ReMarkRoots(Runtime* runtime) REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ // Concurrently mark GC-roots, except from immune spaces.
+ void MarkRoots(VisitRootFlags flags) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ // Collect thread stack roots via a checkpoint.
+ void MarkRootsCheckpoint(Thread* self, Runtime* runtime) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ // Second round of concurrent marking. Mark all gray objects that got dirtied
+ // since the first round.
+ void PreCleanCards() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+
+ void MarkNonThreadRoots(Runtime* runtime) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ void MarkConcurrentRoots(VisitRootFlags flags, Runtime* runtime)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+
+ // Traverse through the reachable objects and mark them.
+ void MarkReachableObjects() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ // Scan (only) immune spaces looking for references into the garbage collected
+ // spaces.
+ void UpdateAndMarkModUnion() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ // Scan mod-union and card tables, covering all the spaces, to identify dirty objects.
+ // These are objects on cards of at least 'minimum_age': kCardAged in case of
+ // concurrent (second-round) marking, and kCardDirty during the STW pause.
+ void ScanDirtyObjects(bool paused, uint8_t minimum_age) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ // Recursively mark dirty objects. Invoked both concurrently and in a STW
+ // pause in PausePhase().
+ void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ // Go through all the objects in the mark-stack until it's empty.
+ void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ void ExpandMarkStack() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+
+ // Scan object for references. If kUpdateLiveWords is true then set bits in
+ // the live-words bitmap and add the object's size to the chunk-info vector.
+ template <bool kUpdateLiveWords>
+ void ScanObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ // Push objects to the mark-stack right after successfully marking objects.
+ void PushOnMarkStack(mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+
+ // Update the live-words bitmap as well as add the object size to the
+ // chunk-info vector. Both are required for computation of post-compact addresses.
+ // Also updates freed_objects_ counter.
+ void UpdateLivenessInfo(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void ProcessReferences(Thread* self)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!Locks::heap_bitmap_lock_);
+
+ void MarkObjectNonNull(mirror::Object* obj,
+ mirror::Object* holder = nullptr,
+ MemberOffset offset = MemberOffset(0))
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+
+ void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+
+ template <bool kParallel>
+ bool MarkObjectNonNullNoPush(mirror::Object* obj,
+ mirror::Object* holder = nullptr,
+ MemberOffset offset = MemberOffset(0))
+ REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ void Sweep(bool swap_bitmaps) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+ void SweepLargeObjects(bool swap_bitmaps) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(Locks::heap_bitmap_lock_);
+
+ // Store all the dex-cache objects visited during the marking phase.
+ // This is required during the compaction phase to ensure that we don't miss
+ // visiting any of them (to update references). Somehow, iterating over
+ // class-tables to fetch these misses some of them, leading to memory
+ // corruption.
+ // TODO: once we implement concurrent compaction of classes and dex-caches,
+ // which will visit all of them, we should remove this.
+ void RememberDexCaches(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Perform all kernel operations required for concurrent compaction. Includes
+ // mremap to move pre-compact pages to from-space, followed by userfaultfd
+ // registration on the moving space.
+ void KernelPreparation();
+ // Called by thread-pool workers to read uffd_ and process fault events.
+ void ConcurrentCompaction(uint8_t* page) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ enum PageState : uint8_t {
+ kUncompacted = 0, // The page has not been compacted yet
+ kCompacting // Some thread (GC or mutator) is compacting the page
+ };
+
+ // Buffers, one per worker thread + gc-thread, to be used when
+ // kObjPtrPoisoning == true as in that case we can't have the buffer on the
+ // stack. The first page of the buffer is assigned to
+ // conc_compaction_termination_page_. A read access to this page signals
+ // termination of concurrent compaction by making worker threads terminate the
+ // userfaultfd read loop.
+ MemMap compaction_buffers_map_;
+ // For checkpoints
+ Barrier gc_barrier_;
+ // Every object inside the immune spaces is assumed to be marked.
+ ImmuneSpaces immune_spaces_;
+ // Required only when mark-stack is accessed in shared mode, which happens
+ // when collecting thread-stack roots using checkpoint.
+ Mutex mark_stack_lock_;
+ accounting::ObjectStack* mark_stack_;
+ // Special bitmap wherein all the bits corresponding to an object are set.
+ // TODO: embed LiveWordsBitmap in this class rather than holding it via a
+ // pointer, as we tend to access its members in performance-sensitive
+ // code-paths. Also, use a single MemMap for all the GC's data structures,
+ // which we will clear at the end. This would help limit the number of
+ // VMAs that get created in the kernel.
+ std::unique_ptr<LiveWordsBitmap<kAlignment>> live_words_bitmap_;
+ // Track GC-roots updated so far in a GC-cycle. This is to confirm that no
+ // GC-root is updated twice.
+ // TODO: Must be replaced with an efficient mechanism eventually. Or ensure
+ // that double updates don't happen in the first place.
+ std::unordered_set<void*> updated_roots_;
+ // Set of dex-caches visited during marking. See comment above
+ // RememberDexCaches() for the explanation.
+ std::unordered_set<uint32_t> dex_caches_;
+ MemMap from_space_map_;
+ // An array of live-bytes in logical chunks of kOffsetChunkSize size
+ // in the 'to-be-compacted' space.
+ MemMap info_map_;
+ // The main space bitmap
+ accounting::ContinuousSpaceBitmap* moving_space_bitmap_;
+ accounting::ContinuousSpaceBitmap* non_moving_space_bitmap_;
+ space::ContinuousSpace* non_moving_space_;
+ space::BumpPointerSpace* const bump_pointer_space_;
+ Thread* thread_running_gc_;
+ // Array of pages' compaction status.
+ Atomic<PageState>* moving_pages_status_;
+ size_t vector_length_;
+ size_t live_stack_freeze_size_;
+
+ // For every page in the to-space (post-compact heap) we need to know the
+ // first object from which we must compact and/or update references. This is
+ // for both non-moving and moving space. Additionally, for the moving-space,
+ // we also need the offset within the object from where we need to start
+ // copying.
+ // chunk_info_vec_ holds live bytes for chunks during marking phase. After
+ // marking we perform an exclusive scan to compute offset for every chunk.
+ uint32_t* chunk_info_vec_;
+ // For pages before black allocations, pre_compact_offset_moving_space_[i]
+ // holds offset within the space from where the objects need to be copied in
+ // the ith post-compact page.
+ // Otherwise, black_alloc_pages_first_chunk_size_[i] holds the size of first
+ // non-empty chunk in the ith black-allocations page.
+ union {
+ uint32_t* pre_compact_offset_moving_space_;
+ uint32_t* black_alloc_pages_first_chunk_size_;
+ };
+ // first_objs_moving_space_[i] is the pre-compact address of the object which
+ // would overlap with the starting boundary of the ith post-compact page.
+ ObjReference* first_objs_moving_space_;
+ // First object for every page. It could be greater than the page's start
+ // address, or null if the page is empty.
+ ObjReference* first_objs_non_moving_space_;
+ size_t non_moving_first_objs_count_;
+ // Length of first_objs_moving_space_ and pre_compact_offset_moving_space_
+ // arrays. Also the number of pages which are to be compacted.
+ size_t moving_first_objs_count_;
+ // Number of pages containing black-allocated objects, indicating number of
+ // pages to be slid.
+ size_t black_page_count_;
+
+ uint8_t* from_space_begin_;
+ // Moving-space's end pointer at the marking pause. All allocations beyond
+ // this will be considered black in the current GC cycle. Aligned up to page
+ // size.
+ uint8_t* black_allocations_begin_;
+ // End of compacted space. Used for computing the post-compact addresses of
+ // black-allocated objects. Aligned up to page size.
+ uint8_t* post_compact_end_;
+ // Cache (black_allocations_begin_ - post_compact_end_) for post-compact
+ // address computations.
+ ptrdiff_t black_objs_slide_diff_;
+ // Cache (from_space_begin_ - bump_pointer_space_->Begin()) so that we can
+ // compute the from-space address of a given pre-compact address efficiently.
+ ptrdiff_t from_space_slide_diff_;
+
+ // TODO: Remove once an efficient mechanism to deal with double root updates
+ // is incorporated.
+ void* stack_addr_;
+ void* stack_end_;
+
+ uint8_t* conc_compaction_termination_page_;
+ // Number of objects freed during this GC in the moving space. It is
+ // decremented every time an object is discovered during marking, and the
+ // total object count is added to it in MarkingPause(). It therefore reaches
+ // the correct count only once the marking phase is completed.
+ int32_t freed_objects_;
+ // Userfault file descriptor, accessed only by the GC itself.
+ // kFallbackMode value indicates that we are in the fallback mode.
+ int uffd_;
+ // Used to exit from compaction loop at the end of concurrent compaction
+ uint8_t thread_pool_counter_;
+ // True while compacting.
+ bool compacting_;
+ // Flag indicating whether one-time userfaultfd initialization has been done.
+ // It will be false on the first GC for non-zygote processes, and always for
+ // zygote. Its purpose is to keep the userfaultfd overhead in
+ // Heap::PostForkChildAction() to a minimum, as that function is invoked on the
+ // app startup path. With this flag, we instead register the
+ // compaction-termination page on the first GC.
+ bool uffd_initialized_;
+
+ class VerifyRootMarkedVisitor;
+ class ScanObjectVisitor;
+ class CheckpointMarkThreadRoots;
+ template<size_t kBufferSize> class ThreadRootsVisitor;
+ class CardModifiedVisitor;
+ class RefFieldsVisitor;
+ template <bool kCheckBegin, bool kCheckEnd> class RefsUpdateVisitor;
+ class NativeRootsUpdateVisitor;
+ class ImmuneSpaceUpdateObjVisitor;
+ class ConcurrentCompactionGcTask;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MarkCompact);
+};
+
+} // namespace collector
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_COLLECTOR_MARK_COMPACT_H_
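
The offset computation described by the chunk_info_vec_ comments above reduces to an exclusive prefix-sum ("exclusive scan"). A minimal sketch, for illustration only and with container types chosen for brevity, not the actual implementation:

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustration only: after marking, each entry holds a chunk's live bytes;
// the exclusive scan turns it into each chunk's post-compact offset.
std::vector<uint32_t> ExclusiveScan(const std::vector<uint32_t>& live_bytes_per_chunk) {
  std::vector<uint32_t> chunk_offsets(live_bytes_per_chunk.size());
  uint32_t total = 0;
  for (size_t i = 0; i < live_bytes_per_chunk.size(); ++i) {
    chunk_offsets[i] = total;  // live bytes that compact below chunk i
    total += live_bytes_per_chunk[i];
  }
  return chunk_offsets;
}

// An object's post-compact address is then the space begin, plus its chunk's
// offset, plus the live bytes preceding it within the chunk (counted via the
// live-words bitmap).
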
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 9c9996458c..8fdb524f8e 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -30,6 +30,8 @@ enum CollectorType {
kCollectorTypeMS,
// Concurrent mark-sweep.
kCollectorTypeCMS,
+ // Concurrent mark-compact.
+ kCollectorTypeCMC,
// Semi-space / mark-sweep hybrid, enables compaction.
kCollectorTypeSS,
// Heap trimming collector, doesn't do any actual collecting.
@@ -63,11 +65,12 @@
std::ostream& operator<<(std::ostream& os, CollectorType collector_type);
static constexpr CollectorType kCollectorTypeDefault =
-#if ART_DEFAULT_GC_TYPE_IS_CMS
- kCollectorTypeCMS
+#if ART_DEFAULT_GC_TYPE_IS_CMC
+ kCollectorTypeCMC
#elif ART_DEFAULT_GC_TYPE_IS_SS
kCollectorTypeSS
-#else
+#elif ART_DEFAULT_GC_TYPE_IS_CMS
    kCollectorTypeCMS
+#else
#error "ART default GC type must be set"
#endif
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 9e1524e657..9c76060062 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -209,10 +209,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
}
- // IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for the
- // BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
- // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant
- // since the allocator_type should be constant propagated.
- if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()
- && UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) {
+ // With the CMC collector even the BumpPointer and TLAB allocators can trigger
+ // a concurrent GC, so check IsGcConcurrent() for all allocator types.
+ if (IsGcConcurrent() && UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) {
need_gc = true;
}
GetMetrics()->TotalBytesAllocated()->Add(bytes_tl_bulk_allocated);
@@ -442,7 +440,7 @@ inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_co
return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}
-inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
+inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type ATTRIBUTE_UNUSED,
size_t alloc_size,
bool grow) {
size_t old_target = target_footprint_.load(std::memory_order_relaxed);
@@ -457,7 +455,7 @@ inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
return true;
}
// We are between target_footprint_ and growth_limit_ .
- if (AllocatorMayHaveConcurrentGC(allocator_type) && IsGcConcurrent()) {
+ if (IsGcConcurrent()) {
return false;
} else {
if (grow) {
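
With the per-allocator gating removed above, the concurrent-GC trigger reduces to a single runtime check. A sketch of its shape; the watermark comparison is an assumption here, since ShouldConcurrentGCForJava()'s body is not part of this diff:

#include <cstddef>

// Assumption: allocation beyond a concurrent-start watermark requests a GC.
bool ShouldRequestConcurrentGC(size_t new_num_bytes_allocated,
                               size_t concurrent_start_bytes) {
  return new_num_bytes_allocated >= concurrent_start_bytes;
}
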
diff --git a/runtime/gc/heap-visit-objects-inl.h b/runtime/gc/heap-visit-objects-inl.h
index e20d981fa3..a235c44033 100644
--- a/runtime/gc/heap-visit-objects-inl.h
+++ b/runtime/gc/heap-visit-objects-inl.h
@@ -118,7 +118,7 @@ inline void Heap::VisitObjectsInternal(Visitor&& visitor) {
// For speed reasons, only perform it when Rosalloc could possibly be used.
// (Disabled for read barriers because it never uses Rosalloc).
// (See the DCHECK in RosAllocSpace constructor).
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
// Rosalloc has a race in allocation. Objects can be written into the allocation
// stack before their header writes are visible to this thread.
// See b/28790624 for more details.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8407ba4376..a8195a393f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -21,10 +21,6 @@
#if defined(__BIONIC__) || defined(__GLIBC__)
#include <malloc.h> // For mallinfo()
#endif
-#if defined(__BIONIC__) && defined(ART_TARGET)
-#include <linux/userfaultfd.h>
-#include <sys/ioctl.h>
-#endif
#include <memory>
#include <random>
#include <unistd.h>
@@ -61,6 +57,7 @@
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/collector/concurrent_copying.h"
+#include "gc/collector/mark_compact.h"
#include "gc/collector/mark_sweep.h"
#include "gc/collector/partial_mark_sweep.h"
#include "gc/collector/semi_space.h"
@@ -410,7 +407,6 @@ Heap::Heap(size_t initial_size,
backtrace_lock_(nullptr),
seen_backtrace_count_(0u),
unique_backtrace_count_(0u),
- uffd_(-1),
gc_disabled_for_shutdown_(false),
dump_region_info_before_gc_(dump_region_info_before_gc),
dump_region_info_after_gc_(dump_region_info_after_gc),
@@ -421,7 +417,7 @@ Heap::Heap(size_t initial_size,
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
} else if (background_collector_type_ != gc::kCollectorTypeHomogeneousSpaceCompact) {
@@ -448,7 +444,8 @@ Heap::Heap(size_t initial_size,
mark_bitmap_.reset(new accounting::HeapBitmap(this));
// We don't have hspace compaction enabled with CC.
- if (foreground_collector_type_ == kCollectorTypeCC) {
+ if (foreground_collector_type_ == kCollectorTypeCC
+ || foreground_collector_type_ == kCollectorTypeCMC) {
use_homogeneous_space_compaction_for_oom_ = false;
}
bool support_homogeneous_space_compaction =
@@ -629,10 +626,14 @@ Heap::Heap(size_t initial_size,
std::move(main_mem_map_1));
CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
AddSpace(bump_pointer_space_);
- temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
- std::move(main_mem_map_2));
- CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
- AddSpace(temp_space_);
+ // The Concurrent Mark-compact GC doesn't need the temp space to be in the
+ // lower 4GB, so the GC creates its temp space itself.
+ if (foreground_collector_type_ != kCollectorTypeCMC) {
+ temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
+ std::move(main_mem_map_2));
+ CHECK(temp_space_ != nullptr) << "Failed to create bump pointer space";
+ AddSpace(temp_space_);
+ }
CHECK(separate_non_moving_space);
} else {
CreateMainMallocSpace(std::move(main_mem_map_1), initial_size, growth_limit_, capacity_);
@@ -758,6 +759,10 @@ Heap::Heap(size_t initial_size,
semi_space_collector_ = new collector::SemiSpace(this);
garbage_collectors_.push_back(semi_space_collector_);
}
+ if (MayUseCollector(kCollectorTypeCMC)) {
+ mark_compact_ = new collector::MarkCompact(this);
+ garbage_collectors_.push_back(mark_compact_);
+ }
if (MayUseCollector(kCollectorTypeCC)) {
concurrent_copying_collector_ = new collector::ConcurrentCopying(this,
/*young_gen=*/false,
@@ -963,7 +968,6 @@ void Heap::DecrementDisableMovingGC(Thread* self) {
void Heap::IncrementDisableThreadFlip(Thread* self) {
// Supposed to be called by mutators. If thread_flip_running_ is true, block. Otherwise, go ahead.
- CHECK(kUseReadBarrier);
bool is_nested = self->GetDisableThreadFlipCount() > 0;
self->IncrementDisableThreadFlipCount();
if (is_nested) {
@@ -994,10 +998,26 @@ void Heap::IncrementDisableThreadFlip(Thread* self) {
}
}
+void Heap::EnsureObjectUserfaulted(ObjPtr<mirror::Object> obj) {
+ if (gUseUserfaultfd) {
+ // Use volatile to ensure that the compiler actually loads from memory, thereby triggering userfaults if required.
+ volatile uint8_t volatile_sum;
+ volatile uint8_t* start = reinterpret_cast<volatile uint8_t*>(obj.Ptr());
+ volatile uint8_t* end = AlignUp(start + obj->SizeOf(), kPageSize);
+ uint8_t sum = 0;
+ // The first page is already touched by SizeOf().
+ start += kPageSize;
+ while (start < end) {
+ sum += *start;
+ start += kPageSize;
+ }
+ volatile_sum = sum;
+ }
+}
+
void Heap::DecrementDisableThreadFlip(Thread* self) {
// Supposed to be called by mutators. Decrement disable_thread_flip_count_ and potentially wake up
// the GC waiting before doing a thread flip.
- CHECK(kUseReadBarrier);
self->DecrementDisableThreadFlipCount();
bool is_outermost = self->GetDisableThreadFlipCount() == 0;
if (!is_outermost) {
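
The idiom behind EnsureObjectUserfaulted() above, in standalone form as a minimal sketch: one volatile read per page forces the kernel to resolve any pending userfault up front, so none can occur later inside a JNI critical section.

#include <cstddef>
#include <cstdint>

// Sketch: touch each page of [begin, end) with a volatile read.
void PreTouchPages(const uint8_t* begin, const uint8_t* end, size_t page_size) {
  uint8_t sum = 0;
  for (const uint8_t* p = begin; p < end; p += page_size) {
    sum += *reinterpret_cast<const volatile uint8_t*>(p);
  }
  volatile uint8_t sink = sum;  // keep the loads from being optimized away
  (void)sink;
}
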
@@ -1017,7 +1037,6 @@ void Heap::DecrementDisableThreadFlip(Thread* self) {
void Heap::ThreadFlipBegin(Thread* self) {
// Supposed to be called by GC. Set thread_flip_running_ to be true. If disable_thread_flip_count_
// > 0, block. Otherwise, go ahead.
- CHECK(kUseReadBarrier);
ScopedThreadStateChange tsc(self, ThreadState::kWaitingForGcThreadFlip);
MutexLock mu(self, *thread_flip_lock_);
thread_flip_cond_->CheckSafeToWait(self);
@@ -1043,7 +1062,6 @@ void Heap::ThreadFlipBegin(Thread* self) {
void Heap::ThreadFlipEnd(Thread* self) {
// Supposed to be called by GC. Set thread_flip_running_ to false and potentially wake up mutators
// waiting before doing a JNI critical.
- CHECK(kUseReadBarrier);
MutexLock mu(self, *thread_flip_lock_);
CHECK(thread_flip_running_);
thread_flip_running_ = false;
@@ -1083,13 +1101,23 @@ void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_p
}
}
-void Heap::CreateThreadPool() {
- const size_t num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
+void Heap::CreateThreadPool(size_t num_threads) {
+ if (num_threads == 0) {
+ num_threads = std::max(parallel_gc_threads_, conc_gc_threads_);
+ }
if (num_threads != 0) {
thread_pool_.reset(new ThreadPool("Heap thread pool", num_threads));
}
}
+void Heap::WaitForWorkersToBeCreated() {
+ DCHECK(!Runtime::Current()->IsShuttingDown(Thread::Current()))
+ << "Cannot create new threads during runtime shutdown";
+ if (thread_pool_ != nullptr) {
+ thread_pool_->WaitForWorkersToBeCreated();
+ }
+}
+
void Heap::MarkAllocStackAsLive(accounting::ObjectStack* stack) {
space::ContinuousSpace* space1 = main_space_ != nullptr ? main_space_ : non_moving_space_;
space::ContinuousSpace* space2 = non_moving_space_;
@@ -1505,7 +1533,7 @@ void Heap::DoPendingCollectorTransition() {
VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
}
} else if (desired_collector_type == kCollectorTypeCCBackground) {
- DCHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
if (!CareAboutPauseTimes()) {
// Invoke CC full compaction.
CollectGarbageInternal(collector::kGcTypeFull,
@@ -2199,6 +2227,15 @@ void Heap::ChangeCollector(CollectorType collector_type) {
}
break;
}
+ case kCollectorTypeCMC: {
+ gc_plan_.push_back(collector::kGcTypeFull);
+ if (use_tlab_) {
+ ChangeAllocator(kAllocatorTypeTLAB);
+ } else {
+ ChangeAllocator(kAllocatorTypeBumpPointer);
+ }
+ break;
+ }
case kCollectorTypeSS: {
gc_plan_.push_back(collector::kGcTypeFull);
if (use_tlab_) {
@@ -2368,10 +2405,4 @@ void Heap::PreZygoteFork() {
}
- // We need to close userfaultfd fd for app/webview zygotes to avoid getattr
- // (stat) on the fd during fork.
- if (uffd_ >= 0) {
- close(uffd_);
- uffd_ = -1;
- }
Thread* self = Thread::Current();
MutexLock mu(self, zygote_creation_lock_);
// Try to see if we have any Zygote spaces.
@@ -2710,6 +2743,9 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
semi_space_collector_->SetSwapSemiSpaces(true);
collector = semi_space_collector_;
break;
+ case kCollectorTypeCMC:
+ collector = mark_compact_;
+ break;
case kCollectorTypeCC:
collector::ConcurrentCopying* active_cc_collector;
if (use_generational_cc_) {
@@ -2728,7 +2764,9 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
default:
LOG(FATAL) << "Invalid collector type " << static_cast<size_t>(collector_type_);
}
- if (collector != active_concurrent_copying_collector_.load(std::memory_order_relaxed)) {
+ // temp_space_ will be null for kCollectorTypeCMC.
+ if (temp_space_ != nullptr
+ && collector != active_concurrent_copying_collector_.load(std::memory_order_relaxed)) {
temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
if (kIsDebugBuild) {
// Try to read each page of the memory map in case mprotect didn't work properly b/19894268.
@@ -3829,70 +3867,6 @@ bool Heap::RequestConcurrentGC(Thread* self,
return true; // Vacuously.
}
-#if defined(__BIONIC__) && defined(ART_TARGET)
-void Heap::MaybePerformUffdIoctls(GcCause cause, uint32_t requested_gc_num) const {
- if (uffd_ >= 0
- && cause == kGcCauseBackground
- && (requested_gc_num < 5 || requested_gc_num % 5 == 0)) {
- // Attempt to use all userfaultfd ioctls that we intend to use.
- // Register ioctl
- {
- struct uffdio_register uffd_register;
- uffd_register.range.start = 0;
- uffd_register.range.len = 0;
- uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
- int ret = ioctl(uffd_, UFFDIO_REGISTER, &uffd_register);
- CHECK_EQ(ret, -1);
- CHECK_EQ(errno, EINVAL);
- }
- // Copy ioctl
- {
- struct uffdio_copy uffd_copy = {.src = 0, .dst = 0, .len = 0, .mode = 0};
- int ret = ioctl(uffd_, UFFDIO_COPY, &uffd_copy);
- CHECK_EQ(ret, -1);
- CHECK_EQ(errno, EINVAL);
- }
- // Zeropage ioctl
- {
- struct uffdio_zeropage uffd_zeropage;
- uffd_zeropage.range.start = 0;
- uffd_zeropage.range.len = 0;
- uffd_zeropage.mode = 0;
- int ret = ioctl(uffd_, UFFDIO_ZEROPAGE, &uffd_zeropage);
- CHECK_EQ(ret, -1);
- CHECK_EQ(errno, EINVAL);
- }
- // Continue ioctl
- {
- struct uffdio_continue uffd_continue;
- uffd_continue.range.start = 0;
- uffd_continue.range.len = 0;
- uffd_continue.mode = 0;
- int ret = ioctl(uffd_, UFFDIO_CONTINUE, &uffd_continue);
- CHECK_EQ(ret, -1);
- CHECK_EQ(errno, EINVAL);
- }
- // Wake ioctl
- {
- struct uffdio_range uffd_range = {.start = 0, .len = 0};
- int ret = ioctl(uffd_, UFFDIO_WAKE, &uffd_range);
- CHECK_EQ(ret, -1);
- CHECK_EQ(errno, EINVAL);
- }
- // Unregister ioctl
- {
- struct uffdio_range uffd_range = {.start = 0, .len = 0};
- int ret = ioctl(uffd_, UFFDIO_UNREGISTER, &uffd_range);
- CHECK_EQ(ret, -1);
- CHECK_EQ(errno, EINVAL);
- }
- }
-}
-#else
-void Heap::MaybePerformUffdIoctls(GcCause cause ATTRIBUTE_UNUSED,
- uint32_t requested_gc_num ATTRIBUTE_UNUSED) const {}
-#endif
-
void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t requested_gc_num) {
if (!Runtime::Current()->IsShuttingDown(self)) {
// Wait for any GCs currently running to finish. If this incremented GC number, we're done.
@@ -3919,12 +3893,9 @@ void Heap::ConcurrentGC(Thread* self, GcCause cause, bool force_full, uint32_t r
if (gc_type > next_gc_type &&
CollectGarbageInternal(gc_type, cause, false, requested_gc_num)
!= collector::kGcTypeNone) {
- MaybePerformUffdIoctls(cause, requested_gc_num);
break;
}
}
- } else {
- MaybePerformUffdIoctls(cause, requested_gc_num);
}
}
}
@@ -4280,7 +4251,7 @@ void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
}
void Heap::AllowNewAllocationRecords() const {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
AllocRecordObjectMap* allocation_records = GetAllocationRecords();
if (allocation_records != nullptr) {
@@ -4289,7 +4260,7 @@ void Heap::AllowNewAllocationRecords() const {
}
void Heap::DisallowNewAllocationRecords() const {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
AllocRecordObjectMap* allocation_records = GetAllocationRecords();
if (allocation_records != nullptr) {
@@ -4412,12 +4383,15 @@ void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) {
}
void Heap::DisableGCForShutdown() {
- Thread* const self = Thread::Current();
- CHECK(Runtime::Current()->IsShuttingDown(self));
- MutexLock mu(self, *gc_complete_lock_);
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
gc_disabled_for_shutdown_ = true;
}
+bool Heap::IsGCDisabledForShutdown() const {
+ MutexLock mu(Thread::Current(), *gc_complete_lock_);
+ return gc_disabled_for_shutdown_;
+}
+
bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
DCHECK_EQ(IsBootImageAddress(obj.Ptr()),
any_of(boot_image_spaces_.begin(),
@@ -4494,8 +4468,13 @@ mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
DCHECK_LE(alloc_size, self->TlabSize());
} else if (allocator_type == kAllocatorTypeTLAB) {
DCHECK(bump_pointer_space_ != nullptr);
+ // Try to allocate a page-aligned TLAB (though this isn't strictly necessary).
+ // TODO: for large allocations, which are rare, maybe we should allocate
+ // that object separately and return. There is no need to revoke the current
+ // TLAB, particularly if it's mostly unused.
+ size_t def_pr_tlab_size = RoundDown(alloc_size + kDefaultTLABSize, kPageSize) - alloc_size;
size_t next_tlab_size = JHPCalculateNextTlabSize(self,
- kDefaultTLABSize,
+ def_pr_tlab_size,
alloc_size,
&take_sample,
&bytes_until_sample);
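
A worked instance of the def_pr_tlab_size arithmetic above, with illustrative numbers; the 32 KiB kDefaultTLABSize and 4 KiB page size are assumptions here:

// For alloc_size = 40: RoundDown(40 + 32768, 4096) - 40 = 32768 - 40 = 32728,
// so alloc_size + TLAB size = 32768 and the new TLAB ends on a page boundary.
static_assert((40 + 32768) / 4096 * 4096 - 40 == 32728, "TLAB ends page-aligned");
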
@@ -4658,18 +4637,10 @@ void Heap::PostForkChildAction(Thread* self) {
uint64_t last_adj_time = NanoTime();
next_gc_type_ = NonStickyGcType(); // Always start with a full gc.
-#if defined(__BIONIC__) && defined(ART_TARGET)
- uffd_ = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
- if (uffd_ >= 0) {
- struct uffdio_api api = {.api = UFFD_API, .features = 0};
- int ret = ioctl(uffd_, UFFDIO_API, &api);
- CHECK_EQ(ret, 0) << "ioctl_userfaultfd: API: " << strerror(errno);
- } else {
- // The syscall should fail only if it doesn't exist in the kernel or if it's
- // denied by SELinux.
- CHECK(errno == ENOSYS || errno == EACCES) << "userfaultfd: " << strerror(errno);
+ if (gUseUserfaultfd) {
+ DCHECK_NE(mark_compact_, nullptr);
+ mark_compact_->CreateUserfaultfd(/*post_fork*/true);
}
-#endif
// Temporarily increase target_footprint_ and concurrent_start_bytes_ to
// max values to avoid GC during app launch.
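
The one-time handshake now performed in MarkCompact::CreateUserfaultfd() looks essentially like the code removed above. A sketch with error handling elided, assuming kernel headers that provide UFFD_USER_MODE_ONLY:

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

// Sketch: open the userfaultfd and negotiate the API version once per fd.
int OpenUserfaultfd() {
  int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
  if (fd >= 0) {
    struct uffdio_api api = {.api = UFFD_API, .features = 0, .ioctls = 0};
    ioctl(fd, UFFDIO_API, &api);
  }
  return fd;
}
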
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 232c96b914..044999d33b 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -87,6 +87,7 @@ class RememberedSet;
namespace collector {
class ConcurrentCopying;
class GarbageCollector;
+class MarkCompact;
class MarkSweep;
class SemiSpace;
} // namespace collector
@@ -150,7 +151,7 @@ class Heap {
static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
// Whether or not parallel GC is enabled. If not, then we never create the thread pool.
- static constexpr bool kDefaultEnableParallelGC = false;
+ static constexpr bool kDefaultEnableParallelGC = true;
static uint8_t* const kPreferredAllocSpaceBegin;
// Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
@@ -385,6 +386,9 @@ class Heap {
void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);
+ // Ensures that accessing obj can't trigger a userfault in JNI critical calls.
+ void EnsureObjectUserfaulted(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Clear all of the mark bits, doesn't clear bitmaps which have the same live bits as mark bits.
// Mutator lock is required for GetContinuousSpaces.
void ClearMarkedObjects()
@@ -578,6 +582,9 @@ class Heap {
return region_space_;
}
+ space::BumpPointerSpace* GetBumpPointerSpace() const {
+ return bump_pointer_space_;
+ }
// Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
// consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
// were specified. Android apps start with a growth limit (small heap size) which is
@@ -661,6 +668,10 @@ class Heap {
return live_stack_.get();
}
+ accounting::ObjectStack* GetAllocationStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
+ return allocation_stack_.get();
+ }
+
void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;
// Mark and empty stack.
@@ -760,8 +771,10 @@ class Heap {
REQUIRES(!*gc_complete_lock_);
void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
- // Thread pool.
- void CreateThreadPool();
+ // Thread pool. Creates either the given number of threads, or, when
+ // num_threads is 0, the number implied by conc_gc_threads_ and parallel_gc_threads_.
+ void CreateThreadPool(size_t num_threads = 0);
+ void WaitForWorkersToBeCreated();
void DeleteThreadPool();
ThreadPool* GetThreadPool() {
return thread_pool_.get();
@@ -812,6 +825,10 @@ class Heap {
return active_collector;
}
+ collector::MarkCompact* MarkCompactCollector() {
+ return mark_compact_;
+ }
+
CollectorType CurrentCollectorType() {
return collector_type_;
}
@@ -939,6 +956,7 @@ class Heap {
REQUIRES(!Locks::alloc_tracker_lock_);
void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
+ bool IsGCDisabledForShutdown() const REQUIRES(!*gc_complete_lock_);
// Create a new alloc space and compact default alloc space to it.
HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact()
@@ -1001,9 +1019,6 @@ class Heap {
return main_space_backup_ != nullptr;
}
- // Attempt to use all the userfaultfd related ioctls.
- void MaybePerformUffdIoctls(GcCause cause, uint32_t requested_gc_num) const;
-
// Size_t saturating arithmetic
static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
return x > y ? x - y : 0;
@@ -1019,19 +1034,11 @@ class Heap {
allocator_type != kAllocatorTypeTLAB &&
allocator_type != kAllocatorTypeRegion;
}
- static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
- if (kUseReadBarrier) {
- // Read barrier may have the TLAB allocator but is always concurrent. TODO: clean this up.
- return true;
- }
- return
- allocator_type != kAllocatorTypeTLAB &&
- allocator_type != kAllocatorTypeBumpPointer;
- }
static bool IsMovingGc(CollectorType collector_type) {
return
collector_type == kCollectorTypeCC ||
collector_type == kCollectorTypeSS ||
+ collector_type == kCollectorTypeCMC ||
collector_type == kCollectorTypeCCBackground ||
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
@@ -1223,6 +1230,7 @@ class Heap {
// sweep GC, false for other GC types.
bool IsGcConcurrent() const ALWAYS_INLINE {
return collector_type_ == kCollectorTypeCC ||
+ collector_type_ == kCollectorTypeCMC ||
collector_type_ == kCollectorTypeCMS ||
collector_type_ == kCollectorTypeCCBackground;
}
@@ -1588,6 +1596,7 @@ class Heap {
std::vector<collector::GarbageCollector*> garbage_collectors_;
collector::SemiSpace* semi_space_collector_;
+ collector::MarkCompact* mark_compact_;
Atomic<collector::ConcurrentCopying*> active_concurrent_copying_collector_;
collector::ConcurrentCopying* young_concurrent_copying_collector_;
collector::ConcurrentCopying* concurrent_copying_collector_;
@@ -1680,9 +1689,6 @@ class Heap {
// Stack trace hashes that we already saw,
std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
- // Userfaultfd file descriptor.
- // TODO (lokeshgidra): remove this when the userfaultfd-based GC is in use.
- int uffd_;
// We disable GC when we are shutting down the runtime in case there are daemon threads still
// allocating.
bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
@@ -1712,6 +1718,7 @@ class Heap {
friend class CollectorTransitionTask;
friend class collector::GarbageCollector;
friend class collector::ConcurrentCopying;
+ friend class collector::MarkCompact;
friend class collector::MarkSweep;
friend class collector::SemiSpace;
friend class GCCriticalSection;
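
A usage sketch for the extended CreateThreadPool() declared above; the worker count of 2 is arbitrary:

#include "gc/heap.h"

void PrepareGcWorkers(art::gc::Heap* heap) {
  heap->CreateThreadPool(/*num_threads=*/2);  // 0 (default) falls back to the *_gc_threads_ options
  heap->WaitForWorkersToBeCreated();          // ensure workers exist before handing out work
}
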
diff --git a/runtime/gc/heap_verification_test.cc b/runtime/gc/heap_verification_test.cc
index ca6a30b11d..789a8e398f 100644
--- a/runtime/gc/heap_verification_test.cc
+++ b/runtime/gc/heap_verification_test.cc
@@ -26,7 +26,7 @@
#include "mirror/string.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
-#include "verification.h"
+#include "verification-inl.h"
namespace art {
namespace gc {
@@ -76,11 +76,11 @@ TEST_F(VerificationTest, IsValidClassOrNotInHeap) {
Handle<mirror::String> string(
hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "test")));
const Verification* const v = Runtime::Current()->GetHeap()->GetVerification();
- EXPECT_FALSE(v->IsValidClass(reinterpret_cast<const void*>(1)));
- EXPECT_FALSE(v->IsValidClass(reinterpret_cast<const void*>(4)));
+ EXPECT_FALSE(v->IsValidClass(reinterpret_cast<mirror::Class*>(1)));
+ EXPECT_FALSE(v->IsValidClass(reinterpret_cast<mirror::Class*>(4)));
EXPECT_FALSE(v->IsValidClass(nullptr));
EXPECT_TRUE(v->IsValidClass(string->GetClass()));
- EXPECT_FALSE(v->IsValidClass(string.Get()));
+ EXPECT_FALSE(v->IsValidClass(reinterpret_cast<mirror::Class*>(string.Get())));
}
TEST_F(VerificationTest, IsValidClassInHeap) {
@@ -95,9 +95,9 @@ TEST_F(VerificationTest, IsValidClassInHeap) {
Handle<mirror::String> string(
hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "test")));
const Verification* const v = Runtime::Current()->GetHeap()->GetVerification();
- const uintptr_t uint_klass = reinterpret_cast<uintptr_t>(string->GetClass());
- EXPECT_FALSE(v->IsValidClass(reinterpret_cast<const void*>(uint_klass - kObjectAlignment)));
- EXPECT_FALSE(v->IsValidClass(reinterpret_cast<const void*>(&uint_klass)));
+ uintptr_t uint_klass = reinterpret_cast<uintptr_t>(string->GetClass());
+ EXPECT_FALSE(v->IsValidClass(reinterpret_cast<mirror::Class*>(uint_klass - kObjectAlignment)));
+ EXPECT_FALSE(v->IsValidClass(reinterpret_cast<mirror::Class*>(&uint_klass)));
}
TEST_F(VerificationTest, DumpInvalidObjectInfo) {
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
index 5e41ee4ef8..772174f885 100644
--- a/runtime/gc/reference_processor.cc
+++ b/runtime/gc/reference_processor.cc
@@ -90,7 +90,7 @@ void ReferenceProcessor::BroadcastForSlowPath(Thread* self) {
ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
ObjPtr<mirror::Reference> reference) {
auto slow_path_required = [this, self]() REQUIRES_SHARED(Locks::mutator_lock_) {
- return kUseReadBarrier ? !self->GetWeakRefAccessEnabled() : SlowPathEnabled();
+ return gUseReadBarrier ? !self->GetWeakRefAccessEnabled() : SlowPathEnabled();
};
if (!slow_path_required()) {
return reference->GetReferent();
@@ -118,10 +118,10 @@ ObjPtr<mirror::Object> ReferenceProcessor::GetReferent(Thread* self,
// Keeping reference_processor_lock_ blocks the broadcast when we try to reenable the fast path.
while (slow_path_required()) {
DCHECK(collector_ != nullptr);
- constexpr bool kOtherReadBarrier = kUseReadBarrier && !kUseBakerReadBarrier;
+ const bool other_read_barrier = !kUseBakerReadBarrier && gUseReadBarrier;
if (UNLIKELY(reference->IsFinalizerReferenceInstance()
|| rp_state_ == RpState::kStarting /* too early to determine mark state */
- || (kOtherReadBarrier && reference->IsPhantomReferenceInstance()))) {
+ || (other_read_barrier && reference->IsPhantomReferenceInstance()))) {
// Odd cases in which it doesn't hurt to just wait, or the wait is likely to be very brief.
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
@@ -210,7 +210,7 @@ void ReferenceProcessor::ProcessReferences(Thread* self, TimingLogger* timings)
}
{
MutexLock mu(self, *Locks::reference_processor_lock_);
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
CHECK_EQ(SlowPathEnabled(), concurrent_) << "Slow path must be enabled iff concurrent";
} else {
// Weak ref access is enabled at Zygote compaction by SemiSpace (concurrent_ == false).
@@ -305,7 +305,7 @@ void ReferenceProcessor::ProcessReferences(Thread* self, TimingLogger* timings)
// could result in a stale is_marked_callback_ being called before the reference processing
// starts since there is a small window of time where slow_path_enabled_ is enabled but the
// callback isn't yet set.
- if (!kUseReadBarrier && concurrent_) {
+ if (!gUseReadBarrier && concurrent_) {
// Done processing, disable the slow path and broadcast to the waiters.
DisableSlowPath(self);
}
@@ -418,8 +418,8 @@ void ReferenceProcessor::ClearReferent(ObjPtr<mirror::Reference> ref) {
void ReferenceProcessor::WaitUntilDoneProcessingReferences(Thread* self) {
// Wait until we are done processing reference.
- while ((!kUseReadBarrier && SlowPathEnabled()) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
+ while ((!gUseReadBarrier && SlowPathEnabled()) ||
+ (gUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpointFromWeakRefAccess(Locks::reference_processor_lock_);
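
The shape of the kUseReadBarrier to gUseReadBarrier rewrite in this file, as a sketch; how gUseReadBarrier is actually defined is an assumption here:

extern bool gUseReadBarrier;                 // assumption: a runtime flag set at startup
constexpr bool kUseBakerReadBarrier = true;  // still a compile-time constant in this sketch

bool OtherReadBarrier() {
  // Was 'constexpr bool kOtherReadBarrier'; a runtime flag forces per-call evaluation.
  const bool other = !kUseBakerReadBarrier && gUseReadBarrier;
  return other;
}
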
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 20f7a93eb1..2774b9e71c 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -20,6 +20,7 @@
#include "bump_pointer_space.h"
#include "base/bit_utils.h"
+#include "mirror/object-inl.h"
namespace art {
namespace gc {
@@ -89,6 +90,11 @@ inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
return ret;
}
+inline mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
+ const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
+ return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
+}
+
} // namespace space
} // namespace gc
} // namespace art
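
A compile-time check of GetNextObject()'s rounding above, assuming an 8-byte kObjectAlignment:

#include <cstdint>

constexpr uintptr_t RoundUpTo(uintptr_t v, uintptr_t align) {
  return (v + align - 1) & ~(align - 1);
}
// A 13-byte object at 0x1000 is followed by the next object at 0x1010.
static_assert(RoundUpTo(0x1000 + 13, 8) == 0x1010, "next object is kAlignment-aligned");
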
diff --git a/runtime/gc/space/bump_pointer_space-walk-inl.h b/runtime/gc/space/bump_pointer_space-walk-inl.h
index 5d05ea2d65..a978f62c61 100644
--- a/runtime/gc/space/bump_pointer_space-walk-inl.h
+++ b/runtime/gc/space/bump_pointer_space-walk-inl.h
@@ -17,12 +17,14 @@
#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
-#include "bump_pointer_space.h"
+#include "bump_pointer_space-inl.h"
#include "base/bit_utils.h"
#include "mirror/object-inl.h"
#include "thread-current-inl.h"
+#include <memory>
+
namespace art {
namespace gc {
namespace space {
@@ -32,6 +34,7 @@ inline void BumpPointerSpace::Walk(Visitor&& visitor) {
uint8_t* pos = Begin();
uint8_t* end = End();
uint8_t* main_end = pos;
+ std::unique_ptr<std::vector<size_t>> block_sizes_copy;
// Internal indirection w/ NO_THREAD_SAFETY_ANALYSIS. Optimally, we'd like to have an annotation
// like
// REQUIRES_AS(visitor.operator(mirror::Object*))
@@ -49,15 +52,17 @@ inline void BumpPointerSpace::Walk(Visitor&& visitor) {
MutexLock mu(Thread::Current(), block_lock_);
// If we have 0 blocks then we need to update the main header since we have bump pointer style
// allocation into an unbounded region (actually bounded by Capacity()).
- if (num_blocks_ == 0) {
+ if (block_sizes_.empty()) {
UpdateMainBlock();
}
main_end = Begin() + main_block_size_;
- if (num_blocks_ == 0) {
+ if (block_sizes_.empty()) {
// We don't have any other blocks, this means someone else may be allocating into the main
// block. In this case, we don't want to try and visit the other blocks after the main block
// since these could actually be part of the main block.
end = main_end;
+ } else {
+ block_sizes_copy.reset(new std::vector<size_t>(block_sizes_.begin(), block_sizes_.end()));
}
}
// Walk all of the objects in the main block first.
@@ -66,31 +71,33 @@ inline void BumpPointerSpace::Walk(Visitor&& visitor) {
// No read barrier because obj may not be a valid object.
if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
// There is a race condition where a thread has just allocated an object but not set the
- // class. We can't know the size of this object, so we don't visit it and exit the function
- // since there is guaranteed to be not other blocks.
- return;
+ // class. We can't know the size of this object, so we don't visit it and break out of the loop.
+ pos = main_end;
+ break;
} else {
no_thread_safety_analysis_visit(obj);
pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
}
}
// Walk the other blocks (currently only TLABs).
- while (pos < end) {
- BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
- size_t block_size = header->size_;
- pos += sizeof(BlockHeader); // Skip the header so that we know where the objects
- mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
- const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
- CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
- // We don't know how many objects are allocated in the current block. When we hit a null class
- // assume its the end. TODO: Have a thread update the header when it flushes the block?
- // No read barrier because obj may not be a valid object.
- while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
- no_thread_safety_analysis_visit(obj);
- obj = GetNextObject(obj);
+ if (block_sizes_copy != nullptr) {
+ for (size_t block_size : *block_sizes_copy) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
+ const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
+ CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
+ // We don't know how many objects are allocated in the current block. When we hit a null class
+ // assume it's the end. TODO: Have a thread update the header when it flushes the block?
+ // No read barrier because obj may not be a valid object.
+ while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
+ no_thread_safety_analysis_visit(obj);
+ obj = GetNextObject(obj);
+ }
+ pos += block_size;
}
- pos += block_size;
+ } else {
+ CHECK_EQ(end, main_end);
}
+ CHECK_EQ(pos, end);
}
} // namespace space
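
The rewritten Walk() above follows a snapshot-then-walk pattern. Its generic form, as a sketch with std::mutex standing in for art::Mutex:

#include <deque>
#include <mutex>
#include <vector>

// Copy the block sizes while holding the lock, then visit objects without it,
// so a long-running visitor doesn't block threads flushing new TLAB blocks.
std::vector<size_t> SnapshotBlockSizes(std::mutex& block_lock,
                                       const std::deque<size_t>& block_sizes) {
  std::lock_guard<std::mutex> guard(block_lock);
  return std::vector<size_t>(block_sizes.begin(), block_sizes.end());
}
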
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 3a0155a278..7753f73ca4 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -54,8 +54,9 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint
growth_end_(limit),
objects_allocated_(0), bytes_allocated_(0),
block_lock_("Block lock"),
- main_block_size_(0),
- num_blocks_(0) {
+ main_block_size_(0) {
+ // This constructor gets called only from Heap::PreZygoteFork(), which
+ // doesn't require a mark_bitmap.
}
BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
@@ -68,8 +69,11 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
growth_end_(mem_map_.End()),
objects_allocated_(0), bytes_allocated_(0),
block_lock_("Block lock", kBumpPointerSpaceBlockLock),
- main_block_size_(0),
- num_blocks_(0) {
+ main_block_size_(0) {
+ mark_bitmap_ =
+ accounting::ContinuousSpaceBitmap::Create("bump-pointer space live bitmap",
+ Begin(),
+ Capacity());
}
void BumpPointerSpace::Clear() {
@@ -86,7 +90,7 @@ void BumpPointerSpace::Clear() {
growth_end_ = Limit();
{
MutexLock mu(Thread::Current(), block_lock_);
- num_blocks_ = 0;
+ block_sizes_.clear();
main_block_size_ = 0;
}
}
@@ -97,11 +101,6 @@ void BumpPointerSpace::Dump(std::ostream& os) const {
<< reinterpret_cast<void*>(Limit());
}
-mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
- const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
- return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
-}
-
size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
MutexLock mu(Thread::Current(), block_lock_);
RevokeThreadLocalBuffersLocked(thread);
@@ -141,23 +140,19 @@ void BumpPointerSpace::AssertAllThreadLocalBuffersAreRevoked() {
}
void BumpPointerSpace::UpdateMainBlock() {
- DCHECK_EQ(num_blocks_, 0U);
+ DCHECK(block_sizes_.empty());
main_block_size_ = Size();
}
// Returns the start of the storage.
uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
bytes = RoundUp(bytes, kAlignment);
- if (!num_blocks_) {
+ if (block_sizes_.empty()) {
UpdateMainBlock();
}
- uint8_t* storage = reinterpret_cast<uint8_t*>(
- AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
+ uint8_t* storage = reinterpret_cast<uint8_t*>(AllocNonvirtualWithoutAccounting(bytes));
if (LIKELY(storage != nullptr)) {
- BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
- header->size_ = bytes; // Write out the block header.
- storage += sizeof(BlockHeader);
- ++num_blocks_;
+ block_sizes_.push_back(bytes);
}
return storage;
}
@@ -177,7 +172,7 @@ uint64_t BumpPointerSpace::GetBytesAllocated() {
MutexLock mu3(Thread::Current(), block_lock_);
// If we don't have any blocks, we don't have any thread local buffers. This check is required
// since there can exist multiple bump pointer spaces which exist at the same time.
- if (num_blocks_ > 0) {
+ if (!block_sizes_.empty()) {
for (Thread* thread : thread_list) {
total += thread->GetThreadLocalBytesAllocated();
}
@@ -195,7 +190,7 @@ uint64_t BumpPointerSpace::GetObjectsAllocated() {
MutexLock mu3(Thread::Current(), block_lock_);
// If we don't have any blocks, we don't have any thread local buffers. This check is required
// since there can exist multiple bump pointer spaces which exist at the same time.
- if (num_blocks_ > 0) {
+ if (!block_sizes_.empty()) {
for (Thread* thread : thread_list) {
total += thread->GetThreadLocalObjectsAllocated();
}
@@ -240,6 +235,52 @@ size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* u
return num_bytes;
}
+uint8_t* BumpPointerSpace::AlignEnd(Thread* self, size_t alignment) {
+ Locks::mutator_lock_->AssertExclusiveHeld(self);
+ DCHECK(IsAligned<kAlignment>(alignment));
+ uint8_t* end = end_.load(std::memory_order_relaxed);
+ uint8_t* aligned_end = AlignUp(end, alignment);
+ ptrdiff_t diff = aligned_end - end;
+ if (diff > 0) {
+ end_.store(aligned_end, std::memory_order_relaxed);
+ // If there are blocks after the main one, then just add the diff to the
+ // last block.
+ MutexLock mu(self, block_lock_);
+ if (!block_sizes_.empty()) {
+ block_sizes_.back() += diff;
+ }
+ }
+ return end;
+}
+
+std::vector<size_t>* BumpPointerSpace::GetBlockSizes(Thread* self, size_t* main_block_size) {
+ std::vector<size_t>* block_sizes = nullptr;
+ MutexLock mu(self, block_lock_);
+ if (!block_sizes_.empty()) {
+ block_sizes = new std::vector<size_t>(block_sizes_.begin(), block_sizes_.end());
+ } else {
+ UpdateMainBlock();
+ }
+ *main_block_size = main_block_size_;
+ return block_sizes;
+}
+
+void BumpPointerSpace::SetBlockSizes(Thread* self,
+ const size_t main_block_size,
+ const size_t first_valid_idx) {
+ MutexLock mu(self, block_lock_);
+ main_block_size_ = main_block_size;
+ if (!block_sizes_.empty()) {
+ block_sizes_.erase(block_sizes_.begin(), block_sizes_.begin() + first_valid_idx);
+ }
+ size_t size = main_block_size;
+ for (size_t block_size : block_sizes_) {
+ size += block_size;
+ }
+ DCHECK(IsAligned<kAlignment>(size));
+ end_.store(Begin() + size, std::memory_order_relaxed);
+}
+
} // namespace space
} // namespace gc
} // namespace art
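
A worked instance of SetBlockSizes() above, with illustrative sizes:

// With a 4096-byte main block and block_sizes_ = {128, 256, 512}, compaction
// that consumes the first two TLAB blocks into the main block passes
// main_block_size = 4096 + 128 + 256 and first_valid_idx = 2; block_sizes_
// becomes {512} and end_ moves to Begin() + 4480 + 512.
static_assert(4096 + 128 + 256 == 4480, "new main-block size");
static_assert(4480 + 512 == 4992, "new end_ offset from Begin()");
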
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 08ed503b5f..bba171109d 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -17,9 +17,10 @@
#ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
#define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_H_
+#include "base/mutex.h"
#include "space.h"
-#include "base/mutex.h"
+#include <deque>
namespace art {
@@ -30,6 +31,7 @@ class Object;
namespace gc {
namespace collector {
+class MarkCompact;
class MarkSweep;
} // namespace collector
@@ -39,7 +41,7 @@ namespace space {
// implementation as its intended to be evacuated.
class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
public:
- typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+ using WalkCallback = void (*)(void *, void *, int, void *);
SpaceType GetType() const override {
return kSpaceTypeBumpPointerSpace;
@@ -100,10 +102,6 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
return nullptr;
}
- accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
- return nullptr;
- }
-
// Reset the space to empty.
void Clear() override REQUIRES(!block_lock_);
@@ -120,6 +118,11 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
uint64_t GetObjectsAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
+ // Return the object count accumulated from revoked thread-local regions. This
+ // is useful when we know that all the TLABs have been revoked, making it exact.
+ int32_t GetAccumulatedObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return objects_allocated_.load(std::memory_order_relaxed);
+ }
bool IsEmpty() const {
return Begin() == End();
}
@@ -128,18 +131,9 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
return true;
}
- bool Contains(const mirror::Object* obj) const override {
- const uint8_t* byte_obj = reinterpret_cast<const uint8_t*>(obj);
- return byte_obj >= Begin() && byte_obj < End();
- }
-
// TODO: Change this? Mainly used for compacting to a particular region of memory.
BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);
- // Return the object which comes after obj, while ensuring alignment.
- static mirror::Object* GetNextObject(mirror::Object* obj)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Allocate a new TLAB, returns false if the allocation failed.
bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
@@ -165,7 +159,7 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
REQUIRES_SHARED(Locks::mutator_lock_);
// Object alignment within the space.
- static constexpr size_t kAlignment = 8;
+ static constexpr size_t kAlignment = kObjectAlignment;
protected:
BumpPointerSpace(const std::string& name, MemMap&& mem_map);
@@ -183,23 +177,40 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
AtomicInteger objects_allocated_; // Accumulated from revoked thread local regions.
AtomicInteger bytes_allocated_; // Accumulated from revoked thread local regions.
Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- // The objects at the start of the space are stored in the main block. The main block doesn't
- // have a header, this lets us walk empty spaces which are mprotected.
+ // The objects at the start of the space are stored in the main block.
size_t main_block_size_ GUARDED_BY(block_lock_);
- // The number of blocks in the space, if it is 0 then the space has one long continuous block
- // which doesn't have an updated header.
- size_t num_blocks_ GUARDED_BY(block_lock_);
+ // List of block sizes (in bytes) after the main-block. Needed for Walk().
+ // If empty then the space has only one long continuous block. Each TLAB
+ // allocation has one entry in this deque.
+ // Keeping block-sizes off-heap simplifies sliding compaction algorithms.
+ // The compaction algorithm should ideally compact all objects into the main
+ // block, thereby enabling erasing corresponding entries from here.
+ std::deque<size_t> block_sizes_ GUARDED_BY(block_lock_);
private:
- struct BlockHeader {
- size_t size_; // Size of the block in bytes, does not include the header.
- size_t unused_; // Ensures alignment of kAlignment.
- };
+ // Return the object which comes after obj, while ensuring alignment.
+ static mirror::Object* GetNextObject(mirror::Object* obj)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Return a vector of block sizes on the space. Required by MarkCompact GC for
+ // walking black objects allocated after marking phase.
+ std::vector<size_t>* GetBlockSizes(Thread* self, size_t* main_block_size) REQUIRES(!block_lock_);
+
+ // Once MarkCompact decides the post-compact layout of the space in the
+ // pre-compaction pause, it calls this function to update the block sizes. The
+ // new main-block size, which consumes a number of leading blocks into itself,
+ // is passed along with the index of the first unconsumed block. This works
+ // because the block sizes are ordered. Also updates 'end_' to reflect the change.
+ void SetBlockSizes(Thread* self, const size_t main_block_size, const size_t first_valid_idx)
+ REQUIRES(!block_lock_, Locks::mutator_lock_);
- static_assert(sizeof(BlockHeader) % kAlignment == 0,
- "continuous block must be kAlignment aligned");
+ // Align end to the given alignment. This is done in MarkCompact GC when
+ // mutators are suspended so that upcoming TLAB allocations start with a new
+ // page. Returns the pre-alignment end.
+ uint8_t* AlignEnd(Thread* self, size_t alignment) REQUIRES(Locks::mutator_lock_);
friend class collector::MarkSweep;
+ friend class collector::MarkCompact;
DISALLOW_COPY_AND_ASSIGN(BumpPointerSpace);
};
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index 4fc4adac91..6041fd02af 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -18,7 +18,7 @@
#define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_INL_H_
#include "dlmalloc_space.h"
-#include "gc/allocator/dlmalloc.h"
+#include "gc/allocator/art-dlmalloc.h"
#include "thread.h"
namespace art {
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 25cac7efde..1edcdbdf91 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -350,11 +350,18 @@ void DlMallocSpace::CheckMoreCoreForPrecondition() {
}
#endif
+struct MspaceCbArgs {
+ size_t max_contiguous;
+ size_t used;
+};
+
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
+ MspaceCbArgs* mspace_cb_args = reinterpret_cast<MspaceCbArgs*>(arg);
+ mspace_cb_args->used += used_bytes;
if (used_bytes < chunk_size) {
size_t chunk_free_bytes = chunk_size - used_bytes;
- size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
+ size_t& max_contiguous_allocation = mspace_cb_args->max_contiguous;
max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
}
}
@@ -362,16 +369,17 @@ static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void*
bool DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os,
size_t failed_alloc_bytes) {
Thread* const self = Thread::Current();
- size_t max_contiguous_allocation = 0;
+ MspaceCbArgs mspace_cb_args = {0, 0};
// To allow the Walk/InspectAll() to exclusively-lock the mutator
// lock, temporarily release the shared access to the mutator
// lock here by transitioning to the suspended state.
Locks::mutator_lock_->AssertSharedHeld(self);
ScopedThreadSuspension sts(self, ThreadState::kSuspended);
- Walk(MSpaceChunkCallback, &max_contiguous_allocation);
- if (failed_alloc_bytes > max_contiguous_allocation) {
- os << "; failed due to fragmentation (largest possible contiguous allocation "
- << max_contiguous_allocation << " bytes)";
+ Walk(MSpaceChunkCallback, &mspace_cb_args);
+ if (failed_alloc_bytes > mspace_cb_args.max_contiguous) {
+ os << "; failed due to malloc_space fragmentation (largest possible contiguous allocation "
+ << mspace_cb_args.max_contiguous << " bytes, space in use " << mspace_cb_args.used
+ << " bytes, capacity = " << Capacity() << ")";
return true;
}
return false;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 6afd63e4a5..44b0613356 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -23,7 +23,9 @@
#include <memory>
#include <random>
#include <string>
+#include <vector>
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
#include "android-base/unique_fd.h"
@@ -1366,9 +1368,9 @@ class ImageSpace::Loader {
}
};
-static void AppendImageChecksum(uint32_t component_count,
- uint32_t checksum,
- /*inout*/std::string* checksums) {
+void ImageSpace::AppendImageChecksum(uint32_t component_count,
+ uint32_t checksum,
+ /*inout*/ std::string* checksums) {
static_assert(ImageSpace::kImageChecksumPrefix == 'i', "Format prefix check.");
StringAppendF(checksums, "i;%u/%08x", component_count, checksum);
}
@@ -1378,7 +1380,7 @@ static bool CheckAndRemoveImageChecksum(uint32_t component_count,
/*inout*/std::string_view* oat_checksums,
/*out*/std::string* error_msg) {
std::string image_checksum;
- AppendImageChecksum(component_count, checksum, &image_checksum);
+ ImageSpace::AppendImageChecksum(component_count, checksum, &image_checksum);
if (!StartsWith(*oat_checksums, image_checksum)) {
*error_msg = StringPrintf("Image checksum mismatch, expected %s to start with %s",
std::string(*oat_checksums).c_str(),
@@ -1389,182 +1391,6 @@ static bool CheckAndRemoveImageChecksum(uint32_t component_count,
return true;
}
-// Helper class to find the primary boot image and boot image extensions
-// and determine the boot image layout.
-class ImageSpace::BootImageLayout {
- public:
- // Description of a "chunk" of the boot image, i.e. either primary boot image
- // or a boot image extension, used in conjunction with the boot class path to
- // load boot image components.
- struct ImageChunk {
- std::string base_location;
- std::string base_filename;
- std::vector<std::string> profile_files;
- size_t start_index;
- uint32_t component_count;
- uint32_t image_space_count;
- uint32_t reservation_size;
- uint32_t checksum;
- uint32_t boot_image_component_count;
- uint32_t boot_image_checksum;
- uint32_t boot_image_size;
-
- // The following file descriptors hold the memfd files for extensions compiled
- // in memory and described by the above fields. We want to use them to mmap()
- // the contents and then close them while treating the ImageChunk description
- // as immutable (const), so make these fields explicitly mutable.
- mutable android::base::unique_fd art_fd;
- mutable android::base::unique_fd vdex_fd;
- mutable android::base::unique_fd oat_fd;
- };
-
- BootImageLayout(ArrayRef<const std::string> image_locations,
- ArrayRef<const std::string> boot_class_path,
- ArrayRef<const std::string> boot_class_path_locations,
- ArrayRef<const int> boot_class_path_fds,
- ArrayRef<const int> boot_class_path_image_fds,
- ArrayRef<const int> boot_class_path_vdex_fds,
- ArrayRef<const int> boot_class_path_oat_fds)
- : image_locations_(image_locations),
- boot_class_path_(boot_class_path),
- boot_class_path_locations_(boot_class_path_locations),
- boot_class_path_fds_(boot_class_path_fds),
- boot_class_path_image_fds_(boot_class_path_image_fds),
- boot_class_path_vdex_fds_(boot_class_path_vdex_fds),
- boot_class_path_oat_fds_(boot_class_path_oat_fds) {}
-
- std::string GetPrimaryImageLocation();
-
- bool LoadFromSystem(InstructionSet image_isa, /*out*/std::string* error_msg) {
- return LoadOrValidateFromSystem(image_isa, /*oat_checksums=*/ nullptr, error_msg);
- }
-
- bool ValidateFromSystem(InstructionSet image_isa,
- /*inout*/std::string_view* oat_checksums,
- /*out*/std::string* error_msg) {
- DCHECK(oat_checksums != nullptr);
- return LoadOrValidateFromSystem(image_isa, oat_checksums, error_msg);
- }
-
- ArrayRef<const ImageChunk> GetChunks() const {
- return ArrayRef<const ImageChunk>(chunks_);
- }
-
- uint32_t GetBaseAddress() const {
- return base_address_;
- }
-
- size_t GetNextBcpIndex() const {
- return next_bcp_index_;
- }
-
- size_t GetTotalComponentCount() const {
- return total_component_count_;
- }
-
- size_t GetTotalReservationSize() const {
- return total_reservation_size_;
- }
-
- private:
- struct NamedComponentLocation {
- std::string base_location;
- size_t bcp_index;
- std::vector<std::string> profile_filenames;
- };
-
- std::string ExpandLocationImpl(const std::string& location,
- size_t bcp_index,
- bool boot_image_extension) {
- std::vector<std::string> expanded = ExpandMultiImageLocations(
- ArrayRef<const std::string>(boot_class_path_).SubArray(bcp_index, 1u),
- location,
- boot_image_extension);
- DCHECK_EQ(expanded.size(), 1u);
- return expanded[0];
- }
-
- std::string ExpandLocation(const std::string& location, size_t bcp_index) {
- if (bcp_index == 0u) {
- DCHECK_EQ(location, ExpandLocationImpl(location, bcp_index, /*boot_image_extension=*/ false));
- return location;
- } else {
- return ExpandLocationImpl(location, bcp_index, /*boot_image_extension=*/ true);
- }
- }
-
- std::string GetBcpComponentPath(size_t bcp_index) {
- DCHECK_LE(bcp_index, boot_class_path_.size());
- size_t bcp_slash_pos = boot_class_path_[bcp_index].rfind('/');
- DCHECK_NE(bcp_slash_pos, std::string::npos);
- return boot_class_path_[bcp_index].substr(0u, bcp_slash_pos + 1u);
- }
-
- bool VerifyImageLocation(ArrayRef<const std::string> components,
- /*out*/size_t* named_components_count,
- /*out*/std::string* error_msg);
-
- bool MatchNamedComponents(
- ArrayRef<const std::string> named_components,
- /*out*/std::vector<NamedComponentLocation>* named_component_locations,
- /*out*/std::string* error_msg);
-
- bool ValidateBootImageChecksum(const char* file_description,
- const ImageHeader& header,
- /*out*/std::string* error_msg);
-
- bool ValidateHeader(const ImageHeader& header,
- size_t bcp_index,
- const char* file_description,
- /*out*/std::string* error_msg);
-
- bool ValidateOatFile(const std::string& base_location,
- const std::string& base_filename,
- size_t bcp_index,
- size_t component_count,
- /*out*/std::string* error_msg);
-
- bool ReadHeader(const std::string& base_location,
- const std::string& base_filename,
- size_t bcp_index,
- /*out*/std::string* error_msg);
-
- // Compiles a consecutive subsequence of bootclasspath dex files, whose contents are included in
- // the profiles specified by `profile_filenames`, starting from `bcp_index`.
- bool CompileBootclasspathElements(const std::string& base_location,
- const std::string& base_filename,
- size_t bcp_index,
- const std::vector<std::string>& profile_filenames,
- ArrayRef<const std::string> dependencies,
- /*out*/std::string* error_msg);
-
- bool CheckAndRemoveLastChunkChecksum(/*inout*/std::string_view* oat_checksums,
- /*out*/std::string* error_msg);
-
- template <typename FilenameFn>
- bool LoadOrValidate(FilenameFn&& filename_fn,
- /*inout*/std::string_view* oat_checksums,
- /*out*/std::string* error_msg);
-
- bool LoadOrValidateFromSystem(InstructionSet image_isa,
- /*inout*/std::string_view* oat_checksums,
- /*out*/std::string* error_msg);
-
- ArrayRef<const std::string> image_locations_;
- ArrayRef<const std::string> boot_class_path_;
- ArrayRef<const std::string> boot_class_path_locations_;
- ArrayRef<const int> boot_class_path_fds_;
- ArrayRef<const int> boot_class_path_image_fds_;
- ArrayRef<const int> boot_class_path_vdex_fds_;
- ArrayRef<const int> boot_class_path_oat_fds_;
-
- std::vector<ImageChunk> chunks_;
- uint32_t base_address_ = 0u;
- size_t next_bcp_index_ = 0u;
- size_t total_component_count_ = 0u;
- size_t total_reservation_size_ = 0u;
-};
-
std::string ImageSpace::BootImageLayout::GetPrimaryImageLocation() {
DCHECK(!image_locations_.empty());
std::string location = image_locations_[0];
@@ -1886,7 +1712,7 @@ bool ImageSpace::BootImageLayout::ValidateOatFile(
error_msg->c_str());
return false;
}
- if (!ImageSpace::ValidateOatFile(*oat_file, error_msg, dex_filenames, dex_fds)) {
+ if (!ImageSpace::ValidateOatFile(*oat_file, error_msg, dex_filenames, dex_fds, apex_versions_)) {
return false;
}
return true;
@@ -2151,48 +1977,12 @@ bool ImageSpace::BootImageLayout::CompileBootclasspathElements(
return true;
}
-bool ImageSpace::BootImageLayout::CheckAndRemoveLastChunkChecksum(
- /*inout*/std::string_view* oat_checksums,
- /*out*/std::string* error_msg) {
- DCHECK(oat_checksums != nullptr);
- DCHECK(!chunks_.empty());
- const ImageChunk& chunk = chunks_.back();
- size_t component_count = chunk.component_count;
- size_t checksum = chunk.checksum;
- if (!CheckAndRemoveImageChecksum(component_count, checksum, oat_checksums, error_msg)) {
- DCHECK(!error_msg->empty());
- return false;
- }
- if (oat_checksums->empty()) {
- if (next_bcp_index_ != boot_class_path_.size()) {
- *error_msg = StringPrintf("Checksum too short, missing %zu components.",
- boot_class_path_.size() - next_bcp_index_);
- return false;
- }
- return true;
- }
- if (!StartsWith(*oat_checksums, ":")) {
- *error_msg = StringPrintf("Missing ':' separator at start of %s",
- std::string(*oat_checksums).c_str());
- return false;
- }
- oat_checksums->remove_prefix(1u);
- if (oat_checksums->empty()) {
- *error_msg = "Missing checksums after the ':' separator.";
- return false;
- }
- return true;
-}
-
template <typename FilenameFn>
-bool ImageSpace::BootImageLayout::LoadOrValidate(FilenameFn&& filename_fn,
- /*inout*/std::string_view* oat_checksums,
- /*out*/std::string* error_msg) {
+bool ImageSpace::BootImageLayout::Load(FilenameFn&& filename_fn,
+ bool allow_in_memory_compilation,
+ /*out*/ std::string* error_msg) {
DCHECK(GetChunks().empty());
DCHECK_EQ(GetBaseAddress(), 0u);
- bool validate = (oat_checksums != nullptr);
- static_assert(ImageSpace::kImageChecksumPrefix == 'i', "Format prefix check.");
- DCHECK_IMPLIES(validate, StartsWith(*oat_checksums, "i"));
ArrayRef<const std::string> components = image_locations_;
size_t named_components_count = 0u;
@@ -2223,17 +2013,14 @@ bool ImageSpace::BootImageLayout::LoadOrValidate(FilenameFn&& filename_fn,
LOG(ERROR) << "Named image component already covered by previous image: " << base_location;
continue;
}
- if (validate && bcp_index > bcp_pos) {
- *error_msg = StringPrintf("End of contiguous boot class path images, remaining checksum: %s",
- std::string(*oat_checksums).c_str());
- return false;
- }
std::string local_error_msg;
- std::string* err_msg = validate ? error_msg : &local_error_msg;
std::string base_filename;
- if (!filename_fn(base_location, &base_filename, err_msg) ||
- !ReadHeader(base_location, base_filename, bcp_index, err_msg)) {
- if (validate) {
+ if (!filename_fn(base_location, &base_filename, &local_error_msg) ||
+ !ReadHeader(base_location, base_filename, bcp_index, &local_error_msg)) {
+ if (!allow_in_memory_compilation) {
+        // The boot image is unusable and we can't fall back to generating a boot image in memory.
+        // All we can do is return.
+ *error_msg = std::move(local_error_msg);
return false;
}
LOG(ERROR) << "Error reading named image component header for " << base_location
@@ -2280,14 +2067,6 @@ bool ImageSpace::BootImageLayout::LoadOrValidate(FilenameFn&& filename_fn,
continue;
}
}
- if (validate) {
- if (!CheckAndRemoveLastChunkChecksum(oat_checksums, error_msg)) {
- return false;
- }
- if (oat_checksums->empty() || !StartsWith(*oat_checksums, "i")) {
- return true; // Let the caller deal with the dex file checksums if any.
- }
- }
bcp_pos = GetNextBcpIndex();
}
@@ -2320,24 +2099,10 @@ bool ImageSpace::BootImageLayout::LoadOrValidate(FilenameFn&& filename_fn,
VLOG(image) << "Found image extension for " << ExpandLocation(base_location, bcp_pos);
bcp_pos = GetNextBcpIndex();
found = true;
- if (validate) {
- if (!CheckAndRemoveLastChunkChecksum(oat_checksums, error_msg)) {
- return false;
- }
- if (oat_checksums->empty() || !StartsWith(*oat_checksums, "i")) {
- return true; // Let the caller deal with the dex file checksums if any.
- }
- }
break;
}
}
if (!found) {
- if (validate) {
- *error_msg = StringPrintf("Missing extension for %s, remaining checksum: %s",
- bcp_component.c_str(),
- std::string(*oat_checksums).c_str());
- return false;
- }
++bcp_pos;
}
}
@@ -2346,16 +2111,16 @@ bool ImageSpace::BootImageLayout::LoadOrValidate(FilenameFn&& filename_fn,
return true;
}
-bool ImageSpace::BootImageLayout::LoadOrValidateFromSystem(InstructionSet image_isa,
- /*inout*/std::string_view* oat_checksums,
- /*out*/std::string* error_msg) {
+bool ImageSpace::BootImageLayout::LoadFromSystem(InstructionSet image_isa,
+ bool allow_in_memory_compilation,
+ /*out*/ std::string* error_msg) {
auto filename_fn = [image_isa](const std::string& location,
/*out*/std::string* filename,
/*out*/std::string* err_msg ATTRIBUTE_UNUSED) {
*filename = GetSystemImageFilename(location.c_str(), image_isa);
return true;
};
- return LoadOrValidate(filename_fn, oat_checksums, error_msg);
+ return Load(filename_fn, allow_in_memory_compilation, error_msg);
}
class ImageSpace::BootImageLoader {
@@ -3111,8 +2876,24 @@ class ImageSpace::BootImageLoader {
return false;
}
}
+
+ // As an optimization, madvise the oat file into memory if it's being used
+ // for execution with an active runtime. This can significantly improve
+ // ZygoteInit class preload performance.
+ if (executable_) {
+ Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr) {
+ Runtime::MadviseFileForRange(runtime->GetMadviseWillNeedSizeOdex(),
+ oat_file->Size(),
+ oat_file->Begin(),
+ oat_file->End(),
+ oat_file->GetLocation());
+ }
+ }
+
space->oat_file_ = std::move(oat_file);
space->oat_file_non_owned_ = space->oat_file_.get();
+
return true;
}
@@ -3357,7 +3138,7 @@ bool ImageSpace::BootImageLoader::LoadFromSystem(
boot_class_path_image_fds_,
boot_class_path_vdex_fds_,
boot_class_path_oat_fds_);
- if (!layout.LoadFromSystem(image_isa_, error_msg)) {
+ if (!layout.LoadFromSystem(image_isa_, /*allow_in_memory_compilation=*/true, error_msg)) {
return false;
}
@@ -3519,7 +3300,9 @@ void ImageSpace::Dump(std::ostream& os) const {
<< ",name=\"" << GetName() << "\"]";
}
-bool ImageSpace::ValidateApexVersions(const OatFile& oat_file, std::string* error_msg) {
+bool ImageSpace::ValidateApexVersions(const OatFile& oat_file,
+ const std::string& apex_versions,
+ std::string* error_msg) {
// For a boot image, the key value store only exists in the first OAT file. Skip other OAT files.
if (oat_file.GetOatHeader().GetKeyValueStoreSize() == 0) {
return true;
@@ -3542,27 +3325,33 @@ bool ImageSpace::ValidateApexVersions(const OatFile& oat_file, std::string* erro
// For a boot image, it can be generated from a subset of the bootclasspath.
// For an app image, some dex files get compiled with a subset of the bootclasspath.
// For such cases, the OAT APEX versions will be a prefix of the runtime APEX versions.
- if (!android::base::StartsWith(Runtime::Current()->GetApexVersions(), oat_apex_versions)) {
+ if (!android::base::StartsWith(apex_versions, oat_apex_versions)) {
*error_msg = StringPrintf(
"ValidateApexVersions found APEX versions mismatch between oat file '%s' and the runtime "
"(Oat file: '%s', Runtime: '%s')",
oat_file.GetLocation().c_str(),
oat_apex_versions,
- Runtime::Current()->GetApexVersions().c_str());
+ apex_versions.c_str());
return false;
}
return true;
}
bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg) {
- return ValidateOatFile(oat_file, error_msg, ArrayRef<const std::string>(), ArrayRef<const int>());
+ DCHECK(Runtime::Current() != nullptr);
+ return ValidateOatFile(oat_file,
+ error_msg,
+ ArrayRef<const std::string>(),
+ ArrayRef<const int>(),
+ Runtime::Current()->GetApexVersions());
}
bool ImageSpace::ValidateOatFile(const OatFile& oat_file,
std::string* error_msg,
ArrayRef<const std::string> dex_filenames,
- ArrayRef<const int> dex_fds) {
- if (!ValidateApexVersions(oat_file, error_msg)) {
+ ArrayRef<const int> dex_fds,
+ const std::string& apex_versions) {
+ if (!ValidateApexVersions(oat_file, apex_versions, error_msg)) {
return false;
}
@@ -3695,7 +3484,7 @@ size_t ImageSpace::GetNumberOfComponents(ArrayRef<ImageSpace* const> image_space
return n;
}
-static size_t CheckAndCountBCPComponents(std::string_view oat_boot_class_path,
+size_t ImageSpace::CheckAndCountBCPComponents(std::string_view oat_boot_class_path,
ArrayRef<const std::string> boot_class_path,
/*out*/std::string* error_msg) {
// Check that the oat BCP is a prefix of current BCP locations and count components.
@@ -3727,110 +3516,6 @@ static size_t CheckAndCountBCPComponents(std::string_view oat_boot_class_path,
return component_count;
}
-bool ImageSpace::VerifyBootClassPathChecksums(std::string_view oat_checksums,
- std::string_view oat_boot_class_path,
- ArrayRef<const std::string> image_locations,
- ArrayRef<const std::string> boot_class_path_locations,
- ArrayRef<const std::string> boot_class_path,
- ArrayRef<const int> boot_class_path_fds,
- InstructionSet image_isa,
- /*out*/std::string* error_msg) {
- if (oat_checksums.empty() || oat_boot_class_path.empty()) {
- *error_msg = oat_checksums.empty() ? "Empty checksums." : "Empty boot class path.";
- return false;
- }
-
- DCHECK_EQ(boot_class_path_locations.size(), boot_class_path.size());
- size_t bcp_size =
- CheckAndCountBCPComponents(oat_boot_class_path, boot_class_path_locations, error_msg);
- if (bcp_size == static_cast<size_t>(-1)) {
- DCHECK(!error_msg->empty());
- return false;
- }
-
- size_t bcp_pos = 0u;
- if (StartsWith(oat_checksums, "i")) {
- // Use only the matching part of the BCP for validation. FDs are optional, so only pass the
- // sub-array if provided.
- ArrayRef<const int> bcp_fds = boot_class_path_fds.empty()
- ? ArrayRef<const int>()
- : boot_class_path_fds.SubArray(/*pos=*/ 0u, bcp_size);
- BootImageLayout layout(image_locations,
- boot_class_path.SubArray(/*pos=*/ 0u, bcp_size),
- boot_class_path_locations.SubArray(/*pos=*/ 0u, bcp_size),
- bcp_fds,
- /*boot_class_path_image_fds=*/ ArrayRef<const int>(),
- /*boot_class_path_vdex_fds=*/ ArrayRef<const int>(),
- /*boot_class_path_oat_fds=*/ ArrayRef<const int>());
- std::string primary_image_location = layout.GetPrimaryImageLocation();
- std::string system_filename;
- bool has_system = false;
- if (!FindImageFilename(primary_image_location.c_str(),
- image_isa,
- &system_filename,
- &has_system)) {
- *error_msg = StringPrintf("Unable to find image file for %s and %s",
- android::base::Join(image_locations, kComponentSeparator).c_str(),
- GetInstructionSetString(image_isa));
- return false;
- }
-
- DCHECK(has_system);
- if (!layout.ValidateFromSystem(image_isa, &oat_checksums, error_msg)) {
- return false;
- }
- bcp_pos = layout.GetNextBcpIndex();
- }
-
- for ( ; bcp_pos != bcp_size; ++bcp_pos) {
- static_assert(ImageSpace::kDexFileChecksumPrefix == 'd', "Format prefix check.");
- if (!StartsWith(oat_checksums, "d")) {
- *error_msg = StringPrintf("Missing dex checksums, expected %s to start with 'd'",
- std::string(oat_checksums).c_str());
- return false;
- }
- oat_checksums.remove_prefix(1u);
-
- const std::string& bcp_filename = boot_class_path[bcp_pos];
- std::vector<uint32_t> checksums;
- std::vector<std::string> dex_locations;
- const ArtDexFileLoader dex_file_loader;
- if (!dex_file_loader.GetMultiDexChecksums(bcp_filename.c_str(),
- &checksums,
- &dex_locations,
- error_msg)) {
- return false;
- }
- DCHECK(!checksums.empty());
- for (uint32_t checksum : checksums) {
- std::string dex_file_checksum = StringPrintf("/%08x", checksum);
- if (!StartsWith(oat_checksums, dex_file_checksum)) {
- *error_msg = StringPrintf(
- "Dex checksum mismatch for bootclasspath file %s, expected %s to start with %s",
- bcp_filename.c_str(),
- std::string(oat_checksums).c_str(),
- dex_file_checksum.c_str());
- return false;
- }
- oat_checksums.remove_prefix(dex_file_checksum.size());
- }
- if (bcp_pos + 1u != bcp_size) {
- if (!StartsWith(oat_checksums, ":")) {
- *error_msg = StringPrintf("Missing ':' separator at start of %s",
- std::string(oat_checksums).c_str());
- return false;
- }
- oat_checksums.remove_prefix(1u);
- }
- }
- if (!oat_checksums.empty()) {
- *error_msg = StringPrintf("Checksum too long, unexpected tail %s",
- std::string(oat_checksums).c_str());
- return false;
- }
- return true;
-}
-
bool ImageSpace::VerifyBootClassPathChecksums(
std::string_view oat_checksums,
std::string_view oat_boot_class_path,
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 8a93f2bad1..bf9cda23a4 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -17,13 +17,15 @@
#ifndef ART_RUNTIME_GC_SPACE_IMAGE_SPACE_H_
#define ART_RUNTIME_GC_SPACE_IMAGE_SPACE_H_
+#include "android-base/unique_fd.h"
+#include "base/array_ref.h"
#include "gc/accounting/space_bitmap.h"
#include "image.h"
+#include "runtime.h"
#include "space.h"
namespace art {
-template <typename T> class ArrayRef;
class DexFile;
enum class InstructionSet;
class OatFile;
@@ -239,18 +241,6 @@ class ImageSpace : public MemMapSpace {
// Returns the total number of components (jar files) associated with the image spaces.
static size_t GetNumberOfComponents(ArrayRef<gc::space::ImageSpace* const> image_spaces);
- // Returns whether the checksums are valid for the given boot class path,
- // image location and ISA (may differ from the ISA of an initialized Runtime).
- // The boot image and dex files do not need to be loaded in memory.
- static bool VerifyBootClassPathChecksums(std::string_view oat_checksums,
- std::string_view oat_boot_class_path,
- ArrayRef<const std::string> image_locations,
- ArrayRef<const std::string> boot_class_path_locations,
- ArrayRef<const std::string> boot_class_path,
- ArrayRef<const int> boot_class_path_fds,
- InstructionSet image_isa,
- /*out*/std::string* error_msg);
-
// Returns whether the oat checksums and boot class path description are valid
// for the given boot image spaces and boot class path. Used for boot image extensions.
static bool VerifyBootClassPathChecksums(
@@ -267,8 +257,10 @@ class ImageSpace : public MemMapSpace {
const std::string& image_location,
bool boot_image_extension = false);
- // Returns true if the APEX versions in the OAT file match the current APEX versions.
- static bool ValidateApexVersions(const OatFile& oat_file, std::string* error_msg);
+ // Returns true if the APEX versions in the OAT file match the given APEX versions.
+ static bool ValidateApexVersions(const OatFile& oat_file,
+ const std::string& apex_versions,
+ std::string* error_msg);
// Returns true if the dex checksums in the given oat file match the
// checksums of the original dex files on disk. This is intended to be used
@@ -279,17 +271,23 @@ class ImageSpace : public MemMapSpace {
// oat and odex file.
//
// This function is exposed for testing purposes.
+ //
+ // Calling this function requires an active runtime.
static bool ValidateOatFile(const OatFile& oat_file, std::string* error_msg);
// Same as above, but allows to use `dex_filenames` and `dex_fds` to find the dex files instead of
- // using the dex filenames in the header of the oat file. This overload is useful when the actual
- // dex filenames are different from what's in the header (e.g., when we run dex2oat on host), or
- // when the runtime can only access files through FDs (e.g., when we run dex2oat on target in a
- // restricted SELinux domain).
+ // using the dex filenames in the header of the oat file, and also takes `apex_versions` from the
+ // input. This overload is useful when the actual dex filenames are different from what's in the
+ // header (e.g., when we run dex2oat on host), when the runtime can only access files through FDs
+ // (e.g., when we run dex2oat on target in a restricted SELinux domain), or when there is no
+ // active runtime.
+ //
+ // Calling this function does not require an active runtime.
static bool ValidateOatFile(const OatFile& oat_file,
std::string* error_msg,
ArrayRef<const std::string> dex_filenames,
- ArrayRef<const int> dex_fds);
+ ArrayRef<const int> dex_fds,
+ const std::string& apex_versions);
// Return the end of the image which includes non-heap objects such as ArtMethods and ArtFields.
uint8_t* GetImageEnd() const {
@@ -303,6 +301,181 @@ class ImageSpace : public MemMapSpace {
void ReleaseMetadata() REQUIRES_SHARED(Locks::mutator_lock_);
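+  // Appends a boot image component checksum to `checksums` in the format
+  // "i;<component_count>/<checksum-in-hex>".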
+ static void AppendImageChecksum(uint32_t component_count,
+ uint32_t checksum,
+ /*inout*/ std::string* checksums);
+
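+  // Checks that `oat_boot_class_path` is a prefix of `boot_class_path` and
+  // returns the number of components it covers; returns -1 (as size_t) on
+  // mismatch.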
+ static size_t CheckAndCountBCPComponents(std::string_view oat_boot_class_path,
+ ArrayRef<const std::string> boot_class_path,
+ /*out*/ std::string* error_msg);
+
+ // Helper class to find the primary boot image and boot image extensions
+ // and determine the boot image layout.
+ class BootImageLayout {
+ public:
+    // Description of a "chunk" of the boot image, i.e. either the primary boot
+    // image or a boot image extension, used in conjunction with the boot class
+    // path to load boot image components.
+ struct ImageChunk {
+ std::string base_location;
+ std::string base_filename;
+ std::vector<std::string> profile_files;
+ size_t start_index;
+ uint32_t component_count;
+ uint32_t image_space_count;
+ uint32_t reservation_size;
+ uint32_t checksum;
+ uint32_t boot_image_component_count;
+ uint32_t boot_image_checksum;
+ uint32_t boot_image_size;
+
+ // The following file descriptors hold the memfd files for extensions compiled
+ // in memory and described by the above fields. We want to use them to mmap()
+ // the contents and then close them while treating the ImageChunk description
+ // as immutable (const), so make these fields explicitly mutable.
+ mutable android::base::unique_fd art_fd;
+ mutable android::base::unique_fd vdex_fd;
+ mutable android::base::unique_fd oat_fd;
+ };
+
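+    // If `apex_versions` is null, the APEX versions are taken from the active
+    // runtime (see GetApexVersions below).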
+ BootImageLayout(ArrayRef<const std::string> image_locations,
+ ArrayRef<const std::string> boot_class_path,
+ ArrayRef<const std::string> boot_class_path_locations,
+ ArrayRef<const int> boot_class_path_fds,
+ ArrayRef<const int> boot_class_path_image_fds,
+ ArrayRef<const int> boot_class_path_vdex_fds,
+ ArrayRef<const int> boot_class_path_oat_fds,
+ const std::string* apex_versions = nullptr)
+ : image_locations_(image_locations),
+ boot_class_path_(boot_class_path),
+ boot_class_path_locations_(boot_class_path_locations),
+ boot_class_path_fds_(boot_class_path_fds),
+ boot_class_path_image_fds_(boot_class_path_image_fds),
+ boot_class_path_vdex_fds_(boot_class_path_vdex_fds),
+ boot_class_path_oat_fds_(boot_class_path_oat_fds),
+ apex_versions_(GetApexVersions(apex_versions)) {}
+
+ std::string GetPrimaryImageLocation();
+
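+    // Loads the boot image layout for `image_isa` from the system image
+    // locations. When `allow_in_memory_compilation` is true, unusable
+    // components may be compiled in memory instead of failing outright.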
+ bool LoadFromSystem(InstructionSet image_isa,
+ bool allow_in_memory_compilation,
+ /*out*/ std::string* error_msg);
+
+ ArrayRef<const ImageChunk> GetChunks() const { return ArrayRef<const ImageChunk>(chunks_); }
+
+ uint32_t GetBaseAddress() const { return base_address_; }
+
+ size_t GetNextBcpIndex() const { return next_bcp_index_; }
+
+ size_t GetTotalComponentCount() const { return total_component_count_; }
+
+ size_t GetTotalReservationSize() const { return total_reservation_size_; }
+
+ private:
+ struct NamedComponentLocation {
+ std::string base_location;
+ size_t bcp_index;
+ std::vector<std::string> profile_filenames;
+ };
+
+ std::string ExpandLocationImpl(const std::string& location,
+ size_t bcp_index,
+ bool boot_image_extension) {
+ std::vector<std::string> expanded = ExpandMultiImageLocations(
+ ArrayRef<const std::string>(boot_class_path_).SubArray(bcp_index, 1u),
+ location,
+ boot_image_extension);
+ DCHECK_EQ(expanded.size(), 1u);
+ return expanded[0];
+ }
+
+ std::string ExpandLocation(const std::string& location, size_t bcp_index) {
+ if (bcp_index == 0u) {
+ DCHECK_EQ(location,
+ ExpandLocationImpl(location, bcp_index, /*boot_image_extension=*/false));
+ return location;
+ } else {
+ return ExpandLocationImpl(location, bcp_index, /*boot_image_extension=*/true);
+ }
+ }
+
+ std::string GetBcpComponentPath(size_t bcp_index) {
+ DCHECK_LE(bcp_index, boot_class_path_.size());
+ size_t bcp_slash_pos = boot_class_path_[bcp_index].rfind('/');
+ DCHECK_NE(bcp_slash_pos, std::string::npos);
+ return boot_class_path_[bcp_index].substr(0u, bcp_slash_pos + 1u);
+ }
+
+ bool VerifyImageLocation(ArrayRef<const std::string> components,
+ /*out*/ size_t* named_components_count,
+ /*out*/ std::string* error_msg);
+
+ bool MatchNamedComponents(
+ ArrayRef<const std::string> named_components,
+ /*out*/ std::vector<NamedComponentLocation>* named_component_locations,
+ /*out*/ std::string* error_msg);
+
+ bool ValidateBootImageChecksum(const char* file_description,
+ const ImageHeader& header,
+ /*out*/ std::string* error_msg);
+
+ bool ValidateHeader(const ImageHeader& header,
+ size_t bcp_index,
+ const char* file_description,
+ /*out*/ std::string* error_msg);
+
+ bool ValidateOatFile(const std::string& base_location,
+ const std::string& base_filename,
+ size_t bcp_index,
+ size_t component_count,
+ /*out*/ std::string* error_msg);
+
+ bool ReadHeader(const std::string& base_location,
+ const std::string& base_filename,
+ size_t bcp_index,
+ /*out*/ std::string* error_msg);
+
+ // Compiles a consecutive subsequence of bootclasspath dex files, whose contents are included in
+ // the profiles specified by `profile_filenames`, starting from `bcp_index`.
+ bool CompileBootclasspathElements(const std::string& base_location,
+ const std::string& base_filename,
+ size_t bcp_index,
+ const std::vector<std::string>& profile_filenames,
+ ArrayRef<const std::string> dependencies,
+ /*out*/ std::string* error_msg);
+
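+    // Shared implementation behind LoadFromSystem: resolves filenames via
+    // `filename_fn`, reads and validates image headers, and optionally falls
+    // back to in-memory compilation of components.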
+ template <typename FilenameFn>
+ bool Load(FilenameFn&& filename_fn,
+ bool allow_in_memory_compilation,
+ /*out*/ std::string* error_msg);
+
+    // This function prefers taking APEX versions from the input instead of from the runtime if
+    // possible, so that the layout can be used to validate oat files without an active runtime.
+ static const std::string& GetApexVersions(const std::string* apex_versions) {
+ if (apex_versions == nullptr) {
+ DCHECK(Runtime::Current() != nullptr);
+ return Runtime::Current()->GetApexVersions();
+ } else {
+ return *apex_versions;
+ }
+ }
+
+ ArrayRef<const std::string> image_locations_;
+ ArrayRef<const std::string> boot_class_path_;
+ ArrayRef<const std::string> boot_class_path_locations_;
+ ArrayRef<const int> boot_class_path_fds_;
+ ArrayRef<const int> boot_class_path_image_fds_;
+ ArrayRef<const int> boot_class_path_vdex_fds_;
+ ArrayRef<const int> boot_class_path_oat_fds_;
+
+ std::vector<ImageChunk> chunks_;
+ uint32_t base_address_ = 0u;
+ size_t next_bcp_index_ = 0u;
+ size_t total_component_count_ = 0u;
+ size_t total_reservation_size_ = 0u;
+ const std::string& apex_versions_;
+ };
+
protected:
// Tries to initialize an ImageSpace from the given image path, returning null on error.
//
@@ -342,7 +515,6 @@ class ImageSpace : public MemMapSpace {
friend class Space;
private:
- class BootImageLayout;
class BootImageLoader;
template <typename ReferenceVisitor>
class ClassTableVisitor;
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index 3a6d0e12e2..b3a591703b 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -321,56 +321,6 @@ TEST_F(DexoptTest, ValidateOatFile) {
EXPECT_FALSE(ImageSpace::ValidateOatFile(*oat, &error_msg));
}
-TEST_F(DexoptTest, Checksums) {
- Runtime* runtime = Runtime::Current();
- ASSERT_TRUE(runtime != nullptr);
- ASSERT_FALSE(runtime->GetHeap()->GetBootImageSpaces().empty());
-
- std::vector<std::string> bcp = runtime->GetBootClassPath();
- std::vector<std::string> bcp_locations = runtime->GetBootClassPathLocations();
- std::vector<const DexFile*> dex_files = runtime->GetClassLinker()->GetBootClassPath();
-
- std::string error_msg;
- auto create_and_verify = [&]() {
- std::string checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
- ArrayRef<gc::space::ImageSpace* const>(runtime->GetHeap()->GetBootImageSpaces()),
- ArrayRef<const DexFile* const>(dex_files));
- return gc::space::ImageSpace::VerifyBootClassPathChecksums(
- checksums,
- android::base::Join(bcp_locations, ':'),
- ArrayRef<const std::string>(runtime->GetImageLocations()),
- ArrayRef<const std::string>(bcp_locations),
- ArrayRef<const std::string>(bcp),
- /*boot_class_path_fds=*/ ArrayRef<const int>(),
- kRuntimeISA,
- &error_msg);
- };
-
- ASSERT_TRUE(create_and_verify()) << error_msg;
-
- std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
- for (const std::string& src : { GetDexSrc1(), GetDexSrc2() }) {
- std::vector<std::unique_ptr<const DexFile>> new_dex_files;
- const ArtDexFileLoader dex_file_loader;
- ASSERT_TRUE(dex_file_loader.Open(src.c_str(),
- src,
- /*verify=*/ true,
- /*verify_checksum=*/ false,
- &error_msg,
- &new_dex_files))
- << error_msg;
-
- bcp.push_back(src);
- bcp_locations.push_back(src);
- for (std::unique_ptr<const DexFile>& df : new_dex_files) {
- dex_files.push_back(df.get());
- opened_dex_files.push_back(std::move(df));
- }
-
- ASSERT_TRUE(create_and_verify()) << error_msg;
- }
-}
-
template <bool kImage, bool kRelocate>
class ImageSpaceLoadingTest : public CommonRuntimeTest {
protected:
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 50006568ca..59ab3f3214 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -38,7 +38,7 @@ class ZygoteSpace;
// A common parent of DlMallocSpace and RosAllocSpace.
class MallocSpace : public ContinuousMemMapAllocSpace {
public:
- typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+ using WalkCallback = void (*)(void *start, void *end, size_t num_bytes, void* callback_arg);
SpaceType GetType() const override {
return kSpaceTypeMallocSpace;
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 1463eb7d2a..27b9e9c367 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -46,7 +46,7 @@ static constexpr bool kCyclicRegionAllocation = kIsDebugBuild;
// A space that consists of equal-sized regions.
class RegionSpace final : public ContinuousMemMapAllocSpace {
public:
- typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+ using WalkCallback = void (*)(void *start, void *end, size_t num_bytes, void* callback_arg);
enum EvacMode {
kEvacModeNewlyAllocated,
diff --git a/runtime/gc/system_weak.h b/runtime/gc/system_weak.h
index ef85b3942f..77b9548211 100644
--- a/runtime/gc/system_weak.h
+++ b/runtime/gc/system_weak.h
@@ -48,7 +48,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
void Allow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
MutexLock mu(Thread::Current(), allow_disallow_lock_);
allow_new_system_weak_ = true;
new_weak_condition_.Broadcast(Thread::Current());
@@ -57,7 +57,7 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
void Disallow() override
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!allow_disallow_lock_) {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
MutexLock mu(Thread::Current(), allow_disallow_lock_);
allow_new_system_weak_ = false;
}
@@ -78,8 +78,8 @@ class SystemWeakHolder : public AbstractSystemWeakHolder {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(allow_disallow_lock_) {
// Wait for GC's sweeping to complete and allow new records
- while (UNLIKELY((!kUseReadBarrier && !allow_new_system_weak_) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
+ while (UNLIKELY((!gUseReadBarrier && !allow_new_system_weak_) ||
+ (gUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpointFromWeakRefAccess(&allow_disallow_lock_);
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index ca112972c2..4f552a6203 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -111,6 +111,7 @@ static bool CollectorDoesAllowOrBroadcast() {
CollectorType type = Runtime::Current()->GetHeap()->CurrentCollectorType();
switch (type) {
case CollectorType::kCollectorTypeCMS:
+ case CollectorType::kCollectorTypeCMC:
case CollectorType::kCollectorTypeCC:
case CollectorType::kCollectorTypeSS:
return true;
@@ -124,6 +125,7 @@ static bool CollectorDoesDisallow() {
CollectorType type = Runtime::Current()->GetHeap()->CurrentCollectorType();
switch (type) {
case CollectorType::kCollectorTypeCMS:
+ case CollectorType::kCollectorTypeCMC:
return true;
default:
@@ -149,7 +151,12 @@ TEST_F(SystemWeakTest, Keep) {
// Expect the holder to have been called.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
EXPECT_EQ(CollectorDoesDisallow() ? 1U : 0U, cswh.disallow_count_);
- EXPECT_EQ(1U, cswh.sweep_count_);
+  // Userfaultfd GC also uses SweepSystemWeaks for concurrent updates.
+  // TODO: Explore whether this can be reverted to unconditionally compare
+  // with 1 once concurrent updating of native roots is fully implemented in
+  // the userfaultfd GC.
+ size_t expected_sweep_count = gUseUserfaultfd ? 2U : 1U;
+ EXPECT_EQ(expected_sweep_count, cswh.sweep_count_);
// Expect the weak to not be cleared.
EXPECT_FALSE(cswh.Get().IsNull());
@@ -170,7 +177,12 @@ TEST_F(SystemWeakTest, Discard) {
// Expect the holder to have been called.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
EXPECT_EQ(CollectorDoesDisallow() ? 1U : 0U, cswh.disallow_count_);
- EXPECT_EQ(1U, cswh.sweep_count_);
+  // Userfaultfd GC also uses SweepSystemWeaks for concurrent updates.
+  // TODO: Explore whether this can be reverted to unconditionally compare
+  // with 1 once concurrent updating of native roots is fully implemented in
+  // the userfaultfd GC.
+ size_t expected_sweep_count = gUseUserfaultfd ? 2U : 1U;
+ EXPECT_EQ(expected_sweep_count, cswh.sweep_count_);
// Expect the weak to be cleared.
EXPECT_TRUE(cswh.Get().IsNull());
@@ -194,7 +206,12 @@ TEST_F(SystemWeakTest, Remove) {
// Expect the holder to have been called.
ASSERT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
ASSERT_EQ(CollectorDoesDisallow() ? 1U : 0U, cswh.disallow_count_);
- ASSERT_EQ(1U, cswh.sweep_count_);
+  // Userfaultfd GC also uses SweepSystemWeaks for concurrent updates.
+  // TODO: Explore whether this can be reverted to unconditionally compare
+  // with 1 once concurrent updating of native roots is fully implemented in
+  // the userfaultfd GC.
+ size_t expected_sweep_count = gUseUserfaultfd ? 2U : 1U;
+ EXPECT_EQ(expected_sweep_count, cswh.sweep_count_);
// Expect the weak to not be cleared.
ASSERT_FALSE(cswh.Get().IsNull());
@@ -209,7 +226,7 @@ TEST_F(SystemWeakTest, Remove) {
// Expectation: no change in the numbers.
EXPECT_EQ(CollectorDoesAllowOrBroadcast() ? 1U : 0U, cswh.allow_count_);
EXPECT_EQ(CollectorDoesDisallow() ? 1U : 0U, cswh.disallow_count_);
- EXPECT_EQ(1U, cswh.sweep_count_);
+ EXPECT_EQ(expected_sweep_count, cswh.sweep_count_);
}
} // namespace gc
diff --git a/runtime/gc/verification-inl.h b/runtime/gc/verification-inl.h
new file mode 100644
index 0000000000..1ef96e2954
--- /dev/null
+++ b/runtime/gc/verification-inl.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_VERIFICATION_INL_H_
+#define ART_RUNTIME_GC_VERIFICATION_INL_H_
+
+#include "verification.h"
+
+#include "mirror/class-inl.h"
+
+namespace art {
+namespace gc {
+
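+// Checks that the class of `klass` is a valid heap object whose class is
+// itself (i.e. the `Class` class), without validating the address of `klass`.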
+template <ReadBarrierOption kReadBarrierOption>
+bool Verification::IsValidClassUnchecked(mirror::Class* klass) const {
+ mirror::Class* k1 = klass->GetClass<kVerifyNone, kReadBarrierOption>();
+ if (!IsValidHeapObjectAddress(k1)) {
+ return false;
+ }
+  // `k1` should be the `Class` class; take its class again to verify.
+  // Note that this check may not be valid without an image space,
+  // since the `Class` class might be moved around by a moving GC.
+ mirror::Class* k2 = k1->GetClass<kVerifyNone, kReadBarrierOption>();
+ if (!IsValidHeapObjectAddress(k2)) {
+ return false;
+ }
+ return k1 == k2;
+}
+
+template <ReadBarrierOption kReadBarrierOption>
+bool Verification::IsValidClass(mirror::Class* klass) const {
+ if (!IsValidHeapObjectAddress(klass)) {
+ return false;
+ }
+ return IsValidClassUnchecked<kReadBarrierOption>(klass);
+}
+
+template <ReadBarrierOption kReadBarrierOption>
+bool Verification::IsValidObject(mirror::Object* obj) const {
+ if (!IsValidHeapObjectAddress(obj)) {
+ return false;
+ }
+ mirror::Class* klass = obj->GetClass<kVerifyNone, kReadBarrierOption>();
+ return IsValidClass(klass);
+}
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_VERIFICATION_INL_H_
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 9e0b8a2ff1..5790755bbe 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "verification.h"
+#include "verification-inl.h"
#include <iomanip>
#include <sstream>
@@ -29,23 +29,16 @@ namespace art {
namespace gc {
std::string Verification::DumpRAMAroundAddress(uintptr_t addr, uintptr_t bytes) const {
- const uintptr_t dump_start = addr - bytes;
- const uintptr_t dump_end = addr + bytes;
+ uintptr_t* dump_start = reinterpret_cast<uintptr_t*>(addr - bytes);
+ uintptr_t* dump_end = reinterpret_cast<uintptr_t*>(addr + bytes);
std::ostringstream oss;
- if (dump_start < dump_end &&
- IsAddressInHeapSpace(reinterpret_cast<const void*>(dump_start)) &&
- IsAddressInHeapSpace(reinterpret_cast<const void*>(dump_end - 1))) {
- oss << " adjacent_ram=";
- for (uintptr_t p = dump_start; p < dump_end; ++p) {
- if (p == addr) {
- // Marker of where the address is.
- oss << "|";
- }
- uint8_t* ptr = reinterpret_cast<uint8_t*>(p);
- oss << std::hex << std::setfill('0') << std::setw(2) << static_cast<uintptr_t>(*ptr);
+ oss << " adjacent_ram=";
+ for (const uintptr_t* p = dump_start; p < dump_end; ++p) {
+ if (p == reinterpret_cast<uintptr_t*>(addr)) {
+ // Marker of where the address is.
+ oss << "|";
}
- } else {
- oss << " <invalid address>";
+ oss << std::hex << std::setfill('0') << std::setw(sizeof(uintptr_t) * 2) << *p << " ";
}
return oss.str();
}
@@ -132,25 +125,6 @@ bool Verification::IsValidHeapObjectAddress(const void* addr, space::Space** out
return IsAligned<kObjectAlignment>(addr) && IsAddressInHeapSpace(addr, out_space);
}
-bool Verification::IsValidClass(const void* addr) const {
- if (!IsValidHeapObjectAddress(addr)) {
- return false;
- }
- mirror::Class* klass = reinterpret_cast<mirror::Class*>(const_cast<void*>(addr));
- mirror::Class* k1 = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
- if (!IsValidHeapObjectAddress(k1)) {
- return false;
- }
- // `k1` should be class class, take the class again to verify.
- // Note that this check may not be valid for the no image space since the class class might move
- // around from moving GC.
- mirror::Class* k2 = k1->GetClass<kVerifyNone, kWithoutReadBarrier>();
- if (!IsValidHeapObjectAddress(k2)) {
- return false;
- }
- return k1 == k2;
-}
-
using ObjectSet = std::set<mirror::Object*>;
using WorkQueue = std::deque<std::pair<mirror::Object*, std::string>>;
diff --git a/runtime/gc/verification.h b/runtime/gc/verification.h
index 6b456fd349..7a5d01a40a 100644
--- a/runtime/gc/verification.h
+++ b/runtime/gc/verification.h
@@ -19,6 +19,7 @@
#include "obj_ptr.h"
#include "offsets.h"
+#include "read_barrier_option.h"
namespace art {
@@ -50,7 +51,16 @@ class Verification {
bool fatal) const REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if the klass is likely to be a valid mirror::Class.
- bool IsValidClass(const void* klass) const REQUIRES_SHARED(Locks::mutator_lock_);
+  // Returns true if the class is a valid mirror::Class, possibly spuriously;
+  // the address of `klass` itself is not validated.
+ template <ReadBarrierOption kReadBarrierOption = kWithoutReadBarrier>
+ bool IsValidClassUnchecked(mirror::Class* klass) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ // Return true if the klass is likely to be a valid mirror::Class.
+ template <ReadBarrierOption kReadBarrierOption = kWithoutReadBarrier>
+ bool IsValidClass(mirror::Class* klass) const REQUIRES_SHARED(Locks::mutator_lock_);
+  // Return true if the obj is likely to be a valid object with a valid mirror::Class.
+ template <ReadBarrierOption kReadBarrierOption = kWithoutReadBarrier>
+ bool IsValidObject(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Does not allow null, checks alignment.
bool IsValidHeapObjectAddress(const void* addr, space::Space** out_space = nullptr) const
diff --git a/runtime/handle.cc b/runtime/handle.cc
index af77e2362b..e9c91135f5 100644
--- a/runtime/handle.cc
+++ b/runtime/handle.cc
@@ -42,6 +42,7 @@
namespace art {
+// NOLINTBEGIN(bugprone-macro-parentheses)
#define MAKE_OBJECT_FOR_GDB(ROOT, NAME, MIRROR) \
template <> MIRROR* Handle<MIRROR>::GetFromGdb() { \
return Get(); \
@@ -53,5 +54,6 @@ namespace art {
CLASS_MIRROR_ROOT_LIST(MAKE_OBJECT_FOR_GDB)
#undef MAKE_OBJECT_FOR_GDB
+// NOLINTEND(bugprone-macro-parentheses)
} // namespace art
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 6ec98ff4d8..8735dcfc81 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -55,6 +55,9 @@
#include "thread_list.h"
namespace art {
+extern "C" NO_RETURN void artDeoptimize(Thread* self);
+extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self);
+
namespace instrumentation {
constexpr bool kVerboseInstrumentation = false;
@@ -104,89 +107,69 @@ class InstallStubsClassVisitor : public ClassVisitor {
Instrumentation* const instrumentation_;
};
-InstrumentationStackPopper::InstrumentationStackPopper(Thread* self)
- : self_(self),
- instrumentation_(Runtime::Current()->GetInstrumentation()),
- pop_until_(0u) {}
-
-InstrumentationStackPopper::~InstrumentationStackPopper() {
- std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
- self_->GetInstrumentationStack();
- for (auto i = stack->begin(); i != stack->end() && i->first <= pop_until_;) {
- i = stack->erase(i);
- }
+Instrumentation::Instrumentation()
+ : current_force_deopt_id_(0),
+ instrumentation_stubs_installed_(false),
+ instrumentation_level_(InstrumentationLevel::kInstrumentNothing),
+ forced_interpret_only_(false),
+ have_method_entry_listeners_(false),
+ have_method_exit_listeners_(false),
+ have_method_unwind_listeners_(false),
+ have_dex_pc_listeners_(false),
+ have_field_read_listeners_(false),
+ have_field_write_listeners_(false),
+ have_exception_thrown_listeners_(false),
+ have_watched_frame_pop_listeners_(false),
+ have_branch_listeners_(false),
+ have_exception_handled_listeners_(false),
+ quick_alloc_entry_points_instrumentation_counter_(0),
+ alloc_entrypoints_instrumented_(false) {
}
-bool InstrumentationStackPopper::PopFramesTo(uintptr_t stack_pointer,
- MutableHandle<mirror::Throwable>& exception) {
- std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
- self_->GetInstrumentationStack();
- DCHECK(!self_->IsExceptionPending());
- if (!instrumentation_->HasMethodUnwindListeners()) {
- pop_until_ = stack_pointer;
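+// Reports a MethodUnwind event for each non-runtime method in `methods`,
+// stopping early if a listener throws a new exception. Returns true if all
+// callbacks completed without a new exception being thrown.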
+bool Instrumentation::ProcessMethodUnwindCallbacks(Thread* self,
+ std::queue<ArtMethod*>& methods,
+ MutableHandle<mirror::Throwable>& exception) {
+ DCHECK(!self->IsExceptionPending());
+ if (!HasMethodUnwindListeners()) {
return true;
}
if (kVerboseInstrumentation) {
LOG(INFO) << "Popping frames for exception " << exception->Dump();
}
// The instrumentation events expect the exception to be set.
- self_->SetException(exception.Get());
+ self->SetException(exception.Get());
bool new_exception_thrown = false;
- auto i = stack->upper_bound(pop_until_);
-
- // Now pop all frames until reaching stack_pointer, or a new exception is
- // thrown. Note that `stack_pointer` doesn't need to be a return PC address
- // (in fact the exception handling code passes the start of the frame where
- // the catch handler is).
- for (; i != stack->end() && i->first <= stack_pointer; i++) {
- const InstrumentationStackFrame& frame = i->second;
- ArtMethod* method = frame.method_;
- // Notify listeners of method unwind.
- // TODO: improve the dex_pc information here.
- uint32_t dex_pc = dex::kDexNoIndex;
+
+  // Process callbacks for all methods that would be unwound, stopping early if a new exception
+  // is thrown.
+ while (!methods.empty()) {
+ ArtMethod* method = methods.front();
+ methods.pop();
if (kVerboseInstrumentation) {
LOG(INFO) << "Popping for unwind " << method->PrettyMethod();
}
- if (!method->IsRuntimeMethod() && !frame.interpreter_entry_) {
- instrumentation_->MethodUnwindEvent(self_, frame.this_object_, method, dex_pc);
- new_exception_thrown = self_->GetException() != exception.Get();
- if (new_exception_thrown) {
- pop_until_ = i->first;
- break;
- }
+
+ if (method->IsRuntimeMethod()) {
+ continue;
+ }
+
+ // Notify listeners of method unwind.
+ // TODO: improve the dex_pc information here.
+ uint32_t dex_pc = dex::kDexNoIndex;
+ MethodUnwindEvent(self, method, dex_pc);
+ new_exception_thrown = self->GetException() != exception.Get();
+ if (new_exception_thrown) {
+ break;
}
}
- if (!new_exception_thrown) {
- pop_until_ = stack_pointer;
- }
- exception.Assign(self_->GetException());
- self_->ClearException();
+
+ exception.Assign(self->GetException());
+ self->ClearException();
if (kVerboseInstrumentation && new_exception_thrown) {
LOG(INFO) << "Did partial pop of frames due to new exception";
}
return !new_exception_thrown;
}
-Instrumentation::Instrumentation()
- : current_force_deopt_id_(0),
- instrumentation_stubs_installed_(false),
- instrumentation_level_(InstrumentationLevel::kInstrumentNothing),
- forced_interpret_only_(false),
- have_method_entry_listeners_(false),
- have_method_exit_listeners_(false),
- have_method_unwind_listeners_(false),
- have_dex_pc_listeners_(false),
- have_field_read_listeners_(false),
- have_field_write_listeners_(false),
- have_exception_thrown_listeners_(false),
- have_watched_frame_pop_listeners_(false),
- have_branch_listeners_(false),
- have_exception_handled_listeners_(false),
- deoptimized_methods_lock_(new ReaderWriterMutex("deoptimized methods lock",
- kGenericBottomLock)),
- quick_alloc_entry_points_instrumentation_counter_(0),
- alloc_entrypoints_instrumented_(false) {
-}
void Instrumentation::InstallStubsForClass(ObjPtr<mirror::Class> klass) {
if (!klass->IsResolved()) {
@@ -206,6 +189,7 @@ static bool CanHandleInitializationCheck(const void* code) {
return class_linker->IsQuickResolutionStub(code) ||
class_linker->IsQuickToInterpreterBridge(code) ||
class_linker->IsQuickGenericJniStub(code) ||
+ (code == interpreter::GetNterpWithClinitEntryPoint()) ||
(code == GetQuickInstrumentationEntryPoint());
}
@@ -227,6 +211,45 @@ static bool IsProxyInit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
method->GetDeclaringClass()->DescriptorEquals("Ljava/lang/reflect/Proxy;");
}
+// Returns true if we need an entry / exit stub to call entry hooks. JITed code
+// calls entry / exit hooks directly and doesn't need the stub.
+static bool CodeNeedsEntryExitStub(const void* entry_point, ArtMethod* method)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Proxy.init should never have entry/exit stubs.
+ if (IsProxyInit(method)) {
+ return false;
+ }
+
+  // In some tests the runtime isn't set up fully and hence the entry points
+  // could be nullptr.
+ if (entry_point == nullptr) {
+ return true;
+ }
+
+ // Code running in the interpreter doesn't need entry/exit stubs.
+ if (Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(entry_point)) {
+ return false;
+ }
+
+  // When JITing code for debuggable runtimes or when instrumentation is active, we generate
+  // the code to call method entry / exit hooks when required. Hence there is no need to
+  // update to the instrumentation entry point for JITed code in debuggable mode.
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr && jit->GetCodeCache()->ContainsPc(entry_point)) {
+ // If JITed code was compiled with instrumentation support we don't need entry / exit stub.
+ OatQuickMethodHeader* header = OatQuickMethodHeader::FromEntryPoint(entry_point);
+ return !CodeInfo::IsDebuggable(header->GetOptimizedCodeInfoPtr());
+ }
+
+ // GenericJni trampoline can handle entry / exit hooks in debuggable runtimes.
+ if (Runtime::Current()->GetClassLinker()->IsQuickGenericJniStub(entry_point) &&
+ Runtime::Current()->IsJavaDebuggable()) {
+ return false;
+ }
+
+ return true;
+}
+
static void UpdateEntryPoints(ArtMethod* method, const void* quick_code)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
@@ -244,6 +267,11 @@ static void UpdateEntryPoints(ArtMethod* method, const void* quick_code)
if (IsProxyInit(method)) {
CHECK_NE(quick_code, GetQuickInstrumentationEntryPoint());
}
+ const Instrumentation* instr = Runtime::Current()->GetInstrumentation();
+ if (instr->EntryExitStubsInstalled()) {
+ DCHECK(quick_code == GetQuickInstrumentationEntryPoint() ||
+ !CodeNeedsEntryExitStub(quick_code, method));
+ }
}
// If the method is from a boot image, don't dirty it if the entrypoint
// doesn't change.
@@ -252,64 +280,22 @@ static void UpdateEntryPoints(ArtMethod* method, const void* quick_code)
}
}
-bool Instrumentation::CodeNeedsEntryExitStub(const void* code, ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Proxy.init should never have entry/exit stubs.
- if (IsProxyInit(method)) {
- return false;
- }
-
- // In some tests runtime isn't setup fully and hence the entry points could
- // be nullptr.
- if (code == nullptr) {
- return true;
- }
-
- // Code running in the interpreter doesn't need entry/exit stubs.
- if (Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(code)) {
- return false;
- }
-
- // When jiting code for debuggable apps we generate the code to call method
- // entry / exit hooks when required. Hence it is not required to update
- // to instrumentation entry point for JITed code in debuggable mode.
- if (!Runtime::Current()->IsJavaDebuggable()) {
- return true;
- }
-
- // Native functions can have JITed entry points but we don't include support
- // for calling entry / exit hooks directly from the JITed code for native
- // functions. So we still have to install entry exit stubs for such cases.
- if (method->IsNative()) {
- return true;
- }
-
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr && jit->GetCodeCache()->ContainsPc(code)) {
- return false;
- }
- return true;
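+// Dex pc move events are needed only when there are dex pc listeners and the
+// method runs in the interpreter (or the thread is forced to interpret).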
+bool Instrumentation::NeedsDexPcEvents(ArtMethod* method, Thread* thread) {
+ return (InterpretOnly(method) || thread->IsForceInterpreter()) && HasDexPcListeners();
}
bool Instrumentation::InterpretOnly(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
return false;
}
- return InterpretOnly() ||
- IsDeoptimized(method) ||
- Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method);
+ return InterpretOnly() || IsDeoptimized(method);
}
-static bool CanUseAotCode(ArtMethod* method, const void* quick_code)
+static bool CanUseAotCode(const void* quick_code)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (quick_code == nullptr) {
return false;
}
- if (method->IsNative()) {
- // AOT code for native methods can always be used.
- return true;
- }
-
Runtime* runtime = Runtime::Current();
// For simplicity, we never use AOT code for debuggable.
if (runtime->IsJavaDebuggable()) {
@@ -345,7 +331,7 @@ static const void* GetOptimizedCodeFor(ArtMethod* method) REQUIRES_SHARED(Locks:
// In debuggable mode, we can only use AOT code for native methods.
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
const void* aot_code = method->GetOatMethodQuickCode(class_linker->GetImagePointerSize());
- if (CanUseAotCode(method, aot_code)) {
+ if (CanUseAotCode(aot_code)) {
return aot_code;
}
@@ -396,7 +382,12 @@ void Instrumentation::InitializeMethodsCode(ArtMethod* method, const void* aot_c
// stub only if we have compiled code or we can execute nterp, and the method needs a class
// initialization check.
if (aot_code != nullptr || method->IsNative() || CanUseNterp(method)) {
- UpdateEntryPoints(method, GetQuickResolutionStub());
+ if (kIsDebugBuild && CanUseNterp(method)) {
+ // Adds some test coverage for the nterp clinit entrypoint.
+ UpdateEntryPoints(method, interpreter::GetNterpWithClinitEntryPoint());
+ } else {
+ UpdateEntryPoints(method, GetQuickResolutionStub());
+ }
} else {
UpdateEntryPoints(method, GetQuickToInterpreterBridge());
}
@@ -404,7 +395,7 @@ void Instrumentation::InitializeMethodsCode(ArtMethod* method, const void* aot_c
}
// Use the provided AOT code if possible.
- if (CanUseAotCode(method, aot_code)) {
+ if (CanUseAotCode(aot_code)) {
UpdateEntryPoints(method, aot_code);
return;
}
@@ -482,17 +473,26 @@ void InstrumentationInstallStack(Thread* thread, void* arg, bool deopt_all_frame
instrumentation_exit_pc_(instrumentation_exit_pc),
reached_existing_instrumentation_frames_(false),
force_deopt_id_(force_deopt_id),
- deopt_all_frames_(deopt_all_frames) {}
+ deopt_all_frames_(deopt_all_frames),
+ runtime_methods_need_deopt_check_(false) {}
bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
- if (m == nullptr) {
+ if (m == nullptr || m->IsRuntimeMethod()) {
if (kVerboseInstrumentation) {
- LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
+ LOG(INFO) << " Skipping upcall / runtime method. Frame " << GetFrameId();
}
- return true; // Ignore upcalls.
+ return true; // Ignore upcalls and runtime methods.
}
if (GetCurrentQuickFrame() == nullptr) {
+      // Since we are updating instrumentation-related information, we have to recalculate
+      // NeedsDexPcEvents. For example, when a new method or thread is deoptimized, or when
+      // interpreter stubs are installed, NeedsDexPcEvents could change for the shadow frames
+      // on the stack. If we didn't update it here we would miss reporting dex pc events.
+ ShadowFrame* shadow_frame = GetCurrentShadowFrame();
+ DCHECK(shadow_frame != nullptr);
+ shadow_frame->SetNotifyDexPcMoveEvents(
+ Runtime::Current()->GetInstrumentation()->NeedsDexPcEvents(GetMethod(), GetThread()));
if (kVerboseInstrumentation) {
LOG(INFO) << "Pushing shadow frame method " << m->PrettyMethod();
}
@@ -507,11 +507,6 @@ void InstrumentationInstallStack(Thread* thread, void* arg, bool deopt_all_frame
auto it = instrumentation_stack_->find(GetReturnPcAddr());
CHECK(it != instrumentation_stack_->end());
const InstrumentationStackFrame& frame = it->second;
- if (m->IsRuntimeMethod()) {
- if (frame.interpreter_entry_) {
- return true;
- }
- }
// We've reached a frame which has already been installed with instrumentation exit stub.
// We should have already installed instrumentation or be interpreter on previous frames.
@@ -529,17 +524,24 @@ void InstrumentationInstallStack(Thread* thread, void* arg, bool deopt_all_frame
LOG(INFO) << "Ignoring already instrumented " << frame.Dump();
}
} else {
+ if (m->IsNative() && Runtime::Current()->IsJavaDebuggable()) {
+ // Native methods in debuggable runtimes don't use instrumentation stubs.
+ return true;
+ }
+
// If it is a JITed frame then just set the deopt bit if required
// otherwise continue
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
if (method_header != nullptr && method_header->HasShouldDeoptimizeFlag()) {
if (deopt_all_frames_) {
+ runtime_methods_need_deopt_check_ = true;
SetShouldDeoptimizeFlag(DeoptimizeFlagValue::kDebug);
}
return true;
}
CHECK_NE(return_pc, 0U);
- if (UNLIKELY(reached_existing_instrumentation_frames_ && !m->IsRuntimeMethod())) {
+ DCHECK(!m->IsRuntimeMethod());
+ if (UNLIKELY(reached_existing_instrumentation_frames_)) {
// We already saw an existing instrumentation frame so this should be a runtime-method
// inserted by the interpreter or runtime.
std::string thread_name;
@@ -550,21 +552,9 @@ void InstrumentationInstallStack(Thread* thread, void* arg, bool deopt_all_frame
<< " return_pc is " << std::hex << return_pc;
UNREACHABLE();
}
- if (m->IsRuntimeMethod()) {
- size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
- ArtMethod** caller_frame = reinterpret_cast<ArtMethod**>(
- reinterpret_cast<uint8_t*>(GetCurrentQuickFrame()) + frame_size);
- if (*caller_frame != nullptr && (*caller_frame)->IsNative()) {
- // Do not install instrumentation exit on return to JNI stubs.
- return true;
- }
- }
+
InstrumentationStackFrame instrumentation_frame(
- m->IsRuntimeMethod() ? nullptr : GetThisObject().Ptr(),
- m,
- return_pc,
- false,
- force_deopt_id_);
+ GetThisObject().Ptr(), m, return_pc, false, force_deopt_id_);
if (kVerboseInstrumentation) {
LOG(INFO) << "Pushing frame " << instrumentation_frame.Dump();
}
@@ -584,6 +574,7 @@ void InstrumentationInstallStack(Thread* thread, void* arg, bool deopt_all_frame
bool reached_existing_instrumentation_frames_;
uint64_t force_deopt_id_;
bool deopt_all_frames_;
+ bool runtime_methods_need_deopt_check_;
};
if (kVerboseInstrumentation) {
std::string thread_name;
@@ -601,6 +592,10 @@ void InstrumentationInstallStack(Thread* thread, void* arg, bool deopt_all_frame
deopt_all_frames);
visitor.WalkStack(true);
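+ // Record on the thread that at least one JITed frame was marked for
+ // deoptimization, so later checks know a deopt may be pending.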
+ if (visitor.runtime_methods_need_deopt_check_) {
+ thread->SetDeoptCheckRequired(true);
+ }
+
if (instrumentation->ShouldNotifyMethodEnterExitEvents()) {
// Create method enter events for all methods currently on the thread's stack. We only do this
// if we haven't already processed the method enter events.
@@ -611,6 +606,34 @@ void InstrumentationInstallStack(Thread* thread, void* arg, bool deopt_all_frame
thread->VerifyStack();
}
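+// Walks the given thread's stack and refreshes the cached NeedsDexPcEvents value on
+// every shadow frame, so interpreted frames start or stop reporting dex pc move events
+// after the set of listeners changes.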
+void UpdateNeedsDexPcEventsOnStack(Thread* thread) REQUIRES(Locks::mutator_lock_) {
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+
+ struct InstallStackVisitor final : public StackVisitor {
+ InstallStackVisitor(Thread* thread_in, Context* context)
+ : StackVisitor(thread_in, context, kInstrumentationStackWalk) {}
+
+ bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
+ ShadowFrame* shadow_frame = GetCurrentShadowFrame();
+ if (shadow_frame != nullptr) {
+ shadow_frame->SetNotifyDexPcMoveEvents(
+ Runtime::Current()->GetInstrumentation()->NeedsDexPcEvents(GetMethod(), GetThread()));
+ }
+ return true;
+ }
+ };
+
+ if (kVerboseInstrumentation) {
+ std::string thread_name;
+ thread->GetThreadName(thread_name);
+ LOG(INFO) << "Updating DexPcMoveEvents on shadow frames on stack " << thread_name;
+ }
+
+ std::unique_ptr<Context> context(Context::Create());
+ InstallStackVisitor visitor(thread, context.get());
+ visitor.WalkStack(true);
+}
+
void Instrumentation::InstrumentThreadStack(Thread* thread, bool force_deopt) {
instrumentation_stubs_installed_ = true;
InstrumentationInstallStack(thread, this, force_deopt);
@@ -622,14 +645,16 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
struct RestoreStackVisitor final : public StackVisitor {
- RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc,
+ RestoreStackVisitor(Thread* thread_in,
+ uintptr_t instrumentation_exit_pc,
Instrumentation* instrumentation)
: StackVisitor(thread_in, nullptr, kInstrumentationStackWalk),
thread_(thread_in),
instrumentation_exit_pc_(instrumentation_exit_pc),
instrumentation_(instrumentation),
instrumentation_stack_(thread_in->GetInstrumentationStack()),
- frames_removed_(0) {}
+ frames_removed_(0),
+ runtime_methods_need_deopt_check_(false) {}
bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (instrumentation_stack_->size() == 0) {
@@ -647,7 +672,13 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
if (kVerboseInstrumentation) {
LOG(INFO) << " Skipping upcall. Frame " << GetFrameId();
}
- return true; // Ignore upcalls.
+ return true; // Ignore upcalls and runtime methods.
+ }
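+      // Track whether any remaining JITed frame still has the debug deoptimization
+      // flag set; if none does, the thread no longer needs its deopt check.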
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ if (method_header != nullptr && method_header->HasShouldDeoptimizeFlag()) {
+ if (IsShouldDeoptimizeFlagForDebugSet()) {
+ runtime_methods_need_deopt_check_ = true;
+ }
}
auto it = instrumentation_stack_->find(GetReturnPcAddr());
if (it != instrumentation_stack_->end()) {
@@ -684,6 +715,7 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
Instrumentation* const instrumentation_;
std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* const instrumentation_stack_;
size_t frames_removed_;
+ bool runtime_methods_need_deopt_check_;
};
if (kVerboseInstrumentation) {
std::string thread_name;
@@ -698,6 +730,10 @@ static void InstrumentationRestoreStack(Thread* thread, void* arg)
reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc());
RestoreStackVisitor visitor(thread, instrumentation_exit_pc, instrumentation);
visitor.WalkStack(true);
+ DCHECK_IMPLIES(visitor.runtime_methods_need_deopt_check_, thread->IsDeoptCheckRequired());
+ if (!visitor.runtime_methods_need_deopt_check_) {
+ thread->SetDeoptCheckRequired(false);
+ }
CHECK_EQ(visitor.frames_removed_, stack->size());
stack->clear();
}
@@ -791,6 +827,12 @@ void Instrumentation::AddListener(InstrumentationListener* listener, uint32_t ev
exception_handled_listeners_,
listener,
&have_exception_handled_listeners_);
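+  // A DexPcMoved listener was just added; refresh the cached flag on the shadow
+  // frames of all threads so they start reporting dex pc events.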
+ if (HasEvent(kDexPcMoved, events)) {
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ for (Thread* thread : Runtime::Current()->GetThreadList()->GetList()) {
+ UpdateNeedsDexPcEventsOnStack(thread);
+ }
+ }
}
static void PotentiallyRemoveListenerFrom(Instrumentation::InstrumentationEvent event,
@@ -872,6 +914,12 @@ void Instrumentation::RemoveListener(InstrumentationListener* listener, uint32_t
exception_handled_listeners_,
listener,
&have_exception_handled_listeners_);
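+  // A DexPcMoved listener may just have been removed; refresh the cached flag on
+  // the shadow frames of all threads so they stop reporting dex pc events when no
+  // listeners remain.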
+ if (HasEvent(kDexPcMoved, events)) {
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ for (Thread* thread : Runtime::Current()->GetThreadList()->GetList()) {
+ UpdateNeedsDexPcEventsOnStack(thread);
+ }
+ }
}
Instrumentation::InstrumentationLevel Instrumentation::GetCurrentInstrumentationLevel() const {
@@ -1046,6 +1094,8 @@ std::string Instrumentation::EntryPointString(const void* code) {
return "obsolete";
} else if (code == interpreter::GetNterpEntryPoint()) {
return "nterp";
+ } else if (code == interpreter::GetNterpWithClinitEntryPoint()) {
+ return "nterp with clinit";
} else if (class_linker->IsQuickGenericJniStub(code)) {
return "generic jni";
} else if (Runtime::Current()->GetOatFileManager().ContainsPc(code)) {
@@ -1119,14 +1169,6 @@ bool Instrumentation::IsDeoptimizedMethod(ArtMethod* method) {
return deoptimized_methods_.find(method) != deoptimized_methods_.end();
}
-ArtMethod* Instrumentation::BeginDeoptimizedMethod() {
- if (deoptimized_methods_.empty()) {
- // Empty.
- return nullptr;
- }
- return *deoptimized_methods_.begin();
-}
-
bool Instrumentation::RemoveDeoptimizedMethod(ArtMethod* method) {
auto it = deoptimized_methods_.find(method);
if (it == deoptimized_methods_.end()) {
@@ -1136,10 +1178,6 @@ bool Instrumentation::RemoveDeoptimizedMethod(ArtMethod* method) {
return true;
}
-bool Instrumentation::IsDeoptimizedMethodsEmptyLocked() const {
- return deoptimized_methods_.empty();
-}
-
void Instrumentation::Deoptimize(ArtMethod* method) {
CHECK(!method->IsNative());
CHECK(!method->IsProxyMethod());
@@ -1147,7 +1185,7 @@ void Instrumentation::Deoptimize(ArtMethod* method) {
Thread* self = Thread::Current();
{
- WriterMutexLock mu(self, *GetDeoptimizedMethodsLock());
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
bool has_not_been_deoptimized = AddDeoptimizedMethod(method);
CHECK(has_not_been_deoptimized) << "Method " << ArtMethod::PrettyMethod(method)
<< " is already deoptimized";
@@ -1173,9 +1211,8 @@ void Instrumentation::Undeoptimize(ArtMethod* method) {
CHECK(!method->IsProxyMethod());
CHECK(method->IsInvokable());
- Thread* self = Thread::Current();
{
- WriterMutexLock mu(self, *GetDeoptimizedMethodsLock());
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
bool found_and_erased = RemoveDeoptimizedMethod(method);
CHECK(found_and_erased) << "Method " << ArtMethod::PrettyMethod(method)
<< " is not deoptimized";
@@ -1192,7 +1229,11 @@ void Instrumentation::Undeoptimize(ArtMethod* method) {
UpdateEntryPoints(method, GetQuickToInterpreterBridge());
} else if (NeedsClinitCheckBeforeCall(method) &&
!method->GetDeclaringClass()->IsVisiblyInitialized()) {
- UpdateEntryPoints(method, GetQuickResolutionStub());
+ if (EntryExitStubsInstalled()) {
+ UpdateEntryPoints(method, GetQuickInstrumentationEntryPoint());
+ } else {
+ UpdateEntryPoints(method, GetQuickResolutionStub());
+ }
} else {
UpdateEntryPoints(method, GetMaybeInstrumentedCodeForInvoke(method));
}
@@ -1204,29 +1245,26 @@ void Instrumentation::Undeoptimize(ArtMethod* method) {
}
bool Instrumentation::IsDeoptimizedMethodsEmpty() const {
- ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
return deoptimized_methods_.empty();
}
bool Instrumentation::IsDeoptimized(ArtMethod* method) {
DCHECK(method != nullptr);
- ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
return IsDeoptimizedMethod(method);
}
-
void Instrumentation::DisableDeoptimization(const char* key) {
// Remove any instrumentation support added for deoptimization.
ConfigureStubs(key, InstrumentationLevel::kInstrumentNothing);
+ Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
// Undeoptimized selected methods.
while (true) {
ArtMethod* method;
{
- ReaderMutexLock mu(Thread::Current(), *GetDeoptimizedMethodsLock());
- if (IsDeoptimizedMethodsEmptyLocked()) {
+ if (deoptimized_methods_.empty()) {
break;
}
- method = BeginDeoptimizedMethod();
+ method = *deoptimized_methods_.begin();
CHECK(method != nullptr);
}
Undeoptimize(method);
@@ -1270,10 +1308,14 @@ const void* Instrumentation::GetCodeForInvoke(ArtMethod* method) {
DCHECK(!method->IsProxyMethod()) << method->PrettyMethod();
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
const void* code = method->GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
- // If we don't have the instrumentation, the resolution stub, or the
- // interpreter as entrypoint, just return the current entrypoint, assuming
- // it's the most optimized.
+  // If the entrypoint is not the instrumentation entrypoint, the resolution stub, the
+  // interpreter bridge, or the nterp-with-clinit entrypoint, just return the current
+  // entrypoint, assuming it's the most optimized.
+ // We don't want to return the nterp with clinit entrypoint as it calls the
+ // resolution stub, and the resolution stub will call `GetCodeForInvoke` to know the actual
+ // code to invoke.
if (code != GetQuickInstrumentationEntryPoint() &&
+ code != interpreter::GetNterpWithClinitEntryPoint() &&
!class_linker->IsQuickResolutionStub(code) &&
!class_linker->IsQuickToInterpreterBridge(code)) {
return code;
@@ -1345,16 +1387,12 @@ template<> void Instrumentation::MethodExitEventImpl(Thread* thread,
}
void Instrumentation::MethodUnwindEvent(Thread* thread,
- ObjPtr<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc) const {
if (HasMethodUnwindListeners()) {
- Thread* self = Thread::Current();
- StackHandleScope<1> hs(self);
- Handle<mirror::Object> thiz(hs.NewHandle(this_object));
for (InstrumentationListener* listener : method_unwind_listeners_) {
if (listener != nullptr) {
- listener->MethodUnwind(thread, thiz, method, dex_pc);
+ listener->MethodUnwind(thread, method, dex_pc);
}
}
}
@@ -1489,7 +1527,7 @@ void Instrumentation::PushInstrumentationStackFrame(Thread* self,
if (!interpreter_entry) {
MethodEnterEvent(self, method);
if (self->IsExceptionPending()) {
- MethodUnwindEvent(self, h_this.Get(), method, 0);
+ MethodUnwindEvent(self, method, 0);
return;
}
}
@@ -1518,83 +1556,18 @@ DeoptimizationMethodType Instrumentation::GetDeoptimizationMethodType(ArtMethod*
return DeoptimizationMethodType::kDefault;
}
-// Try to get the shorty of a runtime method if it's an invocation stub.
-static char GetRuntimeMethodShorty(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
- char shorty = 'V';
- StackVisitor::WalkStack(
- [&shorty](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
- ArtMethod* m = stack_visitor->GetMethod();
- if (m == nullptr || m->IsRuntimeMethod()) {
- return true;
- }
- // The first Java method.
- if (m->IsNative()) {
- // Use JNI method's shorty for the jni stub.
- shorty = m->GetShorty()[0];
- } else if (m->IsProxyMethod()) {
- // Proxy method just invokes its proxied method via
- // art_quick_proxy_invoke_handler.
- shorty = m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty()[0];
- } else {
- const Instruction& instr = m->DexInstructions().InstructionAt(stack_visitor->GetDexPc());
- if (instr.IsInvoke()) {
- uint16_t method_index = static_cast<uint16_t>(instr.VRegB());
- const DexFile* dex_file = m->GetDexFile();
- if (interpreter::IsStringInit(dex_file, method_index)) {
- // Invoking string init constructor is turned into invoking
- // StringFactory.newStringFromChars() which returns a string.
- shorty = 'L';
- } else {
- shorty = dex_file->GetMethodShorty(method_index)[0];
- }
-
- } else {
- // It could be that a non-invoke opcode invokes a stub, which in turn
- // invokes Java code. In such cases, we should never expect a return
- // value from the stub.
- }
- }
- // Stop stack walking since we've seen a Java frame.
- return false;
- },
- thread,
- /* context= */ nullptr,
- art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
- return shorty;
-}
-
-JValue Instrumentation::GetReturnValue(
- Thread* self, ArtMethod* method, bool* is_ref, uint64_t* gpr_result, uint64_t* fpr_result) {
+JValue Instrumentation::GetReturnValue(ArtMethod* method,
+ bool* is_ref,
+ uint64_t* gpr_result,
+ uint64_t* fpr_result) {
uint32_t length;
const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
- char return_shorty;
// Runtime method does not call into MethodExitEvent() so there should not be
// suspension point below.
ScopedAssertNoThreadSuspension ants(__FUNCTION__, method->IsRuntimeMethod());
- if (method->IsRuntimeMethod()) {
- Runtime* runtime = Runtime::Current();
- if (method != runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForClinit) &&
- method != runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForSuspendCheck)) {
- // If the caller is at an invocation point and the runtime method is not
- // for clinit, we need to pass return results to the caller.
- // We need the correct shorty to decide whether we need to pass the return
- // result for deoptimization below.
- return_shorty = GetRuntimeMethodShorty(self);
- } else {
- // Some runtime methods such as allocations, unresolved field getters, etc.
- // have return value. We don't need to set return_value since MethodExitEvent()
- // below isn't called for runtime methods. Deoptimization doesn't need the
- // value either since the dex instruction will be re-executed by the
- // interpreter, except these two cases:
- // (1) For an invoke, which is handled above to get the correct shorty.
- // (2) For MONITOR_ENTER/EXIT, which cannot be re-executed since it's not
- // idempotent. However there is no return value for it anyway.
- return_shorty = 'V';
- }
- } else {
- return_shorty = method->GetInterfaceMethodIfProxy(pointer_size)->GetShorty(&length)[0];
- }
+ DCHECK(!method->IsRuntimeMethod());
+ char return_shorty = method->GetInterfaceMethodIfProxy(pointer_size)->GetShorty(&length)[0];
*is_ref = return_shorty == '[' || return_shorty == 'L';
JValue return_value;
@@ -1608,27 +1581,135 @@ JValue Instrumentation::GetReturnValue(
return return_value;
}
-bool Instrumentation::ShouldDeoptimizeMethod(Thread* self, const NthCallerVisitor& visitor) {
- bool should_deoptimize_frame = false;
- const OatQuickMethodHeader* header = visitor.GetCurrentOatQuickMethodHeader();
- if (header != nullptr && header->HasShouldDeoptimizeFlag()) {
- uint8_t should_deopt_flag = visitor.GetShouldDeoptimizeFlag();
- // DeoptimizeFlag could be set for debugging or for CHA invalidations.
- // Deoptimize here only if it was requested for debugging. CHA
- // invalidations are handled in the JITed code.
- if ((should_deopt_flag & static_cast<uint8_t>(DeoptimizeFlagValue::kDebug)) != 0) {
- should_deoptimize_frame = true;
- }
+bool Instrumentation::PushDeoptContextIfNeeded(Thread* self,
+ DeoptimizationMethodType deopt_type,
+ bool is_ref,
+ const JValue& return_value)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (self->IsExceptionPending()) {
+ return false;
+ }
+
+ ArtMethod** sp = self->GetManagedStack()->GetTopQuickFrame();
+ DCHECK(sp != nullptr && (*sp)->IsRuntimeMethod());
+ if (!ShouldDeoptimizeCaller(self, sp)) {
+ return false;
+ }
+
+  // TODO(mythria): The current deopt behaviour is to just re-execute the
+  // alloc instruction, so we don't need the return value. For instrumentation-
+  // related deopts we don't actually need to re-execute and could reuse the
+  // result we got here. Since this is a debug-only feature it is not very
+  // important, but consider reusing the result in the future.
+ self->PushDeoptimizationContext(
+ return_value, is_ref, nullptr, /* from_code= */ false, deopt_type);
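+  // The deoptimization exception is a sentinel recognized by the unwind code; it
+  // triggers the actual deoptimization instead of delivering a real exception.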
+ self->SetException(Thread::GetDeoptimizationException());
+ return true;
+}
+
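+// Unlike PushDeoptContextIfNeeded, this performs the deoptimization immediately
+// via artDeoptimize when the caller requires it.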
+void Instrumentation::DeoptimizeIfNeeded(Thread* self,
+ ArtMethod** sp,
+ DeoptimizationMethodType type,
+ JValue return_value,
+ bool is_reference) {
+ if (self->IsAsyncExceptionPending() || ShouldDeoptimizeCaller(self, sp)) {
+ self->PushDeoptimizationContext(return_value,
+ is_reference,
+ nullptr,
+ /* from_code= */ false,
+ type);
+ artDeoptimize(self);
}
- return (visitor.caller != nullptr) &&
- (InterpreterStubsInstalled() || IsDeoptimized(visitor.caller) ||
+}
+
+bool Instrumentation::NeedsSlowInterpreterForMethod(Thread* self, ArtMethod* method) {
+ return (method != nullptr) &&
+ (InterpreterStubsInstalled() ||
+ IsDeoptimized(method) ||
self->IsForceInterpreter() ||
// NB Since structurally obsolete compiled methods might have the offsets of
// methods/fields compiled in we need to go back to interpreter whenever we hit
// them.
- visitor.caller->GetDeclaringClass()->IsObsoleteObject() ||
- Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller) ||
- should_deoptimize_frame);
+ method->GetDeclaringClass()->IsObsoleteObject() ||
+ Dbg::IsForcedInterpreterNeededForUpcall(self, method));
+}
+
+bool Instrumentation::ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) {
+  // When exit stubs aren't installed we don't need to check for any
+  // instrumentation-related deoptimizations.
+  // TODO(mythria): Once we remove instrumentation stubs, rename AreExitStubsInstalled. It is
+  // used to check if any instrumentation-related work needs to be done, for example calling
+  // method entry / exit hooks or checking for instrumentation-related deopts in suspend points.
+ if (!AreExitStubsInstalled()) {
+ return false;
+ }
+
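+  // The caller's frame sits just above the runtime method's frame; read the caller
+  // method and its return pc from their fixed offsets within that frame.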
+ ArtMethod* runtime_method = *sp;
+ DCHECK(runtime_method->IsRuntimeMethod());
+ QuickMethodFrameInfo frame_info = Runtime::Current()->GetRuntimeMethodFrameInfo(runtime_method);
+
+ uintptr_t caller_sp = reinterpret_cast<uintptr_t>(sp) + frame_info.FrameSizeInBytes();
+ ArtMethod* caller = *(reinterpret_cast<ArtMethod**>(caller_sp));
+ uintptr_t caller_pc_addr = reinterpret_cast<uintptr_t>(sp) + frame_info.GetReturnPcOffset();
+ uintptr_t caller_pc = *reinterpret_cast<uintptr_t*>(caller_pc_addr);
+
+ return ShouldDeoptimizeCaller(self, caller, caller_pc, caller_sp);
+}
+
+bool Instrumentation::ShouldDeoptimizeCaller(Thread* self, const NthCallerVisitor& visitor) {
+ uintptr_t caller_sp = reinterpret_cast<uintptr_t>(visitor.GetCurrentQuickFrame());
+ // When the caller isn't executing quick code there is no need to deoptimize.
+ if (visitor.GetCurrentOatQuickMethodHeader() == nullptr) {
+ return false;
+ }
+ return ShouldDeoptimizeCaller(self, visitor.GetOuterMethod(), visitor.caller_pc, caller_sp);
+}
+
+bool Instrumentation::ShouldDeoptimizeCaller(Thread* self,
+ ArtMethod* caller,
+ uintptr_t caller_pc,
+ uintptr_t caller_sp) {
+ if (caller == nullptr ||
+ caller->IsNative() ||
+ caller_pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+    // If caller_pc is QuickInstrumentationExit then deoptimization will be handled by the
+    // instrumentation exit trampoline, so we don't need to handle it here.
+    // We otherwise need to check for a deoptimization because, when a redefinition happens,
+    // it is not safe to use compiled code since field offsets might change. Native methods
+    // don't embed any field offsets, so no deoptimization check is needed for them.
+    // If the caller is null we don't need to do anything. This can happen when the caller
+    // is being interpreted by the switch interpreter (when called from
+    // artQuickToInterpreterBridge), during shutdown, or during early startup.
+ return false;
+ }
+
+ bool needs_deopt = NeedsSlowInterpreterForMethod(self, caller);
+
+  // Non-java-debuggable apps don't support redefinition, so there is no need to check if
+  // the frame needs to be deoptimized. We also want to avoid fetching the method header
+  // when we need a deopt anyway.
+ if (Runtime::Current()->IsJavaDebuggable() && !needs_deopt) {
+ const OatQuickMethodHeader* header = caller->GetOatQuickMethodHeader(caller_pc);
+ if (header != nullptr && header->HasShouldDeoptimizeFlag()) {
+ DCHECK(header->IsOptimized());
+ uint8_t* should_deopt_flag_addr =
+ reinterpret_cast<uint8_t*>(caller_sp) + header->GetShouldDeoptimizeFlagOffset();
+ if ((*should_deopt_flag_addr & static_cast<uint8_t>(DeoptimizeFlagValue::kDebug)) != 0) {
+ needs_deopt = true;
+ }
+ }
+ }
+
+ if (needs_deopt) {
+ if (!Runtime::Current()->IsAsyncDeoptimizeable(caller, caller_pc)) {
+ LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
+ << caller->PrettyMethod();
+ return false;
+ }
+ return true;
+ }
+
+ return false;
}
TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
@@ -1653,19 +1734,19 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
self->VerifyStack();
ArtMethod* method = instrumentation_frame.method_;
+ DCHECK(!method->IsRuntimeMethod());
bool is_ref;
- JValue return_value = GetReturnValue(self, method, &is_ref, gpr_result, fpr_result);
+ JValue return_value = GetReturnValue(method, &is_ref, gpr_result, fpr_result);
StackHandleScope<1> hs(self);
MutableHandle<mirror::Object> res(hs.NewHandle<mirror::Object>(nullptr));
if (is_ref) {
// Take a handle to the return value so we won't lose it if we suspend.
- // FIXME: The `is_ref` is often guessed wrong, so even object aligment
- // assertion would fail for some tests. See b/204766614 .
- // DCHECK_ALIGNED(return_value.GetL(), kObjectAlignment);
+ DCHECK_ALIGNED(return_value.GetL(), kObjectAlignment);
res.Assign(return_value.GetL());
}
- if (!method->IsRuntimeMethod() && !instrumentation_frame.interpreter_entry_) {
+ if (!instrumentation_frame.interpreter_entry_) {
+ DCHECK(!method->IsRuntimeMethod());
// Note that sending the event may change the contents of *return_pc_addr.
MethodExitEvent(self, instrumentation_frame.method_, OptionalFrame{}, return_value);
}
@@ -1677,57 +1758,61 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
// Check if we forced all threads to deoptimize in the time between this frame being created and
// now.
bool should_deoptimize_frame = instrumentation_frame.force_deopt_id_ != current_force_deopt_id_;
- bool deoptimize = ShouldDeoptimizeMethod(self, visitor) || should_deoptimize_frame;
+ bool deoptimize = ShouldDeoptimizeCaller(self, visitor) || should_deoptimize_frame;
if (is_ref) {
// Restore the return value if it's a reference since it might have moved.
*reinterpret_cast<mirror::Object**>(gpr_result) = res.Get();
}
- if (deoptimize && Runtime::Current()->IsAsyncDeoptimizeable(*return_pc_addr)) {
- if (kVerboseInstrumentation) {
- LOG(INFO) << "Deoptimizing "
- << visitor.caller->PrettyMethod()
- << " by returning from "
- << method->PrettyMethod()
- << " with result "
- << std::hex << return_value.GetJ() << std::dec
- << " in "
- << *self;
- }
- DeoptimizationMethodType deopt_method_type = GetDeoptimizationMethodType(method);
- self->PushDeoptimizationContext(return_value,
- is_ref,
- /* exception= */ nullptr,
- /* from_code= */ false,
- deopt_method_type);
- return GetTwoWordSuccessValue(*return_pc_addr,
- reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
- } else {
- if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc_addr)) {
- VLOG(deopt) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
- << " at PC " << reinterpret_cast<void*>(*return_pc_addr);
- }
- if (kVerboseInstrumentation) {
- LOG(INFO) << "Returning from " << method->PrettyMethod()
- << " to PC " << reinterpret_cast<void*>(*return_pc_addr);
+
+ if (deoptimize) {
+    // NthCallerVisitor also takes inlined frames into consideration, so visitor.caller may point
+    // to an inlined function. We need the actual method corresponding to return_pc_addr to check
+    // whether the method is deoptimizable, so fetch the outer method.
+ if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.GetOuterMethod(), *return_pc_addr)) {
+ if (kVerboseInstrumentation) {
+ LOG(INFO) << "Deoptimizing "
+ << visitor.caller->PrettyMethod()
+ << " by returning from "
+ << method->PrettyMethod()
+ << " with result "
+ << std::hex << return_value.GetJ() << std::dec
+ << " in "
+ << *self;
+ }
+ DeoptimizationMethodType deopt_method_type = GetDeoptimizationMethodType(method);
+ self->PushDeoptimizationContext(return_value,
+ is_ref,
+ /* exception= */ nullptr,
+ /* from_code= */ false,
+ deopt_method_type);
+ return GetTwoWordSuccessValue(
+ *return_pc_addr, reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
+ } else {
+ VLOG(deopt) << "Got a deoptimization request on un-deoptimizable "
+ << visitor.caller->PrettyMethod() << " at PC "
+ << reinterpret_cast<void*>(*return_pc_addr);
}
- return GetTwoWordSuccessValue(0, *return_pc_addr);
}
+
+ if (kVerboseInstrumentation) {
+ LOG(INFO) << "Returning from " << method->PrettyMethod() << " to PC "
+ << reinterpret_cast<void*>(*return_pc_addr);
+ }
+ return GetTwoWordSuccessValue(0, *return_pc_addr);
}
-uintptr_t Instrumentation::PopFramesForDeoptimization(Thread* self, uintptr_t pop_until) const {
+uintptr_t Instrumentation::PopInstrumentationStackUntil(Thread* self, uintptr_t pop_until) const {
std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
self->GetInstrumentationStack();
// Pop all instrumentation frames below `pop_until`.
uintptr_t return_pc = 0u;
for (auto i = stack->begin(); i != stack->end() && i->first <= pop_until;) {
- auto e = i;
- ++i;
if (kVerboseInstrumentation) {
- LOG(INFO) << "Popping for deoptimization " << e->second.method_->PrettyMethod();
+ LOG(INFO) << "Popping for deoptimization " << i->second.method_->PrettyMethod();
}
- return_pc = e->second.return_pc_;
- stack->erase(e);
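+    // erase() returns an iterator to the next element, keeping the traversal
+    // valid while frames are removed.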
+ return_pc = i->second.return_pc_;
+ i = stack->erase(i);
}
return return_pc;
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index c811935e9d..23c433e66b 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -23,6 +23,7 @@
#include <list>
#include <memory>
#include <optional>
+#include <queue>
#include <unordered_set>
#include "arch/instruction_set.h"
@@ -31,6 +32,7 @@
#include "base/macros.h"
#include "base/safe_map.h"
#include "gc_root.h"
+#include "jvalue.h"
#include "offsets.h"
namespace art {
@@ -92,7 +94,6 @@ struct InstrumentationListener {
// Call-back for when a method is popped due to an exception throw. A method will either cause a
// MethodExited call-back or a MethodUnwind call-back when its activation is removed.
virtual void MethodUnwind(Thread* thread,
- Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) = 0;
@@ -221,8 +222,7 @@ class Instrumentation {
// Calls UndeoptimizeEverything which may visit class linker classes through ConfigureStubs.
void DisableDeoptimization(const char* key)
- REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
- REQUIRES(!GetDeoptimizedMethodsLock());
+ REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);
bool AreAllMethodsDeoptimized() const {
return InterpreterStubsInstalled();
@@ -233,52 +233,44 @@ class Instrumentation {
void DeoptimizeEverything(const char* key)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
REQUIRES(!Locks::thread_list_lock_,
- !Locks::classlinker_classes_lock_,
- !GetDeoptimizedMethodsLock());
+ !Locks::classlinker_classes_lock_);
// Executes everything with compiled code (or interpreter if there is no code). May visit class
// linker classes through ConfigureStubs.
void UndeoptimizeEverything(const char* key)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
REQUIRES(!Locks::thread_list_lock_,
- !Locks::classlinker_classes_lock_,
- !GetDeoptimizedMethodsLock());
+ !Locks::classlinker_classes_lock_);
// Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static
// method (except a class initializer) set to the resolution trampoline will be deoptimized only
// once its declaring class is initialized.
- void Deoptimize(ArtMethod* method)
- REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !GetDeoptimizedMethodsLock());
+ void Deoptimize(ArtMethod* method) REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);
// Undeoptimze the method by restoring its entrypoints. Nevertheless, a static method
// (except a class initializer) set to the resolution trampoline will be updated only once its
// declaring class is initialized.
- void Undeoptimize(ArtMethod* method)
- REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !GetDeoptimizedMethodsLock());
+ void Undeoptimize(ArtMethod* method) REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);
// Indicates whether the method has been deoptimized so it is executed with the interpreter.
- bool IsDeoptimized(ArtMethod* method)
- REQUIRES(!GetDeoptimizedMethodsLock()) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsDeoptimized(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
// Indicates if any method needs to be deoptimized. This is used to avoid walking the stack to
// determine if a deoptimization is required.
- bool IsDeoptimizedMethodsEmpty() const
- REQUIRES(!GetDeoptimizedMethodsLock()) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsDeoptimizedMethodsEmpty() const REQUIRES_SHARED(Locks::mutator_lock_);
// Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
void EnableMethodTracing(const char* key,
bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
REQUIRES(!Locks::thread_list_lock_,
- !Locks::classlinker_classes_lock_,
- !GetDeoptimizedMethodsLock());
+ !Locks::classlinker_classes_lock_);
// Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
void DisableMethodTracing(const char* key)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
REQUIRES(!Locks::thread_list_lock_,
- !Locks::classlinker_classes_lock_,
- !GetDeoptimizedMethodsLock());
+ !Locks::classlinker_classes_lock_);
void InstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
@@ -300,11 +292,11 @@ class Instrumentation {
// Update the code of a method respecting any installed stubs.
void UpdateMethodsCode(ArtMethod* method, const void* new_code)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update the code of a native method to a JITed stub.
void UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* new_code)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the code that we can execute for an invoke including from the JIT.
const void* GetCodeForInvoke(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -383,6 +375,20 @@ class Instrumentation {
return have_exception_handled_listeners_;
}
+  // Returns whether dex pc events need to be reported for the specified method.
+  // These events are reported when DexPcListeners are installed and at least one of the
+  // following conditions holds:
+  // 1. The method is deoptimized. This is done when there is a breakpoint on the method.
+  // 2. The thread is deoptimized. This is used when single-stepping a single thread.
+  // 3. Interpreter stubs are installed. In this case no additional information is maintained
+  //    about which methods need dex pc move events. This is usually used for features which
+  //    need them for several methods across threads or need expensive processing, so it is
+  //    OK not to optimize this case further.
+  // DexPcListeners are installed when there is a breakpoint on any method or when
+  // single-stepping any thread. They are removed when the last breakpoint is removed. See
+  // AddListener and RemoveListener for more details.
+ bool NeedsDexPcEvents(ArtMethod* method, Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);
+
bool NeedsSlowInterpreterForListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_field_read_listeners_ ||
have_field_write_listeners_ ||
@@ -413,7 +419,6 @@ class Instrumentation {
// Inform listeners that a method has been exited due to an exception.
void MethodUnwindEvent(Thread* thread,
- ObjPtr<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -479,12 +484,35 @@ class Instrumentation {
void ExceptionHandledEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
REQUIRES_SHARED(Locks::mutator_lock_);
- JValue GetReturnValue(Thread* self,
- ArtMethod* method,
- bool* is_ref,
- uint64_t* gpr_result,
- uint64_t* fpr_result) REQUIRES_SHARED(Locks::mutator_lock_);
- bool ShouldDeoptimizeMethod(Thread* self, const NthCallerVisitor& visitor)
+ JValue GetReturnValue(ArtMethod* method, bool* is_ref, uint64_t* gpr_result, uint64_t* fpr_result)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool PushDeoptContextIfNeeded(Thread* self,
+ DeoptimizationMethodType deopt_type,
+ bool is_ref,
+ const JValue& result) REQUIRES_SHARED(Locks::mutator_lock_);
+ void DeoptimizeIfNeeded(Thread* self,
+ ArtMethod** sp,
+ DeoptimizationMethodType type,
+ JValue result,
+ bool is_ref) REQUIRES_SHARED(Locks::mutator_lock_);
+  // TODO(mythria): Replace the uses of the ShouldDeoptimizeCaller overload that takes a visitor
+  // with one that doesn't need to walk the stack. This is used on method exits to check if the
+  // caller needs a deoptimization.
+ bool ShouldDeoptimizeCaller(Thread* self, const NthCallerVisitor& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+  // Returns whether the caller of the runtime method requires a deoptimization. This checks
+  // both whether the method itself requires a deopt and whether this particular frame needs a
+  // deopt because of a class redefinition.
+ bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_);
+ // This is a helper function used by the two variants of ShouldDeoptimizeCaller.
+ // Remove this once ShouldDeoptimizeCaller is updated not to use NthCallerVisitor.
+ bool ShouldDeoptimizeCaller(Thread* self,
+ ArtMethod* caller,
+ uintptr_t caller_pc,
+ uintptr_t caller_sp) REQUIRES_SHARED(Locks::mutator_lock_);
+  // Returns whether the specified method requires a deoptimization. This doesn't account for
+  // whether a particular stack frame involving this method requires one.
+ bool NeedsSlowInterpreterForMethod(Thread* self, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
// Called when an instrumented method is entered. The intended link register (lr) is saved so
@@ -510,19 +538,17 @@ class Instrumentation {
uintptr_t* return_pc_addr,
uint64_t* gpr_result,
uint64_t* fpr_result)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
+ REQUIRES_SHARED(Locks::mutator_lock_);
- // Pops nframes instrumentation frames from the current thread. Returns the return pc for the last
- // instrumentation frame that's popped.
- uintptr_t PopFramesForDeoptimization(Thread* self, uintptr_t stack_pointer) const
+  // Pops instrumentation frames from the current thread up to the specified stack_pointer.
+  // Returns the return pc of the last instrumentation frame popped.
+ uintptr_t PopInstrumentationStackUntil(Thread* self, uintptr_t stack_pointer) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Call back for configure stubs.
- void InstallStubsForClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!GetDeoptimizedMethodsLock());
+ void InstallStubsForClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
- void InstallStubsForMethod(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
+ void InstallStubsForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
// Install instrumentation exit stub on every method of the stack of the given thread.
// This is used by:
@@ -548,6 +574,11 @@ class Instrumentation {
return alloc_entrypoints_instrumented_;
}
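+  // Reports MethodUnwind events for the queued methods while the given `exception`
+  // is being unwound.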
+ bool ProcessMethodUnwindCallbacks(Thread* self,
+ std::queue<ArtMethod*>& methods,
+ MutableHandle<mirror::Throwable>& exception)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
InstrumentationLevel GetCurrentInstrumentationLevel() const;
private:
@@ -555,11 +586,6 @@ class Instrumentation {
// False otherwise.
bool RequiresInstrumentationInstallation(InstrumentationLevel new_level) const;
- // Returns true if we need entry exit stub to call entry hooks. JITed code
- // directly call entry / exit hooks and don't need the stub.
- static bool CodeNeedsEntryExitStub(const void* code, ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_);
-
// Update the current instrumentation_level_.
void UpdateInstrumentationLevel(InstrumentationLevel level);
@@ -570,12 +596,10 @@ class Instrumentation {
// becomes the highest instrumentation level required by a client.
void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
- REQUIRES(!GetDeoptimizedMethodsLock(),
- !Locks::thread_list_lock_,
+ REQUIRES(!Locks::thread_list_lock_,
!Locks::classlinker_classes_lock_);
void UpdateStubs() REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
- REQUIRES(!GetDeoptimizedMethodsLock(),
- !Locks::thread_list_lock_,
+ REQUIRES(!Locks::thread_list_lock_,
!Locks::classlinker_classes_lock_);
// If there are no pending deoptimizations restores the stack to the normal state by updating the
@@ -619,22 +643,11 @@ class Instrumentation {
REQUIRES_SHARED(Locks::mutator_lock_);
// Read barrier-aware utility functions for accessing deoptimized_methods_
- bool AddDeoptimizedMethod(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(GetDeoptimizedMethodsLock());
- bool IsDeoptimizedMethod(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
- bool RemoveDeoptimizedMethod(ArtMethod* method)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(GetDeoptimizedMethodsLock());
- ArtMethod* BeginDeoptimizedMethod()
- REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
- bool IsDeoptimizedMethodsEmptyLocked() const
- REQUIRES_SHARED(Locks::mutator_lock_, GetDeoptimizedMethodsLock());
+ bool AddDeoptimizedMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
+ bool IsDeoptimizedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool RemoveDeoptimizedMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
void UpdateMethodsCodeImpl(ArtMethod* method, const void* new_code)
- REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
-
- ReaderWriterMutex* GetDeoptimizedMethodsLock() const {
- return deoptimized_methods_lock_.get();
- }
+ REQUIRES_SHARED(Locks::mutator_lock_);
// A counter that's incremented every time a DeoptimizeAllFrames. We check each
// InstrumentationStackFrames creation id against this number and if they differ we deopt even if
@@ -718,8 +731,7 @@ class Instrumentation {
// The set of methods being deoptimized (by the debugger) which must be executed with interpreter
// only.
- mutable std::unique_ptr<ReaderWriterMutex> deoptimized_methods_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
- std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(GetDeoptimizedMethodsLock());
+ std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(Locks::mutator_lock_);
// Current interpreter handler table. This is updated each time the thread state flags are
// modified.
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index b0a81b6472..d58fb4a5be 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -77,7 +77,6 @@ class TestInstrumentationListener final : public instrumentation::Instrumentatio
}
void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED,
- Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
override REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -387,7 +386,7 @@ class InstrumentationTest : public CommonRuntimeTest {
break;
}
case instrumentation::Instrumentation::kMethodUnwind:
- instr->MethodUnwindEvent(self, obj, method, dex_pc);
+ instr->MethodUnwindEvent(self, method, dex_pc);
break;
case instrumentation::Instrumentation::kDexPcMoved:
instr->DexPcMovedEvent(self, obj, method, dex_pc);
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index f587d0170f..10b2d65f45 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -190,8 +190,8 @@ void InternTable::WaitUntilAccessible(Thread* self) {
{
ScopedThreadSuspension sts(self, ThreadState::kWaitingWeakGcRootRead);
MutexLock mu(self, *Locks::intern_table_lock_);
- while ((!kUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
- (kUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
+ while ((!gUseReadBarrier && weak_root_state_ == gc::kWeakRootStateNoReadsOrWrites) ||
+ (gUseReadBarrier && !self->GetWeakRefAccessEnabled())) {
weak_intern_condition_.Wait(self);
}
}
@@ -218,7 +218,7 @@ ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s,
if (strong != nullptr) {
return strong;
}
- if (kUseReadBarrier ? self->GetWeakRefAccessEnabled()
+ if (gUseReadBarrier ? self->GetWeakRefAccessEnabled()
: weak_root_state_ != gc::kWeakRootStateNoReadsOrWrites) {
break;
}
@@ -230,7 +230,7 @@ ObjPtr<mirror::String> InternTable::Insert(ObjPtr<mirror::String> s,
auto h = hs.NewHandleWrapper(&s);
WaitUntilAccessible(self);
}
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
CHECK_EQ(weak_root_state_, gc::kWeakRootStateNormal);
} else {
CHECK(self->GetWeakRefAccessEnabled());
@@ -405,7 +405,10 @@ void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
if (new_object == nullptr) {
it = set->erase(it);
} else {
- *it = GcRoot<mirror::String>(new_object->AsString());
+        // Don't use AsString as it performs an IsString check in debug builds which,
+        // in the case of userfaultfd GC, may be called while the object's content
+        // isn't there yet.
+ *it = GcRoot<mirror::String>(ObjPtr<mirror::String>::DownCast(new_object));
++it;
}
}
@@ -426,7 +429,7 @@ void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) {
}
void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
weak_root_state_ = new_state;
if (new_state != gc::kWeakRootStateNoReadsOrWrites) {
weak_intern_condition_.Broadcast(Thread::Current());
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 38c94abf06..e6fb221b10 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -255,6 +255,7 @@ static JValue ExecuteSwitch(Thread* self,
}
}
+NO_STACK_PROTECTOR
static inline JValue Execute(
Thread* self,
const CodeItemDataAccessor& accessor,
@@ -265,15 +266,45 @@ static inline JValue Execute(
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
+  // We cache the result of NeedsDexPcEvents in the shadow frame so, for better performance, we
+  // don't need to call NeedsDexPcEvents on every instruction. NeedsDexPcEvents only gets
+  // updated asynchronously in a SuspendAll scope, and any existing shadow frames are updated
+  // with the new value. So it is safe to cache it here.
+ shadow_frame.SetNotifyDexPcMoveEvents(
+ Runtime::Current()->GetInstrumentation()->NeedsDexPcEvents(shadow_frame.GetMethod(), self));
+
if (LIKELY(!from_deoptimize)) { // Entering the method, but not via deoptimization.
if (kIsDebugBuild) {
CHECK_EQ(shadow_frame.GetDexPC(), 0u);
self->AssertNoPendingException();
}
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
ArtMethod *method = shadow_frame.GetMethod();
- if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
+    // If we are allowed to leave the interpreter and JITed code is available, execute it.
+ if (!stay_in_interpreter && !self->IsForceInterpreter() && !shadow_frame.GetForcePopFrame()) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr) {
+ jit->MethodEntered(self, shadow_frame.GetMethod());
+ if (jit->CanInvokeCompiledCode(method)) {
+ JValue result;
+
+ // Pop the shadow frame before calling into compiled code.
+ self->PopShadowFrame();
+ // Calculate the offset of the first input reg. The input registers are in the high regs.
+ // It's ok to access the code item here since JIT code will have been touched by the
+ // interpreter and compiler already.
+ uint16_t arg_offset = accessor.RegistersSize() - accessor.InsSize();
+ ArtInterpreterToCompiledCodeBridge(self, nullptr, &shadow_frame, arg_offset, &result);
+ // Push the shadow frame back as the caller will expect it.
+ self->PushShadowFrame(&shadow_frame);
+
+ return result;
+ }
+ }
+ }
+
+ instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+ if (UNLIKELY(instrumentation->HasMethodEntryListeners() || shadow_frame.GetForcePopFrame())) {
instrumentation->MethodEnterEvent(self, method);
if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
// The caller will retry this invoke or ignore the result. Just return immediately without
@@ -286,7 +317,6 @@ static inline JValue Execute(
}
if (UNLIKELY(self->IsExceptionPending())) {
instrumentation->MethodUnwindEvent(self,
- shadow_frame.GetThisObject(accessor.InsSize()),
method,
0);
JValue ret = JValue();
@@ -298,28 +328,6 @@ static inline JValue Execute(
return ret;
}
}
-
- if (!stay_in_interpreter && !self->IsForceInterpreter()) {
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- jit->MethodEntered(self, shadow_frame.GetMethod());
- if (jit->CanInvokeCompiledCode(method)) {
- JValue result;
-
- // Pop the shadow frame before calling into compiled code.
- self->PopShadowFrame();
- // Calculate the offset of the first input reg. The input registers are in the high regs.
- // It's ok to access the code item here since JIT code will have been touched by the
- // interpreter and compiler already.
- uint16_t arg_offset = accessor.RegistersSize() - accessor.InsSize();
- ArtInterpreterToCompiledCodeBridge(self, nullptr, &shadow_frame, arg_offset, &result);
- // Push the shadow frame back as the caller will expect it.
- self->PushShadowFrame(&shadow_frame);
-
- return result;
- }
- }
- }
}
ArtMethod* method = shadow_frame.GetMethod();
@@ -366,7 +374,7 @@ void EnterInterpreterFromInvoke(Thread* self,
num_ins = accessor.InsSize();
} else if (!method->IsInvokable()) {
self->EndAssertNoThreadSuspension(old_cause);
- method->ThrowInvocationTimeError();
+ method->ThrowInvocationTimeError(receiver);
return;
} else {
DCHECK(method->IsNative()) << method->PrettyMethod();
@@ -476,6 +484,7 @@ void EnterInterpreterFromDeoptimize(Thread* self,
const uint32_t dex_pc = shadow_frame->GetDexPC();
uint32_t new_dex_pc = dex_pc;
if (UNLIKELY(self->IsExceptionPending())) {
+ DCHECK(self->GetException() != Thread::GetDeoptimizationException());
// If we deoptimize from the QuickExceptionHandler, we already reported the exception throw
// event to the instrumentation. Skip throw listeners for the first frame. The deopt check
// should happen after the throw listener is called as throw listener can trigger a
@@ -514,7 +523,7 @@ void EnterInterpreterFromDeoptimize(Thread* self,
new_dex_pc = dex_pc + instr->SizeInCodeUnits();
} else if (instr->IsInvoke()) {
DCHECK(deopt_method_type == DeoptimizationMethodType::kDefault);
- if (IsStringInit(instr, shadow_frame->GetMethod())) {
+ if (IsStringInit(*instr, shadow_frame->GetMethod())) {
uint16_t this_obj_vreg = GetReceiverRegisterForStringInit(instr);
// Move the StringFactory.newStringFromChars() result into the register representing
// "this object" when invoking the string constructor in the original dex instruction.
@@ -569,6 +578,7 @@ void EnterInterpreterFromDeoptimize(Thread* self,
ret_val->SetJ(value.GetJ());
}
+NO_STACK_PROTECTOR
JValue EnterInterpreterFromEntryPoint(Thread* self, const CodeItemDataAccessor& accessor,
ShadowFrame* shadow_frame) {
DCHECK_EQ(self, Thread::Current());
@@ -585,6 +595,7 @@ JValue EnterInterpreterFromEntryPoint(Thread* self, const CodeItemDataAccessor&
return Execute(self, accessor, *shadow_frame, JValue());
}
+NO_STACK_PROTECTOR
void ArtInterpreterToInterpreterBridge(Thread* self,
const CodeItemDataAccessor& accessor,
ShadowFrame* shadow_frame,
diff --git a/runtime/interpreter/interpreter_cache-inl.h b/runtime/interpreter/interpreter_cache-inl.h
index cea8157d26..269f5fa9ab 100644
--- a/runtime/interpreter/interpreter_cache-inl.h
+++ b/runtime/interpreter/interpreter_cache-inl.h
@@ -37,9 +37,9 @@ inline void InterpreterCache::Set(Thread* self, const void* key, size_t value) {
DCHECK(self->GetInterpreterCache() == this) << "Must be called from owning thread";
// For simplicity, only update the cache if weak ref accesses are enabled. If
- // they are disabled, this means the GC is processing the cache, and is
+ // they are disabled, this means the CC GC could be processing the cache, and
// reading it concurrently.
- if (kUseReadBarrier && self->GetWeakRefAccessEnabled()) {
+ if (!gUseReadBarrier || self->GetWeakRefAccessEnabled()) {
data_[IndexOf(key)] = Entry{key, value};
}
}
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
index c57d0233a6..8714bc613c 100644
--- a/runtime/interpreter/interpreter_cache.h
+++ b/runtime/interpreter/interpreter_cache.h
@@ -47,7 +47,7 @@ class Thread;
class ALIGNED(16) InterpreterCache {
public:
// Aligned since we load the whole entry in single assembly instruction.
- typedef std::pair<const void*, size_t> Entry ALIGNED(2 * sizeof(size_t));
+ using Entry ALIGNED(2 * sizeof(size_t)) = std::pair<const void*, size_t>;
// 2x size increase/decrease corresponds to ~0.5% interpreter performance change.
// Value of 256 has around 75% cache hit rate.
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index c8a87c1d75..a9d473b3ba 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -185,7 +185,6 @@ bool MoveToExceptionHandler(Thread* self,
// Exception is not caught by the current method. We will unwind to the
// caller. Notify any instrumentation listener.
instrumentation->MethodUnwindEvent(self,
- shadow_frame.GetThisObject(),
shadow_frame.GetMethod(),
shadow_frame.GetDexPC());
}
@@ -243,7 +242,8 @@ static ALWAYS_INLINE bool DoCallCommon(ArtMethod* called_method,
JValue* result,
uint16_t number_of_inputs,
uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- uint32_t vregC) REQUIRES_SHARED(Locks::mutator_lock_);
+ uint32_t vregC,
+ bool string_init) REQUIRES_SHARED(Locks::mutator_lock_);
template <bool is_range>
ALWAYS_INLINE void CopyRegisters(ShadowFrame& caller_frame,
@@ -255,6 +255,7 @@ ALWAYS_INLINE void CopyRegisters(ShadowFrame& caller_frame,
// END DECLARATIONS.
+NO_STACK_PROTECTOR
void ArtInterpreterToCompiledCodeBridge(Thread* self,
ArtMethod* caller,
ShadowFrame* shadow_frame,
@@ -1213,15 +1214,8 @@ static inline bool DoCallCommon(ArtMethod* called_method,
JValue* result,
uint16_t number_of_inputs,
uint32_t (&arg)[Instruction::kMaxVarArgRegs],
- uint32_t vregC) {
- bool string_init = false;
- // Replace calls to String.<init> with equivalent StringFactory call.
- if (UNLIKELY(called_method->GetDeclaringClass()->IsStringClass()
- && called_method->IsConstructor())) {
- called_method = WellKnownClasses::StringInitToStringFactory(called_method);
- string_init = true;
- }
-
+ uint32_t vregC,
+ bool string_init) {
// Compute method information.
CodeItemDataAccessor accessor(called_method->DexInstructionData());
// Number of registers for the callee's call frame.
@@ -1411,8 +1405,14 @@ static inline bool DoCallCommon(ArtMethod* called_method,
}
template<bool is_range, bool do_assignability_check>
-bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data, JValue* result) {
+NO_STACK_PROTECTOR
+bool DoCall(ArtMethod* called_method,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ bool is_string_init,
+ JValue* result) {
// Argument word count.
const uint16_t number_of_inputs =
(is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
@@ -1429,8 +1429,14 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
}
return DoCallCommon<is_range, do_assignability_check>(
- called_method, self, shadow_frame,
- result, number_of_inputs, arg, vregC);
+ called_method,
+ self,
+ shadow_frame,
+ result,
+ number_of_inputs,
+ arg,
+ vregC,
+ is_string_init);
}
template <bool is_range, bool do_access_check, bool transaction_active>
@@ -1557,9 +1563,12 @@ void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
template REQUIRES_SHARED(Locks::mutator_lock_) \
- bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
+ bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, \
+ Thread* self, \
ShadowFrame& shadow_frame, \
- const Instruction* inst, uint16_t inst_data, \
+ const Instruction* inst, \
+ uint16_t inst_data, \
+ bool string_init, \
JValue* result)
EXPLICIT_DO_CALL_TEMPLATE_DECL(false, false);
EXPLICIT_DO_CALL_TEMPLATE_DECL(false, true);
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 0b91120c58..49d7e649ea 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -20,7 +20,6 @@
#include "android-base/macros.h"
#include "instrumentation.h"
#include "interpreter.h"
-#include "interpreter_intrinsics.h"
#include "transaction.h"
#include <math.h>
@@ -126,8 +125,13 @@ void RecordArrayElementsInTransaction(ObjPtr<mirror::Array> array, int32_t count
// DoFastInvoke and DoInvokeVirtualQuick functions.
// Returns true on success, otherwise throws an exception and returns false.
template<bool is_range, bool do_assignability_check>
-bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data, JValue* result);
+bool DoCall(ArtMethod* called_method,
+ Thread* self,
+ ShadowFrame& shadow_frame,
+ const Instruction* inst,
+ uint16_t inst_data,
+ bool string_init,
+ JValue* result);
// Called by the switch interpreter to know if we can stay in it.
bool ShouldStayInSwitchInterpreter(ArtMethod* method)
@@ -220,7 +224,7 @@ static inline ALWAYS_INLINE void PerformNonStandardReturn(
// Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
// Returns true on success, otherwise throws an exception and returns false.
-template<InvokeType type, bool is_range, bool do_access_check, bool is_mterp>
+template<InvokeType type, bool is_range, bool do_access_check>
static ALWAYS_INLINE bool DoInvoke(Thread* self,
ShadowFrame& shadow_frame,
const Instruction* inst,
@@ -231,63 +235,19 @@ static ALWAYS_INLINE bool DoInvoke(Thread* self,
if (UNLIKELY(self->ObserveAsyncException())) {
return false;
}
- const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
- const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
+ const uint32_t vregC = is_range ? inst->VRegC_3rc() : inst->VRegC_35c();
+ ObjPtr<mirror::Object> obj = type == kStatic ? nullptr : shadow_frame.GetVRegReference(vregC);
ArtMethod* sf_method = shadow_frame.GetMethod();
-
- // Try to find the method in small thread-local cache first (only used when
- // nterp is not used as mterp and nterp use the cache in an incompatible way).
- InterpreterCache* tls_cache = self->GetInterpreterCache();
- size_t tls_value;
- ArtMethod* resolved_method;
- if (!IsNterpSupported() && LIKELY(tls_cache->Get(self, inst, &tls_value))) {
- resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
- } else {
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- constexpr ClassLinker::ResolveMode resolve_mode =
- do_access_check ? ClassLinker::ResolveMode::kCheckICCEAndIAE
- : ClassLinker::ResolveMode::kNoChecks;
- resolved_method = class_linker->ResolveMethod<resolve_mode>(self, method_idx, sf_method, type);
- if (UNLIKELY(resolved_method == nullptr)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- }
- if (!IsNterpSupported()) {
- tls_cache->Set(self, inst, reinterpret_cast<size_t>(resolved_method));
- }
- }
-
- // Null pointer check and virtual method resolution.
- ObjPtr<mirror::Object> receiver =
- (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
- ArtMethod* called_method;
- called_method = FindMethodToCall<type, do_access_check>(
- method_idx, resolved_method, &receiver, sf_method, self);
- if (UNLIKELY(called_method == nullptr)) {
- CHECK(self->IsExceptionPending());
- result->SetJ(0);
- return false;
- }
- if (UNLIKELY(!called_method->IsInvokable())) {
- called_method->ThrowInvocationTimeError();
+ bool string_init = false;
+ ArtMethod* called_method = FindMethodToCall<type>(self, sf_method, &obj, *inst, &string_init);
+ if (called_method == nullptr) {
+ DCHECK(self->IsExceptionPending());
result->SetJ(0);
return false;
}
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (is_mterp && !is_range && called_method->IsIntrinsic()) {
- if (MterpHandleIntrinsic(&shadow_frame, called_method, inst, inst_data,
- shadow_frame.GetResultRegister())) {
- if (jit != nullptr && sf_method != nullptr) {
- jit->NotifyInterpreterToCompiledCodeTransition(self, sf_method);
- }
- return !self->IsExceptionPending();
- }
- }
-
- return DoCall<is_range, do_access_check>(called_method, self, shadow_frame, inst, inst_data,
- result);
+ return DoCall<is_range, do_access_check>(
+ called_method, self, shadow_frame, inst, inst_data, string_init, result);
}
static inline ObjPtr<mirror::MethodHandle> ResolveMethodHandle(Thread* self,
@@ -755,34 +715,6 @@ void ArtInterpreterToCompiledCodeBridge(Thread* self,
uint16_t arg_offset,
JValue* result);
-static inline bool IsStringInit(const DexFile* dex_file, uint32_t method_idx)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- const dex::MethodId& method_id = dex_file->GetMethodId(method_idx);
- const char* class_name = dex_file->StringByTypeIdx(method_id.class_idx_);
- const char* method_name = dex_file->GetMethodName(method_id);
-  // Instead of calling ResolveMethod(), which has a suspend point and can trigger
- // GC, look up the method symbolically.
- // Compare method's class name and method name against string init.
- // It's ok since it's not allowed to create your own java/lang/String.
- // TODO: verify that assumption.
- if ((strcmp(class_name, "Ljava/lang/String;") == 0) &&
- (strcmp(method_name, "<init>") == 0)) {
- return true;
- }
- return false;
-}
-
-static inline bool IsStringInit(const Instruction* instr, ArtMethod* caller)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (instr->Opcode() == Instruction::INVOKE_DIRECT ||
- instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) {
- uint16_t callee_method_idx = (instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) ?
- instr->VRegB_3rc() : instr->VRegB_35c();
- return IsStringInit(caller->GetDexFile(), callee_method_idx);
- }
- return false;
-}
-
// Set string value created from StringFactory.newStringFromXXX() into all aliases of
// StringFactory.newEmptyString().
void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
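With the symbolic IsStringInit() helpers deleted, String.<init> detection now happens once during method resolution (inside FindMethodToCall, per the DoInvoke hunk above) and the result is threaded through DoCall as the new string_init parameter. The caller-side pattern, adapted from the updated unstarted_runtime_test.cc later in this change:

ArtMethod* factory = WellKnownClasses::StringInitToStringFactory(init_method);
interpreter::DoCall<false, false>(factory,
                                  self,
                                  *shadow_frame,
                                  Instruction::At(inst_data),
                                  inst_data[0],
                                  /* string_init= */ true,
                                  &result);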
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
deleted file mode 100644
index c8344bc760..0000000000
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ /dev/null
@@ -1,678 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "interpreter/interpreter_intrinsics.h"
-
-#include "dex/dex_instruction.h"
-#include "intrinsics_enum.h"
-#include "interpreter/interpreter_common.h"
-
-namespace art {
-namespace interpreter {
-
-
-#define BINARY_INTRINSIC(name, op, get1, get2, set) \
-static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result_register) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
- inst->GetVarArgs(arg, inst_data); \
- result_register->set(op(shadow_frame->get1, shadow_frame->get2)); \
- return true; \
-}
-
-#define BINARY_II_INTRINSIC(name, op, set) \
- BINARY_INTRINSIC(name, op, GetVReg(arg[0]), GetVReg(arg[1]), set)
-
-#define BINARY_JJ_INTRINSIC(name, op, set) \
- BINARY_INTRINSIC(name, op, GetVRegLong(arg[0]), GetVRegLong(arg[2]), set)
-
-#define BINARY_JI_INTRINSIC(name, op, set) \
- BINARY_INTRINSIC(name, op, GetVRegLong(arg[0]), GetVReg(arg[2]), set)
-
-#define UNARY_INTRINSIC(name, op, get, set) \
-static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result_register) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
- inst->GetVarArgs(arg, inst_data); \
- result_register->set(op(shadow_frame->get(arg[0]))); \
- return true; \
-}
-
-
-// java.lang.Integer.reverse(I)I
-UNARY_INTRINSIC(MterpIntegerReverse, ReverseBits32, GetVReg, SetI);
-
-// java.lang.Integer.reverseBytes(I)I
-UNARY_INTRINSIC(MterpIntegerReverseBytes, BSWAP, GetVReg, SetI);
-
-// java.lang.Integer.bitCount(I)I
-UNARY_INTRINSIC(MterpIntegerBitCount, POPCOUNT, GetVReg, SetI);
-
-// java.lang.Integer.compare(II)I
-BINARY_II_INTRINSIC(MterpIntegerCompare, Compare, SetI);
-
-// java.lang.Integer.highestOneBit(I)I
-UNARY_INTRINSIC(MterpIntegerHighestOneBit, HighestOneBitValue, GetVReg, SetI);
-
-// java.lang.Integer.lowestOneBit(I)I
-UNARY_INTRINSIC(MterpIntegerLowestOneBit, LowestOneBitValue, GetVReg, SetI);
-
-// java.lang.Integer.numberOfLeadingZeros(I)I
-UNARY_INTRINSIC(MterpIntegerNumberOfLeadingZeros, JAVASTYLE_CLZ, GetVReg, SetI);
-
-// java.lang.Integer.numberOfTrailingZeros(I)I
-UNARY_INTRINSIC(MterpIntegerNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVReg, SetI);
-
-// java.lang.Integer.rotateRight(II)I
-BINARY_II_INTRINSIC(MterpIntegerRotateRight, (Rot<int32_t, false>), SetI);
-
-// java.lang.Integer.rotateLeft(II)I
-BINARY_II_INTRINSIC(MterpIntegerRotateLeft, (Rot<int32_t, true>), SetI);
-
-// java.lang.Integer.signum(I)I
-UNARY_INTRINSIC(MterpIntegerSignum, Signum, GetVReg, SetI);
-
-// java.lang.Long.reverse(J)J
-UNARY_INTRINSIC(MterpLongReverse, ReverseBits64, GetVRegLong, SetJ);
-
-// java.lang.Long.reverseBytes(J)J
-UNARY_INTRINSIC(MterpLongReverseBytes, BSWAP, GetVRegLong, SetJ);
-
-// java.lang.Long.bitCount(J)I
-UNARY_INTRINSIC(MterpLongBitCount, POPCOUNT, GetVRegLong, SetI);
-
-// java.lang.Long.compare(JJ)I
-BINARY_JJ_INTRINSIC(MterpLongCompare, Compare, SetI);
-
-// java.lang.Long.highestOneBit(J)J
-UNARY_INTRINSIC(MterpLongHighestOneBit, HighestOneBitValue, GetVRegLong, SetJ);
-
-// java.lang.Long.lowestOneBit(J)J
-UNARY_INTRINSIC(MterpLongLowestOneBit, LowestOneBitValue, GetVRegLong, SetJ);
-
-// java.lang.Long.numberOfLeadingZeros(J)I
-UNARY_INTRINSIC(MterpLongNumberOfLeadingZeros, JAVASTYLE_CLZ, GetVRegLong, SetJ);
-
-// java.lang.Long.numberOfTrailingZeros(J)I
-UNARY_INTRINSIC(MterpLongNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVRegLong, SetJ);
-
-// java.lang.Long.rotateRight(JI)J
-BINARY_JI_INTRINSIC(MterpLongRotateRight, (Rot<int64_t, false>), SetJ);
-
-// java.lang.Long.rotateLeft(JI)J
-BINARY_JI_INTRINSIC(MterpLongRotateLeft, (Rot<int64_t, true>), SetJ);
-
-// java.lang.Long.signum(J)I
-UNARY_INTRINSIC(MterpLongSignum, Signum, GetVRegLong, SetI);
-
-// java.lang.Short.reverseBytes(S)S
-UNARY_INTRINSIC(MterpShortReverseBytes, BSWAP, GetVRegShort, SetS);
-
-// java.lang.Math.min(II)I
-BINARY_II_INTRINSIC(MterpMathMinIntInt, std::min, SetI);
-
-// java.lang.Math.min(JJ)J
-BINARY_JJ_INTRINSIC(MterpMathMinLongLong, std::min, SetJ);
-
-// java.lang.Math.max(II)I
-BINARY_II_INTRINSIC(MterpMathMaxIntInt, std::max, SetI);
-
-// java.lang.Math.max(JJ)J
-BINARY_JJ_INTRINSIC(MterpMathMaxLongLong, std::max, SetJ);
-
-// java.lang.Math.abs(I)I
-UNARY_INTRINSIC(MterpMathAbsInt, std::abs, GetVReg, SetI);
-
-// java.lang.Math.abs(J)J
-UNARY_INTRINSIC(MterpMathAbsLong, std::abs, GetVRegLong, SetJ);
-
-// java.lang.Math.abs(F)F
-UNARY_INTRINSIC(MterpMathAbsFloat, 0x7fffffff&, GetVReg, SetI);
-
-// java.lang.Math.abs(D)D
-UNARY_INTRINSIC(MterpMathAbsDouble, INT64_C(0x7fffffffffffffff)&, GetVRegLong, SetJ);
-
-// java.lang.Math.sqrt(D)D
-UNARY_INTRINSIC(MterpMathSqrt, std::sqrt, GetVRegDouble, SetD);
-
-// java.lang.Math.ceil(D)D
-UNARY_INTRINSIC(MterpMathCeil, std::ceil, GetVRegDouble, SetD);
-
-// java.lang.Math.floor(D)D
-UNARY_INTRINSIC(MterpMathFloor, std::floor, GetVRegDouble, SetD);
-
-// java.lang.Math.sin(D)D
-UNARY_INTRINSIC(MterpMathSin, std::sin, GetVRegDouble, SetD);
-
-// java.lang.Math.cos(D)D
-UNARY_INTRINSIC(MterpMathCos, std::cos, GetVRegDouble, SetD);
-
-// java.lang.Math.tan(D)D
-UNARY_INTRINSIC(MterpMathTan, std::tan, GetVRegDouble, SetD);
-
-// java.lang.Math.asin(D)D
-UNARY_INTRINSIC(MterpMathAsin, std::asin, GetVRegDouble, SetD);
-
-// java.lang.Math.acos(D)D
-UNARY_INTRINSIC(MterpMathAcos, std::acos, GetVRegDouble, SetD);
-
-// java.lang.Math.atan(D)D
-UNARY_INTRINSIC(MterpMathAtan, std::atan, GetVRegDouble, SetD);
-
-// java.lang.String.charAt(I)C
-static ALWAYS_INLINE bool MterpStringCharAt(ShadowFrame* shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint32_t arg[Instruction::kMaxVarArgRegs] = {};
- inst->GetVarArgs(arg, inst_data);
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
- int length = str->GetLength();
- int index = shadow_frame->GetVReg(arg[1]);
- uint16_t res;
- if (UNLIKELY(index < 0) || (index >= length)) {
- return false; // Punt and let non-intrinsic version deal with the throw.
- }
- if (str->IsCompressed()) {
- res = str->GetValueCompressed()[index];
- } else {
- res = str->GetValue()[index];
- }
- result_register->SetC(res);
- return true;
-}
-
-// java.lang.String.compareTo(Ljava/lang/String;)I
-static ALWAYS_INLINE bool MterpStringCompareTo(ShadowFrame* shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint32_t arg[Instruction::kMaxVarArgRegs] = {};
- inst->GetVarArgs(arg, inst_data);
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
- ObjPtr<mirror::Object> arg1 = shadow_frame->GetVRegReference(arg[1]);
- if (arg1 == nullptr) {
- return false;
- }
- result_register->SetI(str->CompareTo(arg1->AsString()));
- return true;
-}
-
-#define STRING_INDEXOF_INTRINSIC(name, starting_pos) \
-static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result_register) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
- inst->GetVarArgs(arg, inst_data); \
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString(); \
- int ch = shadow_frame->GetVReg(arg[1]); \
- if (ch >= 0x10000) { \
- /* Punt if supplementary char. */ \
- return false; \
- } \
- result_register->SetI(str->FastIndexOf(ch, starting_pos)); \
- return true; \
-}
-
-// java.lang.String.indexOf(I)I
-STRING_INDEXOF_INTRINSIC(StringIndexOf, 0);
-
-// java.lang.String.indexOf(II)I
-STRING_INDEXOF_INTRINSIC(StringIndexOfAfter, shadow_frame->GetVReg(arg[2]));
-
-#define SIMPLE_STRING_INTRINSIC(name, operation) \
-static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result_register) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \
- inst->GetVarArgs(arg, inst_data); \
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString(); \
- result_register->operation; \
- return true; \
-}
-
-// java.lang.String.isEmpty()Z
-SIMPLE_STRING_INTRINSIC(StringIsEmpty, SetZ(str->GetLength() == 0))
-
-// java.lang.String.length()I
-SIMPLE_STRING_INTRINSIC(StringLength, SetI(str->GetLength()))
-
-// java.lang.String.getCharsNoCheck(II[CI)V
-static ALWAYS_INLINE bool MterpStringGetCharsNoCheck(ShadowFrame* shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Start, end & index already checked by caller - won't throw. Destination is uncompressed.
- uint32_t arg[Instruction::kMaxVarArgRegs] = {};
- inst->GetVarArgs(arg, inst_data);
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
- int32_t start = shadow_frame->GetVReg(arg[1]);
- int32_t end = shadow_frame->GetVReg(arg[2]);
- int32_t index = shadow_frame->GetVReg(arg[4]);
- ObjPtr<mirror::CharArray> array = shadow_frame->GetVRegReference(arg[3])->AsCharArray();
- uint16_t* dst = array->GetData() + index;
- int32_t len = (end - start);
- if (str->IsCompressed()) {
- const uint8_t* src_8 = str->GetValueCompressed() + start;
- for (int i = 0; i < len; i++) {
- dst[i] = src_8[i];
- }
- } else {
- uint16_t* src_16 = str->GetValue() + start;
- memcpy(dst, src_16, len * sizeof(uint16_t));
- }
- return true;
-}
-
-// java.lang.String.equals(Ljava/lang/Object;)Z
-static ALWAYS_INLINE bool MterpStringEquals(ShadowFrame* shadow_frame,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint32_t arg[Instruction::kMaxVarArgRegs] = {};
- inst->GetVarArgs(arg, inst_data);
- ObjPtr<mirror::String> str = shadow_frame->GetVRegReference(arg[0])->AsString();
- ObjPtr<mirror::Object> obj = shadow_frame->GetVRegReference(arg[1]);
- bool res = false; // Assume not equal.
- if ((obj != nullptr) && obj->IsString()) {
- ObjPtr<mirror::String> str2 = obj->AsString();
- if (str->GetCount() == str2->GetCount()) {
- // Length & compression status are same. Can use block compare.
- void* bytes1;
- void* bytes2;
- int len = str->GetLength();
- if (str->IsCompressed()) {
- bytes1 = str->GetValueCompressed();
- bytes2 = str2->GetValueCompressed();
- } else {
- len *= sizeof(uint16_t);
- bytes1 = str->GetValue();
- bytes2 = str2->GetValue();
- }
- res = (memcmp(bytes1, bytes2, len) == 0);
- }
- }
- result_register->SetZ(res);
- return true;
-}
-
-#define VARHANDLE_FENCE_INTRINSIC(name, std_memory_operation) \
-static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame ATTRIBUTE_UNUSED, \
- const Instruction* inst ATTRIBUTE_UNUSED, \
- uint16_t inst_data ATTRIBUTE_UNUSED, \
- JValue* result_register ATTRIBUTE_UNUSED) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- std::atomic_thread_fence(std_memory_operation); \
- return true; \
-}
-
-// The VarHandle fence methods are static (unlike jdk.internal.misc.Unsafe versions).
-// The fences for the LoadLoadFence and StoreStoreFence are stronger
-// than strictly required, but the impact should be marginal.
-VARHANDLE_FENCE_INTRINSIC(MterpVarHandleFullFence, std::memory_order_seq_cst)
-VARHANDLE_FENCE_INTRINSIC(MterpVarHandleAcquireFence, std::memory_order_acquire)
-VARHANDLE_FENCE_INTRINSIC(MterpVarHandleReleaseFence, std::memory_order_release)
-VARHANDLE_FENCE_INTRINSIC(MterpVarHandleLoadLoadFence, std::memory_order_acquire)
-VARHANDLE_FENCE_INTRINSIC(MterpVarHandleStoreStoreFence, std::memory_order_release)
-
-#define METHOD_HANDLE_INVOKE_INTRINSIC(name) \
-static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- if (inst->Opcode() == Instruction::INVOKE_POLYMORPHIC) { \
- return DoInvokePolymorphic<false>(Thread::Current(), *shadow_frame, inst, inst_data, result); \
- } else { \
- return DoInvokePolymorphic<true>(Thread::Current(), *shadow_frame, inst, inst_data, result); \
- } \
-}
-
-METHOD_HANDLE_INVOKE_INTRINSIC(MethodHandleInvokeExact)
-METHOD_HANDLE_INVOKE_INTRINSIC(MethodHandleInvoke)
-
-#define VAR_HANDLE_ACCESSOR_INTRINSIC(name) \
-static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \
- const Instruction* inst, \
- uint16_t inst_data, \
- JValue* result) \
- REQUIRES_SHARED(Locks::mutator_lock_) { \
- return Do##name(Thread::Current(), *shadow_frame, inst, inst_data, result); \
-}
-
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchange)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchangeAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndExchangeRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleCompareAndSet)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGet);
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAdd)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAddAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndAddRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAnd)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAndAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseAndRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOr)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOrAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseOrRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXor)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXorAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndBitwiseXorRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSet)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSetAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetAndSetRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetOpaque)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleGetVolatile)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSet)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetOpaque)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetRelease)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleSetVolatile)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSet)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetAcquire)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetPlain)
-VAR_HANDLE_ACCESSOR_INTRINSIC(VarHandleWeakCompareAndSetRelease)
-
-static ALWAYS_INLINE bool MterpReachabilityFence(ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
- const Instruction* inst ATTRIBUTE_UNUSED,
- uint16_t inst_data ATTRIBUTE_UNUSED,
- JValue* result_register ATTRIBUTE_UNUSED)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- // Do nothing; its only purpose is to keep the argument reference live
- // at preceding suspend points. That's automatic in the interpreter.
- return true;
-}
-
-// Macro to help keep track of what's left to implement.
-#define UNIMPLEMENTED_CASE(name) \
- case Intrinsics::k##name: \
- res = false; \
- break;
-
-#define INTRINSIC_CASE(name) \
- case Intrinsics::k##name: \
- res = Mterp##name(shadow_frame, inst, inst_data, result_register); \
- break;
-
-bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
- ArtMethod* const called_method,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- Intrinsics intrinsic = static_cast<Intrinsics>(called_method->GetIntrinsic());
- bool res = false; // Assume failure
- switch (intrinsic) {
- UNIMPLEMENTED_CASE(DoubleDoubleToRawLongBits /* (D)J */)
- UNIMPLEMENTED_CASE(DoubleDoubleToLongBits /* (D)J */)
- UNIMPLEMENTED_CASE(DoubleIsInfinite /* (D)Z */)
- UNIMPLEMENTED_CASE(DoubleIsNaN /* (D)Z */)
- UNIMPLEMENTED_CASE(DoubleLongBitsToDouble /* (J)D */)
- UNIMPLEMENTED_CASE(FloatFloatToRawIntBits /* (F)I */)
- UNIMPLEMENTED_CASE(FloatFloatToIntBits /* (F)I */)
- UNIMPLEMENTED_CASE(FloatIsInfinite /* (F)Z */)
- UNIMPLEMENTED_CASE(FloatIsNaN /* (F)Z */)
- UNIMPLEMENTED_CASE(FloatIntBitsToFloat /* (I)F */)
- UNIMPLEMENTED_CASE(IntegerDivideUnsigned /* (II)I */)
- UNIMPLEMENTED_CASE(LongDivideUnsigned /* (JJ)J */)
- INTRINSIC_CASE(IntegerReverse)
- INTRINSIC_CASE(IntegerReverseBytes)
- INTRINSIC_CASE(IntegerBitCount)
- INTRINSIC_CASE(IntegerCompare)
- INTRINSIC_CASE(IntegerHighestOneBit)
- INTRINSIC_CASE(IntegerLowestOneBit)
- INTRINSIC_CASE(IntegerNumberOfLeadingZeros)
- INTRINSIC_CASE(IntegerNumberOfTrailingZeros)
- INTRINSIC_CASE(IntegerRotateRight)
- INTRINSIC_CASE(IntegerRotateLeft)
- INTRINSIC_CASE(IntegerSignum)
- INTRINSIC_CASE(LongReverse)
- INTRINSIC_CASE(LongReverseBytes)
- INTRINSIC_CASE(LongBitCount)
- INTRINSIC_CASE(LongCompare)
- INTRINSIC_CASE(LongHighestOneBit)
- INTRINSIC_CASE(LongLowestOneBit)
- INTRINSIC_CASE(LongNumberOfLeadingZeros)
- INTRINSIC_CASE(LongNumberOfTrailingZeros)
- INTRINSIC_CASE(LongRotateRight)
- INTRINSIC_CASE(LongRotateLeft)
- INTRINSIC_CASE(LongSignum)
- INTRINSIC_CASE(ShortReverseBytes)
- INTRINSIC_CASE(MathAbsDouble)
- INTRINSIC_CASE(MathAbsFloat)
- INTRINSIC_CASE(MathAbsLong)
- INTRINSIC_CASE(MathAbsInt)
- UNIMPLEMENTED_CASE(MathFmaDouble /* (DDD)D */)
- UNIMPLEMENTED_CASE(MathFmaFloat /* (FFF)F */)
- UNIMPLEMENTED_CASE(MathMinDoubleDouble /* (DD)D */)
- UNIMPLEMENTED_CASE(MathMinFloatFloat /* (FF)F */)
- INTRINSIC_CASE(MathMinLongLong)
- INTRINSIC_CASE(MathMinIntInt)
- UNIMPLEMENTED_CASE(MathMaxDoubleDouble /* (DD)D */)
- UNIMPLEMENTED_CASE(MathMaxFloatFloat /* (FF)F */)
- INTRINSIC_CASE(MathMaxLongLong)
- INTRINSIC_CASE(MathMaxIntInt)
- INTRINSIC_CASE(MathCos)
- INTRINSIC_CASE(MathSin)
- INTRINSIC_CASE(MathAcos)
- INTRINSIC_CASE(MathAsin)
- INTRINSIC_CASE(MathAtan)
- UNIMPLEMENTED_CASE(MathAtan2 /* (DD)D */)
- UNIMPLEMENTED_CASE(MathCbrt /* (D)D */)
- UNIMPLEMENTED_CASE(MathCosh /* (D)D */)
- UNIMPLEMENTED_CASE(MathExp /* (D)D */)
- UNIMPLEMENTED_CASE(MathExpm1 /* (D)D */)
- UNIMPLEMENTED_CASE(MathHypot /* (DD)D */)
- UNIMPLEMENTED_CASE(MathLog /* (D)D */)
- UNIMPLEMENTED_CASE(MathLog10 /* (D)D */)
- UNIMPLEMENTED_CASE(MathNextAfter /* (DD)D */)
- UNIMPLEMENTED_CASE(MathPow /* (DD)D */)
- UNIMPLEMENTED_CASE(MathSinh /* (D)D */)
- INTRINSIC_CASE(MathTan)
- UNIMPLEMENTED_CASE(MathTanh /* (D)D */)
- INTRINSIC_CASE(MathSqrt)
- INTRINSIC_CASE(MathCeil)
- INTRINSIC_CASE(MathFloor)
- UNIMPLEMENTED_CASE(MathRint /* (D)D */)
- UNIMPLEMENTED_CASE(MathRoundDouble /* (D)J */)
- UNIMPLEMENTED_CASE(MathRoundFloat /* (F)I */)
- UNIMPLEMENTED_CASE(MathMultiplyHigh /* (JJ)J */)
- UNIMPLEMENTED_CASE(SystemArrayCopyByte /* ([BI[BII)V */)
- UNIMPLEMENTED_CASE(SystemArrayCopyChar /* ([CI[CII)V */)
- UNIMPLEMENTED_CASE(SystemArrayCopyInt /* ([II[III)V */)
- UNIMPLEMENTED_CASE(SystemArrayCopy /* (Ljava/lang/Object;ILjava/lang/Object;II)V */)
- UNIMPLEMENTED_CASE(ThreadCurrentThread /* ()Ljava/lang/Thread; */)
- UNIMPLEMENTED_CASE(MemoryPeekByte /* (J)B */)
- UNIMPLEMENTED_CASE(MemoryPeekIntNative /* (J)I */)
- UNIMPLEMENTED_CASE(MemoryPeekLongNative /* (J)J */)
- UNIMPLEMENTED_CASE(MemoryPeekShortNative /* (J)S */)
- UNIMPLEMENTED_CASE(MemoryPokeByte /* (JB)V */)
- UNIMPLEMENTED_CASE(MemoryPokeIntNative /* (JI)V */)
- UNIMPLEMENTED_CASE(MemoryPokeLongNative /* (JJ)V */)
- UNIMPLEMENTED_CASE(MemoryPokeShortNative /* (JS)V */)
- INTRINSIC_CASE(ReachabilityFence /* (Ljava/lang/Object;)V */)
- INTRINSIC_CASE(StringCharAt)
- INTRINSIC_CASE(StringCompareTo)
- INTRINSIC_CASE(StringEquals)
- INTRINSIC_CASE(StringGetCharsNoCheck)
- INTRINSIC_CASE(StringIndexOf)
- INTRINSIC_CASE(StringIndexOfAfter)
- UNIMPLEMENTED_CASE(StringStringIndexOf /* (Ljava/lang/String;)I */)
- UNIMPLEMENTED_CASE(StringStringIndexOfAfter /* (Ljava/lang/String;I)I */)
- INTRINSIC_CASE(StringIsEmpty)
- INTRINSIC_CASE(StringLength)
- UNIMPLEMENTED_CASE(StringNewStringFromBytes /* ([BIII)Ljava/lang/String; */)
- UNIMPLEMENTED_CASE(StringNewStringFromChars /* (II[C)Ljava/lang/String; */)
- UNIMPLEMENTED_CASE(StringNewStringFromString /* (Ljava/lang/String;)Ljava/lang/String; */)
- UNIMPLEMENTED_CASE(StringBufferAppend /* (Ljava/lang/String;)Ljava/lang/StringBuffer; */)
- UNIMPLEMENTED_CASE(StringBufferLength /* ()I */)
- UNIMPLEMENTED_CASE(StringBufferToString /* ()Ljava/lang/String; */)
- UNIMPLEMENTED_CASE(
- StringBuilderAppendObject /* (Ljava/lang/Object;)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(
- StringBuilderAppendString /* (Ljava/lang/String;)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(
- StringBuilderAppendCharSequence /* (Ljava/lang/CharSequence;)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendCharArray /* ([C)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendBoolean /* (Z)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendChar /* (C)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendInt /* (I)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendLong /* (J)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendFloat /* (F)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderAppendDouble /* (D)Ljava/lang/StringBuilder; */)
- UNIMPLEMENTED_CASE(StringBuilderLength /* ()I */)
- UNIMPLEMENTED_CASE(StringBuilderToString /* ()Ljava/lang/String; */)
- UNIMPLEMENTED_CASE(UnsafeCASInt /* (Ljava/lang/Object;JII)Z */)
- UNIMPLEMENTED_CASE(UnsafeCASLong /* (Ljava/lang/Object;JJJ)Z */)
- UNIMPLEMENTED_CASE(UnsafeCASObject /* (Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z */)
- UNIMPLEMENTED_CASE(UnsafeGet /* (Ljava/lang/Object;J)I */)
- UNIMPLEMENTED_CASE(UnsafeGetVolatile /* (Ljava/lang/Object;J)I */)
- UNIMPLEMENTED_CASE(UnsafeGetObject /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(UnsafeGetObjectVolatile /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(UnsafeGetLong /* (Ljava/lang/Object;J)J */)
- UNIMPLEMENTED_CASE(UnsafeGetLongVolatile /* (Ljava/lang/Object;J)J */)
- UNIMPLEMENTED_CASE(UnsafePut /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(UnsafePutOrdered /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(UnsafePutVolatile /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(UnsafePutObject /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(UnsafePutObjectOrdered /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(UnsafePutObjectVolatile /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(UnsafePutLong /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(UnsafePutLongOrdered /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(UnsafePutLongVolatile /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(UnsafeGetAndAddInt /* (Ljava/lang/Object;JI)I */)
- UNIMPLEMENTED_CASE(UnsafeGetAndAddLong /* (Ljava/lang/Object;JJ)J */)
- UNIMPLEMENTED_CASE(UnsafeGetAndSetInt /* (Ljava/lang/Object;JI)I */)
- UNIMPLEMENTED_CASE(UnsafeGetAndSetLong /* (Ljava/lang/Object;JJ)J */)
- UNIMPLEMENTED_CASE(UnsafeGetAndSetObject /* (Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(UnsafeLoadFence /* ()V */)
- UNIMPLEMENTED_CASE(UnsafeStoreFence /* ()V */)
- UNIMPLEMENTED_CASE(UnsafeFullFence /* ()V */)
- UNIMPLEMENTED_CASE(JdkUnsafeCASInt /* (Ljava/lang/Object;JII)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeCASLong /* (Ljava/lang/Object;JJJ)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeCASObject /* (Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeCompareAndSetInt /* (Ljava/lang/Object;JII)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeCompareAndSetLong /* (Ljava/lang/Object;JJJ)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeCompareAndSetObject /* (Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z */)
- UNIMPLEMENTED_CASE(JdkUnsafeGet /* (Ljava/lang/Object;J)I */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetVolatile /* (Ljava/lang/Object;J)I */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAcquire /* (Ljava/lang/Object;J)I */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetObject /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetObjectVolatile /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetObjectAcquire /* (Ljava/lang/Object;J)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetLong /* (Ljava/lang/Object;J)J */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetLongVolatile /* (Ljava/lang/Object;J)J */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetLongAcquire /* (Ljava/lang/Object;J)J */)
- UNIMPLEMENTED_CASE(JdkUnsafePut /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutOrdered /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutVolatile /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutRelease /* (Ljava/lang/Object;JI)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutObject /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutObjectOrdered /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutObjectVolatile /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutObjectRelease /* (Ljava/lang/Object;JLjava/lang/Object;)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutLong /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutLongOrdered /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutLongVolatile /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(JdkUnsafePutLongRelease /* (Ljava/lang/Object;JJ)V */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAndAddInt /* (Ljava/lang/Object;JI)I */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAndAddLong /* (Ljava/lang/Object;JJ)J */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAndSetInt /* (Ljava/lang/Object;JI)I */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAndSetLong /* (Ljava/lang/Object;JJ)J */)
- UNIMPLEMENTED_CASE(JdkUnsafeGetAndSetObject /* (Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(JdkUnsafeLoadFence /* ()V */)
- UNIMPLEMENTED_CASE(JdkUnsafeStoreFence /* ()V */)
- UNIMPLEMENTED_CASE(JdkUnsafeFullFence /* ()V */)
- UNIMPLEMENTED_CASE(ReferenceGetReferent /* ()Ljava/lang/Object; */)
- UNIMPLEMENTED_CASE(ReferenceRefersTo /* (Ljava/lang/Object;)Z */)
- UNIMPLEMENTED_CASE(IntegerValueOf /* (I)Ljava/lang/Integer; */)
- UNIMPLEMENTED_CASE(ThreadInterrupted /* ()Z */)
- UNIMPLEMENTED_CASE(CRC32Update /* (II)I */)
- UNIMPLEMENTED_CASE(CRC32UpdateBytes /* (I[BII)I */)
- UNIMPLEMENTED_CASE(CRC32UpdateByteBuffer /* (IJII)I */)
- UNIMPLEMENTED_CASE(FP16Compare /* (SS)I */)
- UNIMPLEMENTED_CASE(FP16ToFloat /* (S)F */)
- UNIMPLEMENTED_CASE(FP16ToHalf /* (F)S */)
- UNIMPLEMENTED_CASE(FP16Floor /* (S)S */)
- UNIMPLEMENTED_CASE(FP16Ceil /* (S)S */)
- UNIMPLEMENTED_CASE(FP16Rint /* (S)S */)
- UNIMPLEMENTED_CASE(FP16Greater /* (SS)Z */)
- UNIMPLEMENTED_CASE(FP16GreaterEquals /* (SS)Z */)
- UNIMPLEMENTED_CASE(FP16Less /* (SS)Z */)
- UNIMPLEMENTED_CASE(FP16LessEquals /* (SS)Z */)
- UNIMPLEMENTED_CASE(FP16Min /* (SS)S */)
- UNIMPLEMENTED_CASE(FP16Max /* (SS)S */)
- INTRINSIC_CASE(VarHandleFullFence)
- INTRINSIC_CASE(VarHandleAcquireFence)
- INTRINSIC_CASE(VarHandleReleaseFence)
- INTRINSIC_CASE(VarHandleLoadLoadFence)
- INTRINSIC_CASE(VarHandleStoreStoreFence)
- INTRINSIC_CASE(MethodHandleInvokeExact)
- INTRINSIC_CASE(MethodHandleInvoke)
- INTRINSIC_CASE(VarHandleCompareAndExchange)
- INTRINSIC_CASE(VarHandleCompareAndExchangeAcquire)
- INTRINSIC_CASE(VarHandleCompareAndExchangeRelease)
- INTRINSIC_CASE(VarHandleCompareAndSet)
- INTRINSIC_CASE(VarHandleGet)
- INTRINSIC_CASE(VarHandleGetAcquire)
- INTRINSIC_CASE(VarHandleGetAndAdd)
- INTRINSIC_CASE(VarHandleGetAndAddAcquire)
- INTRINSIC_CASE(VarHandleGetAndAddRelease)
- INTRINSIC_CASE(VarHandleGetAndBitwiseAnd)
- INTRINSIC_CASE(VarHandleGetAndBitwiseAndAcquire)
- INTRINSIC_CASE(VarHandleGetAndBitwiseAndRelease)
- INTRINSIC_CASE(VarHandleGetAndBitwiseOr)
- INTRINSIC_CASE(VarHandleGetAndBitwiseOrAcquire)
- INTRINSIC_CASE(VarHandleGetAndBitwiseOrRelease)
- INTRINSIC_CASE(VarHandleGetAndBitwiseXor)
- INTRINSIC_CASE(VarHandleGetAndBitwiseXorAcquire)
- INTRINSIC_CASE(VarHandleGetAndBitwiseXorRelease)
- INTRINSIC_CASE(VarHandleGetAndSet)
- INTRINSIC_CASE(VarHandleGetAndSetAcquire)
- INTRINSIC_CASE(VarHandleGetAndSetRelease)
- INTRINSIC_CASE(VarHandleGetOpaque)
- INTRINSIC_CASE(VarHandleGetVolatile)
- INTRINSIC_CASE(VarHandleSet)
- INTRINSIC_CASE(VarHandleSetOpaque)
- INTRINSIC_CASE(VarHandleSetRelease)
- INTRINSIC_CASE(VarHandleSetVolatile)
- INTRINSIC_CASE(VarHandleWeakCompareAndSet)
- INTRINSIC_CASE(VarHandleWeakCompareAndSetAcquire)
- INTRINSIC_CASE(VarHandleWeakCompareAndSetPlain)
- INTRINSIC_CASE(VarHandleWeakCompareAndSetRelease)
- case Intrinsics::kNone:
- res = false;
- break;
- // Note: no default case to ensure we catch any newly added intrinsics.
- }
- return res;
-}
-
-} // namespace interpreter
-} // namespace art
diff --git a/runtime/interpreter/interpreter_intrinsics.h b/runtime/interpreter/interpreter_intrinsics.h
deleted file mode 100644
index 2a23002d05..0000000000
--- a/runtime/interpreter/interpreter_intrinsics.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
-#define ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
-
-#include "jvalue.h"
-
-namespace art {
-
-class ArtMethod;
-class Instruction;
-class ShadowFrame;
-
-namespace interpreter {
-
-// Invocations of methods identified as intrinsics are routed here. If there is
-// no interpreter implementation, return false and a normal invoke will proceed.
-bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
- ArtMethod* const called_method,
- const Instruction* inst,
- uint16_t inst_data,
- JValue* result_register);
-
-} // namespace interpreter
-} // namespace art
-
-#endif // ART_RUNTIME_INTERPRETER_INTERPRETER_INTRINSICS_H_
diff --git a/runtime/interpreter/interpreter_switch_impl-inl.h b/runtime/interpreter/interpreter_switch_impl-inl.h
index d95c507698..215194ed21 100644
--- a/runtime/interpreter/interpreter_switch_impl-inl.h
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -144,7 +144,7 @@ class InstructionHandler {
if (!CheckForceReturn()) {
return false;
}
- if (UNLIKELY(Instrumentation()->HasDexPcListeners())) {
+ if (UNLIKELY(shadow_frame_.GetNotifyDexPcMoveEvents())) {
uint8_t opcode = inst_->Opcode(inst_data_);
bool is_move_result_object = (opcode == Instruction::MOVE_RESULT_OBJECT);
JValue* save_ref = is_move_result_object ? &ctx_->result_register : nullptr;
@@ -353,7 +353,7 @@ class InstructionHandler {
template<InvokeType type, bool is_range>
HANDLER_ATTRIBUTES bool HandleInvoke() {
- bool success = DoInvoke<type, is_range, do_access_check, /*is_mterp=*/ false>(
+ bool success = DoInvoke<type, is_range, do_access_check>(
Self(), shadow_frame_, inst_, inst_data_, ResultRegister());
return PossiblyHandlePendingExceptionOnInvoke(!success);
}
@@ -1816,7 +1816,7 @@ class InstructionHandler {
#define OPCODE_CASE(OPCODE, OPCODE_NAME, NAME, FORMAT, i, a, e, v) \
template<bool do_access_check, bool transaction_active> \
-ASAN_NO_INLINE static bool OP_##OPCODE_NAME( \
+ASAN_NO_INLINE NO_STACK_PROTECTOR static bool OP_##OPCODE_NAME( \
SwitchImplContext* ctx, \
const instrumentation::Instrumentation* instrumentation, \
Thread* self, \
@@ -1834,6 +1834,7 @@ DEX_INSTRUCTION_LIST(OPCODE_CASE)
#undef OPCODE_CASE
template<bool do_access_check, bool transaction_active>
+NO_STACK_PROTECTOR
void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
Thread* self = ctx->self;
const CodeItemDataAccessor& accessor = ctx->accessor;
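The global HasDexPcListeners() query above becomes a per-frame bit, so only frames whose method actually has a breakpoint pay for dex-pc move notifications. A sketch of the consumer side, using the accessors added in the shadow_frame.h hunk below (DexPcMovedEvent's exact signature is an assumption):

if (UNLIKELY(shadow_frame.GetNotifyDexPcMoveEvents())) {
  // Only frames explicitly marked by the debugger report PC moves.
  instrumentation->DexPcMovedEvent(self, this_object, method, dex_pc);
}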
diff --git a/runtime/interpreter/mterp/arm64ng/main.S b/runtime/interpreter/mterp/arm64ng/main.S
index 89de81f5e4..81d6b7bd77 100644
--- a/runtime/interpreter/mterp/arm64ng/main.S
+++ b/runtime/interpreter/mterp/arm64ng/main.S
@@ -1590,6 +1590,14 @@ END \name
* rest method parameters
*/
+OAT_ENTRY ExecuteNterpWithClinitImpl, EndExecuteNterpWithClinitImpl
+ ldr wip, [x0, ART_METHOD_DECLARING_CLASS_OFFSET]
+ ldrb wip, [ip, MIRROR_CLASS_IS_VISIBLY_INITIALIZED_OFFSET]
+ cmp ip, #MIRROR_CLASS_IS_VISIBLY_INITIALIZED_VALUE
+ bcs ExecuteNterpImpl
+ b art_quick_resolution_trampoline
+EndExecuteNterpWithClinitImpl:
+
OAT_ENTRY ExecuteNterpImpl, EndExecuteNterpImpl
.cfi_startproc
sub x16, sp, #STACK_OVERFLOW_RESERVED_BYTES
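Each architecture gains the same stub. Expressed in C++ with illustrative stand-in types (not the real runtime classes; the status constant is a placeholder for the assembler's MIRROR_CLASS_IS_VISIBLY_INITIALIZED_VALUE), the new entry point is one unsigned byte compare that either falls through into nterp or bounces to the resolution trampoline so the class initializer runs first:

#include <cstdint>

struct MirrorClass { uint8_t init_status; };         // stand-in type
struct ArtMethod { MirrorClass* declaring_class; };  // stand-in type
constexpr uint8_t kVisiblyInitialized = 0xf0;        // placeholder value

// true: take art_quick_resolution_trampoline; false: enter ExecuteNterpImpl.
inline bool NeedsClinitPath(const ArtMethod* m) {
  return m->declaring_class->init_status < kVisiblyInitialized;
}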
diff --git a/runtime/interpreter/mterp/armng/main.S b/runtime/interpreter/mterp/armng/main.S
index 310a3fd8f1..f89db40637 100644
--- a/runtime/interpreter/mterp/armng/main.S
+++ b/runtime/interpreter/mterp/armng/main.S
@@ -1608,6 +1608,14 @@ END \name
* rest method parameters
*/
+OAT_ENTRY ExecuteNterpWithClinitImpl, EndExecuteNterpWithClinitImpl
+ ldr ip, [r0, ART_METHOD_DECLARING_CLASS_OFFSET]
+ ldrb ip, [ip, MIRROR_CLASS_IS_VISIBLY_INITIALIZED_OFFSET]
+ cmp ip, #MIRROR_CLASS_IS_VISIBLY_INITIALIZED_VALUE
+ bcs ExecuteNterpImpl
+ b art_quick_resolution_trampoline
+EndExecuteNterpWithClinitImpl:
+
OAT_ENTRY ExecuteNterpImpl, EndExecuteNterpImpl
.cfi_startproc
sub ip, sp, #STACK_OVERFLOW_RESERVED_BYTES
diff --git a/runtime/interpreter/mterp/nterp.cc b/runtime/interpreter/mterp/nterp.cc
index d70a846b7c..ef916a9aa2 100644
--- a/runtime/interpreter/mterp/nterp.cc
+++ b/runtime/interpreter/mterp/nterp.cc
@@ -26,7 +26,6 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "interpreter/interpreter_cache-inl.h"
#include "interpreter/interpreter_common.h"
-#include "interpreter/interpreter_intrinsics.h"
#include "interpreter/shadow_frame-inl.h"
#include "mirror/string-alloc-inl.h"
#include "nterp_helpers.h"
@@ -35,7 +34,7 @@ namespace art {
namespace interpreter {
bool IsNterpSupported() {
- return !kPoisonHeapReferences && kUseReadBarrier;
+ return !kPoisonHeapReferences;
}
bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -58,10 +57,17 @@ bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
// The entrypoint for nterp, which ArtMethods can directly point to.
extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
+// Another entrypoint, which does a clinit check at entry.
+extern "C" void ExecuteNterpWithClinitImpl() REQUIRES_SHARED(Locks::mutator_lock_);
+
const void* GetNterpEntryPoint() {
return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
}
+const void* GetNterpWithClinitEntryPoint() {
+ return reinterpret_cast<const void*>(interpreter::ExecuteNterpWithClinitImpl);
+}
+
/*
* Verify some constants used by the nterp interpreter.
*/
@@ -89,13 +95,12 @@ inline void UpdateHotness(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock
}
template<typename T>
-inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T value) {
- DCHECK(kUseReadBarrier) << "Nterp only works with read barriers";
+inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T value) {
self->GetInterpreterCache()->Set(self, dex_pc_ptr, value);
}
template<typename T>
-inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T* value) {
+inline void UpdateCache(Thread* self, const uint16_t* dex_pc_ptr, T* value) {
UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
}
@@ -245,7 +250,7 @@ extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_
}
FLATTEN
-extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
+extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, const uint16_t* dex_pc_ptr)
REQUIRES_SHARED(Locks::mutator_lock_) {
UpdateHotness(caller);
const Instruction* inst = Instruction::At(dex_pc_ptr);
@@ -432,13 +437,14 @@ extern "C" size_t NterpGetStaticField(Thread* self,
const Instruction* inst = Instruction::At(dex_pc_ptr);
uint16_t field_index = inst->VRegB_21c();
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ bool is_put = IsInstructionSPut(inst->Opcode());
ArtField* resolved_field = ResolveFieldWithAccessChecks(
self,
class_linker,
field_index,
caller,
/* is_static */ true,
- /* is_put */ IsInstructionSPut(inst->Opcode()),
+ is_put,
resolve_field_type);
if (resolved_field == nullptr) {
@@ -461,7 +467,16 @@ extern "C" size_t NterpGetStaticField(Thread* self,
// check for it.
return reinterpret_cast<size_t>(resolved_field) | 1;
} else {
- UpdateCache(self, dex_pc_ptr, resolved_field);
+ // Try to resolve the field type even if we were not requested to. Only if
+ // the field type is successfully resolved can we update the cache. If we
+ // fail to resolve the type, we clear the exception to keep interpreter
+ // semantics of not throwing when null is stored.
+ if (is_put && resolve_field_type == 0 && resolved_field->ResolveType() == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ } else {
+ UpdateCache(self, dex_pc_ptr, resolved_field);
+ }
return reinterpret_cast<size_t>(resolved_field);
}
}
@@ -475,13 +490,14 @@ extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
const Instruction* inst = Instruction::At(dex_pc_ptr);
uint16_t field_index = inst->VRegC_22c();
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ bool is_put = IsInstructionIPut(inst->Opcode());
ArtField* resolved_field = ResolveFieldWithAccessChecks(
self,
class_linker,
field_index,
caller,
/* is_static */ false,
- /* is_put */ IsInstructionIPut(inst->Opcode()),
+ is_put,
resolve_field_type);
if (resolved_field == nullptr) {
DCHECK(self->IsExceptionPending());
@@ -492,7 +508,16 @@ extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
// of volatile.
return -resolved_field->GetOffset().Uint32Value();
}
- UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
+ // Try to resolve the field type even if we were not requested to. Only if
+ // the field type is successfully resolved can we update the cache. If we
+ // fail to resolve the type, we clear the exception to keep interpreter
+ // semantics of not throwing when null is stored.
+ if (is_put && resolve_field_type == 0 && resolved_field->ResolveType() == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ self->ClearException();
+ } else {
+ UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
+ }
return resolved_field->GetOffset().Uint32Value();
}
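The `| 1` on the static-field path above is a pointer tag: ArtField pointers are word-aligned, so bit 0 is free to tell the nterp assembly that the declaring class still needs its initialization check. A self-contained sketch of the scheme (names are illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>

inline size_t TagNeedsClinitCheck(const void* field) {
  auto raw = reinterpret_cast<uintptr_t>(field);
  assert((raw & 1u) == 0 && "field pointers are at least 2-byte aligned");
  return raw | 1u;  // bit 0 set: caller must still check class init
}

inline bool NeedsClinitCheck(size_t tagged) { return (tagged & 1u) != 0; }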
diff --git a/runtime/interpreter/mterp/nterp.h b/runtime/interpreter/mterp/nterp.h
index 1590b280e9..4d5af393bc 100644
--- a/runtime/interpreter/mterp/nterp.h
+++ b/runtime/interpreter/mterp/nterp.h
@@ -32,6 +32,7 @@ void CheckNterpAsmConstants();
bool IsNterpSupported();
bool CanRuntimeUseNterp();
const void* GetNterpEntryPoint();
+const void* GetNterpWithClinitEntryPoint();
constexpr uint16_t kNterpHotnessValue = 0;
diff --git a/runtime/interpreter/mterp/x86_64ng/main.S b/runtime/interpreter/mterp/x86_64ng/main.S
index bd191c09ec..3e476db953 100644
--- a/runtime/interpreter/mterp/x86_64ng/main.S
+++ b/runtime/interpreter/mterp/x86_64ng/main.S
@@ -1694,6 +1694,13 @@ END_FUNCTION \name
* rest method parameters
*/
+OAT_ENTRY ExecuteNterpWithClinitImpl, EndExecuteNterpWithClinitImpl
+ movl ART_METHOD_DECLARING_CLASS_OFFSET(%rdi), %r10d
+ cmpb $$(MIRROR_CLASS_IS_VISIBLY_INITIALIZED_VALUE), MIRROR_CLASS_IS_VISIBLY_INITIALIZED_OFFSET(%r10d)
+ jae ExecuteNterpImpl
+ jmp art_quick_resolution_trampoline
+EndExecuteNterpWithClinitImpl:
+
OAT_ENTRY ExecuteNterpImpl, EndExecuteNterpImpl
.cfi_startproc
.cfi_def_cfa rsp, 8
diff --git a/runtime/interpreter/mterp/x86ng/main.S b/runtime/interpreter/mterp/x86ng/main.S
index db8519b8f7..7872520384 100644
--- a/runtime/interpreter/mterp/x86ng/main.S
+++ b/runtime/interpreter/mterp/x86ng/main.S
@@ -1757,6 +1757,15 @@ END_FUNCTION \name
* rest method parameters
*/
+OAT_ENTRY ExecuteNterpWithClinitImpl, EndExecuteNterpWithClinitImpl
+ push %esi
+ movl ART_METHOD_DECLARING_CLASS_OFFSET(%eax), %esi
+ cmpb $$(MIRROR_CLASS_IS_VISIBLY_INITIALIZED_VALUE), MIRROR_CLASS_IS_VISIBLY_INITIALIZED_OFFSET(%esi)
+ pop %esi
+ jae ExecuteNterpImpl
+ jmp art_quick_resolution_trampoline
+EndExecuteNterpWithClinitImpl:
+
OAT_ENTRY ExecuteNterpImpl, EndExecuteNterpImpl
.cfi_startproc
.cfi_def_cfa esp, 4
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index 8cb2b33a07..be93dfac84 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -54,7 +54,7 @@ class ShadowFrame {
// We have been requested to notify when this frame gets popped.
kNotifyFramePop = 1 << 0,
// We have been asked to pop this frame off the stack as soon as possible.
- kForcePopFrame = 1 << 1,
+ kForcePopFrame = 1 << 1,
// We have been asked to re-execute the last instruction.
kForceRetryInst = 1 << 2,
// Mark that we expect the next frame to retry the last instruction (used by instrumentation and
@@ -62,6 +62,9 @@ class ShadowFrame {
kSkipMethodExitEvents = 1 << 3,
// Used to suppress exception events caused by other instrumentation events.
kSkipNextExceptionEvent = 1 << 4,
+  // Used to specify whether DexPcMoveEvents have to be reported. These events
+  // will only be reported if the method has a breakpoint set.
+ kNotifyDexPcMoveEvents = 1 << 5,
};
public:
@@ -169,14 +172,14 @@ class ShadowFrame {
int64_t GetVRegLong(size_t i) const {
DCHECK_LT(i + 1, NumberOfVRegs());
const uint32_t* vreg = &vregs_[i];
- typedef const int64_t unaligned_int64 __attribute__ ((aligned (4)));
+ using unaligned_int64 __attribute__((aligned(4))) = const int64_t;
return *reinterpret_cast<unaligned_int64*>(vreg);
}
double GetVRegDouble(size_t i) const {
DCHECK_LT(i + 1, NumberOfVRegs());
const uint32_t* vreg = &vregs_[i];
- typedef const double unaligned_double __attribute__ ((aligned (4)));
+ using unaligned_double __attribute__((aligned(4))) = const double;
return *reinterpret_cast<unaligned_double*>(vreg);
}
@@ -221,7 +224,7 @@ class ShadowFrame {
void SetVRegLong(size_t i, int64_t val) {
DCHECK_LT(i + 1, NumberOfVRegs());
uint32_t* vreg = &vregs_[i];
- typedef int64_t unaligned_int64 __attribute__ ((aligned (4)));
+ using unaligned_int64 __attribute__((aligned(4))) = int64_t;
*reinterpret_cast<unaligned_int64*>(vreg) = val;
// This is needed for moving collectors since these can update the vreg references if they
// happen to agree with references in the reference array.
@@ -232,7 +235,7 @@ class ShadowFrame {
void SetVRegDouble(size_t i, double val) {
DCHECK_LT(i + 1, NumberOfVRegs());
uint32_t* vreg = &vregs_[i];
- typedef double unaligned_double __attribute__ ((aligned (4)));
+ using unaligned_double __attribute__((aligned(4))) = double;
*reinterpret_cast<unaligned_double*>(vreg) = val;
// This is needed for moving collectors since these can update the vreg references if they
// happen to agree with references in the reference array.
@@ -373,6 +376,14 @@ class ShadowFrame {
UpdateFrameFlag(enable, FrameFlags::kSkipNextExceptionEvent);
}
+ bool GetNotifyDexPcMoveEvents() const {
+ return GetFrameFlag(FrameFlags::kNotifyDexPcMoveEvents);
+ }
+
+ void SetNotifyDexPcMoveEvents(bool enable) {
+ UpdateFrameFlag(enable, FrameFlags::kNotifyDexPcMoveEvents);
+ }
+
void CheckConsistentVRegs() const {
if (kIsDebugBuild) {
// A shadow frame visible to GC requires the following rule: for a given vreg,
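The typedef-to-using conversions above preserve a deliberate trick: vregs are only 4-byte aligned, and reading an int64_t through an ordinary pointer lets the compiler assume 8-byte alignment. Declaring the alias with aligned(4) lowers that assumption so the emitted load is safe. A sketch of the same pattern in isolation (how far compilers honor reduced alignment on a type alias varies, which is exactly why ART spells it out):

#include <cstddef>
#include <cstdint>

using unaligned_int64 __attribute__((aligned(4))) = const int64_t;

inline int64_t GetVRegPair(const uint32_t* vregs, size_t i) {
  // Two adjacent 32-bit vregs read as one 64-bit value on 4-byte-aligned
  // storage.
  return *reinterpret_cast<unaligned_int64*>(&vregs[i]);
}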
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 62051ee9db..4d3e0304c0 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1557,7 +1557,7 @@ void UnstartedRuntime::UnstartedJdkUnsafeCompareAndSwapObject(
mirror::Object* new_value = shadow_frame->GetVRegReference(arg_offset + 5);
// Must use non transactional mode.
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// Need to make sure the reference stored in the field is a to-space one before attempting the
// CAS or the CAS could fail incorrectly.
mirror::HeapReference<mirror::Object>* field_addr =
@@ -2163,6 +2163,7 @@ using JNIHandler = void(*)(Thread* self,
uint32_t* args,
JValue* result);
+// NOLINTNEXTLINE
#define ONE_PLUS(ShortNameIgnored, DescriptorIgnored, NameIgnored, SignatureIgnored) 1 +
static constexpr size_t kInvokeHandlersSize = UNSTARTED_RUNTIME_DIRECT_LIST(ONE_PLUS) 0;
static constexpr size_t kJniHandlersSize = UNSTARTED_RUNTIME_JNI_LIST(ONE_PLUS) 0;
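The to-space comment above guards a subtle failure mode: under a concurrent-copying collector, the field may still hold a from-space pointer while the expected value is the to-space copy of the same object, so a raw CAS would spuriously fail on a pointer that is logically equal. A toy analogue (heal stands in for the read barrier that rewrites stale pointers in place; this is not the runtime's actual API):

#include <atomic>

inline bool CasAfterHeal(std::atomic<void*>& slot,
                         void* expected,
                         void* desired,
                         void* (*heal)(void*)) {
  void* cur = slot.load(std::memory_order_relaxed);
  slot.compare_exchange_strong(cur, heal(cur));  // heal the stale alias first
  return slot.compare_exchange_strong(expected, desired);
}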
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 75a692e48d..70948b8565 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -420,11 +420,13 @@ TEST_F(UnstartedRuntimeTest, StringInit) {
shadow_frame->SetVRegReference(0, reference_empty_string.Get());
shadow_frame->SetVRegReference(1, string_arg.Get());
- interpreter::DoCall<false, false>(method,
+ ArtMethod* factory = WellKnownClasses::StringInitToStringFactory(method);
+ interpreter::DoCall<false, false>(factory,
self,
*shadow_frame,
Instruction::At(inst_data),
inst_data[0],
+ /* string_init= */ true,
&result);
ObjPtr<mirror::String> string_result = down_cast<mirror::String*>(result.GetL());
EXPECT_EQ(string_arg->GetLength(), string_result->GetLength());
@@ -1024,6 +1026,7 @@ TEST_F(UnstartedRuntimeTest, FloatConversion) {
*shadow_frame,
Instruction::At(inst_data),
inst_data[0],
+ /* string_init= */ false,
&result);
ObjPtr<mirror::String> string_result = down_cast<mirror::String*>(result.GetL());
ASSERT_TRUE(string_result != nullptr);
@@ -1179,6 +1182,7 @@ class UnstartedClassForNameTest : public UnstartedRuntimeTest {
*shadow_frame,
Instruction::At(inst_data),
inst_data[0],
+ /* string_init= */ false,
&result);
CHECK(!self->IsExceptionPending());
}
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 6d634ae120..239f207e46 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -208,7 +208,6 @@ Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
// Jit GC for now (b/147208992).
if (code_cache->GetGarbageCollectCode()) {
code_cache->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
- !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled() &&
!jit->JitAtFirstUse());
}
@@ -259,10 +258,14 @@ bool Jit::LoadCompilerLibrary(std::string* error_msg) {
return true;
}
-bool Jit::CompileMethod(ArtMethod* method,
- Thread* self,
- CompilationKind compilation_kind,
- bool prejit) {
+bool Jit::CompileMethodInternal(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit) {
+ if (kIsDebugBuild) {
+ MutexLock mu(self, *Locks::jit_lock_);
+ CHECK(GetCodeCache()->IsMethodBeingCompiled(method, compilation_kind));
+ }
DCHECK(Runtime::Current()->UseJitCompilation());
DCHECK(!method->IsRuntimeMethod());
@@ -279,9 +282,8 @@ bool Jit::CompileMethod(ArtMethod* method,
compilation_kind = CompilationKind::kOptimized;
}
- RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
// Don't compile the method if it has breakpoints.
- if (cb->IsMethodBeingInspected(method)) {
+ if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
VLOG(jit) << "JIT not compiling " << method->PrettyMethod()
<< " due to not being safe to jit according to runtime-callbacks. For example, there"
<< " could be breakpoints in this method.";
@@ -323,7 +325,7 @@ bool Jit::CompileMethod(ArtMethod* method,
<< ArtMethod::PrettyMethod(method_to_compile)
<< " kind=" << compilation_kind;
bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, compilation_kind);
- code_cache_->DoneCompiling(method_to_compile, self, compilation_kind);
+ code_cache_->DoneCompiling(method_to_compile, self);
if (!success) {
VLOG(jit) << "Failed to compile method "
<< ArtMethod::PrettyMethod(method_to_compile)
@@ -568,12 +570,11 @@ bool Jit::MaybeDoOnStackReplacement(Thread* thread,
// Before allowing the jump, make sure no code is actively inspecting the method to avoid
// jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
// disable OSR when single stepping, but that's currently hard to know at this point.
- if (Runtime::Current()->GetInstrumentation()->InterpreterStubsInstalled() ||
- Runtime::Current()->GetInstrumentation()->IsDeoptimized(method) ||
- thread->IsForceInterpreter() ||
- method->GetDeclaringClass()->IsObsoleteObject() ||
- Dbg::IsForcedInterpreterNeededForUpcall(thread, method) ||
- Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method)) {
+  // Currently, HaveLocalsChanged is not frame-specific. It could be made frame-specific
+  // to allow OSR of frames whose locals haven't changed, but that isn't worth the
+  // additional complexity.
+ if (Runtime::Current()->GetInstrumentation()->NeedsSlowInterpreterForMethod(thread, method) ||
+ Runtime::Current()->GetRuntimeCallbacks()->HaveLocalsChanged()) {
return false;
}
@@ -748,6 +749,51 @@ void Jit::NotifyZygoteCompilationDone() {
child_mapping_methods.Reset();
}
+class ScopedCompilation {
+ public:
+ ScopedCompilation(ScopedCompilation&& other) noexcept :
+ jit_(other.jit_),
+ method_(other.method_),
+ compilation_kind_(other.compilation_kind_),
+ owns_compilation_(other.owns_compilation_) {
+ other.owns_compilation_ = false;
+ }
+
+ ScopedCompilation(Jit* jit, ArtMethod* method, CompilationKind compilation_kind)
+ : jit_(jit),
+ method_(method),
+ compilation_kind_(compilation_kind),
+ owns_compilation_(true) {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+    // We don't want to enqueue any new tasks when the thread pool has stopped. This
+    // simplifies the implementation of the redefinition feature in JVMTI.
+ if (jit_->GetThreadPool() == nullptr ||
+ !jit_->GetThreadPool()->HasStarted(Thread::Current()) ||
+ jit_->GetCodeCache()->IsMethodBeingCompiled(method_, compilation_kind_)) {
+ owns_compilation_ = false;
+ return;
+ }
+ jit_->GetCodeCache()->AddMethodBeingCompiled(method_, compilation_kind_);
+ }
+
+ bool OwnsCompilation() const {
+ return owns_compilation_;
+ }
+
+ ~ScopedCompilation() {
+ if (owns_compilation_) {
+ MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+ jit_->GetCodeCache()->RemoveMethodBeingCompiled(method_, compilation_kind_);
+ }
+ }
+
+ private:
+ Jit* const jit_;
+ ArtMethod* const method_;
+ const CompilationKind compilation_kind_;
+ bool owns_compilation_;
+};
+
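The ScopedCompilation guard above is the core of this change: ownership of the (method, kind) registration moves with the guard, from the enqueuing thread into the JitCompileTask, and is released exactly once. A minimal, self-contained sketch of that move-only RAII pattern follows; all names are illustrative and not ART API.

    #include <cstdio>
    #include <utility>

    class ScopedToken {
     public:
      explicit ScopedToken(bool won) : owns_(won) {}  // result of a try-acquire
      ScopedToken(ScopedToken&& other) noexcept : owns_(other.owns_) {
        other.owns_ = false;  // the moved-from guard must not release again
      }
      ScopedToken(const ScopedToken&) = delete;
      ~ScopedToken() {
        if (owns_) std::puts("deregister");  // stands in for the remove call
      }
      bool Owns() const { return owns_; }
     private:
      bool owns_;
    };

    // Analogous to JitCompileTask storing the guard by value: whether the
    // task runs, fails, or is discarded, exactly one deregistration happens.
    struct TaskSketch {
      explicit TaskSketch(ScopedToken&& t) : token_(std::move(t)) {}
      ScopedToken token_;
    };

This single-release property is what lets DoneCompiling() in jit_code_cache.cc below drop its own compilation-kind bookkeeping.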
class JitCompileTask final : public Task {
public:
enum class TaskKind {
@@ -755,25 +801,16 @@ class JitCompileTask final : public Task {
kPreCompile,
};
- JitCompileTask(ArtMethod* method, TaskKind task_kind, CompilationKind compilation_kind)
- : method_(method), kind_(task_kind), compilation_kind_(compilation_kind), klass_(nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- // For a non-bootclasspath class, add a global ref to the class to prevent class unloading
- // until compilation is done.
- // When we precompile, this is either with boot classpath methods, or main
- // class loader methods, so we don't need to keep a global reference.
- if (method->GetDeclaringClass()->GetClassLoader() != nullptr &&
- kind_ != TaskKind::kPreCompile) {
- klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
- CHECK(klass_ != nullptr);
- }
- }
-
- ~JitCompileTask() {
- if (klass_ != nullptr) {
- ScopedObjectAccess soa(Thread::Current());
- soa.Vm()->DeleteGlobalRef(soa.Self(), klass_);
- }
+ JitCompileTask(ArtMethod* method,
+ TaskKind task_kind,
+ CompilationKind compilation_kind,
+ ScopedCompilation&& sc)
+ : method_(method),
+ kind_(task_kind),
+ compilation_kind_(compilation_kind),
+ scoped_compilation_(std::move(sc)) {
+ DCHECK(scoped_compilation_.OwnsCompilation());
+ DCHECK(!sc.OwnsCompilation());
}
void Run(Thread* self) override {
@@ -782,7 +819,7 @@ class JitCompileTask final : public Task {
switch (kind_) {
case TaskKind::kCompile:
case TaskKind::kPreCompile: {
- Runtime::Current()->GetJit()->CompileMethod(
+ Runtime::Current()->GetJit()->CompileMethodInternal(
method_,
self,
compilation_kind_,
@@ -802,7 +839,7 @@ class JitCompileTask final : public Task {
ArtMethod* const method_;
const TaskKind kind_;
const CompilationKind compilation_kind_;
- jobject klass_;
+ ScopedCompilation scoped_compilation_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};
@@ -1290,6 +1327,21 @@ void Jit::RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& de
}
}
+void Jit::AddCompileTask(Thread* self,
+ ArtMethod* method,
+ CompilationKind compilation_kind,
+ bool precompile) {
+ ScopedCompilation sc(this, method, compilation_kind);
+ if (!sc.OwnsCompilation()) {
+ return;
+ }
+ JitCompileTask::TaskKind task_kind = precompile
+ ? JitCompileTask::TaskKind::kPreCompile
+ : JitCompileTask::TaskKind::kCompile;
+ thread_pool_->AddTask(
+ self, new JitCompileTask(method, task_kind, compilation_kind, std::move(sc)));
+}
+
bool Jit::CompileMethodFromProfile(Thread* self,
ClassLinker* class_linker,
uint32_t method_idx,
@@ -1310,21 +1362,27 @@ bool Jit::CompileMethodFromProfile(Thread* self,
// Already seen by another profile.
return false;
}
+ CompilationKind compilation_kind = CompilationKind::kOptimized;
const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
class_linker->IsQuickGenericJniStub(entry_point) ||
- (entry_point == interpreter::GetNterpEntryPoint()) ||
- // We explicitly check for the stub. The trampoline is for methods backed by
- // a .oat file that has a compiled version of the method.
+ class_linker->IsNterpEntryPoint(entry_point) ||
+ // We explicitly check for the resolution stub, and not the resolution trampoline.
+ // The trampoline is for methods backed by a .oat file that has a compiled version of
+ // the method.
(entry_point == GetQuickResolutionStub())) {
VLOG(jit) << "JIT Zygote processing method " << ArtMethod::PrettyMethod(method)
<< " from profile";
method->SetPreCompiled();
+ ScopedCompilation sc(this, method, compilation_kind);
+ if (!sc.OwnsCompilation()) {
+ return false;
+ }
if (!add_to_queue) {
- CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ true);
+ CompileMethodInternal(method, self, compilation_kind, /* prejit= */ true);
} else {
Task* task = new JitCompileTask(
- method, JitCompileTask::TaskKind::kPreCompile, CompilationKind::kOptimized);
+ method, JitCompileTask::TaskKind::kPreCompile, compilation_kind, std::move(sc));
if (compile_after_boot) {
AddPostBootTask(self, task);
} else {
@@ -1475,11 +1533,7 @@ void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) {
 // hotness threshold. If we're not only using the baseline compiler, enqueue a
 // compilation task that will compile the method with the optimizing compiler.
if (!options_->UseBaselineCompiler()) {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method,
- JitCompileTask::TaskKind::kCompile,
- CompilationKind::kOptimized));
+ AddCompileTask(self, method, CompilationKind::kOptimized);
}
}
@@ -1499,23 +1553,17 @@ class ScopedSetRuntimeThread {
bool was_runtime_thread_;
};
-void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
+void Jit::MethodEntered(Thread* self, ArtMethod* method) {
Runtime* runtime = Runtime::Current();
if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
if (np_method->IsCompilable()) {
- // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
- // conflicts with jitzygote optimizations.
- JitCompileTask compile_task(
- method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOptimized);
- // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
- ScopedSetRuntimeThread ssrt(thread);
- compile_task.Run(thread);
+ CompileMethod(method, self, CompilationKind::kOptimized, /* prejit= */ false);
}
return;
}
- AddSamples(thread, method);
+ AddSamples(self, method);
}
void Jit::WaitForCompilationToFinish(Thread* self) {
@@ -1620,7 +1668,6 @@ void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
// Jit GC for now (b/147208992).
code_cache_->SetGarbageCollectCode(
!jit_compiler_->GenerateDebugInfo() &&
- !runtime->GetInstrumentation()->AreExitStubsInstalled() &&
!JitAtFirstUse());
if (is_system_server && runtime->HasImageWithProfile()) {
@@ -1745,9 +1792,7 @@ void Jit::MaybeEnqueueCompilation(ArtMethod* method, Thread* self) {
if (!method->IsNative() && !code_cache_->IsOsrCompiled(method)) {
// If we already have compiled code for it, nterp may be stuck in a loop.
// Compile OSR.
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kOsr));
+ AddCompileTask(self, method, CompilationKind::kOsr);
}
return;
}
@@ -1764,7 +1809,7 @@ void Jit::MaybeEnqueueCompilation(ArtMethod* method, Thread* self) {
return;
}
- static constexpr size_t kIndividualSharedMethodHotnessThreshold = 0xff;
+ static constexpr size_t kIndividualSharedMethodHotnessThreshold = 0x3f;
if (method->IsMemorySharedMethod()) {
MutexLock mu(self, lock_);
auto it = shared_method_counters_.find(method);
@@ -1781,16 +1826,26 @@ void Jit::MaybeEnqueueCompilation(ArtMethod* method, Thread* self) {
}
if (!method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method, JitCompileTask::TaskKind::kCompile, CompilationKind::kBaseline));
+ AddCompileTask(self, method, CompilationKind::kBaseline);
} else {
- thread_pool_->AddTask(
- self,
- new JitCompileTask(method,
- JitCompileTask::TaskKind::kCompile,
- CompilationKind::kOptimized));
+ AddCompileTask(self, method, CompilationKind::kOptimized);
+ }
+}
+
+bool Jit::CompileMethod(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit) {
+ ScopedCompilation sc(this, method, compilation_kind);
+ // TODO: all current users of this method expect us to wait if it is being compiled.
+ if (!sc.OwnsCompilation()) {
+ return false;
}
+ // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
+ ScopedSetRuntimeThread ssrt(self);
+ // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
+ // conflicts with jitzygote optimizations.
+ return CompileMethodInternal(method, self, compilation_kind, prejit);
}
} // namespace jit
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index b439c8ee9e..fd92451054 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -53,6 +53,7 @@ class String;
namespace jit {
class JitCodeCache;
+class JitCompileTask;
class JitMemoryRegion;
class JitOptions;
@@ -461,6 +462,17 @@ class Jit {
static bool BindCompilerMethods(std::string* error_msg);
+ void AddCompileTask(Thread* self,
+ ArtMethod* method,
+ CompilationKind compilation_kind,
+ bool precompile = false);
+
+ bool CompileMethodInternal(ArtMethod* method,
+ Thread* self,
+ CompilationKind compilation_kind,
+ bool prejit)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// JIT compiler
static void* jit_library_handle_;
static JitCompilerInterface* jit_compiler_;
@@ -507,6 +519,8 @@ class Jit {
// between the zygote and apps.
std::map<ArtMethod*, uint16_t> shared_method_counters_;
+ friend class art::jit::JitCompileTask;
+
DISALLOW_COPY_AND_ASSIGN(Jit);
};
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 0b34688ff3..39f165d7e2 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -40,7 +40,7 @@
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/bitmap-inl.h"
-#include "gc/allocator/dlmalloc.h"
+#include "gc/allocator/art-dlmalloc.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle.h"
#include "handle_scope-inl.h"
@@ -422,7 +422,6 @@ void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
// TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
// out of the weak access/creation pause. b/32167580
if (new_object != nullptr && new_object != object) {
- DCHECK(new_object->IsString());
roots[i] = GcRoot<mirror::Object>(new_object);
}
} else {
@@ -560,7 +559,7 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
}
bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const {
- return kUseReadBarrier
+ return gUseReadBarrier
? self->GetWeakRefAccessEnabled()
: is_weak_access_enabled_.load(std::memory_order_seq_cst);
}
@@ -583,13 +582,13 @@ void JitCodeCache::BroadcastForInlineCacheAccess() {
}
void JitCodeCache::AllowInlineCacheAccess() {
- DCHECK(!kUseReadBarrier);
+ DCHECK(!gUseReadBarrier);
is_weak_access_enabled_.store(true, std::memory_order_seq_cst);
BroadcastForInlineCacheAccess();
}
void JitCodeCache::DisallowInlineCacheAccess() {
- DCHECK(!kUseReadBarrier);
+ DCHECK(!gUseReadBarrier);
is_weak_access_enabled_.store(false, std::memory_order_seq_cst);
}
@@ -1594,10 +1593,35 @@ bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
return osr_code_map_.find(method) != osr_code_map_.end();
}
+void JitCodeCache::VisitRoots(RootVisitor* visitor) {
+ Thread* self = Thread::Current();
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ if (heap->CurrentCollectorType() != gc::CollectorType::kCollectorTypeCMC
+ || !heap->MarkCompactCollector()->IsCompacting(self)) {
+ MutexLock mu(self, *Locks::jit_lock_);
+ UnbufferedRootVisitor root_visitor(visitor, RootInfo(kRootStickyClass));
+ for (ArtMethod* method : current_optimized_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+ for (ArtMethod* method : current_baseline_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+ for (ArtMethod* method : current_osr_compilations_) {
+ method->VisitRoots(root_visitor, kRuntimePointerSize);
+ }
+ }
+}
+
bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
Thread* self,
CompilationKind compilation_kind,
bool prejit) {
+ if (kIsDebugBuild) {
+ MutexLock mu(self, *Locks::jit_lock_);
+    // Note: the compilation kind may have been adjusted from what was passed in initially.
+ // We really just want to check that the method is indeed being compiled.
+ CHECK(IsMethodBeingCompiled(method));
+ }
const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
OatQuickMethodHeader* method_header =
@@ -1686,13 +1710,8 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
}
}
}
- MutexLock mu(self, *Locks::jit_lock_);
- if (IsMethodBeingCompiled(method, compilation_kind)) {
- return false;
- }
- AddMethodBeingCompiled(method, compilation_kind);
- return true;
}
+ return true;
}
ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
@@ -1715,9 +1734,7 @@ void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
it->second->DecrementInlineUse();
}
-void JitCodeCache::DoneCompiling(ArtMethod* method,
- Thread* self,
- CompilationKind compilation_kind) {
+void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self) {
DCHECK_EQ(Thread::Current(), self);
MutexLock mu(self, *Locks::jit_lock_);
if (UNLIKELY(method->IsNative())) {
@@ -1729,8 +1746,6 @@ void JitCodeCache::DoneCompiling(ArtMethod* method,
// Failed to compile; the JNI compiler never fails, but the cache may be full.
jni_stubs_map_.erase(it); // Remove the entry added in NotifyCompilationOf().
} // else Commit() updated entrypoints of all methods in the JniStubData.
- } else {
- RemoveMethodBeingCompiled(method, compilation_kind);
}
}
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index fb861a4d82..a534ba9094 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -215,7 +215,7 @@ class JitCodeCache {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
- void DoneCompiling(ArtMethod* method, Thread* self, CompilationKind compilation_kind)
+ void DoneCompiling(ArtMethod* method, Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::jit_lock_);
@@ -403,6 +403,20 @@ class JitCodeCache {
ProfilingInfo* GetProfilingInfo(ArtMethod* method, Thread* self);
void ResetHotnessCounter(ArtMethod* method, Thread* self);
+ void VisitRoots(RootVisitor* visitor);
+
+ // Return whether `method` is being compiled with the given mode.
+ bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
+  // Remove `method` from the list of methods being compiled with the given mode.
+ void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
+ // Record that `method` is being compiled with the given mode.
+ void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
+ REQUIRES(Locks::jit_lock_);
+
private:
JitCodeCache();
@@ -492,18 +506,6 @@ class JitCodeCache {
REQUIRES(!Locks::jit_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Record that `method` is being compiled with the given mode.
- void AddMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
- // Remove `method` from the list of methods meing compiled with the given mode.
- void RemoveMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
- // Return whether `method` is being compiled with the given mode.
- bool IsMethodBeingCompiled(ArtMethod* method, CompilationKind compilation_kind)
- REQUIRES(Locks::jit_lock_);
-
// Return whether `method` is being compiled in any mode.
bool IsMethodBeingCompiled(ArtMethod* method) REQUIRES(Locks::jit_lock_);
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
index 56407f58c0..3f43aca932 100644
--- a/runtime/jit/jit_memory_region.cc
+++ b/runtime/jit/jit_memory_region.cc
@@ -27,7 +27,7 @@
#include "base/membarrier.h"
#include "base/memfd.h"
#include "base/systrace.h"
-#include "gc/allocator/dlmalloc.h"
+#include "gc/allocator/art-dlmalloc.h"
#include "jit/jit_scoped_code_cache_write.h"
#include "oat_quick_method_header.h"
#include "palette/palette.h"
diff --git a/runtime/jni/java_vm_ext-inl.h b/runtime/jni/java_vm_ext-inl.h
index 29cdf1b773..c98a5532f6 100644
--- a/runtime/jni/java_vm_ext-inl.h
+++ b/runtime/jni/java_vm_ext-inl.h
@@ -26,7 +26,7 @@ namespace art {
inline bool JavaVMExt::MayAccessWeakGlobals(Thread* self) const {
DCHECK(self != nullptr);
- return kUseReadBarrier
+ return gUseReadBarrier
? self->GetWeakRefAccessEnabled()
: allow_accessing_weak_globals_.load(std::memory_order_seq_cst);
}
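The kUseReadBarrier → gUseReadBarrier renames in this and the surrounding files turn a compile-time constant into a runtime global, so a single runtime build can pick its collector at startup. A reduced sketch of the gating pattern, with assumed names (only the ternary shape comes from the code above):

    #include <atomic>

    // Assumed: set once at startup, before threads exist; read-only afterwards.
    bool gConcurrentCopying = false;

    // Toggled around GC pauses when not using a concurrent-copying collector.
    std::atomic<bool> gAllowWeakAccess{true};

    bool MayAccessWeakGlobalsSketch(bool thread_weak_ref_access_enabled) {
      // Concurrent copying gates weak-ref access via per-thread state; other
      // collectors consult the global flag with seq-cst ordering.
      return gConcurrentCopying
          ? thread_weak_ref_access_enabled
          : gAllowWeakAccess.load(std::memory_order_seq_cst);
    }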
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index f41b6c06fa..39d5729698 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -729,8 +729,8 @@ jweak JavaVMExt::AddWeakGlobalRef(Thread* self, ObjPtr<mirror::Object> obj) {
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
// CMS needs this to block for concurrent reference processing because an object allocated during
// the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak
- // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
- if (!kUseReadBarrier) {
+ // ref. But CC (gUseReadBarrier == true) doesn't because of the to-space invariant.
+ if (!gUseReadBarrier) {
WaitForWeakGlobalsAccess(self);
}
std::string error_msg;
@@ -809,7 +809,7 @@ void JavaVMExt::DumpForSigQuit(std::ostream& os) {
}
void JavaVMExt::DisallowNewWeakGlobals() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
Thread* const self = Thread::Current();
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
// DisallowNewWeakGlobals is only called by CMS during the pause. It is required to have the
@@ -820,7 +820,7 @@ void JavaVMExt::DisallowNewWeakGlobals() {
}
void JavaVMExt::AllowNewWeakGlobals() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::jni_weak_globals_lock_);
allow_accessing_weak_globals_.store(true, std::memory_order_seq_cst);
@@ -876,7 +876,7 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalDuringShutdown(Thread* self, I
return DecodeWeakGlobal(self, ref);
}
// self can be null during a runtime shutdown. ~Runtime()->~ClassLinker()->DecodeWeakGlobal().
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
DCHECK(allow_accessing_weak_globals_.load(std::memory_order_seq_cst));
}
return weak_globals_.SynchronizedGet(ref);
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index e3153fdace..7a7644e485 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -1950,6 +1950,7 @@ class JNI {
return InvokeWithJValues(soa, nullptr, mid, args).GetD();
}
+ NO_STACK_PROTECTOR
static void CallStaticVoidMethod(JNIEnv* env, jclass, jmethodID mid, ...) {
va_list ap;
va_start(ap, mid);
@@ -1959,6 +1960,7 @@ class JNI {
InvokeWithVarArgs(soa, nullptr, mid, ap);
}
+ NO_STACK_PROTECTOR
static void CallStaticVoidMethodV(JNIEnv* env, jclass, jmethodID mid, va_list args) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(mid);
ScopedObjectAccess soa(env);
@@ -2176,14 +2178,13 @@ class JNI {
if (heap->IsMovableObject(s)) {
StackHandleScope<1> hs(soa.Self());
HandleWrapperObjPtr<mirror::String> h(hs.NewHandleWrapper(&s));
- if (!kUseReadBarrier) {
- heap->IncrementDisableMovingGC(soa.Self());
- } else {
- // For the CC collector, we only need to wait for the thread flip rather
- // than the whole GC to occur thanks to the to-space invariant.
- heap->IncrementDisableThreadFlip(soa.Self());
- }
+      // For the CC and CMC collectors, we only need to wait for the thread flip rather
+      // than the whole GC to occur, thanks to the to-space invariant.
+ heap->IncrementDisableThreadFlip(soa.Self());
}
+    // Ensure that the string doesn't cause userfaults in case it is passed on to
+    // the kernel.
+ heap->EnsureObjectUserfaulted(s);
if (is_copy != nullptr) {
*is_copy = JNI_FALSE;
}
@@ -2199,11 +2200,7 @@ class JNI {
gc::Heap* heap = Runtime::Current()->GetHeap();
ObjPtr<mirror::String> s = soa.Decode<mirror::String>(java_string);
if (!s->IsCompressed() && heap->IsMovableObject(s)) {
- if (!kUseReadBarrier) {
- heap->DecrementDisableMovingGC(soa.Self());
- } else {
- heap->DecrementDisableThreadFlip(soa.Self());
- }
+ heap->DecrementDisableThreadFlip(soa.Self());
}
// TODO: For uncompressed strings GetStringCritical() always returns `s->GetValue()`.
// Should we report an error if the user passes a different `chars`?
@@ -2366,16 +2363,14 @@ class JNI {
}
gc::Heap* heap = Runtime::Current()->GetHeap();
if (heap->IsMovableObject(array)) {
- if (!kUseReadBarrier) {
- heap->IncrementDisableMovingGC(soa.Self());
- } else {
- // For the CC collector, we only need to wait for the thread flip rather than the whole GC
- // to occur thanks to the to-space invariant.
- heap->IncrementDisableThreadFlip(soa.Self());
- }
+      // For the CC and CMC collectors, we only need to wait for the thread flip rather
+      // than the whole GC to occur, thanks to the to-space invariant.
+ heap->IncrementDisableThreadFlip(soa.Self());
// Re-decode in case the object moved since IncrementDisableGC waits for GC to complete.
array = soa.Decode<mirror::Array>(java_array);
}
+    // Ensure that the array doesn't cause userfaults in case it is passed on to the kernel.
+ heap->EnsureObjectUserfaulted(array);
if (is_copy != nullptr) {
*is_copy = JNI_FALSE;
}
@@ -2967,11 +2962,7 @@ class JNI {
delete[] reinterpret_cast<uint64_t*>(elements);
} else if (heap->IsMovableObject(array)) {
 // Non copy to a movable object must mean that we had disabled the moving GC.
- if (!kUseReadBarrier) {
- heap->DecrementDisableMovingGC(soa.Self());
- } else {
- heap->DecrementDisableThreadFlip(soa.Self());
- }
+ heap->DecrementDisableThreadFlip(soa.Self());
}
}
}
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 84f45c2dc6..599a5994df 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -183,8 +183,7 @@ class LockWord {
LockState GetState() const {
CheckReadBarrierState();
- if ((!kUseReadBarrier && UNLIKELY(value_ == 0)) ||
- (kUseReadBarrier && UNLIKELY((value_ & kGCStateMaskShiftedToggled) == 0))) {
+ if (UNLIKELY((value_ & kGCStateMaskShiftedToggled) == 0)) {
return kUnlocked;
} else {
uint32_t internal_state = (value_ >> kStateShift) & kStateMask;
@@ -288,7 +287,7 @@ class LockWord {
void CheckReadBarrierState() const {
if (kIsDebugBuild && ((value_ >> kStateShift) & kStateMask) != kStateForwardingAddress) {
uint32_t rb_state = ReadBarrierState();
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
DCHECK_EQ(rb_state, 0U);
} else {
DCHECK(rb_state == ReadBarrier::NonGrayState() ||
diff --git a/runtime/managed_stack.h b/runtime/managed_stack.h
index 04a27fe656..0e7dfe3742 100644
--- a/runtime/managed_stack.h
+++ b/runtime/managed_stack.h
@@ -43,6 +43,8 @@ template <typename T> class StackReference;
// code.
class PACKED(4) ManagedStack {
public:
+ static size_t constexpr kTaggedJniSpMask = 0x3;
+
ManagedStack()
: tagged_top_quick_frame_(TaggedTopQuickFrame::CreateNotTagged(nullptr)),
link_(nullptr),
@@ -75,8 +77,12 @@ class PACKED(4) ManagedStack {
return tagged_top_quick_frame_.GetSp();
}
- bool GetTopQuickFrameTag() const {
- return tagged_top_quick_frame_.GetTag();
+ bool GetTopQuickFrameGenericJniTag() const {
+ return tagged_top_quick_frame_.GetGenericJniTag();
+ }
+
+ bool GetTopQuickFrameJitJniTag() const {
+ return tagged_top_quick_frame_.GetJitJniTag();
}
bool HasTopQuickFrame() const {
@@ -89,10 +95,10 @@ class PACKED(4) ManagedStack {
tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateNotTagged(top);
}
- void SetTopQuickFrameTagged(ArtMethod** top) {
+ void SetTopQuickFrameGenericJniTagged(ArtMethod** top) {
DCHECK(top_shadow_frame_ == nullptr);
DCHECK_ALIGNED(top, 4u);
- tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateTagged(top);
+ tagged_top_quick_frame_ = TaggedTopQuickFrame::CreateGenericJniTagged(top);
}
static constexpr size_t TaggedTopQuickFrameOffset() {
@@ -129,26 +135,30 @@ class PACKED(4) ManagedStack {
return TaggedTopQuickFrame(reinterpret_cast<uintptr_t>(sp));
}
- static TaggedTopQuickFrame CreateTagged(ArtMethod** sp) {
+ static TaggedTopQuickFrame CreateGenericJniTagged(ArtMethod** sp) {
DCHECK_ALIGNED(sp, 4u);
return TaggedTopQuickFrame(reinterpret_cast<uintptr_t>(sp) | 1u);
}
// Get SP known to be not tagged and non-null.
ArtMethod** GetSpKnownNotTagged() const {
- DCHECK(!GetTag());
+ DCHECK(!GetGenericJniTag() && !GetJitJniTag());
DCHECK_NE(tagged_sp_, 0u);
return reinterpret_cast<ArtMethod**>(tagged_sp_);
}
ArtMethod** GetSp() const {
- return reinterpret_cast<ArtMethod**>(tagged_sp_ & ~static_cast<uintptr_t>(1u));
+ return reinterpret_cast<ArtMethod**>(tagged_sp_ & ~static_cast<uintptr_t>(kTaggedJniSpMask));
}
- bool GetTag() const {
+ bool GetGenericJniTag() const {
return (tagged_sp_ & 1u) != 0u;
}
+ bool GetJitJniTag() const {
+ return (tagged_sp_ & 2u) != 0u;
+ }
+
uintptr_t GetTaggedSp() const {
return tagged_sp_;
}
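The ManagedStack changes generalize the old single tag bit into a two-bit mask: quick frames are 4-byte aligned, so the two low bits of the frame pointer are free, with bit 0 marking GenericJNI frames and bit 1 marking JIT JNI stubs. A standalone sketch of the scheme (helper names are illustrative):

    #include <cstdint>

    constexpr uintptr_t kTagMask = 0x3;  // mirrors kTaggedJniSpMask above

    uintptr_t TagGenericJni(void* sp) {
      // Valid only because frames are at least 4-byte aligned.
      return reinterpret_cast<uintptr_t>(sp) | 1u;
    }

    void* UntaggedSp(uintptr_t tagged_sp) {
      return reinterpret_cast<void*>(tagged_sp & ~kTagMask);
    }

    bool IsGenericJniFrame(uintptr_t tagged_sp) { return (tagged_sp & 1u) != 0u; }
    bool IsJitJniStubFrame(uintptr_t tagged_sp) { return (tagged_sp & 2u) != 0u; }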
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 1327a24905..286e0978bc 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -625,6 +625,10 @@ bool MethodHandleFieldAccess(Thread* self,
case mirror::MethodHandle::kInstanceGet: {
size_t obj_reg = operands->GetOperand(0);
ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(obj_reg);
+ if (obj == nullptr) {
+ ThrowNullPointerException("Receiver is null");
+ return false;
+ }
MethodHandleFieldGet(self, shadow_frame, obj, field, field_type, result);
return true;
}
@@ -648,6 +652,10 @@ bool MethodHandleFieldAccess(Thread* self,
callsite_type->GetPTypes()->Get(kPTypeIndex)->GetPrimitiveType(),
value_reg);
ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(obj_reg);
+ if (obj == nullptr) {
+ ThrowNullPointerException("Receiver is null");
+ return false;
+ }
return MethodHandleFieldPut(self, shadow_frame, obj, field, field_type, value);
}
case mirror::MethodHandle::kStaticPut: {
diff --git a/runtime/metrics/reporter.cc b/runtime/metrics/reporter.cc
index a44066e487..28ca997cec 100644
--- a/runtime/metrics/reporter.cc
+++ b/runtime/metrics/reporter.cc
@@ -126,10 +126,17 @@ void MetricsReporter::BackgroundThreadRun() {
// Configure the backends
if (config_.dump_to_logcat) {
- backends_.emplace_back(new LogBackend(LogSeverity::INFO));
+ backends_.emplace_back(new LogBackend(std::make_unique<TextFormatter>(), LogSeverity::INFO));
}
if (config_.dump_to_file.has_value()) {
- backends_.emplace_back(new FileBackend(config_.dump_to_file.value()));
+ std::unique_ptr<MetricsFormatter> formatter;
+ if (config_.metrics_format == "xml") {
+ formatter = std::make_unique<XmlFormatter>();
+ } else {
+ formatter = std::make_unique<TextFormatter>();
+ }
+
+ backends_.emplace_back(new FileBackend(std::move(formatter), config_.dump_to_file.value()));
}
if (config_.dump_to_statsd) {
auto backend = CreateStatsdBackend();
@@ -291,6 +298,7 @@ ReportingConfig ReportingConfig::FromFlags(bool is_system_server) {
.dump_to_logcat = gFlags.MetricsWriteToLogcat(),
.dump_to_file = gFlags.MetricsWriteToFile.GetValueOptional(),
.dump_to_statsd = gFlags.MetricsWriteToStatsd(),
+ .metrics_format = gFlags.MetricsFormat(),
.period_spec = period_spec,
.reporting_num_mods = reporting_num_mods,
.reporting_mods = reporting_mods,
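The reporter now selects a file formatter from configuration. A compact sketch of that selection with a deliberately reduced interface; ART's actual MetricsFormatter differs, and "xml" as the trigger value is the only detail taken from the code above:

    #include <cstdint>
    #include <memory>
    #include <string>

    struct FormatterSketch {
      virtual ~FormatterSketch() = default;
      virtual std::string Format(const std::string& name, int64_t value) = 0;
    };

    struct TextSketch : FormatterSketch {
      std::string Format(const std::string& name, int64_t value) override {
        return name + ": " + std::to_string(value) + "\n";
      }
    };

    struct XmlSketch : FormatterSketch {
      std::string Format(const std::string& name, int64_t value) override {
        return "<metric name=\"" + name + "\" value=\"" +
               std::to_string(value) + "\"/>\n";
      }
    };

    std::unique_ptr<FormatterSketch> MakeFormatter(const std::string& format) {
      if (format == "xml") return std::make_unique<XmlSketch>();
      return std::make_unique<TextSketch>();  // text remains the default
    }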
diff --git a/runtime/metrics/reporter.h b/runtime/metrics/reporter.h
index daeaf1fa18..af9e0ca151 100644
--- a/runtime/metrics/reporter.h
+++ b/runtime/metrics/reporter.h
@@ -78,6 +78,9 @@ struct ReportingConfig {
// If set, provides a file name to enable metrics logging to a file.
std::optional<std::string> dump_to_file;
+ // Provides the desired output format for metrics written to a file.
+ std::string metrics_format;
+
// The reporting period configuration.
std::optional<ReportingPeriodSpec> period_spec;
diff --git a/runtime/metrics/reporter_test.cc b/runtime/metrics/reporter_test.cc
index 3807c77991..7d9377ad32 100644
--- a/runtime/metrics/reporter_test.cc
+++ b/runtime/metrics/reporter_test.cc
@@ -174,9 +174,9 @@ class MetricsReporterTest : public CommonRuntimeTest {
CompilationReason reason = CompilationReason::kUnknown) {
// TODO: we should iterate through all the other metrics to make sure they were not
// reported. However, we don't have an easy to use iteration mechanism over metrics yet.
- // We should ads one
+ // We should add one
ASSERT_EQ(backend_->GetReports().size(), size);
- for (auto report : backend_->GetReports()) {
+ for (const TestBackend::Report& report : backend_->GetReports()) {
ASSERT_EQ(report.data.Get(DatumId::kClassVerificationCount), with_metrics ? 2u : 0u);
ASSERT_EQ(report.data.Get(DatumId::kJitMethodCompileCount), with_metrics ? 1u : 0u);
}
@@ -411,7 +411,7 @@ class ReportingPeriodSpecTest : public testing::Test {
const std::string& spec_str,
bool startup_first,
bool continuous,
- std::vector<uint32_t> periods) {
+ const std::vector<uint32_t>& periods) {
Verify(spec_str, true, startup_first, continuous, periods);
}
@@ -420,7 +420,7 @@ class ReportingPeriodSpecTest : public testing::Test {
bool valid,
bool startup_first,
bool continuous,
- std::vector<uint32_t> periods) {
+ const std::vector<uint32_t>& periods) {
std::string error_msg;
std::optional<ReportingPeriodSpec> spec = ReportingPeriodSpec::Parse(spec_str, &error_msg);
diff --git a/runtime/metrics/statsd.cc b/runtime/metrics/statsd.cc
index f68d50730f..78c3622cbb 100644
--- a/runtime/metrics/statsd.cc
+++ b/runtime/metrics/statsd.cc
@@ -106,6 +106,30 @@ constexpr std::optional<int32_t> EncodeDatumId(DatumId datum_id) {
case DatumId::kFullGcTracingThroughputAvg:
return std::make_optional(
statsd::ART_DATUM_REPORTED__KIND__ART_DATUM_GC_FULL_HEAP_TRACING_THROUGHPUT_AVG_MB_PER_SEC);
+ case DatumId::kGcWorldStopTime:
+ return std::make_optional(
+ statsd::ART_DATUM_REPORTED__KIND__ART_DATUM_GC_WORLD_STOP_TIME_US);
+ case DatumId::kGcWorldStopCount:
+ return std::make_optional(
+ statsd::ART_DATUM_REPORTED__KIND__ART_DATUM_GC_WORLD_STOP_COUNT);
+ case DatumId::kYoungGcScannedBytes:
+ return std::make_optional(
+ statsd::ART_DATUM_REPORTED__KIND__ART_DATUM_GC_YOUNG_GENERATION_COLLECTION_SCANNED_BYTES);
+ case DatumId::kYoungGcFreedBytes:
+ return std::make_optional(
+ statsd::ART_DATUM_REPORTED__KIND__ART_DATUM_GC_YOUNG_GENERATION_COLLECTION_FREED_BYTES);
+ case DatumId::kYoungGcDuration:
+ return std::make_optional(
+ statsd::ART_DATUM_REPORTED__KIND__ART_DATUM_GC_YOUNG_GENERATION_COLLECTION_DURATION_MS);
+ case DatumId::kFullGcScannedBytes:
+ return std::make_optional(
+ statsd::ART_DATUM_REPORTED__KIND__ART_DATUM_GC_FULL_HEAP_COLLECTION_SCANNED_BYTES);
+ case DatumId::kFullGcFreedBytes:
+ return std::make_optional(
+ statsd::ART_DATUM_REPORTED__KIND__ART_DATUM_GC_FULL_HEAP_COLLECTION_FREED_BYTES);
+ case DatumId::kFullGcDuration:
+ return std::make_optional(
+ statsd::ART_DATUM_REPORTED__KIND__ART_DATUM_GC_FULL_HEAP_COLLECTION_DURATION_MS);
}
}
@@ -226,8 +250,8 @@ class StatsdBackend : public MetricsBackend {
EncodeCompileFilter(session_data_.compiler_filter),
EncodeCompilationReason(session_data_.compilation_reason),
current_timestamp_,
- /*thread_type=*/0, // TODO: collect and report thread type (0 means UNKNOWN, but that
- // constant is not present in all branches)
+ 0, // TODO: collect and report thread type (0 means UNKNOWN, but that
+ // constant is not present in all branches)
datum_id.value(),
static_cast<int64_t>(value),
statsd::ART_DATUM_REPORTED__DEX_METADATA_TYPE__ART_DEX_METADATA_TYPE_UNKNOWN,
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index b0e77b45a6..a7faa376cd 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -36,12 +36,11 @@ inline uint32_t Array::ClassSize(PointerSize pointer_size) {
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size);
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline size_t Array::SizeOf() {
- // No read barrier is needed for reading a constant primitive field through
- // constant reference field chain. See ReadBarrierOption.
size_t component_size_shift =
- GetClass<kVerifyFlags, kWithoutReadBarrier>()->GetComponentSizeShift();
+ GetClass<kVerifyFlags, kReadBarrierOption>()
+ ->template GetComponentSizeShift<kReadBarrierOption>();
// Don't need to check this since we already check this in GetClass.
int32_t component_count =
GetLength<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>();
@@ -98,7 +97,7 @@ inline void PrimitiveArray<T>::SetWithoutChecks(int32_t i, T value) {
if (kTransactionActive) {
Runtime::Current()->RecordWriteArray(this, i, GetWithoutChecks(i));
}
- DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
+ DCHECK(CheckIsValidIndex<kVerifyFlags>(i)) << i << " " << GetLength<kVerifyFlags>();
GetData()[i] = value;
}
 // Backward copy where elements are aligned appropriately for T. Count is in T-sized units.
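For reference, the SizeOf() change earlier in this file only alters how the component size shift is loaded; the size arithmetic is unchanged. A back-of-envelope sketch of that computation, with assumed layout constants (ART's actual header size and alignment can differ per configuration):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kArrayHeaderSize = 12;  // assumed: object header + 32-bit length
    constexpr size_t kObjectAlignment = 8;   // assumed

    size_t ArraySizeOfSketch(int32_t length, size_t component_size_shift) {
      size_t data_size = static_cast<size_t>(length) << component_size_shift;
      size_t size = kArrayHeaderSize + data_size;
      // Round up to object alignment, as any allocator-visible size must be.
      return (size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
    }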
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 4bf9deebfe..dfe7d475c1 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -58,7 +58,8 @@ class MANAGED Array : public Object {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithoutReadBarrier>
size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int32_t GetLength() REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index b6bd22eb76..77f78c5156 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -1077,10 +1077,9 @@ inline size_t Class::GetComponentSize() {
return 1U << GetComponentSizeShift();
}
+template <ReadBarrierOption kReadBarrierOption>
inline size_t Class::GetComponentSizeShift() {
- // No read barrier is needed for reading a constant primitive field through
- // constant reference field. See ReadBarrierOption.
- return GetComponentType<kDefaultVerifyFlags, kWithoutReadBarrier>()->GetPrimitiveTypeSizeShift();
+ return GetComponentType<kDefaultVerifyFlags, kReadBarrierOption>()->GetPrimitiveTypeSizeShift();
}
inline bool Class::IsObjectClass() {
@@ -1106,11 +1105,9 @@ inline bool Class::IsArrayClass() {
return GetComponentType<kVerifyFlags, kWithoutReadBarrier>() != nullptr;
}
-template<VerifyObjectFlags kVerifyFlags>
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline bool Class::IsObjectArrayClass() {
- // We do not need a read barrier here as the primitive type is constant,
- // both from-space and to-space component type classes shall yield the same result.
- const ObjPtr<Class> component_type = GetComponentType<kVerifyFlags, kWithoutReadBarrier>();
+ const ObjPtr<Class> component_type = GetComponentType<kVerifyFlags, kReadBarrierOption>();
constexpr VerifyObjectFlags kNewFlags = RemoveThisFlags(kVerifyFlags);
return component_type != nullptr && !component_type->IsPrimitive<kNewFlags>();
}
diff --git a/runtime/mirror/class-refvisitor-inl.h b/runtime/mirror/class-refvisitor-inl.h
index 8c85387d6a..ee5c11f99f 100644
--- a/runtime/mirror/class-refvisitor-inl.h
+++ b/runtime/mirror/class-refvisitor-inl.h
@@ -51,22 +51,39 @@ inline void Class::VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
}
}
-template<ReadBarrierOption kReadBarrierOption, class Visitor>
+template<ReadBarrierOption kReadBarrierOption, bool kVisitProxyMethod, class Visitor>
void Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
VisitFields<kReadBarrierOption>([&](ArtField* field) REQUIRES_SHARED(art::Locks::mutator_lock_) {
field->VisitRoots(visitor);
- if (kIsDebugBuild && IsResolved()) {
+ if (kIsDebugBuild && !gUseUserfaultfd && IsResolved()) {
CHECK_EQ(field->GetDeclaringClass<kReadBarrierOption>(), this)
<< GetStatus() << field->GetDeclaringClass()->PrettyClass() << " != " << PrettyClass();
}
});
// Don't use VisitMethods because we don't want to hit the class-ext methods twice.
for (ArtMethod& method : GetMethods(pointer_size)) {
- method.VisitRoots<kReadBarrierOption>(visitor, pointer_size);
+ method.VisitRoots<kReadBarrierOption, kVisitProxyMethod>(visitor, pointer_size);
+ }
+ ObjPtr<ClassExt> ext(GetExtData<kDefaultVerifyFlags, kReadBarrierOption>());
+ if (!ext.IsNull()) {
+ ext->VisitNativeRoots<kReadBarrierOption, kVisitProxyMethod>(visitor, pointer_size);
+ }
+}
+
+template<ReadBarrierOption kReadBarrierOption>
+void Class::VisitObsoleteDexCaches(DexCacheVisitor& visitor) {
+ ObjPtr<ClassExt> ext(GetExtData<kDefaultVerifyFlags, kReadBarrierOption>());
+ if (!ext.IsNull()) {
+ ext->VisitDexCaches<kDefaultVerifyFlags, kReadBarrierOption>(visitor);
}
+}
+
+template<ReadBarrierOption kReadBarrierOption, class Visitor>
+void Class::VisitObsoleteClass(Visitor& visitor) {
ObjPtr<ClassExt> ext(GetExtData<kDefaultVerifyFlags, kReadBarrierOption>());
if (!ext.IsNull()) {
- ext->VisitNativeRoots<kReadBarrierOption, Visitor>(visitor, pointer_size);
+ ObjPtr<Class> klass = ext->GetObsoleteClass<kDefaultVerifyFlags, kReadBarrierOption>();
+ visitor(klass);
}
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 90efce556f..97af90a082 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -64,6 +64,7 @@ class Signature;
template<typename T> class StrideIterator;
template<size_t kNumReferences> class PACKED(4) StackHandleScope;
class Thread;
+class DexCacheVisitor;
namespace mirror {
@@ -236,6 +237,15 @@ class MANAGED Class final : public Object {
// Set access flags, recording the change if running inside a Transaction.
void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetInBootImageAndNotInPreloadedClasses() REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint32_t flags = GetAccessFlags();
+ SetAccessFlags(flags | kAccInBootImageAndNotInPreloadedClasses);
+ }
+
+ ALWAYS_INLINE bool IsInBootImageAndNotInPreloadedClasses() REQUIRES_SHARED(Locks::mutator_lock_) {
+ return (GetAccessFlags() & kAccInBootImageAndNotInPreloadedClasses) != 0;
+ }
+
// Returns true if the class is an enum.
ALWAYS_INLINE bool IsEnum() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccEnum) != 0;
@@ -486,6 +496,7 @@ class MANAGED Class final : public Object {
size_t GetComponentSize() REQUIRES_SHARED(Locks::mutator_lock_);
+ template<ReadBarrierOption kReadBarrierOption = kWithoutReadBarrier>
size_t GetComponentSizeShift() REQUIRES_SHARED(Locks::mutator_lock_);
bool IsObjectClass() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -495,7 +506,8 @@ class MANAGED Class final : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool IsInstantiable() REQUIRES_SHARED(Locks::mutator_lock_);
- template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithoutReadBarrier>
ALWAYS_INLINE bool IsObjectArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -553,7 +565,7 @@ class MANAGED Class final : public Object {
// The size of java.lang.Class.class.
static uint32_t ClassClassSize(PointerSize pointer_size) {
// The number of vtable entries in java.lang.Class.
- uint32_t vtable_entries = Object::kVTableLength + 67;
+ uint32_t vtable_entries = Object::kVTableLength + 72;
return ComputeClassSize(true, vtable_entries, 0, 0, 4, 1, 0, pointer_size);
}
@@ -570,6 +582,9 @@ class MANAGED Class final : public Object {
static constexpr MemberOffset ObjectSizeAllocFastPathOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_alloc_fast_path_);
}
+ static constexpr MemberOffset ClinitThreadIdOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_);
+ }
ALWAYS_INLINE void SetObjectSize(uint32_t new_object_size) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1170,10 +1185,19 @@ class MANAGED Class final : public Object {
// Visit native roots visits roots which are keyed off the native pointers such as ArtFields and
// ArtMethods.
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ bool kVisitProxyMethod = true,
+ class Visitor>
void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+  // Visit obsolete dex caches possibly stored in ext_data_.
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ void VisitObsoleteDexCaches(DexCacheVisitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
+ void VisitObsoleteClass(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Visit ArtMethods directly owned by this class.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
void VisitMethods(Visitor visitor, PointerSize pointer_size)
@@ -1417,7 +1441,7 @@ class MANAGED Class final : public Object {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// 'Class' Object Fields
- // Order governed by java field ordering. See art::ClassLinker::LinkFields.
+ // Order governed by java field ordering. See art::ClassLinker::LinkFieldsHelper::LinkFields.
// Defining class loader, or null for the "bootstrap" system loader.
HeapReference<ClassLoader> class_loader_;
diff --git a/runtime/mirror/class_ext-inl.h b/runtime/mirror/class_ext-inl.h
index ddd46b9bcb..9d6ac433e8 100644
--- a/runtime/mirror/class_ext-inl.h
+++ b/runtime/mirror/class_ext-inl.h
@@ -23,6 +23,7 @@
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/globals.h"
+#include "class_linker.h"
#include "handle_scope.h"
#include "jni/jni_internal.h"
#include "jni_id_type.h"
@@ -148,8 +149,9 @@ inline ObjPtr<Throwable> ClassExt::GetErroneousStateError() {
return GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(ClassExt, erroneous_state_error_));
}
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline ObjPtr<ObjectArray<DexCache>> ClassExt::GetObsoleteDexCaches() {
- return GetFieldObject<ObjectArray<DexCache>>(
+ return GetFieldObject<ObjectArray<DexCache>, kVerifyFlags, kReadBarrierOption>(
OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_dex_caches_));
}
@@ -164,13 +166,25 @@ inline ObjPtr<Object> ClassExt::GetOriginalDexFile() {
return GetFieldObject<Object>(OFFSET_OF_OBJECT_MEMBER(ClassExt, original_dex_file_));
}
-template<ReadBarrierOption kReadBarrierOption, class Visitor>
+template<ReadBarrierOption kReadBarrierOption, bool kVisitProxyMethod, class Visitor>
void ClassExt::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
VisitMethods<kReadBarrierOption>([&](ArtMethod* method) {
- method->VisitRoots<kReadBarrierOption>(visitor, pointer_size);
+ method->VisitRoots<kReadBarrierOption, kVisitProxyMethod>(visitor, pointer_size);
}, pointer_size);
}
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+void ClassExt::VisitDexCaches(DexCacheVisitor& visitor) {
+ ObjPtr<ObjectArray<DexCache>> arr(GetObsoleteDexCaches<kVerifyFlags, kReadBarrierOption>());
+ if (!arr.IsNull()) {
+ int32_t len = arr->GetLength();
+ for (int32_t i = 0; i < len; i++) {
+ ObjPtr<mirror::DexCache> dex_cache = arr->Get<kVerifyFlags, kReadBarrierOption>(i);
+ visitor.Visit(dex_cache);
+ }
+ }
+}
+
template<ReadBarrierOption kReadBarrierOption, class Visitor>
void ClassExt::VisitMethods(Visitor visitor, PointerSize pointer_size) {
ObjPtr<PointerArray> arr(GetObsoleteMethods<kDefaultVerifyFlags, kReadBarrierOption>());
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
index 4ce3b100c0..b025eb21af 100644
--- a/runtime/mirror/class_ext.h
+++ b/runtime/mirror/class_ext.h
@@ -27,6 +27,7 @@
namespace art {
struct ClassExtOffsets;
+class DexCacheVisitor;
namespace mirror {
@@ -46,6 +47,8 @@ class MANAGED ClassExt : public Object {
ObjPtr<Throwable> GetErroneousStateError() REQUIRES_SHARED(Locks::mutator_lock_);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<ObjectArray<DexCache>> GetObsoleteDexCaches() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -126,10 +129,21 @@ class MANAGED ClassExt : public Object {
static bool ExtendObsoleteArrays(Handle<ClassExt> h_this, Thread* self, uint32_t increase)
REQUIRES_SHARED(Locks::mutator_lock_);
- template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ bool kVisitProxyMethod = true,
+ class Visitor>
inline void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
+  // NO_THREAD_SAFETY_ANALYSIS because dex_lock and heap_bitmap_lock_ are both at a
+  // higher lock level than the class-table's lock (kClassLoaderClassesLock), which
+  // is already acquired at this point.
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ inline void VisitDexCaches(DexCacheVisitor& visitor)
+ NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
inline void VisitMethods(Visitor visitor, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -156,6 +170,10 @@ class MANAGED ClassExt : public Object {
bool EnsureJniIdsArrayPresent(MemberOffset off, size_t count)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Backing store of user-defined values pertaining to a class.
+ // Maintained by the ClassValue class.
+ HeapReference<Object> class_value_map_;
+
// The saved error for this class being erroneous.
HeapReference<Throwable> erroneous_state_error_;
@@ -181,9 +199,10 @@ class MANAGED ClassExt : public Object {
// classes sfields_ array or '0' if no id has been assigned to that field yet.
HeapReference<PointerArray> static_jfield_ids_;
+ int32_t pre_redefine_class_def_index_;
+
// Native pointer to DexFile and ClassDef index of this class before it was JVMTI-redefined.
int64_t pre_redefine_dex_file_ptr_;
- int32_t pre_redefine_class_def_index_;
friend struct art::ClassExtOffsets; // for verifying offset information
DISALLOW_IMPLICIT_CONSTRUCTORS(ClassExt);
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 2791fe33a5..b937c2cb95 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -60,7 +60,7 @@ T* DexCache::AllocArray(MemberOffset obj_offset, MemberOffset num_offset, size_t
return nullptr;
}
mirror::DexCache* dex_cache = this;
- if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
+ if (gUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
// Several code paths use DexCache without read-barrier for performance.
// We have to check the "to-space" object here to avoid allocating twice.
dex_cache = reinterpret_cast<DexCache*>(ReadBarrier::Mark(dex_cache));
@@ -405,20 +405,27 @@ inline void DexCache::VisitReferences(ObjPtr<Class> klass, const Visitor& visito
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
// Visit arrays after.
if (kVisitNativeRoots) {
- VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
- GetStrings<kVerifyFlags>(), NumStrings<kVerifyFlags>(), visitor);
+ VisitNativeRoots<kVerifyFlags, kReadBarrierOption>(visitor);
+ }
+}
+
+template <VerifyObjectFlags kVerifyFlags,
+ ReadBarrierOption kReadBarrierOption,
+ typename Visitor>
+inline void DexCache::VisitNativeRoots(const Visitor& visitor) {
+ VisitDexCachePairs<String, kReadBarrierOption, Visitor>(
+ GetStrings<kVerifyFlags>(), NumStrings<kVerifyFlags>(), visitor);
- VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
- GetResolvedTypes<kVerifyFlags>(), NumResolvedTypes<kVerifyFlags>(), visitor);
+ VisitDexCachePairs<Class, kReadBarrierOption, Visitor>(
+ GetResolvedTypes<kVerifyFlags>(), NumResolvedTypes<kVerifyFlags>(), visitor);
- VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
- GetResolvedMethodTypes<kVerifyFlags>(), NumResolvedMethodTypes<kVerifyFlags>(), visitor);
+ VisitDexCachePairs<MethodType, kReadBarrierOption, Visitor>(
+ GetResolvedMethodTypes<kVerifyFlags>(), NumResolvedMethodTypes<kVerifyFlags>(), visitor);
- GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites<kVerifyFlags>();
- size_t num_call_sites = NumResolvedCallSites<kVerifyFlags>();
- for (size_t i = 0; resolved_call_sites != nullptr && i != num_call_sites; ++i) {
- visitor.VisitRootIfNonNull(resolved_call_sites[i].AddressWithoutBarrier());
- }
+ GcRoot<mirror::CallSite>* resolved_call_sites = GetResolvedCallSites<kVerifyFlags>();
+ size_t num_call_sites = NumResolvedCallSites<kVerifyFlags>();
+ for (size_t i = 0; resolved_call_sites != nullptr && i != num_call_sites; ++i) {
+ visitor.VisitRootIfNonNull(resolved_call_sites[i].AddressWithoutBarrier());
}
}
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 6701405ab3..78c6bb566d 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -444,6 +444,12 @@ class MANAGED DexCache final : public Object {
ObjPtr<ClassLoader> GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
+ template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
+ typename Visitor>
+ void VisitNativeRoots(const Visitor& visitor)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+
private:
// Allocate new array in linear alloc and save it in the given fields.
template<typename T, size_t kMaxCacheSize>
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index c679fded32..318a811fa1 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -104,7 +104,7 @@ inline void Object::Wait(Thread* self, int64_t ms, int32_t ns) {
}
inline uint32_t Object::GetMarkBit() {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
return GetLockWord(false).MarkBitState();
}
@@ -880,7 +880,7 @@ inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& v
// inheritance hierarchy and find reference offsets the hard way. In the static case, just
// consider this class.
for (ObjPtr<Class> klass = kIsStatic
- ? AsClass<kVerifyFlags>()
+ ? ObjPtr<Class>::DownCast(this)
: GetClass<kVerifyFlags, kReadBarrierOption>();
klass != nullptr;
klass = kIsStatic ? nullptr : klass->GetSuperClass<kVerifyFlags, kReadBarrierOption>()) {
diff --git a/runtime/mirror/object-refvisitor-inl.h b/runtime/mirror/object-refvisitor-inl.h
index f98c433cdd..5251953859 100644
--- a/runtime/mirror/object-refvisitor-inl.h
+++ b/runtime/mirror/object-refvisitor-inl.h
@@ -90,6 +90,104 @@ inline void Object::VisitReferences(const Visitor& visitor,
}
}
+// May be called with the from-space address of the object, as we access the klass
+// and length (in the case of arrays/strings) and don't want to cause cascading faults.
+template <bool kFetchObjSize,
+ bool kVisitNativeRoots,
+ VerifyObjectFlags kVerifyFlags,
+ ReadBarrierOption kReadBarrierOption,
+ typename Visitor>
+inline size_t Object::VisitRefsForCompaction(const Visitor& visitor,
+ MemberOffset begin,
+ MemberOffset end) {
+ constexpr VerifyObjectFlags kSizeOfFlags = RemoveThisFlags(kVerifyFlags);
+ size_t size;
+ // We want to continue using pre-compact klass to avoid cascading faults.
+ ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
+ DCHECK(klass != nullptr) << "obj=" << this;
+ const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
+ if (LIKELY(class_flags == kClassFlagNormal)) {
+ DCHECK((!klass->IsVariableSize<kVerifyFlags>()));
+ VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
+ size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
+ DCHECK((!klass->IsClassClass<kVerifyFlags>()));
+ DCHECK(!klass->IsStringClass<kVerifyFlags>());
+ DCHECK(!klass->IsClassLoaderClass<kVerifyFlags>());
+ DCHECK((!klass->IsArrayClass<kVerifyFlags>()));
+ } else {
+ if ((class_flags & kClassFlagNoReferenceFields) == 0) {
+ DCHECK(!klass->IsStringClass<kVerifyFlags>());
+ if (class_flags == kClassFlagClass) {
+ DCHECK((klass->IsClassClass<kVerifyFlags>()));
+ ObjPtr<Class> as_klass = ObjPtr<Class>::DownCast(this);
+ as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass,
+ visitor);
+ size = kFetchObjSize ? as_klass->SizeOf<kSizeOfFlags>() : 0;
+ } else if (class_flags == kClassFlagObjectArray) {
+ DCHECK((klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
+ ObjPtr<ObjectArray<Object>> obj_arr = ObjPtr<ObjectArray<Object>>::DownCast(this);
+ obj_arr->VisitReferences(visitor, begin, end);
+ size = kFetchObjSize ? obj_arr->SizeOf<kSizeOfFlags, kReadBarrierOption>() : 0;
+ } else if ((class_flags & kClassFlagReference) != 0) {
+ VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
+        // Also visit the referent, as this is only about updating the reference;
+        // no reference processing happens here.
+ visitor(this, mirror::Reference::ReferentOffset(), /* is_static= */ false);
+ size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
+ } else if (class_flags == kClassFlagDexCache) {
+ ObjPtr<DexCache> const dex_cache = ObjPtr<DexCache>::DownCast(this);
+ dex_cache->VisitReferences<kVisitNativeRoots,
+ kVerifyFlags,
+ kReadBarrierOption>(klass, visitor);
+ size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
+ } else {
+ ObjPtr<ClassLoader> const class_loader = ObjPtr<ClassLoader>::DownCast(this);
+ class_loader->VisitReferences<kVisitNativeRoots,
+ kVerifyFlags,
+ kReadBarrierOption>(klass, visitor);
+ size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
+ }
+ } else {
+ DCHECK((!klass->IsClassClass<kVerifyFlags>()));
+ DCHECK((!klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
+ if ((class_flags & kClassFlagString) != 0) {
+ size = kFetchObjSize ? static_cast<String*>(this)->SizeOf<kSizeOfFlags>() : 0;
+ } else if (klass->IsArrayClass<kVerifyFlags>()) {
+ // TODO: We can optimize this by implementing a SizeOf() version which takes
+ // component-size-shift as an argument, thereby avoiding multiple loads of
+ // component_type.
+ size = kFetchObjSize
+ ? static_cast<Array*>(this)->SizeOf<kSizeOfFlags, kReadBarrierOption>()
+ : 0;
+ } else {
+ DCHECK_EQ(class_flags, kClassFlagNoReferenceFields)
+ << "class_flags: " << std::hex << class_flags;
+ // Only possibility left is of a normal klass instance with no references.
+ size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
+ }
+
+ if (kIsDebugBuild) {
+ // String still has instance fields for reflection purposes but these don't exist in
+ // actual string instances.
+ if (!klass->IsStringClass<kVerifyFlags>()) {
+ size_t total_reference_instance_fields = 0;
+ ObjPtr<Class> super_class = klass;
+ do {
+ total_reference_instance_fields +=
+ super_class->NumReferenceInstanceFields<kVerifyFlags>();
+ super_class = super_class->GetSuperClass<kVerifyFlags, kReadBarrierOption>();
+ } while (super_class != nullptr);
+ // The only reference field should be the object's class, and that field is
+ // visited at the end of this function.
+ CHECK_EQ(total_reference_instance_fields, 1u);
+ }
+ }
+ }
+ }
+ visitor(this, ClassOffset(), /* is_static= */ false);
+ return size;
+}
+
} // namespace mirror
} // namespace art
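For readers skimming the new VisitRefsForCompaction above, the control flow is a flags-based dispatch: one LIKELY fast path for ordinary instances, then special cases keyed off the class-flags word. A simplified standalone sketch of that shape (all types and flag values here are illustrative stand-ins, not ART's):

#include <cstdint>
#include <cstdio>
#include <functional>

// Illustrative stand-ins for ART's class flags (values made up).
constexpr uint32_t kFlagNormal            = 0x0;
constexpr uint32_t kFlagNoReferenceFields = 0x1;
constexpr uint32_t kFlagObjectArray       = 0x8;

struct FakeClass { uint32_t flags; };
struct FakeObject { FakeClass* klass; };

// Same shape as VisitRefsForCompaction: test the common case first, then
// cascade through the special cases keyed off a single flags word.
void VisitRefs(FakeObject* obj, const std::function<void(const char*)>& visit) {
  const uint32_t flags = obj->klass->flags;
  if (flags == kFlagNormal) {
    visit("instance fields");          // LIKELY fast path
  } else if ((flags & kFlagNoReferenceFields) == 0) {
    if (flags == kFlagObjectArray) {
      visit("array elements");
    } else {
      visit("special-cased fields");   // Class, DexCache, ClassLoader, ...
    }
  }                                    // else: nothing but the klass to visit
  visit("klass field");                // every object references its class
}

int main() {
  FakeClass array_class{kFlagObjectArray};
  FakeObject obj{&array_class};
  VisitRefs(&obj, [](const char* what) { std::printf("visit: %s\n", what); });
  return 0;
}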
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index ede1c66577..bb9e85dd0e 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -115,7 +115,7 @@ ObjPtr<Object> Object::CopyObject(ObjPtr<mirror::Object> dest,
}
}
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// We need a RB here. After copying the whole object above, copy references fields one by one
// again with a RB to make sure there are no from space refs. TODO: Optimize this later?
CopyReferenceFieldsWithReadBarrierVisitor visitor(dest);
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index ac7274588d..0ba545becc 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -647,6 +647,17 @@ class MANAGED LOCKABLE Object {
typename JavaLangRefVisitor = VoidFunctor>
void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
NO_THREAD_SAFETY_ANALYSIS;
+ // VisitReferences version for compaction. It is invoked on a from-space
+ // object, so that portions of the object, like klass and length (for arrays),
+ // can be accessed without causing cascading faults.
+ template <bool kFetchObjSize = true,
+ bool kVisitNativeRoots = false,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithFromSpaceBarrier,
+ typename Visitor>
+ size_t VisitRefsForCompaction(const Visitor& visitor,
+ MemberOffset begin,
+ MemberOffset end) NO_THREAD_SAFETY_ANALYSIS;
ArtField* FindFieldByOffset(MemberOffset offset) REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index e4fe03b357..87f24eb230 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -121,7 +121,7 @@ inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos,
if (copy_forward) {
// Forward copy.
bool baker_non_gray_case = false;
- if (kUseReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
uintptr_t fake_address_dependency;
if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
baker_non_gray_case = true;
@@ -146,7 +146,7 @@ inline void ObjectArray<T>::AssignableMemmove(int32_t dst_pos,
} else {
// Backward copy.
bool baker_non_gray_case = false;
- if (kUseReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
uintptr_t fake_address_dependency;
if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
baker_non_gray_case = true;
@@ -196,7 +196,7 @@ inline void ObjectArray<T>::AssignableMemcpy(int32_t dst_pos,
// We can't use memmove since it does not handle read barriers and may copy byte by byte.
// See b/32012820.
bool baker_non_gray_case = false;
- if (kUseReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
uintptr_t fake_address_dependency;
if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
baker_non_gray_case = true;
@@ -244,7 +244,7 @@ inline void ObjectArray<T>::AssignableCheckingMemcpy(int32_t dst_pos,
ObjPtr<T> o = nullptr;
int i = 0;
bool baker_non_gray_case = false;
- if (kUseReadBarrier && kUseBakerReadBarrier) {
+ if (gUseReadBarrier && kUseBakerReadBarrier) {
uintptr_t fake_address_dependency;
if (!ReadBarrier::IsGray(src.Ptr(), &fake_address_dependency)) {
baker_non_gray_case = true;
@@ -327,7 +327,20 @@ template<class T> template<typename Visitor>
inline void ObjectArray<T>::VisitReferences(const Visitor& visitor) {
const size_t length = static_cast<size_t>(GetLength());
for (size_t i = 0; i < length; ++i) {
- visitor(this, OffsetOfElement(i), false);
+ visitor(this, OffsetOfElement(i), /* is_static= */ false);
+ }
+}
+
+template<class T> template<typename Visitor>
+inline void ObjectArray<T>::VisitReferences(const Visitor& visitor,
+ MemberOffset begin,
+ MemberOffset end) {
+ const size_t length = static_cast<size_t>(GetLength());
+ begin = std::max(begin, OffsetOfElement(0));
+ end = std::min(end, OffsetOfElement(length));
+ while (begin < end) {
+ visitor(this, begin, /* is_static= */ false, /* is_obj_array= */ true);
+ begin += kHeapReferenceSize;
}
}
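The clamping above is the subtle part of the ranged visit: begin and end arrive as raw byte offsets from the compaction code and may extend past the element area. A minimal standalone sketch of the same arithmetic (offsets and sizes here are illustrative stand-ins, not ART's layout):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative stand-ins: element 0 at byte offset 16, 4-byte heap references.
constexpr size_t kFirstElementOffset = 16;
constexpr size_t kHeapRefSize = 4;

template <typename Visitor>
void VisitRange(size_t length, size_t begin, size_t end, Visitor&& visit) {
  // Clamp the requested byte range to the slots actually holding elements,
  // mirroring the std::max/std::min pair in ObjectArray<T>::VisitReferences.
  begin = std::max(begin, kFirstElementOffset);
  end = std::min(end, kFirstElementOffset + length * kHeapRefSize);
  while (begin < end) {
    visit(begin);
    begin += kHeapRefSize;
  }
}

int main() {
  // Requesting [0, 1000) on a 5-element array visits offsets 16, 20, ..., 32.
  VisitRange(5, 0, 1000, [](size_t off) { std::printf("slot @ %zu\n", off); });
  return 0;
}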
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index a20c86b82e..9a53708018 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -150,6 +150,10 @@ class MANAGED ObjectArray: public Array {
// REQUIRES_SHARED(Locks::mutator_lock_).
template<typename Visitor>
void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
+ template<typename Visitor>
+ void VisitReferences(const Visitor& visitor,
+ MemberOffset begin,
+ MemberOffset end) NO_THREAD_SAFETY_ANALYSIS;
friend class Object; // For VisitReferences
DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectArray);
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 386244d643..4c3a5dc978 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -50,6 +50,7 @@ constexpr bool IsMirroredDescriptor(std::string_view desc) {
vis("Ljava/lang/ClassNotFoundException;") \
vis("Ljava/lang/DexCache;") \
vis("Ljava/lang/Object;") \
+ vis("Ljava/lang/StackFrameInfo;") \
vis("Ljava/lang/StackTraceElement;") \
vis("Ljava/lang/String;") \
vis("Ljava/lang/Throwable;") \
diff --git a/runtime/mirror/stack_frame_info.cc b/runtime/mirror/stack_frame_info.cc
new file mode 100644
index 0000000000..dd3e8f7010
--- /dev/null
+++ b/runtime/mirror/stack_frame_info.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "stack_frame_info.h"
+
+#include "class-alloc-inl.h"
+#include "class.h"
+#include "class_root-inl.h"
+#include "gc/accounting/card_table-inl.h"
+#include "handle_scope-inl.h"
+#include "object-inl.h"
+#include "string.h"
+
+namespace art {
+namespace mirror {
+
+void StackFrameInfo::AssignFields(Handle<Class> declaring_class,
+ Handle<MethodType> method_type,
+ Handle<String> method_name,
+ Handle<String> file_name,
+ int32_t line_number,
+ int32_t dex_pc) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetFields<true>(declaring_class.Get(), method_type.Get(), method_name.Get(),
+ file_name.Get(), line_number, dex_pc);
+ } else {
+ SetFields<false>(declaring_class.Get(), method_type.Get(), method_name.Get(),
+ file_name.Get(), line_number, dex_pc);
+ }
+}
+
+template<bool kTransactionActive>
+void StackFrameInfo::SetFields(ObjPtr<Class> declaring_class,
+ ObjPtr<MethodType> method_type,
+ ObjPtr<String> method_name,
+ ObjPtr<String> file_name,
+ int32_t line_number,
+ int32_t bci) {
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackFrameInfo, declaring_class_),
+ declaring_class);
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackFrameInfo, method_type_),
+ method_type);
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackFrameInfo, method_name_),
+ method_name);
+ SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackFrameInfo, file_name_),
+ file_name);
+ SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackFrameInfo, line_number_),
+ line_number);
+ SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackFrameInfo, bci_),
+ bci);
+}
+
+} // namespace mirror
+} // namespace art
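AssignFields above hoists the is-a-transaction-active check out of the field setters by turning it into a template parameter, so each specialization is branch-free. A hedged standalone sketch of the pattern (names here are illustrative, not ART API):

#include <cstdio>

// One compile-time flag selects a branch-free specialization of the setter.
template <bool kTransactionActive>
void SetField(int value) {
  if constexpr (kTransactionActive) {
    std::printf("record undo entry, then set %d\n", value);  // transactional path
  } else {
    std::printf("set %d directly\n", value);                 // fast path
  }
}

// The single runtime branch lives at the entry point, as in AssignFields.
void Assign(bool transaction_active, int value) {
  if (transaction_active) {
    SetField<true>(value);
  } else {
    SetField<false>(value);
  }
}

int main() {
  Assign(/*transaction_active=*/false, 42);
  Assign(/*transaction_active=*/true, 7);
  return 0;
}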
diff --git a/runtime/mirror/stack_frame_info.h b/runtime/mirror/stack_frame_info.h
new file mode 100644
index 0000000000..24f8c8f1e9
--- /dev/null
+++ b/runtime/mirror/stack_frame_info.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_STACK_FRAME_INFO_H_
+#define ART_RUNTIME_MIRROR_STACK_FRAME_INFO_H_
+
+#include "method_type.h"
+#include "object.h"
+#include "stack_trace_element.h"
+
+namespace art {
+
+template<class T> class Handle;
+struct StackFrameInfoOffsets;
+
+namespace mirror {
+
+// C++ mirror of java.lang.StackFrameInfo
+class MANAGED StackFrameInfo final : public Object {
+ public:
+ MIRROR_CLASS("Ljava/lang/StackFrameInfo;");
+
+ void AssignFields(Handle<Class> declaring_class,
+ Handle<MethodType> method_type,
+ Handle<String> method_name,
+ Handle<String> file_name,
+ int32_t line_number,
+ int32_t dex_pc)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+ // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
+ HeapReference<Class> declaring_class_;
+ HeapReference<String> file_name_;
+ HeapReference<String> method_name_;
+ HeapReference<Class> method_type_;
+ HeapReference<StackTraceElement> ste_;
+ int32_t bci_;
+ int32_t line_number_;
+ bool retain_class_ref_;
+
+ template<bool kTransactionActive>
+ void SetFields(ObjPtr<Class> declaring_class,
+ ObjPtr<MethodType> method_type,
+ ObjPtr<String> method_name,
+ ObjPtr<String> file_name,
+ int32_t line_number,
+ int32_t bci)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
+ friend struct art::StackFrameInfoOffsets; // for verifying offset information
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StackFrameInfo);
+};
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_STACK_FRAME_INFO_H_
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index 5903872284..8fb4415704 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -35,9 +35,9 @@ inline uint32_t String::ClassSize(PointerSize pointer_size) {
// lambda$codePoints$1$CharSequence
// which were virtual functions in standalone desugar, becomes
// direct functions with D8 desugaring.
- uint32_t vtable_entries = Object::kVTableLength + 60;
+ uint32_t vtable_entries = Object::kVTableLength + 64;
#else
- uint32_t vtable_entries = Object::kVTableLength + 62;
+ uint32_t vtable_entries = Object::kVTableLength + 66;
#endif
return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 2, pointer_size);
}
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index d36a2abadc..68d329d15b 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -205,7 +205,7 @@ static ObjPtr<Class> GetReturnType(VarHandle::AccessModeTemplate access_mode_tem
// Method to insert a read barrier for accessors to reference fields.
inline void ReadBarrierForVarHandleAccess(ObjPtr<Object> obj, MemberOffset field_offset)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// We need to ensure that the reference stored in the field is a to-space one before attempting
// the CompareAndSet/CompareAndExchange/Exchange operation otherwise it will fail incorrectly
// if obj is in the process of being moved.
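The rationale in this comment can be made concrete: a compare-and-set whose expected value is a stale from-space pointer fails even though it names the same logical object, which is why the reference must be converted to a to-space one first. A standalone sketch using plain std::atomic (no ART types):

#include <atomic>
#include <cstdio>

struct Obj { int payload; };

int main() {
  Obj from_space{1};
  Obj to_space = from_space;           // the copying GC moved the object
  std::atomic<Obj*> field{&to_space};  // the field already holds the to-space ref

  // CAS with the stale from-space pointer as 'expected' fails, although both
  // pointers name the same logical object.
  Obj* expected = &from_space;
  Obj replacement{2};
  bool ok = field.compare_exchange_strong(expected, &replacement);
  std::printf("stale expected:    CAS %s\n", ok ? "succeeded" : "failed");

  // What a read barrier provides up front: the canonical to-space pointer.
  expected = &to_space;
  ok = field.compare_exchange_strong(expected, &replacement);
  std::printf("to-space expected: CAS %s\n", ok ? "succeeded" : "failed");
  return 0;
}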
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 0cad79b6e3..4e64c95b8c 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1139,7 +1139,7 @@ ObjPtr<mirror::Object> Monitor::MonitorEnter(Thread* self,
lock_word.GCState()));
// Only this thread pays attention to the count. Thus there is no need for stronger
// than relaxed memory ordering.
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
h_obj->SetLockWord(thin_locked, /* as_volatile= */ false);
AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
return h_obj.Get(); // Success!
@@ -1239,7 +1239,7 @@ bool Monitor::MonitorExit(Thread* self, ObjPtr<mirror::Object> obj) {
} else {
new_lw = LockWord::FromDefault(lock_word.GCState());
}
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
DCHECK_EQ(new_lw.ReadBarrierState(), 0U);
// TODO: This really only needs memory_order_release, but we currently have
// no way to specify that. In fact there seem to be no legitimate uses of SetLockWord
@@ -1409,7 +1409,7 @@ ThreadState Monitor::FetchState(const Thread* thread,
{
ObjPtr<mirror::Object> lock_object = thread->GetMonitorEnterObject();
if (lock_object != nullptr) {
- if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
+ if (gUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
// We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
// may have not been flipped yet and "pretty_object" may be a from-space (stale) ref, in
// which case the GetLockOwnerThreadId() call below will crash. So explicitly mark/forward
@@ -1613,13 +1613,13 @@ MonitorList::~MonitorList() {
}
void MonitorList::DisallowNewMonitors() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
MutexLock mu(Thread::Current(), monitor_list_lock_);
allow_new_monitors_ = false;
}
void MonitorList::AllowNewMonitors() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
Thread* self = Thread::Current();
MutexLock mu(self, monitor_list_lock_);
allow_new_monitors_ = true;
@@ -1637,8 +1637,8 @@ void MonitorList::Add(Monitor* m) {
MutexLock mu(self, monitor_list_lock_);
// CMS needs this to block for concurrent reference processing because an object allocated during
// the GC won't be marked and concurrent reference processing would incorrectly clear the JNI weak
- // ref. But CC (kUseReadBarrier == true) doesn't because of the to-space invariant.
- while (!kUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
+ // ref. But CC (gUseReadBarrier == true) doesn't because of the to-space invariant.
+ while (!gUseReadBarrier && UNLIKELY(!allow_new_monitors_)) {
// Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
// presence of threads blocking for weak ref access.
self->CheckEmptyCheckpointFromWeakRefAccess(&monitor_list_lock_);
diff --git a/runtime/monitor_objects_stack_visitor.cc b/runtime/monitor_objects_stack_visitor.cc
index 2e75e37bd1..524c0ec62f 100644
--- a/runtime/monitor_objects_stack_visitor.cc
+++ b/runtime/monitor_objects_stack_visitor.cc
@@ -90,7 +90,7 @@ bool MonitorObjectsStackVisitor::VisitFrame() {
void MonitorObjectsStackVisitor::VisitLockedObject(ObjPtr<mirror::Object> o, void* context) {
MonitorObjectsStackVisitor* self = reinterpret_cast<MonitorObjectsStackVisitor*>(context);
if (o != nullptr) {
- if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
+ if (gUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
// We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
// may have not been flipped yet and "o" may be a from-space (stale) ref, in which case the
// IdentityHashCode call below will crash. So explicitly mark/forward it here.
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index db5d420035..e75afd25f8 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -41,7 +41,7 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version);
#include "dex/dex_file-inl.h"
#include "dex/dex_file_types.h"
#include "gc/accounting/card_table-inl.h"
-#include "gc/allocator/dlmalloc.h"
+#include "gc/allocator/art-dlmalloc.h"
#include "gc/heap.h"
#include "gc/space/dlmalloc_space.h"
#include "gc/space/image_space.h"
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index da42e61ce1..6434e63c98 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -32,6 +32,7 @@
#include "hidden_api.h"
#include "jni/jni_internal.h"
#include "mirror/class-alloc-inl.h"
+#include "mirror/class_ext.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field.h"
@@ -750,6 +751,71 @@ static jclass Class_getDeclaringClass(JNIEnv* env, jobject javaThis) {
return soa.AddLocalReference<jclass>(annotations::GetDeclaringClass(klass));
}
+static jclass Class_getNestHostFromAnnotation(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsObsoleteObject()) {
+ ThrowRuntimeException("Obsolete Object!");
+ return nullptr;
+ }
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return nullptr;
+ }
+ ObjPtr<mirror::Class> hostClass = annotations::GetNestHost(klass);
+ if (hostClass == nullptr) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jclass>(hostClass);
+}
+
+static jobjectArray Class_getNestMembersFromAnnotation(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsObsoleteObject()) {
+ ThrowRuntimeException("Obsolete Object!");
+ return nullptr;
+ }
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return nullptr;
+ }
+ ObjPtr<mirror::ObjectArray<mirror::Class>> classes = annotations::GetNestMembers(klass);
+ if (classes == nullptr) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jobjectArray>(classes);
+}
+
+static jobjectArray Class_getPermittedSubclassesFromAnnotation(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+ if (klass->IsObsoleteObject()) {
+ ThrowRuntimeException("Obsolete Object!");
+ return nullptr;
+ }
+ if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
+ return nullptr;
+ }
+ ObjPtr<mirror::ObjectArray<mirror::Class>> classes = annotations::GetPermittedSubclasses(klass);
+ if (classes == nullptr) {
+ return nullptr;
+ }
+ return soa.AddLocalReference<jobjectArray>(classes);
+}
+
+static jobject Class_ensureExtDataPresent(JNIEnv* env, jobject javaThis) {
+ ScopedFastNativeObjectAccess soa(env);
+ StackHandleScope<2> hs(soa.Self());
+ Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
+
+ ObjPtr<mirror::Object> extDataPtr =
+ mirror::Class::EnsureExtDataPresent(klass, Thread::Current());
+
+ return soa.AddLocalReference<jobject>(extDataPtr);
+}
+
static jobject Class_newInstance(JNIEnv* env, jobject javaThis) {
ScopedFastNativeObjectAccess soa(env);
StackHandleScope<4> hs(soa.Self());
@@ -841,6 +907,7 @@ static jobject Class_newInstance(JNIEnv* env, jobject javaThis) {
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Class, classForName,
"(Ljava/lang/String;ZLjava/lang/ClassLoader;)Ljava/lang/Class;"),
+ FAST_NATIVE_METHOD(Class, ensureExtDataPresent, "()Ldalvik/system/ClassExt;"),
FAST_NATIVE_METHOD(Class, getDeclaredAnnotation,
"(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"),
FAST_NATIVE_METHOD(Class, getDeclaredAnnotations, "()[Ljava/lang/annotation/Annotation;"),
@@ -865,6 +932,9 @@ static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(Class, getInterfacesInternal, "()[Ljava/lang/Class;"),
FAST_NATIVE_METHOD(Class, getPrimitiveClass, "(Ljava/lang/String;)Ljava/lang/Class;"),
FAST_NATIVE_METHOD(Class, getNameNative, "()Ljava/lang/String;"),
+ FAST_NATIVE_METHOD(Class, getNestHostFromAnnotation, "()Ljava/lang/Class;"),
+ FAST_NATIVE_METHOD(Class, getNestMembersFromAnnotation, "()[Ljava/lang/Class;"),
+ FAST_NATIVE_METHOD(Class, getPermittedSubclassesFromAnnotation, "()[Ljava/lang/Class;"),
FAST_NATIVE_METHOD(Class, getPublicDeclaredFields, "()[Ljava/lang/reflect/Field;"),
FAST_NATIVE_METHOD(Class, getSignatureAnnotation, "()[Ljava/lang/String;"),
FAST_NATIVE_METHOD(Class, isAnonymousClass, "()Z"),
diff --git a/runtime/native/java_lang_StackStreamFactory.cc b/runtime/native/java_lang_StackStreamFactory.cc
new file mode 100644
index 0000000000..f876c1014b
--- /dev/null
+++ b/runtime/native/java_lang_StackStreamFactory.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "java_lang_StackStreamFactory.h"
+
+#include "nativehelper/jni_macros.h"
+
+#include "jni/jni_internal.h"
+#include "native_util.h"
+#include "scoped_fast_native_object_access-inl.h"
+#include "thread.h"
+
+namespace art {
+
+static jobject StackStreamFactory_nativeGetStackAnchor(JNIEnv* env, jclass) {
+ ScopedFastNativeObjectAccess soa(env);
+ return soa.Self()->CreateInternalStackTrace(soa);
+}
+
+static jint StackStreamFactory_nativeFetchStackFrameInfo(JNIEnv* env, jclass,
+ jlong mode, jobject anchor, jint startLevel, jint batchSize, jint startBufferIndex,
+ jobjectArray frameBuffer) {
+ if (anchor == nullptr) {
+ return startLevel;
+ }
+ ScopedFastNativeObjectAccess soa(env);
+ return Thread::InternalStackTraceToStackFrameInfoArray(soa, mode, anchor,
+ startLevel, batchSize, startBufferIndex, frameBuffer);
+}
+
+static JNINativeMethod gMethods[] = {
+ FAST_NATIVE_METHOD(StackStreamFactory, nativeGetStackAnchor, "()Ljava/lang/Object;"),
+ FAST_NATIVE_METHOD(StackStreamFactory, nativeFetchStackFrameInfo, "(JLjava/lang/Object;III[Ljava/lang/Object;)I"),
+};
+
+void register_java_lang_StackStreamFactory(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("java/lang/StackStreamFactory");
+}
+
+} // namespace art
diff --git a/runtime/native/java_lang_StackStreamFactory.h b/runtime/native/java_lang_StackStreamFactory.h
new file mode 100644
index 0000000000..2216871ebf
--- /dev/null
+++ b/runtime/native/java_lang_StackStreamFactory.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_STACKSTREAMFACTORY_H_
+#define ART_RUNTIME_NATIVE_JAVA_LANG_STACKSTREAMFACTORY_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_java_lang_StackStreamFactory(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_JAVA_LANG_STACKSTREAMFACTORY_H_
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
index f23010bf48..8b5635d2ec 100644
--- a/runtime/native/java_lang_ref_Reference.cc
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -37,7 +37,7 @@ static jobject Reference_getReferent(JNIEnv* env, jobject javaThis) {
}
static jboolean Reference_refersTo0(JNIEnv* env, jobject javaThis, jobject o) {
- if (kUseReadBarrier && !kUseBakerReadBarrier) {
+ if (gUseReadBarrier && !kUseBakerReadBarrier) {
// Fall back to naive implementation that may block and needlessly preserve javaThis.
return env->IsSameObject(Reference_getReferent(env, javaThis), o);
}
@@ -48,7 +48,7 @@ static jboolean Reference_refersTo0(JNIEnv* env, jobject javaThis, jobject o) {
if (referent == other) {
return JNI_TRUE;
}
- if (!kUseReadBarrier || referent.IsNull() || other.IsNull()) {
+ if (!gUseReadBarrier || referent.IsNull() || other.IsNull()) {
return JNI_FALSE;
}
// Explicitly handle the case in which referent is a from-space pointer. Don't use a
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 2c0dd806e1..706f1a61ba 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -80,6 +80,7 @@ static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
}
}
+NO_STACK_PROTECTOR
static jobject Method_invoke(JNIEnv* env, jobject javaMethod, jobject javaReceiver,
jobjectArray javaArgs) {
ScopedFastNativeObjectAccess soa(env);
diff --git a/runtime/native/jdk_internal_misc_Unsafe.cc b/runtime/native/jdk_internal_misc_Unsafe.cc
index 307a2fa8b9..e70873289c 100644
--- a/runtime/native/jdk_internal_misc_Unsafe.cc
+++ b/runtime/native/jdk_internal_misc_Unsafe.cc
@@ -99,7 +99,7 @@ static jboolean Unsafe_compareAndSetObject(JNIEnv* env, jobject, jobject javaObj
ObjPtr<mirror::Object> expectedValue = soa.Decode<mirror::Object>(javaExpectedValue);
ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
// JNI must use non transactional mode.
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// Need to make sure the reference stored in the field is a to-space one before attempting the
// CAS or the CAS could fail incorrectly.
// Note that the read barrier load does NOT need to be volatile.
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index e9c5af013d..1781a29a27 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -69,7 +69,7 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb
ObjPtr<mirror::Object> expectedValue = soa.Decode<mirror::Object>(javaExpectedValue);
ObjPtr<mirror::Object> newValue = soa.Decode<mirror::Object>(javaNewValue);
// JNI must use non transactional mode.
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
// Need to make sure the reference stored in the field is a to-space one before attempting the
// CAS or the CAS could fail incorrectly.
// Note that the read barrier load does NOT need to be volatile.
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index b7c3665530..30b5ee6d54 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -24,8 +24,7 @@
#include "art_method.h"
// For DumpNativeStack.
-#include <backtrace/Backtrace.h>
-#include <backtrace/BacktraceMap.h>
+#include <unwindstack/AndroidUnwinder.h>
#if defined(__linux__)
@@ -69,13 +68,16 @@ using android::base::StringPrintf;
static constexpr bool kUseAddr2line = !kIsTargetBuild;
std::string FindAddr2line() {
-#ifdef ART_CLANG_PATH
+#if !defined(ART_TARGET) && !defined(ART_CLANG_PATH)
+ #error "ART_CLANG_PATH must be defined on host build"
+#endif
+#if defined(ART_CLANG_PATH)
const char* env_value = getenv("ANDROID_BUILD_TOP");
if (env_value != nullptr) {
return std::string(env_value) + "/" + ART_CLANG_PATH + "/bin/llvm-addr2line";
}
#endif
- return std::string("/usr/bin/addr2line");
+ return std::string("llvm-addr2line");
}
ALWAYS_INLINE
@@ -321,27 +323,33 @@ static bool PcIsWithinQuickCode(ArtMethod* method, uintptr_t pc) NO_THREAD_SAFET
void DumpNativeStack(std::ostream& os,
pid_t tid,
- BacktraceMap* existing_map,
+ const char* prefix,
+ ArtMethod* current_method,
+ void* ucontext_ptr,
+ bool skip_frames) {
+ unwindstack::AndroidLocalUnwinder unwinder;
+ DumpNativeStack(os, unwinder, tid, prefix, current_method, ucontext_ptr, skip_frames);
+}
+
+void DumpNativeStack(std::ostream& os,
+ unwindstack::AndroidLocalUnwinder& unwinder,
+ pid_t tid,
const char* prefix,
ArtMethod* current_method,
void* ucontext_ptr,
bool skip_frames) {
// Historical note: This was disabled when running under Valgrind (b/18119146).
- BacktraceMap* map = existing_map;
- std::unique_ptr<BacktraceMap> tmp_map;
- if (map == nullptr) {
- tmp_map.reset(BacktraceMap::Create(getpid()));
- map = tmp_map.get();
+ unwindstack::AndroidUnwinderData data(!skip_frames /*show_all_frames*/);
+ bool unwind_ret;
+ if (ucontext_ptr != nullptr) {
+ unwind_ret = unwinder.Unwind(ucontext_ptr, data);
+ } else {
+ unwind_ret = unwinder.Unwind(tid, data);
}
- std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid, map));
- backtrace->SetSkipFrames(skip_frames);
- if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
- os << prefix << "(backtrace::Unwind failed for thread " << tid
- << ": " << backtrace->GetErrorString(backtrace->GetError()) << ")" << std::endl;
- return;
- } else if (backtrace->NumFrames() == 0) {
- os << prefix << "(no native stack frames for thread " << tid << ")" << std::endl;
+ if (!unwind_ret) {
+ os << prefix << "(Unwind failed for thread " << tid << ": "
+ << data.GetErrorString() << ")" << std::endl;
return;
}
@@ -356,9 +364,8 @@ void DumpNativeStack(std::ostream& os,
}
std::unique_ptr<Addr2linePipe> addr2line_state;
-
- for (Backtrace::const_iterator it = backtrace->begin();
- it != backtrace->end(); ++it) {
+ data.DemangleFunctionNames();
+ for (const unwindstack::FrameData& frame : data.frames) {
// We produce output like this:
// ] #00 pc 000075bb8 /system/lib/libc.so (unwind_backtrace_thread+536)
// In order for parsing tools to continue to function, the stack dump
@@ -367,53 +374,55 @@ void DumpNativeStack(std::ostream& os,
// The parsers require a single space before and after pc, and two spaces
// after the <RELATIVE_ADDR>. There can be any prefix data before the
// #XX. <RELATIVE_ADDR> has to be a hex number but with no 0x prefix.
- os << prefix << StringPrintf("#%02zu pc ", it->num);
+ os << prefix << StringPrintf("#%02zu pc ", frame.num);
bool try_addr2line = false;
- if (!BacktraceMap::IsValid(it->map)) {
+ if (frame.map_info == nullptr) {
os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIx64 " ???"
: "%08" PRIx64 " ???",
- it->pc);
+ frame.pc);
} else {
os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIx64 " "
: "%08" PRIx64 " ",
- it->rel_pc);
- if (it->map.name.empty()) {
- os << StringPrintf("<anonymous:%" PRIx64 ">", it->map.start);
+ frame.rel_pc);
+ const std::shared_ptr<unwindstack::MapInfo>& map_info = frame.map_info;
+ if (map_info->name().empty()) {
+ os << StringPrintf("<anonymous:%" PRIx64 ">", map_info->start());
} else {
- os << it->map.name;
+ os << map_info->name().c_str();
}
- if (it->map.offset != 0) {
- os << StringPrintf(" (offset %" PRIx64 ")", it->map.offset);
+ if (map_info->elf_start_offset() != 0) {
+ os << StringPrintf(" (offset %" PRIx64 ")", map_info->elf_start_offset());
}
os << " (";
- if (!it->func_name.empty()) {
- os << it->func_name;
- if (it->func_offset != 0) {
- os << "+" << it->func_offset;
+ if (!frame.function_name.empty()) {
+ os << frame.function_name.c_str();
+ if (frame.function_offset != 0) {
+ os << "+" << frame.function_offset;
}
// Functions found using the gdb jit interface will be in an empty
// map that cannot be found using addr2line.
- if (!it->map.name.empty()) {
+ if (!map_info->name().empty()) {
try_addr2line = true;
}
} else if (current_method != nullptr &&
Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
- PcIsWithinQuickCode(current_method, it->pc)) {
+ PcIsWithinQuickCode(current_method, frame.pc)) {
const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
os << current_method->JniLongName() << "+"
- << (it->pc - reinterpret_cast<uint64_t>(start_of_code));
+ << (frame.pc - reinterpret_cast<uint64_t>(start_of_code));
} else {
os << "???";
}
os << ")";
- std::string build_id = map->GetBuildId(it->pc);
+ std::string build_id = map_info->GetPrintableBuildID();
if (!build_id.empty()) {
os << " (BuildId: " << build_id << ")";
}
}
os << std::endl;
if (try_addr2line && use_addr2line) {
- Addr2line(it->map.name, it->rel_pc, os, prefix, &addr2line_state);
+ // Guaranteed that map_info is not nullptr and name is non-empty.
+ Addr2line(frame.map_info->name(), frame.rel_pc, os, prefix, &addr2line_state);
}
}
@@ -426,7 +435,15 @@ void DumpNativeStack(std::ostream& os,
void DumpNativeStack(std::ostream& os ATTRIBUTE_UNUSED,
pid_t tid ATTRIBUTE_UNUSED,
- BacktraceMap* existing_map ATTRIBUTE_UNUSED,
+ const char* prefix ATTRIBUTE_UNUSED,
+ ArtMethod* current_method ATTRIBUTE_UNUSED,
+ void* ucontext_ptr ATTRIBUTE_UNUSED,
+ bool skip_frames ATTRIBUTE_UNUSED) {
+}
+
+void DumpNativeStack(std::ostream& os ATTRIBUTE_UNUSED,
+ unwindstack::AndroidLocalUnwinder& existing_map ATTRIBUTE_UNUSED,
+ pid_t tid ATTRIBUTE_UNUSED,
const char* prefix ATTRIBUTE_UNUSED,
ArtMethod* current_method ATTRIBUTE_UNUSED,
void* ucontext_ptr ATTRIBUTE_UNUSED,
diff --git a/runtime/native_stack_dump.h b/runtime/native_stack_dump.h
index 4d4b36b08e..99fb59c76a 100644
--- a/runtime/native_stack_dump.h
+++ b/runtime/native_stack_dump.h
@@ -23,7 +23,9 @@
#include "base/macros.h"
-class BacktraceMap;
+namespace unwindstack {
+class AndroidLocalUnwinder;
+} // namespace unwindstack
namespace art {
@@ -32,7 +34,15 @@ class ArtMethod;
// Dumps the native stack for thread 'tid' to 'os'.
void DumpNativeStack(std::ostream& os,
pid_t tid,
- BacktraceMap* map = nullptr,
+ const char* prefix = "",
+ ArtMethod* current_method = nullptr,
+ void* ucontext = nullptr,
+ bool skip_frames = true)
+ NO_THREAD_SAFETY_ANALYSIS;
+
+void DumpNativeStack(std::ostream& os,
+ unwindstack::AndroidLocalUnwinder& unwinder,
+ pid_t tid,
const char* prefix = "",
ArtMethod* current_method = nullptr,
void* ucontext = nullptr,
diff --git a/runtime/oat.h b/runtime/oat.h
index 462d41cdf0..341e70bbe9 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr std::array<uint8_t, 4> kOatMagic { { 'o', 'a', 't', '\n' } };
- // Last oat version changed reason: Revert^4 "bss support for inlining BCP into non-BCP".
- static constexpr std::array<uint8_t, 4> kOatVersion { { '2', '2', '5', '\0' } };
+ // Last oat version changed reason: Don't use instrumentation stubs for native methods.
+ static constexpr std::array<uint8_t, 4> kOatVersion { { '2', '2', '7', '\0' } };
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDebuggableKey = "debuggable";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 63778c7b0b..e0189a9353 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -1815,7 +1815,7 @@ class OatFileBackedByVdex final : public OatFileBase {
store.Put(OatHeader::kCompilerFilter, CompilerFilter::NameOfFilter(CompilerFilter::kVerify));
store.Put(OatHeader::kCompilationReasonKey, "vdex");
store.Put(OatHeader::kConcurrentCopying,
- kUseReadBarrier ? OatHeader::kTrueValue : OatHeader::kFalseValue);
+ gUseReadBarrier ? OatHeader::kTrueValue : OatHeader::kFalseValue);
oat_header_.reset(OatHeader::Create(kRuntimeISA,
isa_features.get(),
number_of_dex_files,
@@ -1907,17 +1907,6 @@ OatFile* OatFile::Open(int zip_fd,
reservation,
error_msg);
if (with_dlopen != nullptr) {
- Runtime* runtime = Runtime::Current();
- // The runtime might not be available at this point if we're running
- // dex2oat or oatdump.
- if (runtime != nullptr) {
- size_t madvise_size_limit = runtime->GetMadviseWillNeedSizeOdex();
- Runtime::MadviseFileForRange(madvise_size_limit,
- with_dlopen->Size(),
- with_dlopen->Begin(),
- with_dlopen->End(),
- oat_location);
- }
return with_dlopen;
}
if (kPrintDlOpenErrorMessage) {
@@ -2252,7 +2241,7 @@ OatFile::OatClass OatDexFile::GetOatClass(uint16_t class_def_index) const {
return OatFile::OatClass(oat_file_,
ClassStatus::kNotReady,
/* type= */ OatClassType::kNoneCompiled,
- /* bitmap_size= */ 0u,
+ /* num_methods= */ 0u,
/* bitmap_pointer= */ nullptr,
/* methods_pointer= */ nullptr);
}
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index c1b1acb368..fdb4217fd5 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -270,7 +270,7 @@ class OatFile {
return OatClass(/* oat_file= */ nullptr,
ClassStatus::kErrorUnresolved,
OatClassType::kNoneCompiled,
- /* bitmap_size= */ 0,
+ /* num_methods= */ 0,
/* bitmap_pointer= */ nullptr,
/* methods_pointer= */ nullptr);
}
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 914d2dd08b..389479c5d1 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -16,15 +16,18 @@
#include "oat_file_assistant.h"
-#include <sstream>
-
#include <sys/stat.h>
-#include "zlib.h"
+
+#include <memory>
+#include <sstream>
+#include <vector>
#include "android-base/file.h"
+#include "android-base/logging.h"
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
-
+#include "arch/instruction_set.h"
+#include "base/array_ref.h"
#include "base/compiler_filter.h"
#include "base/file_utils.h"
#include "base/logging.h" // For VLOG.
@@ -44,13 +47,16 @@
#include "gc/space/image_space.h"
#include "image.h"
#include "oat.h"
+#include "oat_file_assistant_context.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "vdex_file.h"
+#include "zlib.h"
namespace art {
-using android::base::StringPrintf;
+using ::android::base::ConsumePrefix;
+using ::android::base::StringPrintf;
static constexpr const char* kAnonymousDexPrefix = "Anonymous-DexFile@";
static constexpr const char* kVdexExtension = ".vdex";
@@ -82,22 +88,24 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
const InstructionSet isa,
ClassLoaderContext* context,
bool load_executable,
- bool only_load_trusted_executable)
+ bool only_load_trusted_executable,
+ OatFileAssistantContext* ofa_context)
: OatFileAssistant(dex_location,
isa,
context,
load_executable,
only_load_trusted_executable,
- /*vdex_fd=*/ -1,
- /*oat_fd=*/ -1,
- /*zip_fd=*/ -1) {}
-
+ ofa_context,
+ /*vdex_fd=*/-1,
+ /*oat_fd=*/-1,
+ /*zip_fd=*/-1) {}
OatFileAssistant::OatFileAssistant(const char* dex_location,
const InstructionSet isa,
ClassLoaderContext* context,
bool load_executable,
bool only_load_trusted_executable,
+ OatFileAssistantContext* ofa_context,
int vdex_fd,
int oat_fd,
int zip_fd)
@@ -105,12 +113,12 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
isa_(isa),
load_executable_(load_executable),
only_load_trusted_executable_(only_load_trusted_executable),
- odex_(this, /*is_oat_location=*/ false),
- oat_(this, /*is_oat_location=*/ true),
- vdex_for_odex_(this, /*is_oat_location=*/ false),
- vdex_for_oat_(this, /*is_oat_location=*/ true),
- dm_for_odex_(this, /*is_oat_location=*/ false),
- dm_for_oat_(this, /*is_oat_location=*/ true),
+ odex_(this, /*is_oat_location=*/false),
+ oat_(this, /*is_oat_location=*/true),
+ vdex_for_odex_(this, /*is_oat_location=*/false),
+ vdex_for_oat_(this, /*is_oat_location=*/true),
+ dm_for_odex_(this, /*is_oat_location=*/false),
+ dm_for_oat_(this, /*is_oat_location=*/true),
zip_fd_(zip_fd) {
CHECK(dex_location != nullptr) << "OatFileAssistant: null dex location";
CHECK_IMPLIES(load_executable, context != nullptr) << "Loading executable without a context";
@@ -127,12 +135,33 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
dex_location_.assign(dex_location);
+ Runtime* runtime = Runtime::Current();
+
+ if (load_executable_ && runtime == nullptr) {
+ LOG(WARNING) << "OatFileAssistant: Load executable specified, "
+ << "but no active runtime is found. Will not attempt to load executable.";
+ load_executable_ = false;
+ }
+
if (load_executable_ && isa != kRuntimeISA) {
LOG(WARNING) << "OatFileAssistant: Load executable specified, "
<< "but isa is not kRuntimeISA. Will not attempt to load executable.";
load_executable_ = false;
}
+ if (ofa_context == nullptr) {
+ CHECK(runtime != nullptr) << "ofa_context is not provided, and no active runtime is found.";
+ ofa_context_ = std::make_unique<OatFileAssistantContext>(runtime);
+ } else {
+ ofa_context_ = ofa_context;
+ }
+
+ if (runtime == nullptr) {
+ // We need `MemMap` for mapping files. We don't have to initialize it when there is a runtime
+ // because the runtime initializes it.
+ MemMap::Init();
+ }
+
// Get the odex filename.
std::string error_msg;
std::string odex_file_name;
@@ -159,7 +188,11 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
if (!UseFdToReadFiles()) {
// Get the oat filename.
std::string oat_file_name;
- if (DexLocationToOatFilename(dex_location_, isa_, &oat_file_name, &error_msg)) {
+ if (DexLocationToOatFilename(dex_location_,
+ isa_,
+ GetRuntimeOptions().deny_art_apex_data_files,
+ &oat_file_name,
+ &error_msg)) {
oat_.Reset(oat_file_name, /*use_fd=*/ false);
std::string vdex_file_name = GetVdexFilename(oat_file_name);
vdex_for_oat_.Reset(vdex_file_name, UseFdToReadFiles(), zip_fd, vdex_fd, oat_fd);
@@ -190,6 +223,48 @@ OatFileAssistant::OatFileAssistant(const char* dex_location,
}
}
+std::unique_ptr<OatFileAssistant> OatFileAssistant::Create(
+ const std::string& filename,
+ const std::string& isa_str,
+ const std::string& context_str,
+ bool load_executable,
+ bool only_load_trusted_executable,
+ OatFileAssistantContext* ofa_context,
+ /*out*/ std::unique_ptr<ClassLoaderContext>* context,
+ /*out*/ std::string* error_msg) {
+ InstructionSet isa = GetInstructionSetFromString(isa_str.c_str());
+ if (isa == InstructionSet::kNone) {
+ *error_msg = StringPrintf("Instruction set '%s' is invalid", isa_str.c_str());
+ return nullptr;
+ }
+
+ std::unique_ptr<ClassLoaderContext> tmp_context = ClassLoaderContext::Create(context_str.c_str());
+ if (tmp_context == nullptr) {
+ *error_msg = StringPrintf("Class loader context '%s' is invalid", context_str.c_str());
+ return nullptr;
+ }
+
+ if (!tmp_context->OpenDexFiles(android::base::Dirname(filename.c_str()),
+ /*context_fds=*/{},
+ /*only_read_checksums=*/true)) {
+ *error_msg =
+ StringPrintf("Failed to load class loader context files for '%s' with context '%s'",
+ filename.c_str(),
+ context_str.c_str());
+ return nullptr;
+ }
+
+ auto assistant = std::make_unique<OatFileAssistant>(filename.c_str(),
+ isa,
+ tmp_context.get(),
+ load_executable,
+ only_load_trusted_executable,
+ ofa_context);
+
+ *context = std::move(tmp_context);
+ return assistant;
+}
+
bool OatFileAssistant::UseFdToReadFiles() {
return zip_fd_ >= 0;
}
@@ -199,11 +274,9 @@ bool OatFileAssistant::IsInBootClassPath() {
// specified by the user. This is okay, because the boot class path should
// be the same for all ISAs.
// TODO: Can we verify the boot class path is the same for all ISAs?
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- const auto& boot_class_path = class_linker->GetBootClassPath();
- for (size_t i = 0; i < boot_class_path.size(); i++) {
- if (boot_class_path[i]->GetLocation() == dex_location_) {
+ for (const std::string& boot_class_path_location :
+ GetRuntimeOptions().boot_class_path_locations) {
+ if (boot_class_path_location == dex_location_) {
VLOG(oat) << "Dex location " << dex_location_ << " is in boot class path";
return true;
}
@@ -211,19 +284,61 @@ bool OatFileAssistant::IsInBootClassPath() {
return false;
}
-int OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target,
+OatFileAssistant::DexOptTrigger OatFileAssistant::GetDexOptTrigger(
+ CompilerFilter::Filter target_compiler_filter, bool profile_changed, bool downgrade) {
+ if (downgrade) {
+ // The caller's intention is to downgrade the compiler filter. We should only re-compile if the
+ // target compiler filter is worse than the current one.
+ return DexOptTrigger{.targetFilterIsWorse = true};
+ }
+
+ // This is the usual case. The caller's intention is to see if a better oat file can be generated.
+ DexOptTrigger dexopt_trigger{.targetFilterIsBetter = true, .primaryBootImageBecomesUsable = true};
+ if (profile_changed && CompilerFilter::DependsOnProfile(target_compiler_filter)) {
+ // Since the profile has been changed, we should re-compile even if the compilation does not
+ // make the compiler filter better.
+ dexopt_trigger.targetFilterIsSame = true;
+ }
+ return dexopt_trigger;
+}
+
+int OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter,
bool profile_changed,
bool downgrade) {
OatFileInfo& info = GetBestInfo();
- DexOptNeeded dexopt_needed = info.GetDexOptNeeded(target,
- profile_changed,
- downgrade);
+ DexOptNeeded dexopt_needed = info.GetDexOptNeeded(
+ target_compiler_filter, GetDexOptTrigger(target_compiler_filter, profile_changed, downgrade));
+ if (dexopt_needed != kNoDexOptNeeded && (&info == &dm_for_oat_ || &info == &dm_for_odex_)) {
+ // The usable vdex file is in the DM file. This information cannot be encoded in the integer.
+ // Return kDex2OatFromScratch so that neither the vdex in the "oat" location nor the vdex in the
+ // "odex" location will be picked by installd.
+ return kDex2OatFromScratch;
+ }
if (info.IsOatLocation() || dexopt_needed == kDex2OatFromScratch) {
return dexopt_needed;
}
return -dexopt_needed;
}
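The return-value convention preserved above encodes two facts in one integer: the magnitude is the DexOptNeeded kind, and the sign says whether it targets the oat location (positive) or the odex location (negative). A toy decoder of that assumed convention:

#include <cstdio>
#include <cstdlib>

// Toy decoder: positive values target the "oat" location, negative values the
// "odex" location, zero means no dexopt is needed. The magnitude is the
// DexOptNeeded kind (e.g. kDex2OatForFilter).
void DecodeDexOptNeeded(int value) {
  if (value == 0) {
    std::puts("no dexopt needed");
  } else {
    std::printf("dexopt kind %d, targeting the %s location\n",
                std::abs(value), value > 0 ? "oat" : "odex");
  }
}

int main() {
  DecodeDexOptNeeded(0);
  DecodeDexOptNeeded(3);   // e.g. kDex2OatForFilter against the oat location
  DecodeDexOptNeeded(-3);  // same kind, but against the odex location
  return 0;
}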
+bool OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter,
+ DexOptTrigger dexopt_trigger,
+ /*out*/ DexOptStatus* dexopt_status) {
+ OatFileInfo& info = GetBestInfo();
+ DexOptNeeded dexopt_needed = info.GetDexOptNeeded(target_compiler_filter, dexopt_trigger);
+ if (info.IsUseable()) {
+ if (&info == &dm_for_oat_ || &info == &dm_for_odex_) {
+ dexopt_status->location_ = kLocationDm;
+ } else if (info.IsOatLocation()) {
+ dexopt_status->location_ = kLocationOat;
+ } else {
+ dexopt_status->location_ = kLocationOdex;
+ }
+ } else {
+ dexopt_status->location_ = kLocationNoneOrError;
+ }
+ return dexopt_needed != kNoDexOptNeeded;
+}
+
bool OatFileAssistant::IsUpToDate() {
return GetBestInfo().Status() == kOatUpToDate;
}
@@ -419,9 +534,7 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile&
// compiled code and are otherwise okay, we should return something like
// kOatRelocationOutOfDate. If they don't contain compiled code, the read
// barrier state doesn't matter.
- const bool is_cc = file.GetOatHeader().IsConcurrentCopying();
- constexpr bool kRuntimeIsCC = kUseReadBarrier;
- if (is_cc != kRuntimeIsCC) {
+ if (file.GetOatHeader().IsConcurrentCopying() != gUseReadBarrier) {
return kOatCannotOpen;
}
@@ -443,7 +556,8 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile&
VLOG(oat) << "Oat image checksum does not match image checksum.";
return kOatBootImageOutOfDate;
}
- if (!gc::space::ImageSpace::ValidateApexVersions(file, &error_msg)) {
+ if (!gc::space::ImageSpace::ValidateApexVersions(
+ file, GetOatFileAssistantContext()->GetApexVersions(), &error_msg)) {
VLOG(oat) << error_msg;
return kOatBootImageOutOfDate;
}
@@ -454,9 +568,9 @@ OatFileAssistant::OatStatus OatFileAssistant::GivenOatFileStatus(const OatFile&
// zip_file_only_contains_uncompressed_dex_ is only set during fetching the dex checksums.
DCHECK(required_dex_checksums_attempted_);
if (only_load_trusted_executable_ &&
- !LocationIsTrusted(file.GetLocation(), !Runtime::Current()->DenyArtApexDataFiles()) &&
- file.ContainsDexCode() &&
- zip_file_only_contains_uncompressed_dex_) {
+ !LocationIsTrusted(file.GetLocation(),
+ !GetRuntimeOptions().deny_art_apex_data_files) &&
+ file.ContainsDexCode() && zip_file_only_contains_uncompressed_dex_) {
LOG(ERROR) << "Not loading "
<< dex_location_
<< ": oat file has dex code, but APK has uncompressed dex code";
@@ -474,6 +588,11 @@ bool OatFileAssistant::AnonymousDexVdexLocation(const std::vector<const DexFile:
InstructionSet isa,
/* out */ std::string* dex_location,
/* out */ std::string* vdex_filename) {
+ // Normally, OatFileAssistant should not assume that there is an active runtime. However, we
+ // reference the runtime here. This is okay because we are in a static function that is unrelated
+ // to other parts of OatFileAssistant.
+ DCHECK(Runtime::Current() != nullptr);
+
uint32_t checksum = adler32(0L, Z_NULL, 0);
for (const DexFile::Header* header : headers) {
checksum = adler32_combine(checksum,
@@ -571,13 +690,23 @@ bool OatFileAssistant::DexLocationToOatFilename(const std::string& location,
InstructionSet isa,
std::string* oat_filename,
std::string* error_msg) {
+ DCHECK(Runtime::Current() != nullptr);
+ return DexLocationToOatFilename(
+ location, isa, Runtime::Current()->DenyArtApexDataFiles(), oat_filename, error_msg);
+}
+
+bool OatFileAssistant::DexLocationToOatFilename(const std::string& location,
+ InstructionSet isa,
+ bool deny_art_apex_data_files,
+ std::string* oat_filename,
+ std::string* error_msg) {
CHECK(oat_filename != nullptr);
CHECK(error_msg != nullptr);
// Check if `location` could have an oat file in the ART APEX data directory. If so, and the
// file exists, use it.
const std::string apex_data_file = GetApexDataOdexFilename(location, isa);
- if (!apex_data_file.empty() && !Runtime::Current()->DenyArtApexDataFiles()) {
+ if (!apex_data_file.empty() && !deny_art_apex_data_files) {
if (OS::FileExists(apex_data_file.c_str(), /*check_file_type=*/true)) {
*oat_filename = apex_data_file;
return true;
@@ -640,6 +769,105 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() {
return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr;
}
+bool OatFileAssistant::ValidateBootClassPathChecksums(OatFileAssistantContext* ofa_context,
+ InstructionSet isa,
+ std::string_view oat_checksums,
+ std::string_view oat_boot_class_path,
+ /*out*/ std::string* error_msg) {
+ const std::vector<std::string>& bcp_locations =
+ ofa_context->GetRuntimeOptions().boot_class_path_locations;
+
+ if (oat_checksums.empty() || oat_boot_class_path.empty()) {
+ *error_msg = oat_checksums.empty() ? "Empty checksums" : "Empty boot class path";
+ return false;
+ }
+
+ size_t oat_bcp_size = gc::space::ImageSpace::CheckAndCountBCPComponents(
+ oat_boot_class_path, ArrayRef<const std::string>(bcp_locations), error_msg);
+ DCHECK_LE(oat_bcp_size, bcp_locations.size());
+ if (oat_bcp_size == static_cast<size_t>(-1)) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+
+ size_t bcp_index = 0;
+ size_t boot_image_index = 0;
+ bool found_d = false;
+
+ while (bcp_index < oat_bcp_size) {
+ static_assert(gc::space::ImageSpace::kImageChecksumPrefix == 'i', "Format prefix check");
+ static_assert(gc::space::ImageSpace::kDexFileChecksumPrefix == 'd', "Format prefix check");
+ if (StartsWith(oat_checksums, "i") && !found_d) {
+ const std::vector<OatFileAssistantContext::BootImageInfo>& boot_image_info_list =
+ ofa_context->GetBootImageInfoList(isa);
+ if (boot_image_index >= boot_image_info_list.size()) {
+ *error_msg = StringPrintf("Missing boot image for %s, remaining checksums: %s",
+ bcp_locations[bcp_index].c_str(),
+ std::string(oat_checksums).c_str());
+ return false;
+ }
+
+ const OatFileAssistantContext::BootImageInfo& boot_image_info =
+ boot_image_info_list[boot_image_index];
+ if (!ConsumePrefix(&oat_checksums, boot_image_info.checksum)) {
+ *error_msg = StringPrintf("Image checksum mismatch, expected %s to start with %s",
+ std::string(oat_checksums).c_str(),
+ boot_image_info.checksum.c_str());
+ return false;
+ }
+
+ bcp_index += boot_image_info.component_count;
+ boot_image_index++;
+ } else if (StartsWith(oat_checksums, "d")) {
+ found_d = true;
+ const std::vector<std::string>* bcp_checksums =
+ ofa_context->GetBcpChecksums(bcp_index, error_msg);
+ if (bcp_checksums == nullptr) {
+ return false;
+ }
+ oat_checksums.remove_prefix(1u);
+ for (const std::string& checksum : *bcp_checksums) {
+ if (!ConsumePrefix(&oat_checksums, checksum)) {
+ *error_msg = StringPrintf(
+ "Dex checksum mismatch for bootclasspath file %s, expected %s to start with %s",
+ bcp_locations[bcp_index].c_str(),
+ std::string(oat_checksums).c_str(),
+ checksum.c_str());
+ return false;
+ }
+ }
+
+ bcp_index++;
+ } else {
+ *error_msg = StringPrintf("Unexpected checksums, expected %s to start with %s",
+ std::string(oat_checksums).c_str(),
+ found_d ? "'d'" : "'i' or 'd'");
+ return false;
+ }
+
+ if (bcp_index < oat_bcp_size) {
+ if (!ConsumePrefix(&oat_checksums, ":")) {
+ if (oat_checksums.empty()) {
+ *error_msg =
+ StringPrintf("Checksum too short, missing %zu components", oat_bcp_size - bcp_index);
+ } else {
+ *error_msg = StringPrintf("Missing ':' separator at start of %s",
+ std::string(oat_checksums).c_str());
+ }
+ return false;
+ }
+ }
+ }
+
+ if (!oat_checksums.empty()) {
+ *error_msg =
+ StringPrintf("Checksum too long, unexpected tail: %s", std::string(oat_checksums).c_str());
+ return false;
+ }
+
+ return true;
+}
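The string being validated interleaves 'i'-prefixed boot-image checksums and 'd'-prefixed dex-file checksums, ':'-separated, and the loop above rejects any 'i' component once a 'd' has been seen. A standalone sketch of just that ordering rule (checksum bodies are made up; the real validation also matches component counts and checksum text):

#include <cstdio>
#include <sstream>
#include <string>

// Checks only the ordering invariant: ':'-separated components, each starting
// with 'i' (boot image) or 'd' (dex files), and no 'i' after the first 'd'.
bool CheckOrdering(const std::string& checksums, std::string* error_msg) {
  std::istringstream in(checksums);
  std::string component;
  bool found_d = false;
  while (std::getline(in, component, ':')) {
    if (!component.empty() && component[0] == 'd') {
      found_d = true;
    } else if (!component.empty() && component[0] == 'i' && !found_d) {
      // Image checksums are only accepted before the first dex checksum.
    } else {
      *error_msg = "Unexpected component '" + component + "', expected " +
                   (found_d ? "'d...'" : "'i...' or 'd...'");
      return false;
    }
  }
  return true;
}

int main() {
  std::string err;
  std::printf("%d\n", CheckOrdering("i0f2b29d1:d4bc12ef0:d778c9aa1", &err));  // 1
  std::printf("%d\n", CheckOrdering("d4bc12ef0:i0f2b29d1", &err));            // 0
  return 0;
}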
+
bool OatFileAssistant::ValidateBootClassPathChecksums(const OatFile& oat_file) {
// Get the checksums and the BCP from the oat file.
const char* oat_boot_class_path_checksums =
@@ -649,45 +877,26 @@ bool OatFileAssistant::ValidateBootClassPathChecksums(const OatFile& oat_file) {
if (oat_boot_class_path_checksums == nullptr || oat_boot_class_path == nullptr) {
return false;
}
- std::string_view oat_boot_class_path_checksums_view(oat_boot_class_path_checksums);
- std::string_view oat_boot_class_path_view(oat_boot_class_path);
- if (oat_boot_class_path_view == cached_boot_class_path_ &&
- oat_boot_class_path_checksums_view == cached_boot_class_path_checksums_) {
- return true;
- }
- Runtime* runtime = Runtime::Current();
std::string error_msg;
- bool result = false;
- // Fast path when the runtime boot classpath cheksums and boot classpath
- // locations directly match.
- if (oat_boot_class_path_checksums_view == runtime->GetBootClassPathChecksums() &&
- isa_ == kRuntimeISA &&
- oat_boot_class_path_view == android::base::Join(runtime->GetBootClassPathLocations(), ":")) {
- result = true;
- } else {
- result = gc::space::ImageSpace::VerifyBootClassPathChecksums(
- oat_boot_class_path_checksums_view,
- oat_boot_class_path_view,
- ArrayRef<const std::string>(runtime->GetImageLocations()),
- ArrayRef<const std::string>(runtime->GetBootClassPathLocations()),
- ArrayRef<const std::string>(runtime->GetBootClassPath()),
- ArrayRef<const int>(runtime->GetBootClassPathFds()),
- isa_,
- &error_msg);
- }
+ bool result = ValidateBootClassPathChecksums(GetOatFileAssistantContext(),
+ isa_,
+ oat_boot_class_path_checksums,
+ oat_boot_class_path,
+ &error_msg);
if (!result) {
VLOG(oat) << "Failed to verify checksums of oat file " << oat_file.GetLocation()
<< " error: " << error_msg;
return false;
}
- // This checksum has been validated, so save it.
- cached_boot_class_path_ = oat_boot_class_path_view;
- cached_boot_class_path_checksums_ = oat_boot_class_path_checksums_view;
return true;
}
+bool OatFileAssistant::IsPrimaryBootImageUsable() {
+ return !GetOatFileAssistantContext()->GetBootImageInfoList(isa_).empty();
+}
+
OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() {
ScopedTrace trace("GetBestInfo");
// TODO(calin): Document the side effects of class loading when
@@ -808,14 +1017,16 @@ OatFileAssistant::OatStatus OatFileAssistant::OatFileInfo::Status() {
}
OatFileAssistant::DexOptNeeded OatFileAssistant::OatFileInfo::GetDexOptNeeded(
- CompilerFilter::Filter target,
- bool profile_changed,
- bool downgrade) {
-
+ CompilerFilter::Filter target_compiler_filter, const DexOptTrigger dexopt_trigger) {
if (IsUseable()) {
- return CompilerFilterIsOkay(target, profile_changed, downgrade)
- ? kNoDexOptNeeded
- : kDex2OatForFilter;
+ return ShouldRecompileForFilter(target_compiler_filter, dexopt_trigger) ? kDex2OatForFilter :
+ kNoDexOptNeeded;
+ }
+
+  // In this case, the oat file is not usable. If the caller is not seeking a better compiler
+  // filter (e.g., the caller wants to downgrade), then we should not recompile.
+ if (!dexopt_trigger.targetFilterIsBetter) {
+ return kNoDexOptNeeded;
}
if (Status() == kOatBootImageOutOfDate) {
@@ -840,7 +1051,8 @@ const OatFile* OatFileAssistant::OatFileInfo::GetFile() {
return nullptr;
}
- if (LocationIsOnArtApexData(filename_) && Runtime::Current()->DenyArtApexDataFiles()) {
+ if (LocationIsOnArtApexData(filename_) &&
+ oat_file_assistant_->GetRuntimeOptions().deny_art_apex_data_files) {
LOG(WARNING) << "OatFileAssistant rejected file " << filename_
<< ": ART apexdata is untrusted.";
return nullptr;
@@ -934,25 +1146,24 @@ const OatFile* OatFileAssistant::OatFileInfo::GetFile() {
return file_.get();
}
-bool OatFileAssistant::OatFileInfo::CompilerFilterIsOkay(
- CompilerFilter::Filter target, bool profile_changed, bool downgrade) {
+bool OatFileAssistant::OatFileInfo::ShouldRecompileForFilter(CompilerFilter::Filter target,
+ const DexOptTrigger dexopt_trigger) {
const OatFile* file = GetFile();
- if (file == nullptr) {
- return false;
- }
+ DCHECK(file != nullptr);
CompilerFilter::Filter current = file->GetCompilerFilter();
- if (profile_changed && CompilerFilter::DependsOnProfile(current)) {
- VLOG(oat) << "Compiler filter not okay because Profile changed";
- return false;
+ if (dexopt_trigger.targetFilterIsBetter && CompilerFilter::IsBetter(target, current)) {
+ return true;
}
-
- if (downgrade) {
- return !CompilerFilter::IsBetter(current, target);
+ if (dexopt_trigger.targetFilterIsSame && current == target) {
+ return true;
+ }
+ if (dexopt_trigger.targetFilterIsWorse && CompilerFilter::IsBetter(current, target)) {
+ return true;
}
- if (CompilerFilter::DependsOnImageChecksum(current) &&
- CompilerFilter::IsAsGoodAs(current, target)) {
+ if (dexopt_trigger.primaryBootImageBecomesUsable &&
+ CompilerFilter::DependsOnImageChecksum(current)) {
// If the oat file has been compiled without an image, and the runtime is
// now running with an image loaded from disk, return that we need to
// re-compile. The recompilation will generate a better oat file, and with an app
@@ -961,13 +1172,13 @@ bool OatFileAssistant::OatFileInfo::CompilerFilterIsOkay(
file->GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
if (oat_boot_class_path_checksums != nullptr &&
!StartsWith(oat_boot_class_path_checksums, "i") &&
- !Runtime::Current()->HasImageWithProfile()) {
+ oat_file_assistant_->IsPrimaryBootImageUsable()) {
DCHECK(!file->GetOatHeader().RequiresImage());
- return false;
+ return true;
}
}
- return CompilerFilter::IsAsGoodAs(current, target);
+ return false;
}
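
For reference, the filter-comparison part of the new trigger logic reduces to a small decision function over the `DexOptTrigger` bits. A minimal standalone sketch of that core follows; the `Filter` enum and `IsBetter` below are simplified stand-ins for `CompilerFilter`, not the real ART types, and the `primaryBootImageBecomesUsable` case is omitted:

#include <iostream>

// Simplified stand-in for CompilerFilter::Filter: a higher value means a better filter.
enum class Filter { kVerify = 0, kSpeedProfile = 1, kSpeed = 2, kEverything = 3 };

// Stand-in for CompilerFilter::IsBetter.
bool IsBetter(Filter a, Filter b) { return static_cast<int>(a) > static_cast<int>(b); }

struct Trigger {
  bool targetFilterIsBetter : 1;
  bool targetFilterIsSame : 1;
  bool targetFilterIsWorse : 1;
};

// Mirrors the filter-comparison branches of ShouldRecompileForFilter.
bool ShouldRecompile(Filter target, Filter current, Trigger t) {
  if (t.targetFilterIsBetter && IsBetter(target, current)) return true;
  if (t.targetFilterIsSame && target == current) return true;
  if (t.targetFilterIsWorse && IsBetter(current, target)) return true;
  return false;
}

int main() {
  Trigger upgrade_only{.targetFilterIsBetter = true};
  std::cout << ShouldRecompile(Filter::kSpeed, Filter::kVerify, upgrade_only)   // 1: upgrade
            << ShouldRecompile(Filter::kVerify, Filter::kSpeed, upgrade_only)   // 0: downgrade
            << std::endl;
}
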
bool OatFileAssistant::ClassLoaderContextIsOkay(const OatFile& oat_file) const {
@@ -1044,17 +1255,19 @@ std::unique_ptr<OatFile> OatFileAssistant::OatFileInfo::ReleaseFileForUse() {
// TODO(calin): we could provide a more refined status here
// (e.g. run from uncompressed apk, run with vdex but not oat etc). It will allow us to
// track more experiments but adds extra complexity.
-void OatFileAssistant::GetOptimizationStatus(
- const std::string& filename,
- InstructionSet isa,
- std::string* out_compilation_filter,
- std::string* out_compilation_reason) {
+void OatFileAssistant::GetOptimizationStatus(const std::string& filename,
+ InstructionSet isa,
+ std::string* out_compilation_filter,
+ std::string* out_compilation_reason,
+ OatFileAssistantContext* ofa_context) {
// It may not be possible to load an oat file executable (e.g., selinux restrictions). Load
// non-executable and check the status manually.
OatFileAssistant oat_file_assistant(filename.c_str(),
isa,
- /* context= */ nullptr,
- /*load_executable=*/ false);
+ /*context=*/nullptr,
+ /*load_executable=*/false,
+ /*only_load_trusted_executable=*/false,
+ ofa_context);
std::string out_odex_location; // unused
std::string out_odex_status; // unused
oat_file_assistant.GetOptimizationStatus(
@@ -1090,28 +1303,25 @@ void OatFileAssistant::GetOptimizationStatus(
OatStatus status = oat_file_info.Status();
const char* reason = oat_file->GetCompilationReason();
*out_compilation_reason = reason == nullptr ? "unknown" : reason;
+
+ // If the oat file is invalid, the vdex file will be picked, so the status is `kOatUpToDate`. If
+ // the vdex file is also invalid, then either `oat_file` is nullptr, or `status` is
+ // `kOatDexOutOfDate`.
+ DCHECK(status == kOatUpToDate || status == kOatDexOutOfDate);
+
switch (status) {
case kOatUpToDate:
*out_compilation_filter = CompilerFilter::NameOfFilter(oat_file->GetCompilerFilter());
*out_odex_status = "up-to-date";
return;
- case kOatCannotOpen: // This should never happen, but be robust.
- *out_compilation_filter = "error";
- *out_compilation_reason = "error";
- // This mostly happens when we cannot open the vdex file,
- // or the file is corrupt.
- *out_odex_status = "io-error-or-corruption";
- return;
-
+ case kOatCannotOpen:
case kOatBootImageOutOfDate:
- *out_compilation_filter = "run-from-apk-fallback";
- *out_odex_status = "boot-image-more-recent";
- return;
-
case kOatContextOutOfDate:
- *out_compilation_filter = "run-from-apk-fallback";
- *out_odex_status = "context-mismatch";
+ // These should never happen, but be robust.
+ *out_compilation_filter = "unexpected";
+ *out_compilation_reason = "unexpected";
+ *out_odex_status = "unexpected";
return;
case kOatDexOutOfDate:
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index c243cc3a54..ce069d2f7b 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -19,16 +19,19 @@
#include <cstdint>
#include <memory>
+#include <optional>
#include <sstream>
#include <string>
+#include <variant>
-#include "base/compiler_filter.h"
#include "arch/instruction_set.h"
+#include "base/compiler_filter.h"
#include "base/os.h"
#include "base/scoped_flock.h"
#include "base/unix_file/fd_file.h"
#include "class_loader_context.h"
#include "oat_file.h"
+#include "oat_file_assistant_context.h"
namespace art {
@@ -89,6 +92,45 @@ class OatFileAssistant {
kOatUpToDate,
};
+ // A bit field representing the conditions under which dexopt should be performed.
+ struct DexOptTrigger {
+ // Dexopt should be performed if the target compiler filter is better than the current compiler
+ // filter. See `CompilerFilter::IsBetter`.
+ bool targetFilterIsBetter : 1;
+ // Dexopt should be performed if the target compiler filter is the same as the current compiler
+ // filter.
+ bool targetFilterIsSame : 1;
+ // Dexopt should be performed if the target compiler filter is worse than the current compiler
+ // filter. See `CompilerFilter::IsBetter`.
+ bool targetFilterIsWorse : 1;
+ // Dexopt should be performed if the current oat file was compiled without a primary image,
+ // and the runtime is now running with a primary image loaded from disk.
+ bool primaryBootImageBecomesUsable : 1;
+ };
+
+ // Represents the location of the current oat file and/or vdex file.
+ enum Location {
+ // Does not exist, or an error occurs.
+ kLocationNoneOrError = 0,
+ // In the global "dalvik-cache" folder.
+ kLocationOat = 1,
+ // In the "oat" folder next to the dex file.
+ kLocationOdex = 2,
+ // In the DM file. This means the only usable file is the vdex file.
+ kLocationDm = 3,
+ };
+
+ // Represents the status of the current oat file and/or vdex file.
+ class DexOptStatus {
+ public:
+ Location GetLocation() { return location_; }
+ bool IsVdexUsable() { return location_ != kLocationNoneOrError; }
+
+ private:
+ Location location_ = kLocationNoneOrError;
+ friend class OatFileAssistant;
+ };
+
// Constructs an OatFileAssistant object to assist the oat file
// corresponding to the given dex location with the target instruction set.
//
@@ -110,11 +152,15 @@ class OatFileAssistant {
// only_load_trusted_executable should be true if the caller intends to have
// only oat files from trusted locations loaded executable. See IsTrustedLocation() for
// details on trusted locations.
+ //
+ // ofa_context, with all the required runtime options filled, should be provided if the caller
+ // intends to use OatFileAssistant without an active runtime.
OatFileAssistant(const char* dex_location,
const InstructionSet isa,
ClassLoaderContext* context,
bool load_executable,
- bool only_load_trusted_executable = false);
+ bool only_load_trusted_executable = false,
+ OatFileAssistantContext* ofa_context = nullptr);
// Similar to this(const char*, const InstructionSet, bool), however, if a valid zip_fd is
// provided, vdex, oat, and zip files will be read from vdex_fd, oat_fd and zip_fd respectively.
@@ -124,10 +170,25 @@ class OatFileAssistant {
ClassLoaderContext* context,
bool load_executable,
bool only_load_trusted_executable,
+ OatFileAssistantContext* ofa_context,
int vdex_fd,
int oat_fd,
int zip_fd);
+ // A convenience factory that accepts the ISA and the class loader context as strings. Returns
+ // the created instance and the parsed ClassLoaderContext on success, or returns nullptr and
+ // outputs an error message if it fails to parse the input strings.
+ // The returned ClassLoaderContext must live at least as long as the OatFileAssistant.
+ static std::unique_ptr<OatFileAssistant> Create(
+ const std::string& filename,
+ const std::string& isa_str,
+ const std::string& context_str,
+ bool load_executable,
+ bool only_load_trusted_executable,
+ OatFileAssistantContext* ofa_context,
+ /*out*/ std::unique_ptr<ClassLoaderContext>* context,
+ /*out*/ std::string* error_msg);
+
// Returns true if the dex location refers to an element of the boot class
// path.
bool IsInBootClassPath();
@@ -148,10 +209,18 @@ class OatFileAssistant {
// Returns a positive status code if the status refers to the oat file in
// the oat location. Returns a negative status code if the status refers to
// the oat file in the odex location.
+ //
+ // Deprecated. Use the other overload.
int GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter,
bool profile_changed = false,
bool downgrade = false);
+ // Returns true if dexopt needs to be performed with respect to the given target compilation
+ // filter and dexopt trigger. Also returns the status of the current oat file and/or vdex file.
+ bool GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter,
+ const DexOptTrigger dexopt_trigger,
+ /*out*/ DexOptStatus* dexopt_status);
+
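
A hedged usage sketch of the `Create` factory together with the new `GetDexOptNeeded` overload; the path, ISA string, and context string are illustrative placeholders, and error handling is minimal:

// Sketch only: exercises the API declared above from within `namespace art`.
#include "oat_file_assistant.h"

namespace art {

bool NeedsSpeedProfileDexopt(OatFileAssistantContext* ofa_context, std::string* error_msg) {
  std::unique_ptr<ClassLoaderContext> context;
  std::unique_ptr<OatFileAssistant> assistant =
      OatFileAssistant::Create("/data/app/Example.apk",
                               /*isa_str=*/"arm64",
                               /*context_str=*/"PCL[]",
                               /*load_executable=*/false,
                               /*only_load_trusted_executable=*/false,
                               ofa_context,
                               &context,
                               error_msg);
  if (assistant == nullptr) {
    return false;  // `error_msg` describes why the input strings failed to parse.
  }

  // Dexopt if the target filter is better than the current one, or if a
  // primary boot image has become usable since the oat file was compiled.
  OatFileAssistant::DexOptTrigger trigger{.targetFilterIsBetter = true,
                                          .primaryBootImageBecomesUsable = true};
  OatFileAssistant::DexOptStatus status;
  bool needed = assistant->GetDexOptNeeded(CompilerFilter::kSpeedProfile, trigger, &status);
  // `status` additionally reports whether the vdex file is usable and where
  // the current artifacts live (odex / oat / DM / none).
  return needed;
}

}  // namespace art
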
// Returns true if there is up-to-date code for this dex location,
// irrespective of the compiler filter of the up-to-date code.
bool IsUpToDate();
@@ -176,7 +245,7 @@ class OatFileAssistant {
// - out_compilation_reason: the optimization reason. The reason might
// be "unknown" if the compiler artifacts were not annotated during optimizations.
// - out_odex_status: a human readable refined status of the validity of the odex file.
- // E.g. up-to-date, boot-image-more-recent, apk-more-recent.
+ // Possible values are: "up-to-date", "apk-more-recent", and "io-error-no-oat".
//
// This method will try to mimic the runtime effect of loading the dex file.
// For example, if there is no usable oat file, the compiler filter will be set
@@ -189,7 +258,8 @@ class OatFileAssistant {
static void GetOptimizationStatus(const std::string& filename,
InstructionSet isa,
std::string* out_compilation_filter,
- std::string* out_compilation_reason);
+ std::string* out_compilation_reason,
+ OatFileAssistantContext* ofa_context = nullptr);
// Open and returns an image space associated with the oat file.
static std::unique_ptr<gc::space::ImageSpace> OpenImageSpace(const OatFile* oat_file);
@@ -253,8 +323,19 @@ class OatFileAssistant {
// Returns false on error, in which case error_msg describes the error and
// oat_filename is not changed.
// Neither oat_filename nor error_msg may be null.
+ //
+ // Calling this function requires an active runtime.
+ static bool DexLocationToOatFilename(const std::string& location,
+ InstructionSet isa,
+ std::string* oat_filename,
+ std::string* error_msg);
+
+ // Same as above, but additionally takes `deny_art_apex_data_files` as an input.
+ //
+ // Calling this function does not require an active runtime.
static bool DexLocationToOatFilename(const std::string& location,
InstructionSet isa,
+ bool deny_art_apex_data_files,
std::string* oat_filename,
std::string* error_msg);
@@ -262,6 +343,8 @@ class OatFileAssistant {
// is known, creates an absolute path in that directory and tries to infer path
// of a corresponding vdex file. Otherwise only creates a basename dex_location
// from the combined checksums. Returns true if all out-arguments have been set.
+ //
+ // Calling this function requires an active runtime.
static bool AnonymousDexVdexLocation(const std::vector<const DexFile::Header*>& dex_headers,
InstructionSet isa,
/* out */ std::string* dex_location,
@@ -273,6 +356,16 @@ class OatFileAssistant {
bool ClassLoaderContextIsOkay(const OatFile& oat_file) const;
+ // Validates the boot class path checksum of an OatFile.
+ bool ValidateBootClassPathChecksums(const OatFile& oat_file);
+
+ // Validates the given bootclasspath and bootclasspath checksums found in an oat header.
+ static bool ValidateBootClassPathChecksums(OatFileAssistantContext* ofa_context,
+ InstructionSet isa,
+ std::string_view oat_checksums,
+ std::string_view oat_boot_class_path,
+ /*out*/ std::string* error_msg);
+
private:
class OatFileInfo {
public:
@@ -298,15 +391,10 @@ class OatFileAssistant {
// Returns the status of this oat file.
OatStatus Status();
- // Return the DexOptNeeded value for this oat file with respect to the
- // given target_compilation_filter.
- // profile_changed should be true to indicate the profile has recently
- // changed for this dex location.
- // downgrade should be true if the purpose of dexopt is to downgrade the
- // compiler filter.
+ // Return the DexOptNeeded value for this oat file with respect to the given target compilation
+ // filter and dexopt trigger.
DexOptNeeded GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter,
- bool profile_changed,
- bool downgrade);
+ const DexOptTrigger dexopt_trigger);
// Returns the loaded file.
// Loads the file if needed. Returns null if the file failed to load.
@@ -339,13 +427,10 @@ class OatFileAssistant {
std::unique_ptr<OatFile> ReleaseFileForUse();
private:
- // Returns true if the compiler filter used to generate the file is at
- // least as good as the given target filter. profile_changed should be
- // true to indicate the profile has recently changed for this dex
- // location.
- // downgrade should be true if the purpose of dexopt is to downgrade the
- // compiler filter.
- bool CompilerFilterIsOkay(CompilerFilter::Filter target, bool profile_changed, bool downgrade);
+ // Returns true if the oat file is usable and at least one dexopt trigger matches. This
+ // function must only be called if the oat file is usable.
+ bool ShouldRecompileForFilter(CompilerFilter::Filter target,
+ const DexOptTrigger dexopt_trigger);
// Release the loaded oat file.
// Returns null if the oat file hasn't been loaded.
@@ -409,8 +494,32 @@ class OatFileAssistant {
// dex_location_ dex file.
const std::vector<uint32_t>* GetRequiredDexChecksums();
- // Validates the boot class path checksum of an OatFile.
- bool ValidateBootClassPathChecksums(const OatFile& oat_file);
+ // Returns whether at least one boot image is usable.
+ bool IsPrimaryBootImageUsable();
+
+ // Returns the trigger for the deprecated overload of `GetDexOptNeeded`.
+ //
+ // Deprecated. Do not use in new code.
+ DexOptTrigger GetDexOptTrigger(CompilerFilter::Filter target_compiler_filter,
+ bool profile_changed,
+ bool downgrade);
+
+ // Returns a pointer to the owned or unowned OatFileAssistantContext instance.
+ OatFileAssistantContext* GetOatFileAssistantContext() {
+ if (std::holds_alternative<OatFileAssistantContext*>(ofa_context_)) {
+ return std::get<OatFileAssistantContext*>(ofa_context_);
+ } else {
+ return std::get<std::unique_ptr<OatFileAssistantContext>>(ofa_context_).get();
+ }
+ }
+
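
The accessor above normalizes an owned-or-unowned pointer held in a `std::variant`. A minimal generic sketch of the same pattern, with an illustrative `Widget` type standing in for OatFileAssistantContext:

#include <memory>
#include <variant>

struct Widget {};

// Holds either an owned Widget (unique_ptr) or a borrowed one (raw pointer),
// and normalizes both cases to a raw pointer on access.
class WidgetHolder {
 public:
  explicit WidgetHolder(Widget* borrowed) : storage_(borrowed) {}
  WidgetHolder() : storage_(std::make_unique<Widget>()) {}

  Widget* get() {
    if (std::holds_alternative<Widget*>(storage_)) {
      return std::get<Widget*>(storage_);
    }
    return std::get<std::unique_ptr<Widget>>(storage_).get();
  }

 private:
  std::variant<std::unique_ptr<Widget>, Widget*> storage_;
};
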
+ // The runtime options taken from the active runtime or the input.
+ //
+ // All member functions should get runtime options from this variable rather than referencing the
+ // active runtime. This is to allow OatFileAssistant to function without an active runtime.
+ const OatFileAssistantContext::RuntimeOptions& GetRuntimeOptions() {
+ return GetOatFileAssistantContext()->GetRuntimeOptions();
+ }
std::string dex_location_;
@@ -459,8 +568,8 @@ class OatFileAssistant {
// File descriptor corresponding to apk, dex file, or zip.
int zip_fd_;
- std::string cached_boot_class_path_;
- std::string cached_boot_class_path_checksums_;
+ // Owned or unowned instance of OatFileAssistantContext.
+ std::variant<std::unique_ptr<OatFileAssistantContext>, OatFileAssistantContext*> ofa_context_;
friend class OatFileAssistantTest;
diff --git a/runtime/oat_file_assistant_context.cc b/runtime/oat_file_assistant_context.cc
new file mode 100644
index 0000000000..d282d03efa
--- /dev/null
+++ b/runtime/oat_file_assistant_context.cc
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+#include "arch/instruction_set.h"
+#include "base/array_ref.h"
+#include "base/logging.h"
+#include "class_linker.h"
+#include "dex/art_dex_file_loader.h"
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
+#include "oat_file_assistant_context.h"
+
+namespace art {
+
+using ::android::base::StringPrintf;
+using ::art::gc::space::ImageSpace;
+
+OatFileAssistantContext::OatFileAssistantContext(
+ std::unique_ptr<OatFileAssistantContext::RuntimeOptions> runtime_options)
+ : runtime_options_(std::move(runtime_options)) {
+ DCHECK_EQ(runtime_options_->boot_class_path.size(),
+ runtime_options_->boot_class_path_locations.size());
+ DCHECK_IMPLIES(
+ runtime_options_->boot_class_path_fds != nullptr,
+ runtime_options_->boot_class_path.size() == runtime_options_->boot_class_path_fds->size());
+}
+
+OatFileAssistantContext::OatFileAssistantContext(Runtime* runtime)
+ : OatFileAssistantContext(std::make_unique<OatFileAssistantContext::RuntimeOptions>(
+ OatFileAssistantContext::RuntimeOptions{
+ .image_locations = runtime->GetImageLocations(),
+ .boot_class_path = runtime->GetBootClassPath(),
+ .boot_class_path_locations = runtime->GetBootClassPathLocations(),
+ .boot_class_path_fds = !runtime->GetBootClassPathFds().empty() ?
+ &runtime->GetBootClassPathFds() :
+ nullptr,
+ .deny_art_apex_data_files = runtime->DenyArtApexDataFiles(),
+ })) {
+ // Fetch boot image info from the runtime.
+ std::vector<BootImageInfo>& boot_image_info_list = boot_image_info_list_by_isa_[kRuntimeISA];
+ for (const ImageSpace* image_space : runtime->GetHeap()->GetBootImageSpaces()) {
+ // We only need the checksum of the first component for each boot image. They are in image
+ // spaces that have a non-zero component count.
+ if (image_space->GetComponentCount() > 0) {
+ BootImageInfo& boot_image_info = boot_image_info_list.emplace_back();
+ boot_image_info.component_count = image_space->GetComponentCount();
+ ImageSpace::AppendImageChecksum(image_space->GetComponentCount(),
+ image_space->GetImageHeader().GetImageChecksum(),
+ &boot_image_info.checksum);
+ }
+ }
+
+ // Fetch BCP checksums from the runtime.
+ size_t bcp_index = 0;
+ std::vector<std::string>* current_bcp_checksums = nullptr;
+ for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
+ if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) {
+ DCHECK_LT(bcp_index, runtime_options_->boot_class_path.size());
+ current_bcp_checksums = &bcp_checksums_by_index_[bcp_index++];
+ }
+ DCHECK_NE(current_bcp_checksums, nullptr);
+ current_bcp_checksums->push_back(StringPrintf("/%08x", dex_file->GetLocationChecksum()));
+ }
+ DCHECK_EQ(bcp_index, runtime_options_->boot_class_path.size());
+
+ // Fetch APEX versions from the runtime.
+ apex_versions_ = runtime->GetApexVersions();
+}
+
+const OatFileAssistantContext::RuntimeOptions& OatFileAssistantContext::GetRuntimeOptions() const {
+ return *runtime_options_;
+}
+
+const std::vector<OatFileAssistantContext::BootImageInfo>&
+OatFileAssistantContext::GetBootImageInfoList(InstructionSet isa) {
+ if (auto it = boot_image_info_list_by_isa_.find(isa); it != boot_image_info_list_by_isa_.end()) {
+ return it->second;
+ }
+
+ ImageSpace::BootImageLayout layout(
+ ArrayRef<const std::string>(runtime_options_->image_locations),
+ ArrayRef<const std::string>(runtime_options_->boot_class_path),
+ ArrayRef<const std::string>(runtime_options_->boot_class_path_locations),
+ runtime_options_->boot_class_path_fds != nullptr ?
+ ArrayRef<const int>(*runtime_options_->boot_class_path_fds) :
+ ArrayRef<const int>(),
+ /*boot_class_path_image_fds=*/ArrayRef<const int>(),
+ /*boot_class_path_vdex_fds=*/ArrayRef<const int>(),
+ /*boot_class_path_oat_fds=*/ArrayRef<const int>(),
+ &GetApexVersions());
+
+ std::string error_msg;
+ if (!layout.LoadFromSystem(isa, /*allow_in_memory_compilation=*/false, &error_msg)) {
+ // At this point, `layout` contains a subset of boot images that can be loaded.
+ VLOG(oat) << "Some error occurred when loading boot images for oat file validation: "
+ << error_msg;
+ }
+
+ std::vector<BootImageInfo>& boot_image_info_list = boot_image_info_list_by_isa_[isa];
+ for (const ImageSpace::BootImageLayout::ImageChunk& chunk : layout.GetChunks()) {
+ BootImageInfo& boot_image_info = boot_image_info_list.emplace_back();
+ boot_image_info.component_count = chunk.component_count;
+ ImageSpace::AppendImageChecksum(
+ chunk.component_count, chunk.checksum, &boot_image_info.checksum);
+ }
+ return boot_image_info_list;
+}
+
+const std::vector<std::string>* OatFileAssistantContext::GetBcpChecksums(size_t bcp_index,
+ std::string* error_msg) {
+ DCHECK_LT(bcp_index, runtime_options_->boot_class_path.size());
+
+ if (auto it = bcp_checksums_by_index_.find(bcp_index); it != bcp_checksums_by_index_.end()) {
+ return &it->second;
+ }
+
+ std::vector<uint32_t> checksums;
+ std::vector<std::string> dex_locations;
+ ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.GetMultiDexChecksums(
+ runtime_options_->boot_class_path[bcp_index].c_str(),
+ &checksums,
+ &dex_locations,
+ error_msg,
+ runtime_options_->boot_class_path_fds != nullptr ?
+ (*runtime_options_->boot_class_path_fds)[bcp_index] :
+ -1)) {
+ return nullptr;
+ }
+
+ DCHECK(!checksums.empty());
+ std::vector<std::string>& bcp_checksums = bcp_checksums_by_index_[bcp_index];
+ for (uint32_t checksum : checksums) {
+ bcp_checksums.push_back(StringPrintf("/%08x", checksum));
+ }
+ return &bcp_checksums;
+}
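
For reference, a tiny sketch of the two checksum string encodings produced here and documented for `BootImageInfo` in the header below; the numeric values are made up, and `snprintf` stands in for the real helpers (`StringPrintf`, `ImageSpace::AppendImageChecksum`):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>

int main() {
  char buf[64];

  // BCP dex file checksum, as appended by GetBcpChecksums: "/<8-digit hex>".
  uint32_t dex_checksum = 0x1234abcd;  // hypothetical location checksum
  std::snprintf(buf, sizeof(buf), "/%08x", dex_checksum);
  std::string bcp_entry = buf;  // "/1234abcd"

  // Boot image checksum: "i;<component_count>/<8-digit hex>".
  size_t component_count = 3;          // hypothetical
  uint32_t image_checksum = 0xdeadbeef;
  std::snprintf(buf, sizeof(buf), "i;%zu/%08x", component_count, image_checksum);
  std::string image_entry = buf;  // "i;3/deadbeef"

  std::printf("%s %s\n", bcp_entry.c_str(), image_entry.c_str());
}
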
+
+const std::string& OatFileAssistantContext::GetApexVersions() {
+ if (apex_versions_.has_value()) {
+ return apex_versions_.value();
+ }
+
+ apex_versions_ = Runtime::GetApexVersions(
+ ArrayRef<const std::string>(runtime_options_->boot_class_path_locations));
+ return apex_versions_.value();
+}
+
+} // namespace art
diff --git a/runtime/oat_file_assistant_context.h b/runtime/oat_file_assistant_context.h
new file mode 100644
index 0000000000..3288dc07c4
--- /dev/null
+++ b/runtime/oat_file_assistant_context.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OAT_FILE_ASSISTANT_CONTEXT_H_
+#define ART_RUNTIME_OAT_FILE_ASSISTANT_CONTEXT_H_
+
+#include <optional>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "runtime.h"
+
+namespace art {
+
+// A helper class for OatFileAssistant that fetches and caches information including boot image
+// checksums, bootclasspath checksums, and APEX versions. The same instance can be reused across
+// OatFileAssistant calls on different dex files for different instruction sets.
+class OatFileAssistantContext {
+ public:
+ // Options that a runtime would take.
+ // Note that the struct only keeps references, so the caller must keep the objects alive during
+ // the lifetime of OatFileAssistant.
+ struct RuntimeOptions {
+ // Required. See `-Ximage`.
+ const std::vector<std::string>& image_locations;
+ // Required. See `-Xbootclasspath`.
+ const std::vector<std::string>& boot_class_path;
+ // Required. See `-Xbootclasspath-locations`.
+ const std::vector<std::string>& boot_class_path_locations;
+ // Optional. See `-Xbootclasspathfds`.
+ const std::vector<int>* const boot_class_path_fds = nullptr;
+ // Optional. See `-Xdeny-art-apex-data-files`.
+ const bool deny_art_apex_data_files = false;
+ };
+
+ // Information about a boot image.
+ struct BootImageInfo {
+ // Number of BCP jars covered by the boot image.
+ size_t component_count;
+ // Checksum of the boot image. The format is "i;<component_count>/<checksum_in_8_digit_hex>".
+ std::string checksum;
+ };
+
+ // Constructs OatFileAssistantContext from runtime options. Does not fetch information on
+ // construction. Information will be fetched from disk when needed.
+ explicit OatFileAssistantContext(std::unique_ptr<RuntimeOptions> runtime_options);
+ // Constructs OatFileAssistantContext from a runtime instance. Fetches as much information as
+ // possible from the runtime. The rest of the information will be fetched from disk when needed.
+ explicit OatFileAssistantContext(Runtime* runtime);
+ // Returns runtime options.
+ const RuntimeOptions& GetRuntimeOptions() const;
+ // Returns information about the boot image of the given instruction set.
+ const std::vector<BootImageInfo>& GetBootImageInfoList(InstructionSet isa);
+ // Returns the checksums of the dex files in the BCP jar at the given index, or nullptr on error.
+ // The format of each checksum is "/<checksum_in_8_digit_hex>".
+ const std::vector<std::string>* GetBcpChecksums(size_t bcp_index, std::string* error_msg);
+ // Returns a string that represents the APEX versions of the boot classpath jars. See
+ // `Runtime::apex_versions_` for the encoding format.
+ const std::string& GetApexVersions();
+
+ private:
+ std::unique_ptr<RuntimeOptions> runtime_options_;
+ std::unordered_map<InstructionSet, std::vector<BootImageInfo>> boot_image_info_list_by_isa_;
+ std::unordered_map<size_t, std::vector<std::string>> bcp_checksums_by_index_;
+ std::optional<std::string> apex_versions_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_OAT_FILE_ASSISTANT_CONTEXT_H_
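
A hedged sketch of constructing this context without an active runtime, following the constructor usage in the tests below. The caller-owned vectors must outlive any OatFileAssistant that uses the context, and nothing is fetched until first queried:

#include <memory>
#include <string>
#include <vector>

#include "oat_file_assistant_context.h"

namespace art {

// Sketch only: the referenced vectors are caller-owned and must stay alive
// for as long as the returned context (and any OatFileAssistant using it).
std::unique_ptr<OatFileAssistantContext> MakeRuntimeLessContext(
    const std::vector<std::string>& image_locations,
    const std::vector<std::string>& boot_class_path,
    const std::vector<std::string>& boot_class_path_locations) {
  return std::make_unique<OatFileAssistantContext>(
      std::make_unique<OatFileAssistantContext::RuntimeOptions>(
          OatFileAssistantContext::RuntimeOptions{
              .image_locations = image_locations,
              .boot_class_path = boot_class_path,
              .boot_class_path_locations = boot_class_path_locations,
              // boot_class_path_fds and deny_art_apex_data_files keep their
              // defaults (nullptr / false).
          }));
}

}  // namespace art
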
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 07998dd59f..2904ecbab1 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -16,97 +16,149 @@
#include "oat_file_assistant.h"
+#include <fcntl.h>
+#include <gtest/gtest.h>
#include <sys/param.h>
+#include <functional>
+#include <iterator>
+#include <memory>
#include <string>
+#include <type_traits>
#include <vector>
-#include <fcntl.h>
-
-#include <gtest/gtest.h>
+#include "android-base/scopeguard.h"
#include "android-base/strings.h"
-
+#include "arch/instruction_set.h"
#include "art_field-inl.h"
#include "base/os.h"
#include "base/utils.h"
-#include "class_linker-inl.h"
+#include "class_linker.h"
#include "class_loader_context.h"
#include "common_runtime_test.h"
#include "dexopt_test.h"
-#include "hidden_api.h"
#include "oat.h"
#include "oat_file.h"
+#include "oat_file_assistant_context.h"
#include "oat_file_manager.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread-current-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
namespace art {
-class OatFileAssistantTest : public DexoptTest {
+class OatFileAssistantBaseTest : public DexoptTest {};
+
+class OatFileAssistantTest : public OatFileAssistantBaseTest,
+ public testing::WithParamInterface<bool> {
public:
- void VerifyOptimizationStatus(OatFileAssistant* assistant,
- const std::string& file,
- const std::string& expected_filter,
- const std::string& expected_reason,
- const std::string& expected_odex_status) {
- // Verify the static methods (called from PM for dexOptNeeded).
- std::string compilation_filter1;
- std::string compilation_reason1;
-
- OatFileAssistant::GetOptimizationStatus(
- file, kRuntimeISA, &compilation_filter1, &compilation_reason1);
-
- ASSERT_EQ(expected_filter, compilation_filter1);
- ASSERT_EQ(expected_reason, compilation_reason1);
-
- // Verify the instance methods (called at runtime for systrace).
- std::string odex_location2; // ignored
- std::string compilation_filter2;
- std::string compilation_reason2;
- std::string odex_status2;
-
- assistant->GetOptimizationStatus(
- &odex_location2,
- &compilation_filter2,
- &compilation_reason2,
- &odex_status2);
-
- ASSERT_EQ(expected_filter, compilation_filter2);
- ASSERT_EQ(expected_reason, compilation_reason2);
- ASSERT_EQ(expected_odex_status, odex_status2);
+ void SetUp() override {
+ DexoptTest::SetUp();
+ with_runtime_ = GetParam();
+ ofa_context_ = CreateOatFileAssistantContext();
}
- void VerifyOptimizationStatus(OatFileAssistant* assistant,
- const std::string& file,
- CompilerFilter::Filter expected_filter,
+ // Verifies all variants of `GetOptimizationStatus`.
+ //
+ // `expected_filter` can be either a value of `CompilerFilter::Filter` or a string.
+ // If `check_context` is true, only verifies the variants that check the class loader context.
+ template <typename T>
+ void VerifyOptimizationStatus(const std::string& file,
+ ClassLoaderContext* context,
+ const T& expected_filter,
const std::string& expected_reason,
- const std::string& expected_odex_status) {
- VerifyOptimizationStatus(
- assistant,
- file,
- CompilerFilter::NameOfFilter(expected_filter),
- expected_reason,
- expected_odex_status);
+ const std::string& expected_odex_status,
+ bool check_context = false) {
+ std::string expected_filter_name;
+ if constexpr (std::is_same_v<T, CompilerFilter::Filter>) {
+ expected_filter_name = CompilerFilter::NameOfFilter(expected_filter);
+ } else {
+ expected_filter_name = expected_filter;
+ }
+
+ // Verify the static method (called from PM for dumpsys).
+ // This variant does not check class loader context.
+ if (!check_context) {
+ std::string compilation_filter1;
+ std::string compilation_reason1;
+
+ OatFileAssistant::GetOptimizationStatus(file,
+ kRuntimeISA,
+ &compilation_filter1,
+ &compilation_reason1,
+ MaybeGetOatFileAssistantContext());
+
+ ASSERT_EQ(expected_filter_name, compilation_filter1);
+ ASSERT_EQ(expected_reason, compilation_reason1);
+ }
+
+ // Verify the instance methods (called at runtime and from artd).
+ OatFileAssistant assistant = CreateOatFileAssistant(file.c_str(), context);
+
+ std::string odex_location3; // ignored
+ std::string compilation_filter3;
+ std::string compilation_reason3;
+ std::string odex_status3;
+
+ assistant.GetOptimizationStatus(
+ &odex_location3, &compilation_filter3, &compilation_reason3, &odex_status3);
+
+ ASSERT_EQ(expected_filter_name, compilation_filter3);
+ ASSERT_EQ(expected_reason, compilation_reason3);
+ ASSERT_EQ(expected_odex_status, odex_status3);
}
- void InsertNewBootClasspathEntry() {
- std::string extra_dex_filename = GetMultiDexSrc1();
- Runtime* runtime = Runtime::Current();
- runtime->boot_class_path_.push_back(extra_dex_filename);
- if (!runtime->boot_class_path_locations_.empty()) {
- runtime->boot_class_path_locations_.push_back(extra_dex_filename);
+ bool InsertNewBootClasspathEntry(const std::string& src, std::string* error_msg) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(src.c_str(),
+ src,
+ /*verify=*/true,
+ /*verify_checksum=*/false,
+ error_msg,
+ &dex_files)) {
+ return false;
}
+
+ runtime_->AppendToBootClassPath(src, src, dex_files);
+ std::move(dex_files.begin(), dex_files.end(), std::back_inserter(opened_dex_files_));
+
+ return true;
}
- int GetDexOptNeeded(
- OatFileAssistant* assistant,
- CompilerFilter::Filter compiler_filter,
- bool profile_changed = false,
- bool downgrade = false) {
- return assistant->GetDexOptNeeded(
- compiler_filter,
- profile_changed,
- downgrade);
+ // Verifies the current version of `GetDexOptNeeded` (called from artd).
+ void VerifyGetDexOptNeeded(OatFileAssistant* assistant,
+ CompilerFilter::Filter compiler_filter,
+ OatFileAssistant::DexOptTrigger dexopt_trigger,
+ bool expected_dexopt_needed,
+ bool expected_is_vdex_usable,
+ OatFileAssistant::Location expected_location) {
+ OatFileAssistant::DexOptStatus status;
+ EXPECT_EQ(
+ assistant->GetDexOptNeeded(compiler_filter, dexopt_trigger, &status),
+ expected_dexopt_needed);
+ EXPECT_EQ(status.IsVdexUsable(), expected_is_vdex_usable);
+ EXPECT_EQ(status.GetLocation(), expected_location);
+ }
+
+ // Verifies all versions of `GetDexOptNeeded` with the default dexopt trigger.
+ void VerifyGetDexOptNeededDefault(OatFileAssistant* assistant,
+ CompilerFilter::Filter compiler_filter,
+ bool expected_dexopt_needed,
+ bool expected_is_vdex_usable,
+ OatFileAssistant::Location expected_location,
+ int expected_legacy_result) {
+ // Verify the current version (called from artd).
+ VerifyGetDexOptNeeded(assistant,
+ compiler_filter,
+ default_trigger_,
+ expected_dexopt_needed,
+ expected_is_vdex_usable,
+ expected_location);
+
+ // Verify the legacy version (called from PM).
+ EXPECT_EQ(
+ assistant->GetDexOptNeeded(compiler_filter, /*profile_changed=*/false, /*downgrade=*/false),
+ expected_legacy_result);
}
static std::unique_ptr<ClassLoaderContext> InitializeDefaultContext() {
@@ -115,7 +167,58 @@ class OatFileAssistantTest : public DexoptTest {
return context;
}
+ // Temporarily clears the pointer to the current runtime if `with_runtime_` is false,
+ // essentially simulating an environment where there is no active runtime.
+ android::base::ScopeGuard<std::function<void()>> ScopedMaybeWithoutRuntime() {
+ if (!with_runtime_) {
+ Runtime::TestOnlySetCurrent(nullptr);
+ }
+ return android::base::make_scope_guard(
+ [this]() { Runtime::TestOnlySetCurrent(runtime_.get()); });
+ }
+
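
The helper above is the usual scope-guard idiom: mutate global state, then return a guard that restores it when it goes out of scope. A minimal generic sketch using the same android-base helper; the `g_flag` state is illustrative:

#include <functional>

#include "android-base/scopeguard.h"

bool g_flag = true;  // illustrative global state

// Clears g_flag for the current scope; the returned guard restores the saved
// value when it is destroyed, even on early return.
android::base::ScopeGuard<std::function<void()>> ScopedClearFlag() {
  bool saved = g_flag;
  g_flag = false;
  return android::base::make_scope_guard([saved]() { g_flag = saved; });
}

// Usage:
//   {
//     auto guard = ScopedClearFlag();
//     ...  // g_flag is false here
//   }  // g_flag restored on scope exit
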
+ std::unique_ptr<OatFileAssistantContext> CreateOatFileAssistantContext() {
+ return std::make_unique<OatFileAssistantContext>(
+ std::make_unique<OatFileAssistantContext::RuntimeOptions>(
+ OatFileAssistantContext::RuntimeOptions{
+ .image_locations = runtime_->GetImageLocations(),
+ .boot_class_path = runtime_->GetBootClassPath(),
+ .boot_class_path_locations = runtime_->GetBootClassPathLocations(),
+ .boot_class_path_fds = !runtime_->GetBootClassPathFds().empty() ?
+ &runtime_->GetBootClassPathFds() :
+ nullptr,
+ .deny_art_apex_data_files = runtime_->DenyArtApexDataFiles(),
+ }));
+ }
+
+ OatFileAssistantContext* MaybeGetOatFileAssistantContext() {
+ return with_runtime_ ? nullptr : ofa_context_.get();
+ }
+
+ // A helper function to create an OatFileAssistant with some default arguments.
+ OatFileAssistant CreateOatFileAssistant(const char* dex_location,
+ ClassLoaderContext* context = nullptr,
+ bool load_executable = false,
+ int vdex_fd = -1,
+ int oat_fd = -1,
+ int zip_fd = -1) {
+ return OatFileAssistant(dex_location,
+ kRuntimeISA,
+ context != nullptr ? context : default_context_.get(),
+ load_executable,
+ /*only_load_trusted_executable=*/false,
+ MaybeGetOatFileAssistantContext(),
+ vdex_fd,
+ oat_fd,
+ zip_fd);
+ }
+
std::unique_ptr<ClassLoaderContext> default_context_ = InitializeDefaultContext();
+ bool with_runtime_;
+ const OatFileAssistant::DexOptTrigger default_trigger_{.targetFilterIsBetter = true,
+ .primaryBootImageBecomesUsable = true};
+ std::unique_ptr<OatFileAssistantContext> ofa_context_;
+ std::vector<std::unique_ptr<const DexFile>> opened_dex_files_;
};
class ScopedNonWritable {
@@ -154,7 +257,7 @@ static bool IsExecutedAsRoot() {
// Case: We have a MultiDEX file and up-to-date ODEX file for it with relative
// encoded dex locations.
// Expect: The oat file status is kNoDexOptNeeded.
-TEST_F(OatFileAssistantTest, RelativeEncodedDexLocation) {
+TEST_P(OatFileAssistantTest, RelativeEncodedDexLocation) {
std::string dex_location = GetScratchDir() + "/RelativeEncodedDexLocation.jar";
std::string odex_location = GetOdexDir() + "/RelativeEncodedDexLocation.odex";
@@ -172,21 +275,24 @@ TEST_F(OatFileAssistantTest, RelativeEncodedDexLocation) {
std::string error_msg;
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
// Verify we can load both dex files.
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/true);
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_TRUE(oat_file->IsExecutable());
+ if (with_runtime_) {
+ EXPECT_TRUE(oat_file->IsExecutable());
+ }
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(2u, dex_files.size());
}
-TEST_F(OatFileAssistantTest, MakeUpToDateWithContext) {
+TEST_P(OatFileAssistantTest, MakeUpToDateWithContext) {
std::string dex_location = GetScratchDir() + "/TestDex.jar";
std::string odex_location = GetOdexDir() + "/TestDex.odex";
std::string context_location = GetScratchDir() + "/ContextDex.jar";
@@ -198,8 +304,6 @@ TEST_F(OatFileAssistantTest, MakeUpToDateWithContext) {
ASSERT_TRUE(context != nullptr);
ASSERT_TRUE(context->OpenDexFiles());
- OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, context.get(), false);
-
std::string error_msg;
std::vector<std::string> args;
args.push_back("--dex-file=" + dex_location);
@@ -207,6 +311,10 @@ TEST_F(OatFileAssistantTest, MakeUpToDateWithContext) {
args.push_back("--class-loader-context=" + context_str);
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(), context.get());
+
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_NE(nullptr, oat_file.get());
ASSERT_NE(nullptr, oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
@@ -214,7 +322,7 @@ TEST_F(OatFileAssistantTest, MakeUpToDateWithContext) {
oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kClassPathKey));
}
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
+TEST_P(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
std::string dex_location = GetScratchDir() + "/TestDex.jar";
std::string odex_location = GetOdexDir() + "/TestDex.odex";
std::string context_location = GetScratchDir() + "/ContextDex.jar";
@@ -228,11 +336,6 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
std::vector<int> context_fds;
ASSERT_TRUE(relative_context->OpenDexFiles(GetScratchDir(), context_fds));
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- relative_context.get(),
- false);
-
std::string error_msg;
std::vector<std::string> args;
args.push_back("--dex-file=" + dex_location);
@@ -240,29 +343,53 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithUpToDateContextRelative) {
args.push_back("--class-loader-context=PCL[" + context_location + "]");
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kDefaultCompilerFilter));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant =
+ CreateOatFileAssistant(dex_location.c_str(), relative_context.get());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kDefaultCompilerFilter,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
}
// Case: We have a DEX file, but no OAT file for it.
// Expect: The status is kDex2OatNeeded.
-TEST_F(OatFileAssistantTest, DexNoOat) {
+TEST_P(OatFileAssistantTest, DexNoOat) {
std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
Copy(GetDexSrc1(), dex_location);
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
-
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeedProfile));
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeedProfile,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -270,25 +397,24 @@ TEST_F(OatFileAssistantTest, DexNoOat) {
EXPECT_TRUE(oat_file_assistant.HasDexFiles());
VerifyOptimizationStatus(
- &oat_file_assistant,
- dex_location,
- "run-from-apk",
- "unknown",
- "io-error-no-oat");
+ dex_location, default_context_.get(), "run-from-apk", "unknown", "io-error-no-oat");
}
// Case: We have no DEX file and no OAT file.
// Expect: Status is kNoDexOptNeeded. Loading should fail, but not crash.
-TEST_F(OatFileAssistantTest, NoDexNoOat) {
+TEST_P(OatFileAssistantTest, NoDexNoOat) {
std::string dex_location = GetScratchDir() + "/NoDexNoOat.jar";
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
EXPECT_FALSE(oat_file_assistant.HasDexFiles());
// Trying to get the best oat file should fail, but not crash.
@@ -298,27 +424,41 @@ TEST_F(OatFileAssistantTest, NoDexNoOat) {
// Case: We have a DEX file and an ODEX file, but no OAT file.
// Expect: The status is kNoDexOptNeeded.
-TEST_F(OatFileAssistantTest, OdexUpToDate) {
+TEST_P(OatFileAssistantTest, OdexUpToDate) {
std::string dex_location = GetScratchDir() + "/OdexUpToDate.jar";
std::string odex_location = GetOdexDir() + "/OdexUpToDate.odex";
Copy(GetDexSrc1(), dex_location);
GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, "install");
- // Force the use of oat location by making the dex parent not writable.
- OatFileAssistant oat_file_assistant(
- dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- /*load_executable=*/ false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
+ // Force the use of oat location by making the dex parent not writable.
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kEverything,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kDex2OatForFilter);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -326,39 +466,50 @@ TEST_F(OatFileAssistantTest, OdexUpToDate) {
EXPECT_TRUE(oat_file_assistant.HasDexFiles());
VerifyOptimizationStatus(
- &oat_file_assistant,
- dex_location,
- CompilerFilter::kSpeed,
- "install",
- "up-to-date");
+ dex_location, default_context_.get(), CompilerFilter::kSpeed, "install", "up-to-date");
}
// Case: We have an ODEX file compiled against partial boot image.
// Expect: The status is kNoDexOptNeeded.
-TEST_F(OatFileAssistantTest, OdexUpToDatePartialBootImage) {
+TEST_P(OatFileAssistantTest, OdexUpToDatePartialBootImage) {
std::string dex_location = GetScratchDir() + "/OdexUpToDate.jar";
std::string odex_location = GetOdexDir() + "/OdexUpToDate.odex";
Copy(GetDexSrc1(), dex_location);
GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, "install");
// Insert an extra dex file to the boot class path.
- InsertNewBootClasspathEntry();
+ std::string error_msg;
+ ASSERT_TRUE(InsertNewBootClasspathEntry(GetMultiDexSrc1(), &error_msg)) << error_msg;
- // Force the use of oat location by making the dex parent not writable.
- OatFileAssistant oat_file_assistant(
- dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- /*load_executable=*/ false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
+ // Force the use of oat location by making the dex parent not writable.
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kEverything,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kDex2OatForFilter);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -366,17 +517,13 @@ TEST_F(OatFileAssistantTest, OdexUpToDatePartialBootImage) {
EXPECT_TRUE(oat_file_assistant.HasDexFiles());
VerifyOptimizationStatus(
- &oat_file_assistant,
- dex_location,
- CompilerFilter::kSpeed,
- "install",
- "up-to-date");
+ dex_location, default_context_.get(), CompilerFilter::kSpeed, "install", "up-to-date");
}
// Case: We have a DEX file and a PIC ODEX file, but no OAT file. We load the dex
// file via a symlink.
// Expect: The status is kNoDexOptNeeded.
-TEST_F(OatFileAssistantTest, OdexUpToDateSymLink) {
+TEST_P(OatFileAssistantTest, OdexUpToDateSymLink) {
std::string scratch_dir = GetScratchDir();
std::string dex_location = GetScratchDir() + "/OdexUpToDate.jar";
std::string odex_location = GetOdexDir() + "/OdexUpToDate.odex";
@@ -389,19 +536,34 @@ TEST_F(OatFileAssistantTest, OdexUpToDateSymLink) {
ASSERT_EQ(0, symlink(scratch_dir.c_str(), link.c_str()));
dex_location = link + "/OdexUpToDate.jar";
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
-
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kEverything,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kDex2OatForFilter);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -411,7 +573,7 @@ TEST_F(OatFileAssistantTest, OdexUpToDateSymLink) {
// Case: We have a DEX file and up-to-date OAT file for it.
// Expect: The status is kNoDexOptNeeded.
-TEST_F(OatFileAssistantTest, OatUpToDate) {
+TEST_P(OatFileAssistantTest, OatUpToDate) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -426,19 +588,34 @@ TEST_F(OatFileAssistantTest, OatUpToDate) {
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
-
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kEverything,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatForFilter);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -446,16 +623,12 @@ TEST_F(OatFileAssistantTest, OatUpToDate) {
EXPECT_TRUE(oat_file_assistant.HasDexFiles());
VerifyOptimizationStatus(
- &oat_file_assistant,
- dex_location,
- CompilerFilter::kSpeed,
- "unknown",
- "up-to-date");
+ dex_location, default_context_.get(), CompilerFilter::kSpeed, "unknown", "up-to-date");
}
// Case: Passing valid file descriptors of updated odex/vdex files along with the dex file.
// Expect: The status is kNoDexOptNeeded.
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithFd) {
+TEST_P(OatFileAssistantTest, GetDexOptNeededWithFd) {
std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
std::string vdex_location = GetScratchDir() + "/OatUpToDate.vdex";
@@ -470,22 +643,38 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithFd) {
android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC));
android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false,
- false,
- vdex_fd.get(),
- odex_fd.get(),
- zip_fd.get());
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/false,
+ vdex_fd.get(),
+ odex_fd.get(),
+ zip_fd.get());
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
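+ // By convention, a negative legacy result means the advice refers to the odex file (here, the
+ // file behind odex_fd) rather than an oat file in the dalvik cache.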
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kEverything,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kDex2OatForFilter);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -495,7 +684,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithFd) {
// Case: Passing invalid odex fd and valid vdex and zip fds.
// Expect: The status should be kDex2OatForFilter.
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) {
+TEST_P(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) {
std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
std::string vdex_location = GetScratchDir() + "/OatUpToDate.vdex";
@@ -509,20 +698,32 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) {
android::base::unique_fd vdex_fd(open(vdex_location.c_str(), O_RDONLY | O_CLOEXEC));
android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false,
- false,
- vdex_fd.get(),
- /* oat_fd= */ -1,
- zip_fd.get());
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
- EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
- EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/false,
+ vdex_fd.get(),
+ /*oat_fd=*/-1,
+ zip_fd.get());
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kDex2OatForFilter);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kEverything,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kDex2OatForFilter);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -532,7 +733,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexFd) {
// Case: Passing invalid vdex fd and valid odex and zip fds.
// Expect: The status should be kDex2OatFromScratch.
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) {
+TEST_P(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) {
std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
std::string odex_location = GetScratchDir() + "/OatUpToDate.odex";
@@ -545,17 +746,21 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) {
android::base::unique_fd odex_fd(open(odex_location.c_str(), O_RDONLY | O_CLOEXEC));
android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false,
- false,
- /* vdex_fd= */ -1,
- odex_fd.get(),
- zip_fd.get());
-
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/false,
+ /*vdex_fd=*/-1,
+ odex_fd.get(),
+ zip_fd.get());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
@@ -564,29 +769,34 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidVdexFd) {
// Case: Passing invalid vdex and odex fd with valid zip fd.
// Expect: The status is kDex2OatFromScratch.
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexVdexFd) {
+TEST_P(OatFileAssistantTest, GetDexOptNeededWithInvalidOdexVdexFd) {
std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
Copy(GetDexSrc1(), dex_location);
android::base::unique_fd zip_fd(open(dex_location.c_str(), O_RDONLY | O_CLOEXEC));
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false,
- false,
- /* vdex_fd= */ -1,
- /* oat_fd= */ -1,
- zip_fd);
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/false,
+ /*vdex_fd=*/-1,
+ /*oat_fd=*/-1,
+ zip_fd);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
}
// Case: We have a DEX file and up-to-date VDEX file for it, but no
// ODEX file.
-TEST_F(OatFileAssistantTest, VdexUpToDateNoOdex) {
+TEST_P(OatFileAssistantTest, VdexUpToDateNoOdex) {
std::string dex_location = GetScratchDir() + "/VdexUpToDateNoOdex.jar";
std::string odex_location = GetOdexDir() + "/VdexUpToDateNoOdex.oat";
@@ -597,30 +807,32 @@ TEST_F(OatFileAssistantTest, VdexUpToDateNoOdex) {
GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
ASSERT_EQ(0, unlink(odex_location.c_str()));
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
- EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
- EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kDex2OatForFilter);
// Make sure we don't crash in this case when we dump the status. We don't
// care what the actual dumped value is.
oat_file_assistant.GetStatusDump();
- VerifyOptimizationStatus(
- &oat_file_assistant,
- dex_location,
- "verify",
- "vdex",
- "up-to-date");
+ VerifyOptimizationStatus(dex_location, default_context_.get(), "verify", "vdex", "up-to-date");
}
// Case: We have a DEX file and empty VDEX and ODEX files.
-TEST_F(OatFileAssistantTest, EmptyVdexOdex) {
+TEST_P(OatFileAssistantTest, EmptyVdexOdex) {
std::string dex_location = GetScratchDir() + "/EmptyVdexOdex.jar";
std::string odex_location = GetOdexDir() + "/EmptyVdexOdex.oat";
std::string vdex_location = GetOdexDir() + "/EmptyVdexOdex.vdex";
@@ -629,17 +841,20 @@ TEST_F(OatFileAssistantTest, EmptyVdexOdex) {
ScratchFile vdex_file(vdex_location.c_str());
ScratchFile odex_file(odex_location.c_str());
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
}
// Case: We have a DEX file and up-to-date (OAT) VDEX file for it, but no OAT
// file.
-TEST_F(OatFileAssistantTest, VdexUpToDateNoOat) {
+TEST_P(OatFileAssistantTest, VdexUpToDateNoOat) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -650,7 +865,8 @@ TEST_F(OatFileAssistantTest, VdexUpToDateNoOat) {
std::string oat_location;
std::string error_msg;
ASSERT_TRUE(OatFileAssistant::DexLocationToOatFilename(
- dex_location, kRuntimeISA, &oat_location, &error_msg)) << error_msg;
+ dex_location, kRuntimeISA, /*deny_art_apex_data_files=*/false, &oat_location, &error_msg))
+ << error_msg;
Copy(GetDexSrc1(), dex_location);
GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
@@ -658,19 +874,23 @@ TEST_F(OatFileAssistantTest, VdexUpToDateNoOat) {
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
- EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatForFilter);
}
// Case: We have a DEX file and speed-profile OAT file for it.
// Expect: The status is kNoDexOptNeeded if the profile hasn't changed, but
// kDex2OatForFilter if the profile has changed.
-TEST_F(OatFileAssistantTest, ProfileOatUpToDate) {
+TEST_P(OatFileAssistantTest, ProfileOatUpToDate) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -684,19 +904,52 @@ TEST_F(OatFileAssistantTest, ProfileOatUpToDate) {
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
-
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeedProfile,
+ default_trigger_,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat);
+ EXPECT_EQ(
+ OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeedProfile, /*profile_changed=*/false));
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ default_trigger_,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat);
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeedProfile, false));
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerify, /*profile_changed=*/false));
+
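+ // A changed profile is expressed in the trigger API by also firing when the target filter is
+ // the same as the current one: recompiling at the same profile-guided filter becomes useful.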
+ OatFileAssistant::DexOptTrigger profile_changed_trigger = default_trigger_;
+ profile_changed_trigger.targetFilterIsSame = true;
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeedProfile,
+ profile_changed_trigger,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat);
+ EXPECT_EQ(
+ OatFileAssistant::kDex2OatForFilter,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeedProfile, /*profile_changed=*/true));
+
+ // We should not recompile even if `profile_changed` is true because the compiler filter should
+ // not be downgraded.
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ profile_changed_trigger,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat);
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify, false));
- EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeedProfile, true));
- EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify, true));
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerify, /*profile_changed=*/true));
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -706,7 +959,7 @@ TEST_F(OatFileAssistantTest, ProfileOatUpToDate) {
// Case: We have a MultiDEX file and up-to-date OAT file for it.
// Expect: The status is kNoDexOptNeeded and we load all dex files.
-TEST_F(OatFileAssistantTest, MultiDexOatUpToDate) {
+TEST_P(OatFileAssistantTest, MultiDexOatUpToDate) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -720,18 +973,25 @@ TEST_F(OatFileAssistantTest, MultiDexOatUpToDate) {
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/true);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
EXPECT_TRUE(oat_file_assistant.HasDexFiles());
// Verify we can load both dex files.
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_TRUE(oat_file->IsExecutable());
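+ // Without a runtime, the oat file is not loaded executable, so this check only applies to the
+ // with-runtime variant of the test.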
+ if (with_runtime_) {
+ EXPECT_TRUE(oat_file->IsExecutable());
+ }
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(2u, dex_files.size());
@@ -739,7 +999,7 @@ TEST_F(OatFileAssistantTest, MultiDexOatUpToDate) {
// Case: We have a MultiDEX file where the non-main multidex entry is out of date.
// Expect: The status is kDex2OatFromScratch.
-TEST_F(OatFileAssistantTest, MultiDexNonMainOutOfDate) {
+TEST_P(OatFileAssistantTest, MultiDexNonMainOutOfDate) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -759,18 +1019,21 @@ TEST_F(OatFileAssistantTest, MultiDexNonMainOutOfDate) {
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
EXPECT_TRUE(oat_file_assistant.HasDexFiles());
}
// Case: We have a DEX file and an OAT file out of date with respect to the
// dex checksum.
-TEST_F(OatFileAssistantTest, OatDexOutOfDate) {
+TEST_P(OatFileAssistantTest, OatDexOutOfDate) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -788,14 +1051,21 @@ TEST_F(OatFileAssistantTest, OatDexOutOfDate) {
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -803,16 +1073,12 @@ TEST_F(OatFileAssistantTest, OatDexOutOfDate) {
EXPECT_TRUE(oat_file_assistant.HasDexFiles());
VerifyOptimizationStatus(
- &oat_file_assistant,
- dex_location,
- "run-from-apk-fallback",
- "unknown",
- "apk-more-recent");
+ dex_location, default_context_.get(), "run-from-apk-fallback", "unknown", "apk-more-recent");
}
// Case: We have a DEX file and an (ODEX) VDEX file out of date with respect
// to the dex checksum, but no ODEX file.
-TEST_F(OatFileAssistantTest, VdexDexOutOfDate) {
+TEST_P(OatFileAssistantTest, VdexDexOutOfDate) {
std::string dex_location = GetScratchDir() + "/VdexDexOutOfDate.jar";
std::string odex_location = GetOdexDir() + "/VdexDexOutOfDate.oat";
@@ -821,18 +1087,21 @@ TEST_F(OatFileAssistantTest, VdexDexOutOfDate) {
ASSERT_EQ(0, unlink(odex_location.c_str()));
Copy(GetDexSrc2(), dex_location);
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
}
// Case: We have a MultiDEX (ODEX) VDEX file where the non-main multidex entry
// is out of date and there is no corresponding ODEX file.
-TEST_F(OatFileAssistantTest, VdexMultiDexNonMainOutOfDate) {
+TEST_P(OatFileAssistantTest, VdexMultiDexNonMainOutOfDate) {
std::string dex_location = GetScratchDir() + "/VdexMultiDexNonMainOutOfDate.jar";
std::string odex_location = GetOdexDir() + "/VdexMultiDexNonMainOutOfDate.odex";
@@ -841,18 +1110,21 @@ TEST_F(OatFileAssistantTest, VdexMultiDexNonMainOutOfDate) {
ASSERT_EQ(0, unlink(odex_location.c_str()));
Copy(GetMultiDexSrc2(), dex_location);
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
}
// Case: We have a DEX file and an OAT file out of date with respect to the
// boot image.
-TEST_F(OatFileAssistantTest, OatImageOutOfDate) {
+TEST_P(OatFileAssistantTest, OatImageOutOfDate) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -869,35 +1141,70 @@ TEST_F(OatFileAssistantTest, OatImageOutOfDate) {
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
- EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatForFilter);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
EXPECT_EQ(OatFileAssistant::kOatBootImageOutOfDate, oat_file_assistant.OatFileStatus());
EXPECT_TRUE(oat_file_assistant.HasDexFiles());
+ VerifyOptimizationStatus(dex_location, default_context_.get(), "verify", "vdex", "up-to-date");
+}
+
+TEST_P(OatFileAssistantTest, OatContextOutOfDate) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ std::string odex_location = GetOdexDir() + "/TestDex.odex";
+
+ std::string context_location = GetScratchDir() + "/ContextDex.jar";
+ Copy(GetDexSrc1(), dex_location);
+ Copy(GetDexSrc2(), context_location);
+
+ std::string error_msg;
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location);
+ args.push_back("--oat-file=" + odex_location);
+ args.push_back("--class-loader-context=PCL[" + context_location + "]");
+ ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+
+ // Update the context by overriding the jar file.
+ Copy(GetMultiDexSrc2(), context_location);
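+ // The replacement jar has different dex checksums, so the class loader context recorded in the
+ // odex file no longer matches.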
+
+ std::unique_ptr<ClassLoaderContext> context =
+ ClassLoaderContext::Create("PCL[" + context_location + "]");
+ ASSERT_TRUE(context != nullptr);
+ ASSERT_TRUE(context->OpenDexFiles());
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
VerifyOptimizationStatus(
- &oat_file_assistant,
- dex_location,
- "verify",
- "vdex",
- "up-to-date");
+ dex_location.c_str(), context.get(), "verify", "vdex", "up-to-date", /*check_context=*/true);
}
// Case: We have a DEX file and a verify-at-runtime OAT file out of date with
// respect to the boot image.
// It shouldn't matter that the OAT file is out of date, because it is
// verify-at-runtime.
-TEST_F(OatFileAssistantTest, OatVerifyAtRuntimeImageOutOfDate) {
+TEST_P(OatFileAssistantTest, OatVerifyAtRuntimeImageOutOfDate) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -914,14 +1221,21 @@ TEST_F(OatFileAssistantTest, OatVerifyAtRuntimeImageOutOfDate) {
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOat,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatForFilter);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -930,7 +1244,7 @@ TEST_F(OatFileAssistantTest, OatVerifyAtRuntimeImageOutOfDate) {
}
// Case: We have a DEX file and an ODEX file, but no OAT file.
-TEST_F(OatFileAssistantTest, DexOdexNoOat) {
+TEST_P(OatFileAssistantTest, DexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/DexOdexNoOat.jar";
std::string odex_location = GetOdexDir() + "/DexOdexNoOat.odex";
@@ -938,16 +1252,23 @@ TEST_F(OatFileAssistantTest, DexOdexNoOat) {
Copy(GetDexSrc1(), dex_location);
GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
- // Verify the status.
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ // Verify the status.
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -961,31 +1282,46 @@ TEST_F(OatFileAssistantTest, DexOdexNoOat) {
// Case: We have a resource-only DEX file, no ODEX file and no
// OAT file. Expect: The status is kNoDexOptNeeded.
-TEST_F(OatFileAssistantTest, ResourceOnlyDex) {
+TEST_P(OatFileAssistantTest, ResourceOnlyDex) {
std::string dex_location = GetScratchDir() + "/ResourceOnlyDex.jar";
Copy(GetResourceOnlySrc1(), dex_location);
- // Verify the status.
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kVerify));
+ // Verify the status.
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
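+ // A resource-only jar contains no dex code, so there is nothing to compile and no vdex to use.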
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
EXPECT_FALSE(oat_file_assistant.HasDexFiles());
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -995,7 +1331,7 @@ TEST_F(OatFileAssistantTest, ResourceOnlyDex) {
// Case: We have a DEX file, an ODEX file and an OAT file.
// Expect: It shouldn't crash. We should load the odex file executable.
-TEST_F(OatFileAssistantTest, OdexOatOverlap) {
+TEST_P(OatFileAssistantTest, OdexOatOverlap) {
std::string dex_location = GetScratchDir() + "/OdexOatOverlap.jar";
std::string odex_location = GetOdexDir() + "/OdexOatOverlap.odex";
@@ -1004,14 +1340,19 @@ TEST_F(OatFileAssistantTest, OdexOatOverlap) {
GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
// Verify things don't go bad.
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/true);
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -1021,7 +1362,9 @@ TEST_F(OatFileAssistantTest, OdexOatOverlap) {
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_TRUE(oat_file->IsExecutable());
+ if (with_runtime_) {
+ EXPECT_TRUE(oat_file->IsExecutable());
+ }
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(1u, dex_files.size());
@@ -1029,7 +1372,7 @@ TEST_F(OatFileAssistantTest, OdexOatOverlap) {
// Case: We have a DEX file and a VerifyAtRuntime ODEX file, but no OAT file.
// Expect: The status is kNoDexOptNeeded, because VerifyAtRuntime contains no code.
-TEST_F(OatFileAssistantTest, DexVerifyAtRuntimeOdexNoOat) {
+TEST_P(OatFileAssistantTest, DexVerifyAtRuntimeOdexNoOat) {
std::string dex_location = GetScratchDir() + "/DexVerifyAtRuntimeOdexNoOat.jar";
std::string odex_location = GetOdexDir() + "/DexVerifyAtRuntimeOdexNoOat.odex";
@@ -1037,16 +1380,23 @@ TEST_F(OatFileAssistantTest, DexVerifyAtRuntimeOdexNoOat) {
Copy(GetDexSrc1(), dex_location);
GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kExtract);
- // Verify the status.
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
- EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ // Verify the status.
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kDex2OatForFilter);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -1056,7 +1406,7 @@ TEST_F(OatFileAssistantTest, DexVerifyAtRuntimeOdexNoOat) {
// Case: We have a DEX file and up-to-date OAT file for it.
// Expect: We should load an executable dex file.
-TEST_F(OatFileAssistantTest, LoadOatUpToDate) {
+TEST_P(OatFileAssistantTest, LoadOatUpToDate) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -1071,15 +1421,18 @@ TEST_F(OatFileAssistantTest, LoadOatUpToDate) {
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
// Load the oat using an oat file assistant.
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/true);
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_TRUE(oat_file->IsExecutable());
+ if (with_runtime_) {
+ EXPECT_TRUE(oat_file->IsExecutable());
+ }
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(1u, dex_files.size());
@@ -1087,7 +1440,7 @@ TEST_F(OatFileAssistantTest, LoadOatUpToDate) {
// Case: We have a DEX file and up-to-date quicken OAT file for it.
// Expect: We should still load the oat file as executable.
-TEST_F(OatFileAssistantTest, LoadExecInterpretOnlyOatUpToDate) {
+TEST_P(OatFileAssistantTest, LoadExecInterpretOnlyOatUpToDate) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -1102,15 +1455,18 @@ TEST_F(OatFileAssistantTest, LoadExecInterpretOnlyOatUpToDate) {
ScopedNonWritable scoped_non_writable(dex_location);
ASSERT_TRUE(scoped_non_writable.IsSuccessful());
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
// Load the oat using an oat file assistant.
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/true);
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_TRUE(oat_file->IsExecutable());
+ if (with_runtime_) {
+ EXPECT_TRUE(oat_file->IsExecutable());
+ }
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(1u, dex_files.size());
@@ -1118,7 +1474,7 @@ TEST_F(OatFileAssistantTest, LoadExecInterpretOnlyOatUpToDate) {
// Case: We have a DEX file and up-to-date OAT file for it.
// Expect: Loading non-executable should load the oat non-executable.
-TEST_F(OatFileAssistantTest, LoadNoExecOatUpToDate) {
+TEST_P(OatFileAssistantTest, LoadNoExecOatUpToDate) {
if (IsExecutedAsRoot()) {
// We cannot simulate non writable locations when executed as root: b/38000545.
LOG(ERROR) << "Test skipped because it's running as root";
@@ -1134,15 +1490,18 @@ TEST_F(OatFileAssistantTest, LoadNoExecOatUpToDate) {
GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
// Load the oat using an oat file assistant.
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/false);
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
EXPECT_FALSE(oat_file->IsExecutable());
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(1u, dex_files.size());
@@ -1186,36 +1545,42 @@ static std::string MakePathRelative(const std::string& target) {
// Case: Non-absolute path to Dex location.
// Expect: Not sure, but it shouldn't crash.
-TEST_F(OatFileAssistantTest, NonAbsoluteDexLocation) {
+TEST_P(OatFileAssistantTest, NonAbsoluteDexLocation) {
std::string abs_dex_location = GetScratchDir() + "/NonAbsoluteDexLocation.jar";
Copy(GetDexSrc1(), abs_dex_location);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
std::string dex_location = MakePathRelative(abs_dex_location);
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
}
// Case: Very short, non-existent Dex location.
// Expect: kNoDexOptNeeded.
-TEST_F(OatFileAssistantTest, ShortDexLocation) {
+TEST_P(OatFileAssistantTest, ShortDexLocation) {
std::string dex_location = "/xx";
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
EXPECT_FALSE(oat_file_assistant.HasDexFiles());
@@ -1223,17 +1588,20 @@ TEST_F(OatFileAssistantTest, ShortDexLocation) {
// Case: Non-standard extension for dex file.
// Expect: The status is kDex2OatFromScratch.
-TEST_F(OatFileAssistantTest, LongDexExtension) {
+TEST_P(OatFileAssistantTest, LongDexExtension) {
std::string dex_location = GetScratchDir() + "/LongDexExtension.jarx";
Copy(GetDexSrc1(), dex_location);
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
- EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError,
+ /*expected_legacy_result=*/OatFileAssistant::kDex2OatFromScratch);
EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -1243,7 +1611,7 @@ TEST_F(OatFileAssistantTest, LongDexExtension) {
// A task to generate a dex location. Used by the RaceToGenerate test.
class RaceGenerateTask : public Task {
public:
- RaceGenerateTask(OatFileAssistantTest& test,
+ RaceGenerateTask(OatFileAssistantBaseTest& test,
const std::string& dex_location,
const std::string& oat_location,
Mutex* lock)
@@ -1251,8 +1619,7 @@ class RaceGenerateTask : public Task {
dex_location_(dex_location),
oat_location_(oat_location),
lock_(lock),
- loaded_oat_file_(nullptr)
- {}
+ loaded_oat_file_(nullptr) {}
void Run(Thread* self ATTRIBUTE_UNUSED) override {
// Load the dex files, and save a pointer to the loaded oat file, so that
@@ -1288,7 +1655,7 @@ class RaceGenerateTask : public Task {
}
private:
- OatFileAssistantTest& test_;
+ OatFileAssistantBaseTest& test_;
std::string dex_location_;
std::string oat_location_;
Mutex* lock_;
@@ -1297,7 +1664,7 @@ class RaceGenerateTask : public Task {
// Test the case where dex2oat invocations race with multiple processes trying to
// load the oat file.
-TEST_F(OatFileAssistantTest, RaceToGenerate) {
+TEST_F(OatFileAssistantBaseTest, RaceToGenerate) {
std::string dex_location = GetScratchDir() + "/RaceToGenerate.jar";
std::string oat_location = GetOdexDir() + "/RaceToGenerate.oat";
@@ -1309,7 +1676,7 @@ TEST_F(OatFileAssistantTest, RaceToGenerate) {
// take a while to generate.
Copy(GetLibCoreDexFileNames()[0], dex_location);
- const size_t kNumThreads = 32;
+ const size_t kNumThreads = 16;
Thread* self = Thread::Current();
ThreadPool thread_pool("Oat file assistant test thread pool", kNumThreads);
std::vector<std::unique_ptr<RaceGenerateTask>> tasks;
@@ -1336,7 +1703,7 @@ TEST_F(OatFileAssistantTest, RaceToGenerate) {
// Case: We have a DEX file and an ODEX file, and no OAT file,
// Expect: We should load the odex file executable.
-TEST_F(OatFileAssistantTest, LoadDexOdexNoOat) {
+TEST_P(OatFileAssistantTest, LoadDexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/LoadDexOdexNoOat.jar";
std::string odex_location = GetOdexDir() + "/LoadDexOdexNoOat.odex";
@@ -1344,15 +1711,18 @@ TEST_F(OatFileAssistantTest, LoadDexOdexNoOat) {
Copy(GetDexSrc1(), dex_location);
GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
// Load the oat using an executable oat file assistant.
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/true);
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_TRUE(oat_file->IsExecutable());
+ if (with_runtime_) {
+ EXPECT_TRUE(oat_file->IsExecutable());
+ }
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(1u, dex_files.size());
@@ -1360,7 +1730,7 @@ TEST_F(OatFileAssistantTest, LoadDexOdexNoOat) {
// Case: We have a MultiDEX file and an ODEX file, and no OAT file.
// Expect: We should load the odex file executable.
-TEST_F(OatFileAssistantTest, LoadMultiDexOdexNoOat) {
+TEST_P(OatFileAssistantTest, LoadMultiDexOdexNoOat) {
std::string dex_location = GetScratchDir() + "/LoadMultiDexOdexNoOat.jar";
std::string odex_location = GetOdexDir() + "/LoadMultiDexOdexNoOat.odex";
@@ -1368,15 +1738,18 @@ TEST_F(OatFileAssistantTest, LoadMultiDexOdexNoOat) {
Copy(GetMultiDexSrc1(), dex_location);
GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
// Load the oat using an executable oat file assistant.
- OatFileAssistant oat_file_assistant(dex_location.c_str(),
- kRuntimeISA,
- default_context_.get(),
- true);
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str(),
+ /*context=*/nullptr,
+ /*load_executable=*/true);
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
- EXPECT_TRUE(oat_file->IsExecutable());
+ if (with_runtime_) {
+ EXPECT_TRUE(oat_file->IsExecutable());
+ }
std::vector<std::unique_ptr<const DexFile>> dex_files;
dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
EXPECT_EQ(2u, dex_files.size());
@@ -1402,7 +1775,7 @@ TEST(OatFileAssistantUtilsTest, DexLocationToOdexFilename) {
// Verify the dexopt status values from dalvik.system.DexFile
// match the OatFileAssistant::DexOptStatus values.
-TEST_F(OatFileAssistantTest, DexOptStatusValues) {
+TEST_F(OatFileAssistantBaseTest, DexOptStatusValues) {
std::pair<OatFileAssistant::DexOptNeeded, const char*> mapping[] = {
{OatFileAssistant::kNoDexOptNeeded, "NO_DEXOPT_NEEDED"},
{OatFileAssistant::kDex2OatFromScratch, "DEX2OAT_FROM_SCRATCH"},
@@ -1426,7 +1799,7 @@ TEST_F(OatFileAssistantTest, DexOptStatusValues) {
}
}
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
+TEST_P(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
std::string dex_location = GetScratchDir() + "/TestDex.jar";
std::string odex_location = GetOdexDir() + "/TestDex.odex";
@@ -1455,43 +1828,321 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
ASSERT_TRUE(updated_context != nullptr);
std::vector<int> context_fds;
ASSERT_TRUE(updated_context->OpenDexFiles("", context_fds, /*only_read_checksums*/ true));
- OatFileAssistant oat_file_assistant(
- dex_location.c_str(), kRuntimeISA, updated_context.get(), false);
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant =
+ CreateOatFileAssistant(dex_location.c_str(), updated_context.get());
// DexOptNeeded should advise compilation (kDex2OatForFilter) when the context changes.
- EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kDefaultCompilerFilter));
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kDefaultCompilerFilter,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/-OatFileAssistant::kDex2OatForFilter);
}
{
std::unique_ptr<ClassLoaderContext> updated_context = ClassLoaderContext::Create(context_str);
ASSERT_TRUE(updated_context != nullptr);
std::vector<int> context_fds;
- ASSERT_TRUE(updated_context->OpenDexFiles("", context_fds, /*only_read_checksums*/ true));
- OatFileAssistant oat_file_assistant(
- dex_location.c_str(), kRuntimeISA, updated_context.get(), false);
- // Now check that DexOptNeeded does not advise compilation if we only extracted the file.
+ ASSERT_TRUE(updated_context->OpenDexFiles("", context_fds, /*only_read_checksums*/ true));
args.push_back("--compiler-filter=extract");
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant =
+ CreateOatFileAssistant(dex_location.c_str(), updated_context.get());
+ // Now check that DexOptNeeded does not advise compilation if we only extracted the file.
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
}
{
std::unique_ptr<ClassLoaderContext> updated_context = ClassLoaderContext::Create(context_str);
ASSERT_TRUE(updated_context != nullptr);
std::vector<int> context_fds;
- ASSERT_TRUE(updated_context->OpenDexFiles("", context_fds, /*only_read_checksums*/ true));
- OatFileAssistant oat_file_assistant(
- dex_location.c_str(), kRuntimeISA, updated_context.get(), false);
- // Now check that DexOptNeeded does not advise compilation if we only verify the file.
+ ASSERT_TRUE(updated_context->OpenDexFiles("", context_fds, /*only_read_checksums*/ true));
args.push_back("--compiler-filter=verify");
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
- EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
- GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant =
+ CreateOatFileAssistant(dex_location.c_str(), updated_context.get());
+ // Now check that DexOptNeeded does not advise compilation if we only verify the file.
+ VerifyGetDexOptNeededDefault(&oat_file_assistant,
+ CompilerFilter::kExtract,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex,
+ /*expected_legacy_result=*/OatFileAssistant::kNoDexOptNeeded);
}
}
+// Case: We have a DEX file and speed-profile ODEX file for it. The caller's intention is to
+// downgrade the compiler filter.
+// Expect: Dexopt should be performed only if the target compiler filter is worse than the current
+// one.
+TEST_P(OatFileAssistantTest, Downgrade) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ std::string odex_location = GetOdexDir() + "/TestDex.odex";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeedProfile);
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+ OatFileAssistant::DexOptTrigger downgrade_trigger{.targetFilterIsWorse = true};
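+ // With only `targetFilterIsWorse` set, dexopt is requested solely when the target filter is
+ // worse than the current one, matching the legacy `downgrade=true` semantics.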
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ downgrade_trigger,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex);
+ EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(
+ CompilerFilter::kSpeed, /*profile_changed=*/false, /*downgrade=*/true));
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeedProfile,
+ downgrade_trigger,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex);
+ EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(
+ CompilerFilter::kSpeedProfile, /*profile_changed=*/false, /*downgrade=*/true));
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ downgrade_trigger,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex);
+ EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
+ oat_file_assistant.GetDexOptNeeded(
+ CompilerFilter::kVerify, /*profile_changed=*/false, /*downgrade=*/true));
+}
+
+// Case: We have a DEX file but we don't have an ODEX file for it. The caller's intention is to
+// downgrade the compiler filter.
+// Expect: Dexopt should never be performed regardless of the target compiler filter.
+TEST_P(OatFileAssistantTest, DowngradeNoOdex) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ Copy(GetDexSrc1(), dex_location);
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+ OatFileAssistant::DexOptTrigger downgrade_trigger{.targetFilterIsWorse = true};
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ downgrade_trigger,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError);
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(
+ CompilerFilter::kSpeed, /*profile_changed=*/false, /*downgrade=*/true));
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeedProfile,
+ downgrade_trigger,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError);
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(
+ CompilerFilter::kSpeedProfile, /*profile_changed=*/false, /*downgrade=*/true));
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ downgrade_trigger,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError);
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(
+ CompilerFilter::kVerify, /*profile_changed=*/false, /*downgrade=*/true));
+}
+
+// Case: We have a DEX file and a speed-profile ODEX file for it. The legacy version is called
+// with both `profile_changed` and `downgrade` being true. This won't happen in practice; the
+// test exists only for completeness.
+// Expect: The behavior should be as if `profile_changed` were false and `downgrade` were true.
+TEST_P(OatFileAssistantTest, ProfileChangedDowngrade) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ std::string odex_location = GetOdexDir() + "/TestDex.odex";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeedProfile);
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(
+ CompilerFilter::kSpeed, /*profile_changed=*/true, /*downgrade=*/true));
+
+ EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(
+ CompilerFilter::kSpeedProfile, /*profile_changed=*/true, /*downgrade=*/true));
+
+ EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
+ oat_file_assistant.GetDexOptNeeded(
+ CompilerFilter::kVerify, /*profile_changed=*/true, /*downgrade=*/true));
+}
+
+// Case: We have a DEX file and a speed-profile ODEX file for it. The caller's intention is to force
+// the compilation.
+// Expect: Dexopt should be performed regardless of the target compiler filter. The VDEX file is
+// usable.
+//
+// The legacy version does not support this case. Historically, Package Manager has not taken the
+// result from OatFileAssistant for forced compilation; it uses an arbitrary non-zero value instead.
+// Therefore, we don't test the legacy version here.
+TEST_P(OatFileAssistantTest, Force) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ std::string odex_location = GetOdexDir() + "/TestDex.odex";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeedProfile);
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+ OatFileAssistant::DexOptTrigger force_trigger{.targetFilterIsBetter = true,
+ .targetFilterIsSame = true,
+ .targetFilterIsWorse = true,
+ .primaryBootImageBecomesUsable = true};
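+ // Note: this trigger fires whether the target filter is better than, the same as, or worse
+ // than the current one, which is what forcing compilation means here.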
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ force_trigger,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex);
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeedProfile,
+ force_trigger,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex);
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ force_trigger,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationOdex);
+}
+
+// Case: We have a DEX file but we don't have an ODEX file for it. The caller's intention is to
+// force the compilation.
+// Expect: Dexopt should be performed regardless of the target compiler filter. No VDEX file is
+// usable.
+//
+// The legacy version does not support this case. Historically, Package Manager has not taken the
+// result from OatFileAssistant for forced compilation; it uses an arbitrary non-zero value instead.
+// Therefore, we don't test the legacy version here.
+TEST_P(OatFileAssistantTest, ForceNoOdex) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ Copy(GetDexSrc1(), dex_location);
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+ OatFileAssistant::DexOptTrigger force_trigger{.targetFilterIsBetter = true,
+ .targetFilterIsSame = true,
+ .targetFilterIsWorse = true,
+ .primaryBootImageBecomesUsable = true};
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ force_trigger,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError);
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeedProfile,
+ force_trigger,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError);
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ force_trigger,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/false,
+ /*expected_location=*/OatFileAssistant::kLocationNoneOrError);
+}
+
+// Case: We have a DEX file and a DM file for it.
+// Expect: Dexopt should be performed if the compiler filter is better than "verify". The location
+// should be kLocationDm.
+//
+// The legacy version should return kDex2OatFromScratch if the target compiler filter is better than
+// "verify".
+TEST_P(OatFileAssistantTest, DmUpToDate) {
+ std::string dex_location = GetScratchDir() + "/TestDex.jar";
+ std::string dm_location = GetScratchDir() + "/TestDex.dm";
+ std::string odex_location = GetOdexDir() + "/TestDex.odex";
+ std::string vdex_location = GetOdexDir() + "/TestDex.vdex";
+ Copy(GetDexSrc1(), dex_location);
+
+ // Generate temporary ODEX and VDEX files from which to create the DM file.
+ GenerateOdexForTest(
+ dex_location, odex_location, CompilerFilter::kVerify, "install", {"--copy-dex-files=false"});
+
+ CreateDexMetadata(vdex_location, dm_location);
+
+ // Clean up the temporary files.
+ ASSERT_EQ(0, unlink(odex_location.c_str()));
+ ASSERT_EQ(0, unlink(vdex_location.c_str()));
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeed,
+ default_trigger_,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationDm);
+ EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kSpeedProfile,
+ default_trigger_,
+ /*expected_dexopt_needed=*/true,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationDm);
+ EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeedProfile));
+
+ VerifyGetDexOptNeeded(&oat_file_assistant,
+ CompilerFilter::kVerify,
+ default_trigger_,
+ /*expected_dexopt_needed=*/false,
+ /*expected_is_vdex_usable=*/true,
+ /*expected_location=*/OatFileAssistant::kLocationDm);
+ EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+ oat_file_assistant.GetDexOptNeeded(CompilerFilter::kVerify));
+}
+
// Test that GetLocation of a dex file is the same whether the dex
// file is backed by an oat file or not.
-TEST_F(OatFileAssistantTest, GetDexLocation) {
+TEST_F(OatFileAssistantBaseTest, GetDexLocation) {
std::string dex_location = GetScratchDir() + "/TestDex.jar";
std::string oat_location = GetOdexDir() + "/TestDex.odex";
std::string art_location = GetOdexDir() + "/TestDex.art";
@@ -1539,7 +2190,7 @@ TEST_F(OatFileAssistantTest, GetDexLocation) {
// Test that a dex file on the platform location gets the right hiddenapi domain,
// regardless of whether it has a backing oat file.
-TEST_F(OatFileAssistantTest, SystemFrameworkDir) {
+TEST_F(OatFileAssistantBaseTest, SystemFrameworkDir) {
std::string filebase = "OatFileAssistantTestSystemFrameworkDir";
std::string dex_location = GetAndroidRoot() + "/framework/" + filebase + ".jar";
Copy(GetDexSrc1(), dex_location);
@@ -1619,7 +2270,7 @@ TEST_F(OatFileAssistantTest, SystemFrameworkDir) {
}
// Make sure OAT files that require app images are not loaded as executable.
-TEST_F(OatFileAssistantTest, LoadOatNoArt) {
+TEST_F(OatFileAssistantBaseTest, LoadOatNoArt) {
std::string dex_location = GetScratchDir() + "/TestDex.jar";
std::string odex_location = GetOdexDir() + "/TestDex.odex";
std::string art_location = GetOdexDir() + "/TestDex.art";
@@ -1653,7 +2304,7 @@ TEST_F(OatFileAssistantTest, LoadOatNoArt) {
EXPECT_FALSE(oat_file->IsExecutable());
}
-TEST_F(OatFileAssistantTest, GetDexOptNeededWithApexVersions) {
+TEST_P(OatFileAssistantTest, GetDexOptNeededWithApexVersions) {
std::string dex_location = GetScratchDir() + "/TestDex.jar";
std::string odex_location = GetOdexDir() + "/TestDex.odex";
Copy(GetDexSrc1(), dex_location);
@@ -1667,8 +2318,9 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithApexVersions) {
args.push_back("--apex-versions=" + Runtime::Current()->GetApexVersions());
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
- OatFileAssistant oat_file_assistant(
- dex_location.c_str(), kRuntimeISA, default_context_.get(), false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
}
@@ -1681,8 +2333,9 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithApexVersions) {
args.push_back("--apex-versions=" + Runtime::Current()->GetApexVersions().substr(0, 1));
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
- OatFileAssistant oat_file_assistant(
- dex_location.c_str(), kRuntimeISA, default_context_.get(), false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
}
@@ -1695,12 +2348,135 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithApexVersions) {
args.push_back("--apex-versions=/1/2/3/4");
ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
- OatFileAssistant oat_file_assistant(
- dex_location.c_str(), kRuntimeISA, default_context_.get(), false);
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ OatFileAssistant oat_file_assistant = CreateOatFileAssistant(dex_location.c_str());
EXPECT_EQ(OatFileAssistant::kOatBootImageOutOfDate, oat_file_assistant.OdexFileStatus());
}
}
+TEST_P(OatFileAssistantTest, Create) {
+ std::string dex_location = GetScratchDir() + "/OdexUpToDate.jar";
+ std::string odex_location = GetOdexDir() + "/OdexUpToDate.odex";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, "install");
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ std::unique_ptr<ClassLoaderContext> context;
+ std::string error_msg;
+ std::unique_ptr<OatFileAssistant> oat_file_assistant =
+ OatFileAssistant::Create(dex_location,
+ GetInstructionSetString(kRuntimeISA),
+ default_context_->EncodeContextForDex2oat(/*base_dir=*/""),
+ /*load_executable=*/false,
+ /*only_load_trusted_executable=*/true,
+ MaybeGetOatFileAssistantContext(),
+ &context,
+ &error_msg);
+ ASSERT_NE(oat_file_assistant, nullptr);
+
+ // Verify that the created instance is usable.
+ VerifyOptimizationStatus(dex_location, default_context_.get(), "speed", "install", "up-to-date");
+}
+
+TEST_P(OatFileAssistantTest, ErrorOnInvalidIsaString) {
+ std::string dex_location = GetScratchDir() + "/OdexUpToDate.jar";
+ std::string odex_location = GetOdexDir() + "/OdexUpToDate.odex";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, "install");
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ std::unique_ptr<ClassLoaderContext> context;
+ std::string error_msg;
+ EXPECT_EQ(OatFileAssistant::Create(dex_location,
+ /*isa_str=*/"foo",
+ default_context_->EncodeContextForDex2oat(/*base_dir=*/""),
+ /*load_executable=*/false,
+ /*only_load_trusted_executable=*/true,
+ MaybeGetOatFileAssistantContext(),
+ &context,
+ &error_msg),
+ nullptr);
+ EXPECT_EQ(error_msg, "Instruction set 'foo' is invalid");
+}
+
+TEST_P(OatFileAssistantTest, ErrorOnInvalidContextString) {
+ std::string dex_location = GetScratchDir() + "/OdexUpToDate.jar";
+ std::string odex_location = GetOdexDir() + "/OdexUpToDate.odex";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, "install");
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ std::unique_ptr<ClassLoaderContext> context;
+ std::string error_msg;
+ EXPECT_EQ(OatFileAssistant::Create(dex_location,
+ GetInstructionSetString(kRuntimeISA),
+ /*context_str=*/"foo",
+ /*load_executable=*/false,
+ /*only_load_trusted_executable=*/true,
+ MaybeGetOatFileAssistantContext(),
+ &context,
+ &error_msg),
+ nullptr);
+ EXPECT_EQ(error_msg, "Class loader context 'foo' is invalid");
+}
+
+TEST_P(OatFileAssistantTest, ErrorOnInvalidContextFile) {
+ std::string dex_location = GetScratchDir() + "/OdexUpToDate.jar";
+ std::string odex_location = GetOdexDir() + "/OdexUpToDate.odex";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location, CompilerFilter::kSpeed, "install");
+
+ // Create a broken context file.
+ std::string context_location = GetScratchDir() + "/BrokenContext.jar";
+ std::ofstream output(context_location);
+ output.close();
+
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+
+ std::unique_ptr<ClassLoaderContext> context;
+ std::string error_msg;
+ EXPECT_EQ(OatFileAssistant::Create(dex_location,
+ GetInstructionSetString(kRuntimeISA),
+ /*context_str=*/"PCL[" + context_location + "]",
+ /*load_executable=*/false,
+ /*only_load_trusted_executable=*/true,
+ MaybeGetOatFileAssistantContext(),
+ &context,
+ &error_msg),
+ nullptr);
+ EXPECT_EQ(error_msg,
+ "Failed to load class loader context files for '" + dex_location +
+ "' with context 'PCL[" + context_location + "]'");
+}
+
+// Verifies that `OatFileAssistant::ValidateBootClassPathChecksums` accepts the checksum string
+// produced by `gc::space::ImageSpace::GetBootClassPathChecksums`.
+TEST_P(OatFileAssistantTest, ValidateBootClassPathChecksums) {
+ std::string error_msg;
+ auto create_and_verify = [&]() {
+ std::string checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
+ ArrayRef<gc::space::ImageSpace* const>(runtime_->GetHeap()->GetBootImageSpaces()),
+ ArrayRef<const DexFile* const>(runtime_->GetClassLinker()->GetBootClassPath()));
+ std::string bcp_locations = android::base::Join(runtime_->GetBootClassPathLocations(), ':');
+
+ ofa_context_ = CreateOatFileAssistantContext();
+ auto scoped_maybe_without_runtime = ScopedMaybeWithoutRuntime();
+ return OatFileAssistant::ValidateBootClassPathChecksums(
+ ofa_context_.get(), kRuntimeISA, checksums, bcp_locations, &error_msg);
+ };
+
+ ASSERT_TRUE(create_and_verify()) << error_msg;
+
+ for (const std::string& src : {GetDexSrc1(), GetDexSrc2()}) {
+ ASSERT_TRUE(InsertNewBootClasspathEntry(src, &error_msg)) << error_msg;
+ ASSERT_TRUE(create_and_verify()) << error_msg;
+ }
+}
+
// TODO: More Tests:
// * Test class linker falls back to unquickened dex for DexNoOat
// * Test class linker falls back to unquickened dex for MultiDexNoOat
@@ -1713,4 +2489,7 @@ TEST_F(OatFileAssistantTest, GetDexOptNeededWithApexVersions) {
// - Dex is stripped, don't have odex.
// - Oat file corrupted after status check, before reload unexecutable
// because it's unrelocated and no dex2oat
+
+INSTANTIATE_TEST_SUITE_P(WithOrWithoutRuntime, OatFileAssistantTest, testing::Values(true, false));
+
} // namespace art
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index c3a268d8b7..6f1e95ab22 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -196,11 +196,11 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
LOG(WARNING) << "Opening an oat file without a class loader. "
<< "Are you using the deprecated DexFile APIs?";
} else if (context != nullptr) {
- OatFileAssistant oat_file_assistant(dex_location,
- kRuntimeISA,
- context.get(),
- runtime->GetOatFilesExecutable(),
- only_use_system_oat_files_);
+ auto oat_file_assistant = std::make_unique<OatFileAssistant>(dex_location,
+ kRuntimeISA,
+ context.get(),
+ runtime->GetOatFilesExecutable(),
+ only_use_system_oat_files_);
// Get the current optimization status for trace debugging.
// Implementation detail note: GetOptimizationStatus will select the same
@@ -210,11 +210,8 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
std::string compilation_filter;
std::string compilation_reason;
std::string odex_status;
- oat_file_assistant.GetOptimizationStatus(
- &odex_location,
- &compilation_filter,
- &compilation_reason,
- &odex_status);
+ oat_file_assistant->GetOptimizationStatus(
+ &odex_location, &compilation_filter, &compilation_reason, &odex_status);
Runtime::Current()->GetAppInfo()->RegisterOdexStatus(
dex_location,
@@ -229,8 +226,18 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
compilation_filter.c_str(),
compilation_reason.c_str()));
+ const bool has_registered_app_info = Runtime::Current()->GetAppInfo()->HasRegisteredAppInfo();
+ const AppInfo::CodeType code_type =
+ Runtime::Current()->GetAppInfo()->GetRegisteredCodeType(dex_location);
+ // We only want to madvise primary/split dex artifacts as a startup optimization. However,
+ // as the code_type for those artifacts may not be set until the initial app info registration,
+ // we conservatively madvise everything until the app info registration is complete.
+ const bool should_madvise_vdex_and_odex = !has_registered_app_info ||
+ code_type == AppInfo::CodeType::kPrimaryApk ||
+ code_type == AppInfo::CodeType::kSplitApk;
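+ // Consequently, once app info registration has completed, dex locations whose code type is
+ // neither primary nor split (e.g. secondary dex files) are no longer madvised here.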
+
// Proceed with oat file loading.
- std::unique_ptr<const OatFile> oat_file(oat_file_assistant.GetBestOatFile().release());
+ std::unique_ptr<const OatFile> oat_file(oat_file_assistant->GetBestOatFile().release());
VLOG(oat) << "OatFileAssistant(" << dex_location << ").GetBestOatFile()="
<< (oat_file != nullptr ? oat_file->GetLocation() : "")
<< " (executable=" << (oat_file != nullptr ? oat_file->IsExecutable() : false) << ")";
@@ -244,13 +251,23 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
// Load the dex files from the oat file.
bool added_image_space = false;
if (oat_file->IsExecutable()) {
+ if (should_madvise_vdex_and_odex) {
+ VLOG(oat) << "Madvising oat file: " << oat_file->GetLocation();
+ size_t madvise_size_limit = runtime->GetMadviseWillNeedSizeOdex();
+ Runtime::MadviseFileForRange(madvise_size_limit,
+ oat_file->Size(),
+ oat_file->Begin(),
+ oat_file->End(),
+ oat_file->GetLocation());
+ }
+
ScopedTrace app_image_timing("AppImage:Loading");
// We need to throw away the image space if we are debuggable but the oat-file source of the
// image is not; otherwise we might get classes with inlined methods or other such things.
std::unique_ptr<gc::space::ImageSpace> image_space;
if (ShouldLoadAppImage(oat_file.get())) {
- image_space = oat_file_assistant.OpenImageSpace(oat_file.get());
+ image_space = oat_file_assistant->OpenImageSpace(oat_file.get());
}
if (image_space != nullptr) {
ScopedObjectAccess soa(self);
@@ -310,12 +327,13 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
<< oat_file->GetLocation()
<< " non-executable as it requires an image which we failed to load";
// file as non-executable.
- OatFileAssistant nonexecutable_oat_file_assistant(dex_location,
- kRuntimeISA,
- context.get(),
- /*load_executable=*/false,
- only_use_system_oat_files_);
- oat_file.reset(nonexecutable_oat_file_assistant.GetBestOatFile().release());
+ auto nonexecutable_oat_file_assistant =
+ std::make_unique<OatFileAssistant>(dex_location,
+ kRuntimeISA,
+ context.get(),
+ /*load_executable=*/false,
+ only_use_system_oat_files_);
+ oat_file.reset(nonexecutable_oat_file_assistant->GetBestOatFile().release());
// The file could be deleted concurrently (for example background
// dexopt, or secondary oat file being deleted by the app).
@@ -325,7 +343,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
}
if (oat_file != nullptr) {
- dex_files = oat_file_assistant.LoadDexFiles(*oat_file.get(), dex_location);
+ dex_files = oat_file_assistant->LoadDexFiles(*oat_file.get(), dex_location);
// Register for tracking.
for (const auto& dex_file : dex_files) {
@@ -345,7 +363,8 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (oat_file != nullptr) {
VdexFile* vdex_file = oat_file->GetVdexFile();
- if (vdex_file != nullptr) {
+ if (should_madvise_vdex_and_odex && vdex_file != nullptr) {
+ VLOG(oat) << "Madvising vdex file: " << vdex_file->GetName();
// Opened vdex file from an oat file, madvise it to its loaded state.
// TODO(b/196052575): Unify dex and vdex madvise knobs and behavior.
const size_t madvise_size_limit = Runtime::Current()->GetMadviseWillNeedSizeVdex();
@@ -365,7 +384,7 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
// If so, report an error with the current stack trace.
// Most likely the developer didn't intend to do this because it will waste
// performance and memory.
- if (oat_file_assistant.GetBestStatus() == OatFileAssistant::kOatContextOutOfDate) {
+ if (oat_file_assistant->GetBestStatus() == OatFileAssistant::kOatContextOutOfDate) {
std::set<const DexFile*> already_exists_in_classpath =
context->CheckForDuplicateDexFiles(MakeNonOwningPointerVector(dex_files));
if (!already_exists_in_classpath.empty()) {
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index e347588f3e..0bbf23fc27 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -28,6 +28,11 @@ namespace art {
class ArtMethod;
+// Size in bytes of the should_deoptimize flag on the stack.
+// We only need 4 bytes for our purpose, regardless of the architecture. Frame size
+// calculation will automatically align the final frame size.
+static constexpr size_t kShouldDeoptimizeFlagSize = 4;
+
// OatQuickMethodHeader precedes the raw code chunk generated by the compiler.
class PACKED(4) OatQuickMethodHeader {
public:
@@ -145,6 +150,17 @@ class PACKED(4) OatQuickMethodHeader {
return CodeInfo::DecodeFrameInfo(GetOptimizedCodeInfoPtr());
}
+ size_t GetShouldDeoptimizeFlagOffset() const {
+ DCHECK(IsOptimized());
+ QuickMethodFrameInfo frame_info = GetFrameInfo();
+ size_t frame_size = frame_info.FrameSizeInBytes();
+ size_t core_spill_size =
+ POPCOUNT(frame_info.CoreSpillMask()) * GetBytesPerGprSpillLocation(kRuntimeISA);
+ size_t fpu_spill_size =
+ POPCOUNT(frame_info.FpSpillMask()) * GetBytesPerFprSpillLocation(kRuntimeISA);
+ return frame_size - core_spill_size - fpu_spill_size - kShouldDeoptimizeFlagSize;
+ }
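+
+ // Worked example (illustrative numbers only): on arm64, a 112-byte frame with six core spills
+ // (6 * 8 = 48 bytes) and two FP spills (2 * 8 = 16 bytes) yields a returned offset of
+ // 112 - 48 - 16 - 4 = 44 bytes.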
+
uintptr_t ToNativeQuickPc(ArtMethod* method,
const uint32_t dex_pc,
bool is_for_catch_handler,
diff --git a/runtime/offsets.h b/runtime/offsets.h
index cc18bf4f74..7974111851 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -37,12 +37,28 @@ class Offset {
constexpr size_t SizeValue() const {
return val_;
}
+ Offset& operator+=(const size_t rhs) {
+ val_ += rhs;
+ return *this;
+ }
constexpr bool operator==(Offset o) const {
return SizeValue() == o.SizeValue();
}
constexpr bool operator!=(Offset o) const {
return !(*this == o);
}
+ constexpr bool operator<(Offset o) const {
+ return SizeValue() < o.SizeValue();
+ }
+ constexpr bool operator<=(Offset o) const {
+ return !(*this > o);
+ }
+ constexpr bool operator>(Offset o) const {
+ return o < *this;
+ }
+ constexpr bool operator>=(Offset o) const {
+ return !(*this < o);
+ }
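+ // Illustrative usage (not part of this change): given Offset a(8) and Offset b(32),
+ // `a += 8` leaves a == Offset(16), and a < b, a <= b, b > a, and b >= a all hold.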
protected:
size_t val_;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 4d2448293c..0450e3e449 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -732,9 +732,7 @@ bool ParsedOptions::DoParse(const RuntimeOptions& options,
Exit(0);
}
// If `boot.art` exists in the ART APEX, it will be used. Otherwise, everything will be JITed.
- args.Set(M::Image,
- ParseStringList<':'>{{"boot.art!/apex/com.android.art/etc/boot-image.prof",
- "/nonx/boot-framework.art!/system/etc/boot-image.prof"}});
+ args.Set(M::Image, ParseStringList<':'>::Split(GetJitZygoteBootImageLocation()));
}
if (!args.Exists(M::CompilerCallbacksPtr) && !args.Exists(M::Image)) {
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 2a6929a523..8029c03315 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -16,6 +16,7 @@
#include "quick_exception_handler.h"
#include <ios>
+#include <queue>
#include "arch/context.h"
#include "art_method-inl.h"
@@ -67,12 +68,15 @@ class CatchBlockStackVisitor final : public StackVisitor {
Context* context,
Handle<mirror::Throwable>* exception,
QuickExceptionHandler* exception_handler,
- uint32_t skip_frames)
+ uint32_t skip_frames,
+ bool skip_top_unwind_callback)
REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
exception_(exception),
exception_handler_(exception_handler),
- skip_frames_(skip_frames) {
+ skip_frames_(skip_frames),
+ skip_unwind_callback_(skip_top_unwind_callback) {
+ DCHECK_IMPLIES(skip_unwind_callback_, skip_frames_ == 0);
}
bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -95,7 +99,24 @@ class CatchBlockStackVisitor final : public StackVisitor {
DCHECK(method->IsCalleeSaveMethod());
return true;
}
- return HandleTryItems(method);
+ bool continue_stack_walk = HandleTryItems(method);
+ // Collect methods for which the MethodUnwind callback needs to be invoked. The callback can
+ // potentially throw, so we only invoke it after the catch block has been found.
+ // The stack walk stops at the catch block; if this frame ends the walk, the method is not
+ // unwound, so we don't record it.
+ if (continue_stack_walk && !skip_unwind_callback_) {
+ // skip_unwind_callback_ is only set when a method exit callback has thrown an exception. In
+ // that case, the runtime method (artMethodExitHook) is on top of the stack and the second
+ // frame is the method for which the method exit callback was invoked.
+ DCHECK_IMPLIES(skip_unwind_callback_, GetFrameDepth() == 2);
+ unwound_methods_.push(method);
+ }
+ skip_unwind_callback_ = false;
+ return continue_stack_walk;
+ }
+
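+ // The queue below is drained by QuickExceptionHandler::FindCatch via
+ // Instrumentation::ProcessMethodUnwindCallbacks once the catch handler has been located.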
+ std::queue<ArtMethod*>& GetUnwoundMethods() {
+ return unwound_methods_;
}
private:
@@ -139,20 +160,29 @@ class CatchBlockStackVisitor final : public StackVisitor {
QuickExceptionHandler* const exception_handler_;
// The number of frames to skip searching for catches in.
uint32_t skip_frames_;
+ // The methods we unwind past to reach the catch block. We record these so that the
+ // MethodUnwind callbacks can be invoked for them.
+ std::queue<ArtMethod*> unwound_methods_;
+ // Specifies whether the unwind callback should be skipped for the method at the top of the
+ // stack.
+ bool skip_unwind_callback_;
DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
};
// Finds the appropriate exception catch after calling all method exit instrumentation functions.
-// Note that this might change the exception being thrown.
-void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) {
+// Note that this might change the exception being thrown. If is_method_exit_exception is true,
+// the method unwind callback is skipped for the method on top of the stack, as the exception was
+// thrown by a method exit callback.
+void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception,
+ bool is_method_exit_exception) {
DCHECK(!is_deoptimization_);
- instrumentation::InstrumentationStackPopper popper(self_);
+ instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
// The number of total frames we have so far popped.
uint32_t already_popped = 0;
bool popped_to_top = true;
StackHandleScope<1> hs(self_);
MutableHandle<mirror::Throwable> exception_ref(hs.NewHandle(exception));
+ bool skip_top_unwind_callback = is_method_exit_exception;
// Sending the instrumentation events (done by the InstrumentationStackPopper) can cause new
// exceptions to be thrown which will override the current exception. Therefore we need to perform
// the search for a catch in a loop until we have successfully popped all the way to a catch or
@@ -166,11 +196,15 @@ void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) {
}
// Walk the stack to find catch handler.
- CatchBlockStackVisitor visitor(self_, context_,
+ CatchBlockStackVisitor visitor(self_,
+ context_,
&exception_ref,
this,
- /*skip_frames=*/already_popped);
+ /*skip_frames=*/already_popped,
+ skip_top_unwind_callback);
visitor.WalkStack(true);
+ skip_top_unwind_callback = false;
+
uint32_t new_pop_count = handler_frame_depth_;
DCHECK_GE(new_pop_count, already_popped);
already_popped = new_pop_count;
@@ -195,9 +229,13 @@ void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) {
handler_method_header_->IsOptimized()) {
SetCatchEnvironmentForOptimizedHandler(&visitor);
}
- popped_to_top =
- popper.PopFramesTo(reinterpret_cast<uintptr_t>(handler_quick_frame_), exception_ref);
+ popped_to_top = instr->ProcessMethodUnwindCallbacks(self_,
+ visitor.GetUnwoundMethods(),
+ exception_ref);
} while (!popped_to_top);
+
+ // Pop frames off the instrumentation stack to keep it in sync with what is actually on the stack.
+ instr->PopInstrumentationStackUntil(self_, reinterpret_cast<uintptr_t>(handler_quick_frame_));
if (!clear_exception_) {
// Put exception back in root set with clear throw location.
self_->SetException(exception_ref.Get());
@@ -361,13 +399,15 @@ class DeoptimizeStackVisitor final : public StackVisitor {
return true;
} else if (method->IsNative()) {
// If we return from JNI with a pending exception and want to deoptimize, we need to skip
- // the native method.
- // The top method is a runtime method, the native method comes next.
- CHECK_EQ(GetFrameDepth(), 1U);
+ // the native method. The top method is a runtime method, the native method comes next.
+ // We also deoptimize for method instrumentation reasons from method entry / exit
+ // callbacks; in those cases the native method is at the top of the stack.
+ CHECK((GetFrameDepth() == 1U) || (GetFrameDepth() == 0U));
callee_method_ = method;
return true;
} else if (!single_frame_deopt_ &&
- !Runtime::Current()->IsAsyncDeoptimizeable(GetCurrentQuickFramePc())) {
+ !Runtime::Current()->IsAsyncDeoptimizeable(GetOuterMethod(),
+ GetCurrentQuickFramePc())) {
// We hit some code that's not deoptimizeable. However, Single-frame deoptimization triggered
// from compiled code is always allowed since HDeoptimize always saves the full environment.
LOG(WARNING) << "Got request to deoptimize un-deoptimizable method "
@@ -642,7 +682,7 @@ uintptr_t QuickExceptionHandler::UpdateInstrumentationStack() {
uintptr_t return_pc = 0;
if (method_tracing_active_) {
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- return_pc = instrumentation->PopFramesForDeoptimization(
+ return_pc = instrumentation->PopInstrumentationStackUntil(
self_, reinterpret_cast<uintptr_t>(handler_quick_frame_));
}
return return_pc;
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 4ff981d8a5..9554f1d7d6 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -49,7 +49,8 @@ class QuickExceptionHandler {
// Find the catch handler for the given exception and call all required Instrumentation methods.
// Note this might result in the exception being caught being different from 'exception'.
- void FindCatch(ObjPtr<mirror::Throwable> exception) REQUIRES_SHARED(Locks::mutator_lock_);
+ void FindCatch(ObjPtr<mirror::Throwable> exception, bool is_method_exit_exception)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Deoptimize the stack to the upcall/some code that's not deoptimizeable. For
// every compiled frame, we create a "copy" shadow frame that will be executed
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index b0434d8a81..ff4693f55e 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -21,6 +21,7 @@
#include "gc/accounting/read_barrier_table.h"
#include "gc/collector/concurrent_copying-inl.h"
+#include "gc/collector/mark_compact.h"
#include "gc/heap.h"
#include "mirror/object-readbarrier-inl.h"
#include "mirror/object_reference.h"
@@ -34,7 +35,7 @@ template <typename MirrorType, bool kIsVolatile, ReadBarrierOption kReadBarrierO
inline MirrorType* ReadBarrier::Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
constexpr bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
- if (kUseReadBarrier && with_read_barrier) {
+ if (gUseReadBarrier && with_read_barrier) {
if (kCheckDebugDisallowReadBarrierCount) {
Thread* const self = Thread::Current();
if (self != nullptr) {
@@ -91,6 +92,12 @@ inline MirrorType* ReadBarrier::Barrier(
LOG(FATAL) << "Unexpected read barrier type";
UNREACHABLE();
}
+ } else if (kReadBarrierOption == kWithFromSpaceBarrier) {
+ CHECK(gUseUserfaultfd);
+ MirrorType* old = ref_addr->template AsMirrorPtr<kIsVolatile>();
+ mirror::Object* ref =
+ Runtime::Current()->GetHeap()->MarkCompactCollector()->GetFromSpaceAddrFromBarrier(old);
+ return reinterpret_cast<MirrorType*>(ref);
} else {
// No read barrier.
return ref_addr->template AsMirrorPtr<kIsVolatile>();
@@ -102,7 +109,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root,
GcRootSource* gc_root_source) {
MirrorType* ref = *root;
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
- if (kUseReadBarrier && with_read_barrier) {
+ if (gUseReadBarrier && with_read_barrier) {
if (kCheckDebugDisallowReadBarrierCount) {
Thread* const self = Thread::Current();
if (self != nullptr) {
@@ -147,7 +154,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(mirror::CompressedReference<Mirro
GcRootSource* gc_root_source) {
MirrorType* ref = root->AsMirrorPtr();
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
- if (kUseReadBarrier && with_read_barrier) {
+ if (gUseReadBarrier && with_read_barrier) {
if (kCheckDebugDisallowReadBarrierCount) {
Thread* const self = Thread::Current();
if (self != nullptr) {
@@ -192,7 +199,7 @@ template <typename MirrorType>
inline MirrorType* ReadBarrier::IsMarked(MirrorType* ref) {
// Only read-barrier configurations can have mutators run while
// the GC is marking.
- if (!kUseReadBarrier) {
+ if (!gUseReadBarrier) {
return ref;
}
// IsMarked does not handle null, so handle it here.
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 3b89377860..be5a9a030a 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -94,7 +94,7 @@ class ReadBarrier {
// Without the holder object, and only with the read barrier configuration (no-op otherwise).
static void MaybeAssertToSpaceInvariant(mirror::Object* ref)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
AssertToSpaceInvariant(ref);
}
}
diff --git a/runtime/read_barrier_config.h b/runtime/read_barrier_config.h
index d505bedec5..e974b04797 100644
--- a/runtime/read_barrier_config.h
+++ b/runtime/read_barrier_config.h
@@ -62,17 +62,8 @@ static constexpr bool kUseTableLookupReadBarrier = true;
static constexpr bool kUseTableLookupReadBarrier = false;
#endif
-static constexpr bool kUseReadBarrier = kUseBakerReadBarrier || kUseTableLookupReadBarrier;
-
-// Debugging flag that forces the generation of read barriers, but
-// does not trigger the use of the concurrent copying GC.
-//
-// TODO: Remove this flag when the read barriers compiler
-// instrumentation is completed.
-static constexpr bool kForceReadBarrier = false;
-// TODO: Likewise, remove this flag when kForceReadBarrier is removed
-// and replace it with kUseReadBarrier.
-static constexpr bool kEmitCompilerReadBarrier = kForceReadBarrier || kUseReadBarrier;
+extern const bool gUseReadBarrier;
+extern const bool gUseUserfaultfd;
// Disabled for performance reasons.
static constexpr bool kCheckDebugDisallowReadBarrierCount = kIsDebugBuild;
diff --git a/runtime/read_barrier_option.h b/runtime/read_barrier_option.h
index d918d466c6..36fc2d27b8 100644
--- a/runtime/read_barrier_option.h
+++ b/runtime/read_barrier_option.h
@@ -84,6 +84,7 @@ namespace art {
enum ReadBarrierOption {
kWithReadBarrier, // Perform a read barrier.
kWithoutReadBarrier, // Don't perform a read barrier.
+ kWithFromSpaceBarrier, // Get the from-space address for the given to-space address. Used by CMC.
};
} // namespace art
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index a7290a2919..afa49d0ab5 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -523,6 +523,7 @@ bool InvokeMethodImpl(const ScopedObjectAccessAlreadyRunnable& soa,
} // anonymous namespace
template <>
+NO_STACK_PROTECTOR
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj,
ArtMethod* method,
@@ -555,6 +556,7 @@ JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
}
template <>
+NO_STACK_PROTECTOR
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj,
jmethodID mid,
diff --git a/runtime/reflection.h b/runtime/reflection.h
index b0e27da321..13dc8e1466 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -99,6 +99,7 @@ JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnab
// num_frames is number of frames we look up for access check.
template<PointerSize pointer_size>
+NO_STACK_PROTECTOR
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject method,
jobject receiver,
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index e20f883446..642e0bcc34 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -16,15 +16,13 @@
#include "runtime.h"
-// sys/mount.h has to come before linux/fs.h due to redefinition of MS_RDONLY, MS_BIND, etc
-#include <sys/mount.h>
#ifdef __linux__
-#include <linux/fs.h>
#include <sys/prctl.h>
#endif
#include <fcntl.h>
#include <signal.h>
+#include <sys/mount.h>
#include <sys/syscall.h>
#if defined(__APPLE__)
@@ -125,6 +123,7 @@
#include "native/dalvik_system_ZygoteHooks.h"
#include "native/java_lang_Class.h"
#include "native/java_lang_Object.h"
+#include "native/java_lang_StackStreamFactory.h"
#include "native/java_lang_String.h"
#include "native/java_lang_StringFactory.h"
#include "native/java_lang_System.h"
@@ -152,6 +151,7 @@
#include "native_bridge_art_interface.h"
#include "native_stack_dump.h"
#include "nativehelper/scoped_local_ref.h"
+#include "nterp_helpers.h"
#include "oat.h"
#include "oat_file_manager.h"
#include "oat_quick_method_header.h"
@@ -178,6 +178,7 @@
#include "well_known_classes.h"
#ifdef ART_TARGET_ANDROID
+#include <android/api-level.h>
#include <android/set_abort_message.h>
#include "com_android_apex.h"
namespace apex = com::android::apex;
@@ -200,10 +201,6 @@ static constexpr double kLowMemoryMaxLoadFactor = 0.8;
static constexpr double kNormalMinLoadFactor = 0.4;
static constexpr double kNormalMaxLoadFactor = 0.7;
-// Extra added to the default heap growth multiplier. Used to adjust the GC ergonomics for the read
-// barrier config.
-static constexpr double kExtraDefaultHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
-
Runtime* Runtime::instance_ = nullptr;
struct TraceConfig {
@@ -406,6 +403,12 @@ Runtime::~Runtime() {
if (oat_file_manager_ != nullptr) {
oat_file_manager_->WaitForWorkersToBeCreated();
}
+ // Disable GC before deleting the thread-pool and shutting down the runtime, as shutdown
+ // restricts attaching new threads.
+ heap_->DisableGCForShutdown();
+ heap_->WaitForWorkersToBeCreated();
+ // Make sure to let the GC complete if it is running.
+ heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
{
ScopedTrace trace2("Wait for shutdown cond");
@@ -440,8 +443,6 @@ Runtime::~Runtime() {
self = nullptr;
}
- // Make sure to let the GC complete if it is running.
- heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
heap_->DeleteThreadPool();
if (oat_file_manager_ != nullptr) {
oat_file_manager_->DeleteThreadPool();
@@ -543,7 +544,7 @@ struct AbortState {
os << "Runtime aborting...\n";
if (Runtime::Current() == nullptr) {
os << "(Runtime does not yet exist!)\n";
- DumpNativeStack(os, GetTid(), nullptr, " native: ", nullptr);
+ DumpNativeStack(os, GetTid(), " native: ", nullptr);
return;
}
Thread* self = Thread::Current();
@@ -555,7 +556,7 @@ struct AbortState {
if (self == nullptr) {
os << "(Aborting thread was not attached to runtime!)\n";
- DumpNativeStack(os, GetTid(), nullptr, " native: ", nullptr);
+ DumpNativeStack(os, GetTid(), " native: ", nullptr);
} else {
os << "Aborting thread:\n";
if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
@@ -697,38 +698,52 @@ void Runtime::Abort(const char* msg) {
// notreached
}
-class FindNativeMethodsVisitor : public ClassVisitor {
+/**
+ * Update entrypoints (native and Java) of methods before the first fork. This
+ * helps share the pages where ArtMethods are allocated between the zygote and
+ * forked apps.
+ */
+class UpdateMethodsPreFirstForkVisitor : public ClassVisitor {
public:
- FindNativeMethodsVisitor(Thread* self, ClassLinker* class_linker)
+ UpdateMethodsPreFirstForkVisitor(Thread* self, ClassLinker* class_linker)
: vm_(down_cast<JNIEnvExt*>(self->GetJniEnv())->GetVm()),
self_(self),
- class_linker_(class_linker) {}
+ class_linker_(class_linker),
+ can_use_nterp_(interpreter::CanRuntimeUseNterp()) {}
bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
bool is_initialized = klass->IsVisiblyInitialized();
for (ArtMethod& method : klass->GetDeclaredMethods(kRuntimePointerSize)) {
- if (method.IsNative() && (is_initialized || !NeedsClinitCheckBeforeCall(&method))) {
- const void* existing = method.GetEntryPointFromJni();
- if (method.IsCriticalNative()
- ? class_linker_->IsJniDlsymLookupCriticalStub(existing)
- : class_linker_->IsJniDlsymLookupStub(existing)) {
- const void* native_code =
- vm_->FindCodeForNativeMethod(&method, /*error_msg=*/ nullptr, /*can_suspend=*/ false);
- if (native_code != nullptr) {
- class_linker_->RegisterNative(self_, &method, native_code);
+ if (is_initialized || !NeedsClinitCheckBeforeCall(&method)) {
+ if (method.IsNative()) {
+ const void* existing = method.GetEntryPointFromJni();
+ if (method.IsCriticalNative()
+ ? class_linker_->IsJniDlsymLookupCriticalStub(existing)
+ : class_linker_->IsJniDlsymLookupStub(existing)) {
+ const void* native_code =
+ vm_->FindCodeForNativeMethod(&method, /*error_msg=*/ nullptr, /*can_suspend=*/ false);
+ if (native_code != nullptr) {
+ class_linker_->RegisterNative(self_, &method, native_code);
+ }
}
}
+ } else if (can_use_nterp_) {
+ const void* existing = method.GetEntryPointFromQuickCompiledCode();
+ if (class_linker_->IsQuickResolutionStub(existing) && CanMethodUseNterp(&method)) {
+ method.SetEntryPointFromQuickCompiledCode(interpreter::GetNterpWithClinitEntryPoint());
+ }
}
}
return true;
}
private:
- JavaVMExt* vm_;
- Thread* self_;
- ClassLinker* class_linker_;
+ JavaVMExt* const vm_;
+ Thread* const self_;
+ ClassLinker* const class_linker_;
+ const bool can_use_nterp_;
- DISALLOW_COPY_AND_ASSIGN(FindNativeMethodsVisitor);
+ DISALLOW_COPY_AND_ASSIGN(UpdateMethodsPreFirstForkVisitor);
};
void Runtime::PreZygoteFork() {
@@ -742,8 +757,7 @@ void Runtime::PreZygoteFork() {
// Ensure we call FixupStaticTrampolines on all methods that are
// initialized.
class_linker_->MakeInitializedClassesVisiblyInitialized(soa.Self(), /*wait=*/ true);
- // Update native method JNI entrypoints.
- FindNativeMethodsVisitor visitor(soa.Self(), class_linker_);
+ UpdateMethodsPreFirstForkVisitor visitor(soa.Self(), class_linker_);
class_linker_->VisitClasses(&visitor);
}
heap_->PreZygoteFork();
@@ -990,6 +1004,22 @@ bool Runtime::Start() {
}
CreateJitCodeCache(/*rwx_memory_allowed=*/true);
CreateJit();
+#ifdef ADDRESS_SANITIZER
+ // (b/238730394): In older implementations of sanitizer + glibc there is a race between
+ // pthread_create and dlopen that could cause a deadlock. pthread_create interceptor in ASAN
+ // uses dl_pthread_iterator with a callback that could request a dl_load_lock via call to
+ // __tls_get_addr [1]. dl_pthread_iterate would already hold dl_load_lock so this could cause a
+ // deadlock. __tls_get_addr needs a dl_load_lock only when there is a dlopen happening in
+ // parallel. As a workaround we wait for the pthread_create (i.e. JIT thread pool creation) to
+ // finish before going to the next phase. Creating a system class loader could need a dlopen so
+ // we wait here till threads are initialized.
+ // [1] https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp#L408
+ // See this for more context: https://reviews.llvm.org/D98926
+ // TODO(b/238730394): Revisit this workaround once we migrate to musl libc.
+ if (jit_ != nullptr) {
+ jit_->GetThreadPool()->WaitForWorkersToBeCreated();
+ }
+#endif
}
// Send the start phase event. We have to wait till here as this is when the main thread peer
@@ -1128,8 +1158,8 @@ void Runtime::InitNonZygoteOrPostFork(
std::vector<std::string> jars = android::base::Split(system_server_classpath, ":");
app_info_.RegisterAppInfo("android",
jars,
- /*cur_profile_path=*/ "",
- /*ref_profile_path=*/ "",
+ /*profile_output_filename=*/ "",
+ /*ref_profile_filename=*/ "",
AppInfo::CodeType::kPrimaryApk);
}
@@ -1144,7 +1174,6 @@ void Runtime::InitNonZygoteOrPostFork(
}
// Create the thread pools.
- heap_->CreateThreadPool();
// Avoid creating the runtime thread pool for system server since it will not be used and would
// waste memory.
if (!is_system_server) {
@@ -1328,9 +1357,9 @@ static inline void CreatePreAllocatedException(Thread* self,
detailMessageField->SetObject</* kTransactionActive= */ false>(exception->Read(), message);
}
-void Runtime::InitializeApexVersions() {
+std::string Runtime::GetApexVersions(ArrayRef<const std::string> boot_class_path_locations) {
std::vector<std::string_view> bcp_apexes;
- for (std::string_view jar : Runtime::Current()->GetBootClassPathLocations()) {
+ for (std::string_view jar : boot_class_path_locations) {
std::string_view apex = ApexNameFromLocation(jar);
if (!apex.empty()) {
bcp_apexes.push_back(apex);
@@ -1338,20 +1367,20 @@ void Runtime::InitializeApexVersions() {
}
static const char* kApexFileName = "/apex/apex-info-list.xml";
// Start with empty markers.
- apex_versions_ = std::string(bcp_apexes.size(), '/');
+ std::string empty_apex_versions(bcp_apexes.size(), '/');
// When running on host or chroot, we just use empty markers.
if (!kIsTargetBuild || !OS::FileExists(kApexFileName)) {
- return;
+ return empty_apex_versions;
}
#ifdef ART_TARGET_ANDROID
if (access(kApexFileName, R_OK) != 0) {
PLOG(WARNING) << "Failed to read " << kApexFileName;
- return;
+ return empty_apex_versions;
}
auto info_list = apex::readApexInfoList(kApexFileName);
if (!info_list.has_value()) {
LOG(WARNING) << "Failed to parse " << kApexFileName;
- return;
+ return empty_apex_versions;
}
std::string result;
@@ -1375,10 +1404,17 @@ void Runtime::InitializeApexVersions() {
android::base::StringAppendF(&result, "/%" PRIu64, version);
}
}
- apex_versions_ = result;
+ return result;
+#else
+ return empty_apex_versions; // Not an Android build.
#endif
}
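+
+// Illustrative result (hypothetical versions): for a boot class path drawn from two apexes with
+// versions 331413030 and 330400000, the encoded string is "/331413030/330400000"; an apex whose
+// version cannot be determined keeps a bare '/' marker.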
+void Runtime::InitializeApexVersions() {
+ apex_versions_ =
+ GetApexVersions(ArrayRef<const std::string>(Runtime::Current()->GetBootClassPathLocations()));
+}
+
void Runtime::ReloadAllFlags(const std::string& caller) {
FlagBase::ReloadAllFlags(caller);
}
@@ -1587,9 +1623,11 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
// If low memory mode, use 1.0 as the multiplier by default.
foreground_heap_growth_multiplier = 1.0f;
} else {
+ // An extra 1.0 is added to the default heap growth multiplier for concurrent GC
+ // compaction algorithms. This is kept for historical reasons.
+ // TODO: remove when we revisit heap configurations.
foreground_heap_growth_multiplier =
- runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier) +
- kExtraDefaultHeapGrowthMultiplier;
+ runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier) + 1.0f;
}
XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
@@ -1617,9 +1655,9 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
image_locations_,
instruction_set_,
// Override the collector type to CC if the read barrier config.
- kUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
- kUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
- : runtime_options.GetOrDefault(Opt::BackgroundGc),
+ gUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
+ gUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
+ : BackgroundGcOption(xgc_option.collector_type_),
runtime_options.GetOrDefault(Opt::LargeObjectSpace),
runtime_options.GetOrDefault(Opt::LargeObjectThreshold),
runtime_options.GetOrDefault(Opt::ParallelGCThreads),
@@ -2237,6 +2275,7 @@ void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
register_java_lang_reflect_Parameter(env);
register_java_lang_reflect_Proxy(env);
register_java_lang_ref_Reference(env);
+ register_java_lang_StackStreamFactory(env);
register_java_lang_String(env);
register_java_lang_StringFactory(env);
register_java_lang_System(env);
@@ -2457,6 +2496,9 @@ void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
class_linker_->VisitRoots(visitor, flags);
jni_id_manager_->VisitRoots(visitor);
heap_->VisitAllocationRecords(visitor);
+ if (jit_ != nullptr) {
+ jit_->GetCodeCache()->VisitRoots(visitor);
+ }
if ((flags & kVisitRootFlagNewRoots) == 0) {
// Guaranteed to have no new roots in the constant roots.
VisitConstantRoots(visitor);
@@ -2585,7 +2627,7 @@ ArtMethod* Runtime::CreateCalleeSaveMethod() {
}
void Runtime::DisallowNewSystemWeaks() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
monitor_list_->DisallowNewMonitors();
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
java_vm_->DisallowNewWeakGlobals();
@@ -2601,7 +2643,7 @@ void Runtime::DisallowNewSystemWeaks() {
}
void Runtime::AllowNewSystemWeaks() {
- CHECK(!kUseReadBarrier);
+ CHECK(!gUseReadBarrier);
monitor_list_->AllowNewMonitors();
intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal); // TODO: Do this in the sweeping.
java_vm_->AllowNewWeakGlobals();
@@ -3043,12 +3085,13 @@ bool Runtime::IsVerificationSoftFail() const {
return verify_ == verifier::VerifyMode::kSoftFail;
}
-bool Runtime::IsAsyncDeoptimizeable(uintptr_t code) const {
+bool Runtime::IsAsyncDeoptimizeable(ArtMethod* method, uintptr_t code) const {
if (OatQuickMethodHeader::NterpMethodHeader != nullptr) {
if (OatQuickMethodHeader::NterpMethodHeader->Contains(code)) {
return true;
}
}
+
// We only support async deopt (i.e. the compiled code is not explicitly asking for
// deopt, but something else like the debugger) in debuggable JIT code.
// We could look at the oat file where `code` is being defined,
@@ -3056,8 +3099,14 @@ bool Runtime::IsAsyncDeoptimizeable(uintptr_t code) const {
// only rely on the JIT for debuggable apps.
// The JIT-zygote is not debuggable so we need to be sure to exclude code from the non-private
// region as well.
- return IsJavaDebuggable() && GetJit() != nullptr &&
- GetJit()->GetCodeCache()->PrivateRegionContainsPc(reinterpret_cast<const void*>(code));
+ if (GetJit() != nullptr &&
+ GetJit()->GetCodeCache()->PrivateRegionContainsPc(reinterpret_cast<const void*>(code))) {
+ // If the code is JITed code then check if it was compiled as debuggable.
+ const OatQuickMethodHeader* header = method->GetOatQuickMethodHeader(code);
+ return CodeInfo::IsDebuggable(header->GetOptimizedCodeInfoPtr());
+ }
+
+ return false;
}
LinearAlloc* Runtime::CreateLinearAlloc() {
@@ -3144,15 +3193,19 @@ class UpdateEntryPointsClassVisitor : public ClassVisitor {
auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& m : klass->GetMethods(pointer_size)) {
const void* code = m.GetEntryPointFromQuickCompiledCode();
+ // For Java-debuggable runtimes we also deoptimize native methods. For other cases (boot
+ // image profiling) we don't need to deoptimize native methods. If this changes, also
+ // update Instrumentation::CanUseAotCode.
+ bool deoptimize_native_methods = Runtime::Current()->IsJavaDebuggable();
if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
- !m.IsNative() &&
+ (!m.IsNative() || deoptimize_native_methods) &&
!m.IsProxyMethod()) {
instrumentation_->InitializeMethodsCode(&m, /*aot_code=*/ nullptr);
}
if (Runtime::Current()->GetJit() != nullptr &&
Runtime::Current()->GetJit()->GetCodeCache()->IsInZygoteExecSpace(code) &&
- !m.IsNative()) {
+ (!m.IsNative() || deoptimize_native_methods)) {
DCHECK(!m.IsProxyMethod());
instrumentation_->InitializeMethodsCode(&m, /*aot_code=*/ nullptr);
}
@@ -3180,14 +3233,12 @@ void Runtime::DeoptimizeBootImage() {
// If we've already started and we are setting this runtime to debuggable,
// we patch entry points of methods in boot image to interpreter bridge, as
// boot image code may be AOT compiled as not debuggable.
- if (!GetInstrumentation()->IsForcedInterpretOnly()) {
- UpdateEntryPointsClassVisitor visitor(GetInstrumentation());
- GetClassLinker()->VisitClasses(&visitor);
- jit::Jit* jit = GetJit();
- if (jit != nullptr) {
- // Code previously compiled may not be compiled debuggable.
- jit->GetCodeCache()->TransitionToDebuggable();
- }
+ UpdateEntryPointsClassVisitor visitor(GetInstrumentation());
+ GetClassLinker()->VisitClasses(&visitor);
+ jit::Jit* jit = GetJit();
+ if (jit != nullptr) {
+ // Code previously compiled may not be compiled debuggable.
+ jit->GetCodeCache()->TransitionToDebuggable();
}
}
@@ -3358,6 +3409,22 @@ void Runtime::MadviseFileForRange(size_t madvise_size_limit_bytes,
const uint8_t* map_begin,
const uint8_t* map_end,
const std::string& file_name) {
+#ifdef ART_TARGET_ANDROID
+ // Short-circuit the madvise optimization for background processes. This
+ // avoids IO and memory contention with foreground processes, particularly
+ // those involving app startup.
+ // Note: We can only safely short-circuit the madvise on T+, as it requires
+ // the framework to always immediately notify ART of process states.
+ static const int kApiLevel = android_get_device_api_level();
+ const bool accurate_process_state_at_startup = kApiLevel >= __ANDROID_API_T__;
+ if (accurate_process_state_at_startup) {
+ const Runtime* runtime = Runtime::Current();
+ if (runtime != nullptr && !runtime->InJankPerceptibleProcessState()) {
+ return;
+ }
+ }
+#endif // ART_TARGET_ANDROID
+
// Ideal block transfer size for madvising files (128 KiB).
static constexpr size_t kIdealIoTransferSizeBytes = 128*1024;
@@ -3399,6 +3466,8 @@ void Runtime::MadviseFileForRange(size_t madvise_size_limit_bytes,
}
}
+// Returns whether any boot image has a profile. If so, we'll need to pre-JIT the
+// methods in that profile for performance.
bool Runtime::HasImageWithProfile() const {
for (gc::space::ImageSpace* space : GetHeap()->GetBootImageSpaces()) {
if (!space->GetProfileFiles().empty()) {
@@ -3408,4 +3477,18 @@ bool Runtime::HasImageWithProfile() const {
return false;
}
+void Runtime::AppendToBootClassPath(
+ const std::string& filename,
+ const std::string& location,
+ const std::vector<std::unique_ptr<const art::DexFile>>& dex_files) {
+ boot_class_path_.push_back(filename);
+ if (!boot_class_path_locations_.empty()) {
+ boot_class_path_locations_.push_back(location);
+ }
+ ScopedObjectAccess soa(Thread::Current());
+ for (const std::unique_ptr<const art::DexFile>& dex_file : dex_files) {
+ GetClassLinker()->AppendToBootClassPath(Thread::Current(), dex_file.get());
+ }
+}
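+
+// Illustrative use (hypothetical call site): a test that has already opened extra dex files can
+// extend the boot class path at runtime:
+//   Runtime::Current()->AppendToBootClassPath(filename, location, opened_dex_files);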
+
} // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index e7b71e29f5..6f15fcedf0 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -257,6 +257,13 @@ class Runtime {
return instance_;
}
+ // Set the current runtime to be the given instance.
+ // Note that this function is not responsible for cleaning up the old instance or taking
+ // ownership of the new instance.
+ //
+ // For test use only.
+ static void TestOnlySetCurrent(Runtime* instance) { instance_ = instance; }
+
// Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
// callers should prefer.
NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);
@@ -295,6 +302,11 @@ class Runtime {
return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
}
+ // Dynamically add an element to boot class path.
+ void AppendToBootClassPath(const std::string& filename,
+ const std::string& location,
+ const std::vector<std::unique_ptr<const art::DexFile>>& dex_files);
+
const std::vector<int>& GetBootClassPathFds() const {
return boot_class_path_fds_;
}
@@ -498,6 +510,10 @@ class Runtime {
return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
}
+ static constexpr MemberOffset GetInstrumentationOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(Runtime, instrumentation_));
+ }
+
InstructionSet GetInstructionSet() const {
return instruction_set_;
}
@@ -890,7 +906,8 @@ class Runtime {
// Returns if the code can be deoptimized asynchronously. Code may be compiled with some
// optimization that makes it impossible to deoptimize.
- bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsAsyncDeoptimizeable(ArtMethod* method, uintptr_t code) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a saved copy of the environment (getenv/setenv values).
// Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
@@ -1073,6 +1090,10 @@ class Runtime {
// image rather that an image loaded from disk.
bool HasImageWithProfile() const;
+ bool GetNoSigChain() const {
+ return no_sig_chain_;
+ }
+
// Trigger a flag reload from system properties or device configs.
//
// Should only be called from runtime init and zygote post fork as
@@ -1084,6 +1105,10 @@ class Runtime {
// See Flags::ReloadAllFlags as well.
static void ReloadAllFlags(const std::string& caller);
+ // Parses /apex/apex-info-list.xml to build a string containing apex versions of boot classpath
+ // jars, which is encoded into .oat files.
+ static std::string GetApexVersions(ArrayRef<const std::string> boot_class_path_locations);
+
private:
static void InitPlatformSignalHandlers();
@@ -1124,8 +1149,7 @@ class Runtime {
ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
- // Parses /apex/apex-info-list.xml to initialize a string containing versions
- // of boot classpath jars and encoded into .oat files.
+ // Caches the apex versions produced by `GetApexVersions`.
void InitializeApexVersions();
// A pointer to the active runtime or null.
diff --git a/runtime/runtime_callbacks.cc b/runtime/runtime_callbacks.cc
index 753ac280e3..28c81a22c7 100644
--- a/runtime/runtime_callbacks.cc
+++ b/runtime/runtime_callbacks.cc
@@ -105,9 +105,9 @@ void RuntimeCallbacks::RemoveMethodInspectionCallback(MethodInspectionCallback*
Remove(cb, &method_inspection_callbacks_);
}
-bool RuntimeCallbacks::IsMethodBeingInspected(ArtMethod* m) {
+bool RuntimeCallbacks::HaveLocalsChanged() {
for (MethodInspectionCallback* cb : COPY(method_inspection_callbacks_)) {
- if (cb->IsMethodBeingInspected(m)) {
+ if (cb->HaveLocalsChanged()) {
return true;
}
}
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index b1a7e55ae4..98584a8587 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -143,9 +143,8 @@ class MethodInspectionCallback {
public:
virtual ~MethodInspectionCallback() {}
- // Returns true if the method is being inspected currently and the runtime should not modify it in
- // potentially dangerous ways (i.e. replace with compiled version, JIT it, etc).
- virtual bool IsMethodBeingInspected(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+ // Returns true if any locals have changed. If so, we should not OSR the frame.
+ virtual bool HaveLocalsChanged() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
// Callback to let something request to be notified when reflective objects are being visited and
@@ -225,9 +224,9 @@ class RuntimeCallbacks {
void AddParkCallback(ParkCallback* cb) REQUIRES_SHARED(Locks::mutator_lock_);
void RemoveParkCallback(ParkCallback* cb) REQUIRES_SHARED(Locks::mutator_lock_);
- // Returns true if some MethodInspectionCallback indicates the method is being inspected/depended
- // on by some code.
- bool IsMethodBeingInspected(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ // Returns true if any locals have changed. This is used to prevent OSRing frames whose
+ // locals have been modified.
+ bool HaveLocalsChanged() REQUIRES_SHARED(Locks::mutator_lock_);
void AddMethodInspectionCallback(MethodInspectionCallback* cb)
REQUIRES_SHARED(Locks::mutator_lock_);
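Under the narrowed interface, a callback only has to report local-variable writes. A minimal sketch of an implementer (LocalsChangeTracker is illustrative; the override signature mirrors the declaration above):

    #include <atomic>

    // Hedged sketch: a MethodInspectionCallback that latches a "locals
    // changed" flag, e.g. set when a debugger writes a local in a frame.
    class LocalsChangeTracker : public MethodInspectionCallback {
     public:
      bool HaveLocalsChanged() REQUIRES_SHARED(Locks::mutator_lock_) override {
        return locals_changed_.load(std::memory_order_relaxed);
      }
      void MarkLocalsChanged() { locals_changed_.store(true, std::memory_order_relaxed); }
     private:
      std::atomic<bool> locals_changed_{false};
    };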
diff --git a/runtime/runtime_common.h b/runtime/runtime_common.h
index 925594ef29..ec08907dae 100644
--- a/runtime/runtime_common.h
+++ b/runtime/runtime_common.h
@@ -42,7 +42,7 @@ struct Backtrace {
void Dump(std::ostream& os) const {
// This is a backtrace from a crash, do not skip any frames in case the
// crash is in the unwinder itself.
- DumpNativeStack(os, GetTid(), nullptr, "\t", nullptr, raw_context_, false);
+ DumpNativeStack(os, GetTid(), "\t", nullptr, raw_context_, false);
}
private:
// Stores the context of the signal that was unexpected and will terminate the runtime. The
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 76d16579bb..6721834705 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -80,7 +80,7 @@ RUNTIME_OPTIONS_KEY (Unit, DumpJITInfoOnShutdown)
RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (bool, AlwaysLogExplicitGcs, true)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
-RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier))
+RUNTIME_OPTIONS_KEY (bool, UseTLAB, kUseTlab)
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
RUNTIME_OPTIONS_KEY (bool, UseJitCompilation, true)
RUNTIME_OPTIONS_KEY (bool, UseProfiledJitCompilation, false)
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 50a96d0ddf..5ee20e5fd3 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -129,7 +129,7 @@ uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
GetCurrentQuickFrame(), cur_quick_frame_pc_, abort_on_failure);
} else if (cur_oat_quick_method_header_->IsOptimized()) {
StackMap* stack_map = GetCurrentStackMap();
- DCHECK(stack_map->IsValid());
+ CHECK(stack_map->IsValid()) << "StackMap not found for " << std::hex << cur_quick_frame_pc_;
return stack_map->GetDexPc();
} else {
DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
@@ -780,7 +780,6 @@ void StackVisitor::WalkStack(bool include_transitions) {
DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
}
CHECK_EQ(cur_depth_, 0U);
- size_t inlined_frames_count = 0;
for (const ManagedStack* current_fragment = thread_->GetManagedStack();
current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
@@ -788,6 +787,12 @@ void StackVisitor::WalkStack(bool include_transitions) {
cur_quick_frame_ = current_fragment->GetTopQuickFrame();
cur_quick_frame_pc_ = 0;
DCHECK(cur_oat_quick_method_header_ == nullptr);
+
+ if (kDebugStackWalk) {
+ LOG(INFO) << "Tid=" << thread_-> GetThreadId()
+ << ", ManagedStack fragement: " << current_fragment;
+ }
+
if (cur_quick_frame_ != nullptr) { // Handle quick stack frames.
// Can't be both a shadow and a quick fragment.
DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
@@ -800,10 +805,20 @@ void StackVisitor::WalkStack(bool include_transitions) {
// between GenericJNI frame and JIT-compiled JNI stub; the entrypoint may have
// changed since the frame was entered. The top quick frame tag indicates
// GenericJNI here, otherwise it's either AOT-compiled or JNI-compiled JNI stub.
- if (UNLIKELY(current_fragment->GetTopQuickFrameTag())) {
+ if (UNLIKELY(current_fragment->GetTopQuickFrameGenericJniTag())) {
// The generic JNI does not have any method header.
cur_oat_quick_method_header_ = nullptr;
+ } else if (UNLIKELY(current_fragment->GetTopQuickFrameJitJniTag())) {
+ // Should be JITed code.
+ Runtime* runtime = Runtime::Current();
+ const void* code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
+ CHECK(code != nullptr) << method->PrettyMethod();
+ cur_oat_quick_method_header_ = OatQuickMethodHeader::FromCodePointer(code);
} else {
+ // We are sure we aren't running GenericJNI here, though the entry point could still be
+ // the GenericJNI stub. The entry point is usually JITed or AOT code, or the
+ // instrumentation stub when instrumentation is enabled. It could also be a resolution
+ // stub if the class isn't visibly initialized yet.
const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
CHECK(existing_entry_point != nullptr);
Runtime* runtime = Runtime::Current();
@@ -819,7 +834,11 @@ void StackVisitor::WalkStack(bool include_transitions) {
if (code != nullptr) {
cur_oat_quick_method_header_ = OatQuickMethodHeader::FromEntryPoint(code);
} else {
- // This must be a JITted JNI stub frame.
+ // This must be a JITted JNI stub frame. For non-debuggable runtimes we only generate
+ // JIT stubs if there are no AOT stubs for native methods. Since we checked for AOT
+ // code earlier, we must be running JITed code. Debuggable runtimes might have JIT
+ // code even when AOT code is present, but they tag SP in JITed JNI stubs, and that
+ // case is handled earlier.
CHECK(runtime->GetJit() != nullptr);
code = runtime->GetJit()->GetCodeCache()->GetJniStubCode(method);
CHECK(code != nullptr) << method->PrettyMethod();
@@ -834,8 +853,12 @@ void StackVisitor::WalkStack(bool include_transitions) {
cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
}
header_retrieved = false; // Force header retrieval in next iteration.
- ValidateFrame();
+ if (kDebugStackWalk) {
+ LOG(INFO) << "Early print: Tid=" << thread_-> GetThreadId() << ", method: "
+ << ArtMethod::PrettyMethod(method) << "@" << method;
+ }
+ ValidateFrame();
if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
&& (cur_oat_quick_method_header_ != nullptr)
&& cur_oat_quick_method_header_->IsOptimized()
@@ -854,7 +877,6 @@ void StackVisitor::WalkStack(bool include_transitions) {
return;
}
cur_depth_++;
- inlined_frames_count++;
}
}
}
@@ -908,7 +930,8 @@ void StackVisitor::WalkStack(bool include_transitions) {
cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);
if (kDebugStackWalk) {
- LOG(INFO) << ArtMethod::PrettyMethod(method) << "@" << method << " size=" << frame_size
+ LOG(INFO) << "Tid=" << thread_-> GetThreadId() << ", method: "
+ << ArtMethod::PrettyMethod(method) << "@" << method << " size=" << frame_size
<< std::boolalpha
<< " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
cur_oat_quick_method_header_->IsOptimized())
@@ -928,6 +951,12 @@ void StackVisitor::WalkStack(bool include_transitions) {
cur_oat_quick_method_header_ = nullptr;
} else if (cur_shadow_frame_ != nullptr) {
do {
+ if (kDebugStackWalk) {
+ ArtMethod* method = cur_shadow_frame_->GetMethod();
+ LOG(INFO) << "Tid=" << thread_-> GetThreadId() << ", method: "
+ << ArtMethod::PrettyMethod(method) << "@" << method
+ << ", ShadowFrame";
+ }
ValidateFrame();
bool should_continue = VisitFrame();
if (UNLIKELY(!should_continue)) {
diff --git a/runtime/stack.h b/runtime/stack.h
index 1b00b54acb..bfda57b136 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -58,11 +58,6 @@ enum VRegKind {
};
std::ostream& operator<<(std::ostream& os, VRegKind rhs);
-// Size in bytes of the should_deoptimize flag on stack.
-// We just need 4 bytes for our purpose regardless of the architecture. Frame size
-// calculation will automatically do alignment for the final frame size.
-static constexpr size_t kShouldDeoptimizeFlagSize = 4;
-
/*
* Our current stack layout.
* The Dalvik registers come first, followed by the
@@ -306,6 +301,11 @@ class StackVisitor {
return *GetShouldDeoptimizeFlagAddr();
}
+ bool IsShouldDeoptimizeFlagForDebugSet() const REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint8_t should_deopt_flag = GetShouldDeoptimizeFlag();
+ return (should_deopt_flag & static_cast<uint8_t>(DeoptimizeFlagValue::kDebug)) != 0;
+ }
+
private:
// Private constructor known in the case that num_frames_ has already been computed.
StackVisitor(Thread* thread,
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 7a13dbd3ac..7876a67381 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -449,6 +449,10 @@ class CodeInfo {
return (*code_info_data & kIsBaseline) != 0;
}
+ ALWAYS_INLINE static bool IsDebuggable(const uint8_t* code_info_data) {
+ return (*code_info_data & kIsDebuggable) != 0;
+ }
+
private:
// Scan backward to determine dex register locations at given stack map.
void DecodeDexRegisterMap(uint32_t stack_map_index,
@@ -495,11 +499,16 @@ class CodeInfo {
enum Flags {
kHasInlineInfo = 1 << 0,
kIsBaseline = 1 << 1,
+ kIsDebuggable = 1 << 2,
};
// The CodeInfo starts with sequence of variable-length bit-encoded integers.
+ // (Please see kVarintMax for more details about encoding).
static constexpr size_t kNumHeaders = 7;
- uint32_t flags_ = 0; // Note that the space is limited to three bits.
+ // Note that the space for flags is limited to three bits. We use a custom encoding that
+ // stores a value inline when it is less than kVarintMax. Since we want to read the flags
+ // without decoding the entire CodeInfo, their combined value must stay below kVarintMax.
+ uint32_t flags_ = 0;
uint32_t code_size_ = 0; // The size of native PC range in bytes.
uint32_t packed_frame_size_ = 0; // Frame size in kStackAlignment units.
uint32_t core_spill_mask_ = 0;
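Given the three-bit budget described above, the inline-encoding constraint can be checked at compile time. A hedged sketch that would sit next to the Flags enum (kVarintMax is ART's inline-varint bound from the bit memory encoding; placement here is an assumption):

    // Hedged sketch: all flag bits together must stay below kVarintMax so
    // that the flags header word is always stored inline and readable
    // without decoding the rest of the CodeInfo.
    static_assert((kHasInlineInfo | kIsBaseline | kIsDebuggable) < kVarintMax,
                  "CodeInfo flags must remain inline-encodable");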
diff --git a/runtime/subtype_check_info.h b/runtime/subtype_check_info.h
index d7345579c1..05afd76a63 100644
--- a/runtime/subtype_check_info.h
+++ b/runtime/subtype_check_info.h
@@ -153,7 +153,7 @@ struct SubtypeCheckInfo {
// Create from the depth and the bitstring+of state.
// This is done for convenience to avoid passing in "depth" everywhere,
// since our current state is almost always a function of depth.
- static SubtypeCheckInfo Create(SubtypeCheckBits compressed_value, size_t depth) {
+ static SubtypeCheckInfo Create(const SubtypeCheckBits& compressed_value, size_t depth) {
SubtypeCheckInfo io;
io.depth_ = depth;
io.bitstring_and_of_ = compressed_value;
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 324cd3787a..4110ed2851 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -373,7 +373,7 @@ inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) {
}
inline bool Thread::GetWeakRefAccessEnabled() const {
- CHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
DCHECK(this == Thread::Current());
WeakRefAccessState s = tls32_.weak_ref_access_enabled.load(std::memory_order_relaxed);
if (LIKELY(s == WeakRefAccessState::kVisiblyEnabled)) {
@@ -428,7 +428,7 @@ inline bool Thread::ModifySuspendCount(Thread* self,
int delta,
AtomicInteger* suspend_barrier,
SuspendReason reason) {
- if (delta > 0 && ((kUseReadBarrier && this != self) || suspend_barrier != nullptr)) {
+ if (delta > 0 && ((gUseReadBarrier && this != self) || suspend_barrier != nullptr)) {
// When delta > 0 (requesting a suspend), ModifySuspendCountInternal() may fail either if
// active_suspend_barriers is full or we are in the middle of a thread flip. Retry in a loop.
while (true) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 78ba26dec0..5492cc869f 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -41,6 +41,8 @@
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
+#include "unwindstack/AndroidUnwinder.h"
+
#include "arch/context-inl.h"
#include "arch/context.h"
#include "art_field-inl.h"
@@ -84,6 +86,7 @@
#include "mirror/class_loader.h"
#include "mirror/object_array-alloc-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/stack_frame_info.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "monitor_objects_stack_visitor.h"
@@ -166,7 +169,7 @@ void InitEntryPoints(JniEntryPoints* jpoints,
void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);
void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
tls32_.is_gc_marking = is_marking;
UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active= */ is_marking);
}
@@ -272,6 +275,7 @@ void Thread::PushDeoptimizationContext(const JValue& return_value,
ObjPtr<mirror::Throwable> exception,
bool from_code,
DeoptimizationMethodType method_type) {
+ DCHECK(exception != Thread::GetDeoptimizationException());
DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
return_value,
is_reference,
@@ -1390,10 +1394,15 @@ void Thread::ShortDump(std::ostream& os) const {
tls32_.num_name_readers.fetch_sub(1 /* at least memory_order_release */);
}
-void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map,
- bool force_dump_stack) const {
+void Thread::Dump(std::ostream& os, bool dump_native_stack, bool force_dump_stack) const {
+ DumpState(os);
+ DumpStack(os, dump_native_stack, force_dump_stack);
+}
+
+void Thread::Dump(std::ostream& os, unwindstack::AndroidLocalUnwinder& unwinder,
+ bool dump_native_stack, bool force_dump_stack) const {
DumpState(os);
- DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack);
+ DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
}
ObjPtr<mirror::String> Thread::GetThreadName() const {
@@ -1473,7 +1482,7 @@ bool Thread::ModifySuspendCountInternal(Thread* self,
return false;
}
- if (kUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
+ if (gUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
// Force retry of a suspend request if it's in the middle of a thread flip to avoid a
// deadlock. b/31683379.
return false;
@@ -1980,6 +1989,9 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
if (thread->IsStillStarting()) {
os << " (still starting up)";
}
+ if (thread->tls32_.disable_thread_flip_count != 0) {
+ os << " DisableFlipCount = " << thread->tls32_.disable_thread_flip_count;
+ }
os << "\n";
} else {
os << '"' << ::art::GetThreadName(tid) << '"'
@@ -2276,7 +2288,14 @@ void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_loc
void Thread::DumpStack(std::ostream& os,
bool dump_native_stack,
- BacktraceMap* backtrace_map,
+ bool force_dump_stack) const {
+ unwindstack::AndroidLocalUnwinder unwinder;
+ DumpStack(os, unwinder, dump_native_stack, force_dump_stack);
+}
+
+void Thread::DumpStack(std::ostream& os,
+ unwindstack::AndroidLocalUnwinder& unwinder,
+ bool dump_native_stack,
bool force_dump_stack) const {
// TODO: we call this code when dying but may not have suspended the thread ourself. The
// IsSuspended check is therefore racy with the use for dumping (normally we inhibit
@@ -2295,7 +2314,7 @@ void Thread::DumpStack(std::ostream& os,
GetCurrentMethod(nullptr,
/*check_suspended=*/ !force_dump_stack,
/*abort_on_error=*/ !(dump_for_abort || force_dump_stack));
- DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method);
+ DumpNativeStack(os, unwinder, GetTid(), " native: ", method);
}
DumpJavaStack(os,
/*check_suspended=*/ !force_dump_stack,
@@ -2559,7 +2578,7 @@ void Thread::Destroy() {
}
// Mark-stack revocation must be performed at the very end. No
// checkpoint/flip-function or read-barrier should be called after this.
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
}
}
@@ -3138,6 +3157,148 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
return result;
}
+[[nodiscard]] static ObjPtr<mirror::StackFrameInfo> InitStackFrameInfo(
+ const ScopedObjectAccessAlreadyRunnable& soa,
+ ClassLinker* class_linker,
+ Handle<mirror::StackFrameInfo> stackFrameInfo,
+ ArtMethod* method,
+ uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
+ StackHandleScope<4> hs(soa.Self());
+ int32_t line_number;
+ auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
+ if (method->IsProxyMethod()) {
+ line_number = -1;
+ // source_name_object intentionally left null for proxy methods
+ } else {
+ line_number = method->GetLineNumFromDexPC(dex_pc);
+ if (line_number == -1) {
+ // Make the line_number field of StackFrameInfo hold the dex pc.
+ // source_name_object is intentionally left null if we failed to map the dex pc to
+ // a line number (most probably because there is no debug info). See b/30183883.
+ line_number = static_cast<int32_t>(dex_pc);
+ } else {
+ const char* source_file = method->GetDeclaringClassSourceFile();
+ if (source_file != nullptr) {
+ source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
+ if (source_name_object == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+ }
+ }
+ }
+
+ Handle<mirror::Class> declaring_class_object(
+ hs.NewHandle<mirror::Class>(method->GetDeclaringClass()));
+
+ ArtMethod* interface_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+ const char* method_name = interface_method->GetName();
+ CHECK(method_name != nullptr);
+ Handle<mirror::String> method_name_object(
+ hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
+ if (method_name_object == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ dex::ProtoIndex proto_idx =
+ method->GetDexFile()->GetIndexForProtoId(interface_method->GetPrototype());
+ Handle<mirror::MethodType> method_type_object(hs.NewHandle<mirror::MethodType>(
+ class_linker->ResolveMethodType(soa.Self(), proto_idx, interface_method)));
+ if (method_type_object == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ stackFrameInfo->AssignFields(declaring_class_object,
+ method_type_object,
+ method_name_object,
+ source_name_object,
+ line_number,
+ static_cast<int32_t>(dex_pc));
+ return stackFrameInfo.Get();
+}
+
+constexpr jlong FILL_CLASS_REFS_ONLY = 0x2; // StackStreamFactory.FILL_CLASS_REFS_ONLY
+
+jint Thread::InternalStackTraceToStackFrameInfoArray(
+ const ScopedObjectAccessAlreadyRunnable& soa,
+ jlong mode, // See java.lang.StackStreamFactory for the mode flags
+ jobject internal,
+ jint startLevel,
+ jint batchSize,
+ jint startBufferIndex,
+ jobjectArray output_array) {
+ // Decode the internal stack trace into the depth, method trace and PC trace.
+ // Subtract one for the methods and PC trace.
+ int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1;
+ DCHECK_GE(depth, 0);
+
+ StackHandleScope<6> hs(soa.Self());
+ Handle<mirror::ObjectArray<mirror::Object>> framesOrClasses =
+ hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Object>>(output_array));
+
+ jint endBufferIndex = startBufferIndex;
+
+ if (startLevel < 0 || startLevel >= depth) {
+ return endBufferIndex;
+ }
+
+ int32_t bufferSize = framesOrClasses->GetLength();
+ if (startBufferIndex < 0 || startBufferIndex >= bufferSize) {
+ return endBufferIndex;
+ }
+
+ // The FILL_CLASS_REFS_ONLY flag is defined in AbstractStackWalker.fetchStackFrames() javadoc.
+ bool isClassArray = (mode & FILL_CLASS_REFS_ONLY) != 0;
+
+ Handle<mirror::ObjectArray<mirror::Object>> decoded_traces =
+ hs.NewHandle(soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>());
+ // Methods and dex PC trace is element 0.
+ DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
+ Handle<mirror::PointerArray> method_trace =
+ hs.NewHandle(ObjPtr<mirror::PointerArray>::DownCast(decoded_traces->Get(0)));
+
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ Handle<mirror::Class> sfi_class =
+ hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/StackFrameInfo;"));
+ DCHECK(sfi_class != nullptr);
+
+ MutableHandle<mirror::StackFrameInfo> frame = hs.NewHandle<mirror::StackFrameInfo>(nullptr);
+ MutableHandle<mirror::Class> clazz = hs.NewHandle<mirror::Class>(nullptr);
+ for (uint32_t i = static_cast<uint32_t>(startLevel); i < static_cast<uint32_t>(depth); ++i) {
+ if (endBufferIndex >= startBufferIndex + batchSize || endBufferIndex >= bufferSize) {
+ break;
+ }
+
+ ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
+ if (isClassArray) {
+ clazz.Assign(method->GetDeclaringClass());
+ framesOrClasses->Set(endBufferIndex, clazz.Get());
+ } else {
+ // Prepare parameters for fields in StackFrameInfo
+ uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
+ i + static_cast<uint32_t>(method_trace->GetLength()) / 2, kRuntimePointerSize);
+
+ ObjPtr<mirror::Object> frameObject = framesOrClasses->Get(endBufferIndex);
+ // If libcore didn't allocate the object, we just stop here, but it's unlikely.
+ if (frameObject == nullptr || !frameObject->InstanceOf(sfi_class.Get())) {
+ break;
+ }
+ frame.Assign(ObjPtr<mirror::StackFrameInfo>::DownCast(frameObject));
+ frame.Assign(InitStackFrameInfo(soa, class_linker, frame, method, dex_pc));
+ // Break if InitStackFrameInfo fails to allocate objects or assign the fields.
+ if (frame == nullptr) {
+ break;
+ }
+ }
+
+ ++endBufferIndex;
+ }
+
+ return endBufferIndex;
+}
+
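The index math in the loop above (`i + GetLength() / 2`) implies a specific layout for the decoded internal trace. A sketch of that layout follows; the declaring-class tail is an assumption based on how ART keeps methods alive, not spelled out in this hunk.

    // Layout assumed by the loop above for a depth-n trace:
    //   decoded_traces[0] : PointerArray of length 2n
    //     [0 .. n-1]   ArtMethod* for frame i
    //     [n .. 2n-1]  dex pc for frame i, read at index i + GetLength() / 2
    //   decoded_traces[1 .. n] : (assumed) declaring-class references that
    //     keep the ArtMethod* entries from being unloaded.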
jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
// This code allocates. Do not allow it to operate with a pending exception.
if (IsExceptionPending()) {
@@ -3574,6 +3735,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
QUICK_ENTRY_POINT_INFO(pAputObject)
QUICK_ENTRY_POINT_INFO(pJniMethodStart)
QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
+ QUICK_ENTRY_POINT_INFO(pJniMethodEntryHook)
QUICK_ENTRY_POINT_INFO(pJniDecodeReferenceResult)
QUICK_ENTRY_POINT_INFO(pJniLockObject)
QUICK_ENTRY_POINT_INFO(pJniUnlockObject)
@@ -3691,11 +3853,14 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
os << offset;
}
-void Thread::QuickDeliverException() {
+void Thread::QuickDeliverException(bool is_method_exit_exception) {
// Get exception from thread.
ObjPtr<mirror::Throwable> exception = GetException();
CHECK(exception != nullptr);
if (exception == GetDeoptimizationException()) {
+ // This wasn't a real exception, so just clear it here. If there was an actual exception it
+ // will be recorded in the DeoptimizationContext and it will be restored later.
+ ClearException();
artDeoptimize(this);
UNREACHABLE();
}
@@ -3750,7 +3915,7 @@ void Thread::QuickDeliverException() {
if (Dbg::IsForcedInterpreterNeededForException(this) || force_deopt || IsForceInterpreter()) {
NthCallerVisitor visitor(this, 0, false);
visitor.WalkStack();
- if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
+ if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.GetOuterMethod(), visitor.caller_pc)) {
// method_type shouldn't matter due to exception handling.
const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault;
// Save the exception into the deoptimization context so it can be restored
@@ -3781,7 +3946,7 @@ void Thread::QuickDeliverException() {
// resolution.
ClearException();
QuickExceptionHandler exception_handler(this, false);
- exception_handler.FindCatch(exception);
+ exception_handler.FindCatch(exception, is_method_exit_exception);
if (exception_handler.GetClearException()) {
// Exception was cleared as part of delivery.
DCHECK(!IsExceptionPending());
@@ -3851,7 +4016,11 @@ class ReferenceMapVisitor : public StackVisitor {
// We are visiting the references in compiled frames, so we do not need
// to know the inlined frames.
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
- visitor_(visitor) {}
+ visitor_(visitor) {
+ gc::Heap* const heap = Runtime::Current()->GetHeap();
+ visit_declaring_class_ = heap->CurrentCollectorType() != gc::CollectorType::kCollectorTypeCMC
+ || !heap->MarkCompactCollector()->IsCompacting(Thread::Current());
+ }
bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
if (false) {
@@ -3896,6 +4065,9 @@ class ReferenceMapVisitor : public StackVisitor {
void VisitDeclaringClass(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS {
+ if (!visit_declaring_class_) {
+ return;
+ }
ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
// klass can be null for runtime methods.
if (klass != nullptr) {
@@ -3978,7 +4150,7 @@ class ReferenceMapVisitor : public StackVisitor {
// (PC shall be known thanks to the runtime frame for throwing SIOOBE).
// Note that JIT does not emit that intrinsic implementation.
const void* pc = reinterpret_cast<const void*>(GetCurrentQuickFramePc());
- if (pc != 0u && Runtime::Current()->GetHeap()->IsInBootImageOatFile(pc)) {
+ if (pc != nullptr && Runtime::Current()->GetHeap()->IsInBootImageOatFile(pc)) {
return;
}
}
@@ -4189,6 +4361,7 @@ class ReferenceMapVisitor : public StackVisitor {
// Visitor for when we visit a root.
RootVisitor& visitor_;
+ bool visit_declaring_class_;
};
class RootCallbackVisitor {
@@ -4425,6 +4598,15 @@ bool Thread::HasTlab() const {
return has_tlab;
}
+void Thread::AdjustTlab(size_t slide_bytes) {
+ if (HasTlab()) {
+ tlsPtr_.thread_local_start -= slide_bytes;
+ tlsPtr_.thread_local_pos -= slide_bytes;
+ tlsPtr_.thread_local_end -= slide_bytes;
+ tlsPtr_.thread_local_limit -= slide_bytes;
+ }
+}
+
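Because every TLAB pointer slides by the same delta, the bump-pointer invariants are preserved. A hedged sanity sketch one could append to AdjustTlab, mirroring the DCHECKs ART uses elsewhere for TLAB state:

    // Hedged sketch: invariants that still hold after the slide, since all
    // four pointers were rebased by the same amount.
    DCHECK_LE(tlsPtr_.thread_local_start, tlsPtr_.thread_local_pos);
    DCHECK_LE(tlsPtr_.thread_local_pos, tlsPtr_.thread_local_end);
    DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);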
std::ostream& operator<<(std::ostream& os, const Thread& thread) {
thread.ShortDump(os);
return os;
@@ -4534,7 +4716,7 @@ bool Thread::IsAotCompiler() {
mirror::Object* Thread::GetPeerFromOtherThread() const {
DCHECK(tlsPtr_.jpeer == nullptr);
mirror::Object* peer = tlsPtr_.opeer;
- if (kUseReadBarrier && Current()->GetIsGcMarking()) {
+ if (gUseReadBarrier && Current()->GetIsGcMarking()) {
// We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
// may have not been flipped yet and peer may be a from-space (stale) ref. So explicitly
// mark/forward it here.
@@ -4606,7 +4788,9 @@ void ScopedExceptionStorage::SuppressOldException(const char* message) {
CHECK(self_->IsExceptionPending()) << *self_;
ObjPtr<mirror::Throwable> old_suppressed(excp_.Get());
excp_.Assign(self_->GetException());
- LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
+ if (old_suppressed != nullptr) {
+ LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
+ }
self_->ClearException();
}
diff --git a/runtime/thread.h b/runtime/thread.h
index dd8b061b95..6b1c16c907 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -48,7 +48,9 @@
#include "runtime_stats.h"
#include "thread_state.h"
-class BacktraceMap;
+namespace unwindstack {
+class AndroidLocalUnwinder;
+} // namespace unwindstack
namespace art {
@@ -188,7 +190,7 @@ enum class WeakRefAccessState : int32_t {
// This should match RosAlloc::kNumThreadLocalSizeBrackets.
static constexpr size_t kNumRosAllocThreadLocalSizeBracketsInThread = 16;
-static constexpr size_t kSharedMethodHotnessThreshold = 0xffff;
+static constexpr size_t kSharedMethodHotnessThreshold = 0x1fff;
// Thread's stack layout for implicit stack overflow checks:
//
@@ -270,7 +272,11 @@ class Thread {
// Dumps the detailed thread state and the thread stack (used for SIGQUIT).
void Dump(std::ostream& os,
bool dump_native_stack = true,
- BacktraceMap* backtrace_map = nullptr,
+ bool force_dump_stack = false) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void Dump(std::ostream& os,
+ unwindstack::AndroidLocalUnwinder& unwinder,
+ bool dump_native_stack = true,
bool force_dump_stack = false) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -373,11 +379,11 @@ class Thread {
void WaitForFlipFunction(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
gc::accounting::AtomicStack<mirror::Object>* GetThreadLocalMarkStack() {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
return tlsPtr_.thread_local_mark_stack;
}
void SetThreadLocalMarkStack(gc::accounting::AtomicStack<mirror::Object>* stack) {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
tlsPtr_.thread_local_mark_stack = stack;
}
@@ -546,8 +552,12 @@ class Thread {
// that needs to be dealt with, false otherwise.
bool ObserveAsyncException() REQUIRES_SHARED(Locks::mutator_lock_);
- // Find catch block and perform long jump to appropriate exception handle
- NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
+ // Find the catch block and perform a long jump to the appropriate exception handler. When
+ // is_method_exit_exception is true, the exception was thrown by the method exit callback, and we
+ // should not send a method-unwind event for the method on top of the stack since the method exit
+ // callback was already called.
+ NO_RETURN void QuickDeliverException(bool is_method_exit_exception = false)
+ REQUIRES_SHARED(Locks::mutator_lock_);
Context* GetLongJumpContext();
void ReleaseLongJumpContext(Context* context) {
@@ -573,8 +583,8 @@ class Thread {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
}
- void SetTopOfStackTagged(ArtMethod** top_method) {
- tlsPtr_.managed_stack.SetTopQuickFrameTagged(top_method);
+ void SetTopOfStackGenericJniTagged(ArtMethod** top_method) {
+ tlsPtr_.managed_stack.SetTopQuickFrameGenericJniTagged(top_method);
}
void SetTopOfShadowStack(ShadowFrame* top) {
@@ -708,6 +718,16 @@ class Thread {
jobjectArray output_array = nullptr, int* stack_depth = nullptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ static jint InternalStackTraceToStackFrameInfoArray(
+ const ScopedObjectAccessAlreadyRunnable& soa,
+ jlong mode, // See java.lang.StackStreamFactory for the mode flags
+ jobject internal,
+ jint startLevel,
+ jint batchSize,
+ jint startIndex,
+ jobjectArray output_array) // java.lang.StackFrameInfo[]
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -739,6 +759,13 @@ class Thread {
}
template<PointerSize pointer_size>
+ static constexpr ThreadOffset<pointer_size> TidOffset() {
+ return ThreadOffset<pointer_size>(
+ OFFSETOF_MEMBER(Thread, tls32_) +
+ OFFSETOF_MEMBER(tls_32bit_sized_values, tid));
+ }
+
+ template<PointerSize pointer_size>
static constexpr ThreadOffset<pointer_size> InterruptedOffset() {
return ThreadOffset<pointer_size>(
OFFSETOF_MEMBER(Thread, tls32_) +
@@ -766,6 +793,13 @@ class Thread {
OFFSETOF_MEMBER(tls_32bit_sized_values, is_gc_marking));
}
+ template <PointerSize pointer_size>
+ static constexpr ThreadOffset<pointer_size> DeoptCheckRequiredOffset() {
+ return ThreadOffset<pointer_size>(
+ OFFSETOF_MEMBER(Thread, tls32_) +
+ OFFSETOF_MEMBER(tls_32bit_sized_values, is_deopt_check_required));
+ }
+
static constexpr size_t IsGcMarkingSize() {
return sizeof(tls32_.is_gc_marking);
}
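The new offset accessors all follow the same nested-OFFSETOF_MEMBER shape. Conceptually (illustrative only; plain offsetof has restrictions on non-standard-layout types that ART's OFFSETOF_MEMBER macro works around):

    // Hedged sketch of what DeoptCheckRequiredOffset() computes: the byte
    // offset of the flag within Thread, so stubs can test it directly.
    //   offset = offsetof(Thread, tls32_)
    //          + offsetof(tls_32bit_sized_values, is_deopt_check_required)
    // A stub can then do:  ldrb wX, [xSELF, #offset]   (arm64, illustrative)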
@@ -1011,33 +1045,34 @@ class Thread {
}
bool GetIsGcMarking() const {
- CHECK(kUseReadBarrier);
+ CHECK(gUseReadBarrier);
return tls32_.is_gc_marking;
}
void SetIsGcMarkingAndUpdateEntrypoints(bool is_marking);
+ bool IsDeoptCheckRequired() const { return tls32_.is_deopt_check_required; }
+
+ void SetDeoptCheckRequired(bool flag) { tls32_.is_deopt_check_required = flag; }
+
bool GetWeakRefAccessEnabled() const; // Only safe for current thread.
void SetWeakRefAccessEnabled(bool enabled) {
- CHECK(kUseReadBarrier);
+ DCHECK(gUseReadBarrier);
WeakRefAccessState new_state = enabled ?
WeakRefAccessState::kEnabled : WeakRefAccessState::kDisabled;
tls32_.weak_ref_access_enabled.store(new_state, std::memory_order_release);
}
uint32_t GetDisableThreadFlipCount() const {
- CHECK(kUseReadBarrier);
return tls32_.disable_thread_flip_count;
}
void IncrementDisableThreadFlipCount() {
- CHECK(kUseReadBarrier);
++tls32_.disable_thread_flip_count;
}
void DecrementDisableThreadFlipCount() {
- CHECK(kUseReadBarrier);
DCHECK_GT(tls32_.disable_thread_flip_count, 0U);
--tls32_.disable_thread_flip_count;
}
@@ -1206,6 +1241,10 @@ class Thread {
DCHECK_LE(tlsPtr_.thread_local_end, tlsPtr_.thread_local_limit);
}
+ // Called from Concurrent mark-compact GC to slide the TLAB pointers backwards
+ // to adjust to post-compact addresses.
+ void AdjustTlab(size_t slide_bytes);
+
// Doesn't check that there is room.
mirror::Object* AllocTlab(size_t bytes);
void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
@@ -1481,7 +1520,11 @@ class Thread {
void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
void DumpStack(std::ostream& os,
bool dump_native_stack = true,
- BacktraceMap* backtrace_map = nullptr,
+ bool force_dump_stack = false) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void DumpStack(std::ostream& os,
+ unwindstack::AndroidLocalUnwinder& unwinder,
+ bool dump_native_stack = true,
bool force_dump_stack = false) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1712,6 +1755,7 @@ class Thread {
thread_exit_check_count(0),
is_transitioning_to_runnable(false),
is_gc_marking(false),
+ is_deopt_check_required(false),
weak_ref_access_enabled(WeakRefAccessState::kVisiblyEnabled),
disable_thread_flip_count(0),
user_code_suspend_count(0),
@@ -1766,6 +1810,12 @@ class Thread {
// GC roots.
bool32_t is_gc_marking;
+ // True if we need to check for deoptimization when returning from runtime functions. This is
+ // required only when a class is redefined, to prevent executing code that has stale field
+ // offsets embedded in it. For non-debuggable apps redefinition is not allowed, so this flag
+ // should always be false.
+ bool32_t is_deopt_check_required;
+
// Thread "interrupted" status; stays raised until queried or thrown.
Atomic<bool32_t> interrupted;
@@ -2186,13 +2236,13 @@ class ScopedTransitioningToRunnable : public ValueObject {
explicit ScopedTransitioningToRunnable(Thread* self)
: self_(self) {
DCHECK_EQ(self, Thread::Current());
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
self_->SetIsTransitioningToRunnable(true);
}
}
~ScopedTransitioningToRunnable() {
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
self_->SetIsTransitioningToRunnable(false);
}
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 6482e72417..6b23c349ef 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -24,9 +24,9 @@
#include <vector>
#include "android-base/stringprintf.h"
-#include "backtrace/BacktraceMap.h"
#include "nativehelper/scoped_local_ref.h"
#include "nativehelper/scoped_utf_chars.h"
+#include "unwindstack/AndroidUnwinder.h"
#include "base/aborting.h"
#include "base/histogram-inl.h"
@@ -101,12 +101,11 @@ void ThreadList::ShutDown() {
Runtime::Current()->DetachCurrentThread();
}
WaitForOtherNonDaemonThreadsToExit();
- // Disable GC and wait for GC to complete in case there are still daemon threads doing
- // allocations.
+ // The only caller of this function, ~Runtime, has already disabled GC and
+ // ensured that the last GC is finished.
gc::Heap* const heap = Runtime::Current()->GetHeap();
- heap->DisableGCForShutdown();
- // In case a GC is in progress, wait for it to finish.
- heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current());
+ CHECK(heap->IsGCDisabledForShutdown());
+
// TODO: there's an unaddressed race here where a thread may attach during shutdown, see
// Thread::Init.
SuspendAllDaemonThreadsForShutdown();
@@ -124,10 +123,10 @@ pid_t ThreadList::GetLockOwner() {
void ThreadList::DumpNativeStacks(std::ostream& os) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
+ unwindstack::AndroidLocalUnwinder unwinder;
for (const auto& thread : list_) {
os << "DUMPING THREAD " << thread->GetTid() << "\n";
- DumpNativeStack(os, thread->GetTid(), map.get(), "\t");
+ DumpNativeStack(os, unwinder, thread->GetTid(), "\t");
os << "\n";
}
}
@@ -153,7 +152,7 @@ static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_s
// refactor DumpState to avoid skipping analysis.
Thread::DumpState(os, nullptr, tid);
if (dump_native_stack) {
- DumpNativeStack(os, tid, nullptr, " native: ");
+ DumpNativeStack(os, tid, " native: ");
}
os << std::endl;
}
@@ -195,11 +194,8 @@ class DumpCheckpoint final : public Closure {
// Avoid verifying count in case a thread doesn't end up passing through the barrier.
// This avoids a SIGABRT that would otherwise happen in the destructor.
barrier_(0, /*verify_count_on_shutdown=*/false),
- backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
+ unwinder_(std::vector<std::string>{}, std::vector<std::string>{"oat", "odex"}),
dump_native_stack_(dump_native_stack) {
- if (backtrace_map_ != nullptr) {
- backtrace_map_->SetSuffixesToIgnore(std::vector<std::string> { "oat", "odex" });
- }
}
void Run(Thread* thread) override {
@@ -210,7 +206,7 @@ class DumpCheckpoint final : public Closure {
std::ostringstream local_os;
{
ScopedObjectAccess soa(self);
- thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
+ thread->Dump(local_os, unwinder_, dump_native_stack_);
}
{
// Use the logging lock to ensure serialization when writing to the common ostream.
@@ -237,7 +233,7 @@ class DumpCheckpoint final : public Closure {
// The barrier to be passed through and for the requestor to wait upon.
Barrier barrier_;
- // A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
- std::unique_ptr<BacktraceMap> backtrace_map_;
+ // A shared unwinder, so that all threads reuse the same map info and don't
+ // reacquire/parse it separately.
+ unwindstack::AndroidLocalUnwinder unwinder_;
// Whether we should dump the native stack.
const bool dump_native_stack_;
};
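For reference, a minimal sketch of driving the shared unwinder outside the checkpoint machinery. The Unwind/FormatFrame signatures follow libunwindstack's AndroidUnwinder.h as understood here and should be treated as assumptions:

    #include <sstream>
    #include <string>
    #include <vector>

    #include "unwindstack/AndroidUnwinder.h"

    // Hedged sketch: unwind and format the calling thread's native stack,
    // skipping frames from files with "oat"/"odex" suffixes as above.
    static std::string DumpCurrentThreadNativeStack() {
      std::ostringstream os;
      unwindstack::AndroidLocalUnwinder unwinder(
          std::vector<std::string>{}, std::vector<std::string>{"oat", "odex"});
      unwindstack::AndroidUnwinderData data;
      if (unwinder.Unwind(data)) {  // unwinds the calling thread by default
        for (const unwindstack::FrameData& frame : data.frames) {
          os << unwinder.FormatFrame(frame) << '\n';
        }
      }
      return os.str();
    }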
@@ -486,7 +482,6 @@ void ThreadList::RunEmptyCheckpoint() {
// Assume it's stuck and safe to dump its stack.
thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
/*dump_native_stack=*/ true,
- /*backtrace_map=*/ nullptr,
/*force_dump_stack=*/ true);
}
}
@@ -1275,7 +1270,7 @@ void ThreadList::Register(Thread* self) {
}
CHECK(!Contains(self));
list_.push_back(self);
- if (kUseReadBarrier) {
+ if (gUseReadBarrier) {
gc::collector::ConcurrentCopying* const cc =
Runtime::Current()->GetHeap()->ConcurrentCopyingCollector();
// Initialize according to the state of the CC collector.
@@ -1291,6 +1286,10 @@ void ThreadList::Unregister(Thread* self) {
DCHECK_EQ(self, Thread::Current());
CHECK_NE(self->GetState(), ThreadState::kRunnable);
Locks::mutator_lock_->AssertNotHeld(self);
+ if (self->tls32_.disable_thread_flip_count != 0) {
+ LOG(FATAL) << "Incomplete PrimitiveArrayCritical section at exit: " << *self << "count = "
+ << self->tls32_.disable_thread_flip_count;
+ }
VLOG(threads) << "ThreadList::Unregister() " << *self;
@@ -1320,7 +1319,7 @@ void ThreadList::Unregister(Thread* self) {
std::string thread_name;
self->GetThreadName(thread_name);
std::ostringstream os;
- DumpNativeStack(os, GetTid(), nullptr, " native: ", nullptr);
+ DumpNativeStack(os, GetTid(), " native: ", nullptr);
LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
break;
} else {
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 57d7f61a0c..b361d16d95 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -246,6 +246,11 @@ void ThreadPool::StopWorkers(Thread* self) {
started_ = false;
}
+bool ThreadPool::HasStarted(Thread* self) {
+ MutexLock mu(self, task_queue_lock_);
+ return started_;
+}
+
Task* ThreadPool::GetTask(Thread* self) {
MutexLock mu(self, task_queue_lock_);
while (!IsShuttingDown()) {
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index b9e5a97cb5..5c75733517 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -123,6 +123,9 @@ class ThreadPool {
// Do not allow workers to grab any new tasks.
void StopWorkers(Thread* self) REQUIRES(!task_queue_lock_);
+ // Returns whether the thread pool has started.
+ bool HasStarted(Thread* self) REQUIRES(!task_queue_lock_);
+
// Add a new task, the first available started worker will process it. Does not delete the task
// after running it, it is the caller's responsibility.
void AddTask(Thread* self, Task* task) REQUIRES(!task_queue_lock_);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index ec61726ff2..6b4fb291b1 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -757,7 +757,6 @@ void Trace::MethodExited(Thread* thread,
}
void Trace::MethodUnwind(Thread* thread,
- Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
ArtMethod* method,
uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
diff --git a/runtime/trace.h b/runtime/trace.h
index c6f36e4ab1..ab2fe8f9e5 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -184,7 +184,6 @@ class Trace final : public instrumentation::InstrumentationListener {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
override;
void MethodUnwind(Thread* thread,
- Handle<mirror::Object> this_object,
ArtMethod* method,
uint32_t dex_pc)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 006aa561ee..08452bd102 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -410,7 +410,6 @@ void Transaction::VisitArrayLogs(RootVisitor* visitor, ArenaStack* arena_stack)
for (auto& it : array_logs_) {
mirror::Array* old_root = it.first;
- CHECK(!old_root->IsObjectArray());
mirror::Array* new_root = old_root;
visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&new_root), RootInfo(kRootUnknown));
if (new_root != old_root) {
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 6b53687c0c..7b5a4960d1 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -20,7 +20,6 @@
#include "register_line.h"
#include "base/logging.h" // For VLOG.
-#include "debug_print.h"
#include "method_verifier.h"
#include "reg_type_cache-inl.h"
@@ -139,14 +138,6 @@ inline bool RegisterLine::VerifyRegisterType(MethodVerifier* verifier, uint32_t
}
verifier->Fail(fail_type) << "register v" << vsrc << " has type "
<< src_type << " but expected " << check_type;
- if (check_type.IsNonZeroReferenceTypes() &&
- !check_type.IsUnresolvedTypes() &&
- check_type.HasClass() &&
- src_type.IsNonZeroReferenceTypes() &&
- !src_type.IsUnresolvedTypes() &&
- src_type.HasClass()) {
- DumpB77342775DebugData(check_type.GetClass(), src_type.GetClass());
- }
return false;
}
if (check_type.IsLowHalf()) {