summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Android.bp1
-rw-r--r--adbconnection/adbconnection.cc9
-rw-r--r--build/Android.common_test.mk4
-rw-r--r--build/Android.gtest.mk13
-rw-r--r--build/art.go2
-rw-r--r--cmdline/cmdline_parser_test.cc2
-rw-r--r--cmdline/cmdline_types.h6
-rw-r--r--compiler/Android.bp6
-rw-r--r--compiler/compiled_method.cc6
-rw-r--r--compiler/compiled_method.h6
-rw-r--r--compiler/compiler.cc2
-rw-r--r--compiler/debug/debug_info.h46
-rw-r--r--compiler/debug/elf_debug_info_writer.h4
-rw-r--r--compiler/debug/elf_debug_line_writer.h2
-rw-r--r--compiler/debug/elf_debug_loc_writer.h2
-rw-r--r--compiler/debug/elf_debug_writer.cc68
-rw-r--r--compiler/debug/elf_debug_writer.h9
-rw-r--r--compiler/debug/elf_gnu_debugdata_writer.h13
-rw-r--r--compiler/debug/elf_symtab_writer.h74
-rw-r--r--compiler/debug/method_debug_info.h2
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc464
-rw-r--r--compiler/dex/dex_to_dex_compiler.h159
-rw-r--r--compiler/driver/compiled_method_storage.cc11
-rw-r--r--compiler/driver/compiler_driver.cc139
-rw-r--r--compiler/driver/compiler_driver.h34
-rw-r--r--compiler/driver/dex_compilation_unit.cc3
-rw-r--r--compiler/exception_test.cc2
-rw-r--r--compiler/jit/jit_compiler.cc1
-rw-r--r--compiler/linker/arm/relative_patcher_arm_base.cc4
-rw-r--r--compiler/linker/elf_builder.h74
-rw-r--r--compiler/optimizing/bounds_check_elimination.cc45
-rw-r--r--compiler/optimizing/builder.cc3
-rw-r--r--compiler/optimizing/builder.h5
-rw-r--r--compiler/optimizing/code_generator.cc2
-rw-r--r--compiler/optimizing/code_generator_arm64.cc16
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc100
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.h1
-rw-r--r--compiler/optimizing/code_generator_mips.cc16
-rw-r--r--compiler/optimizing/code_generator_mips64.cc16
-rw-r--r--compiler/optimizing/code_generator_vector_x86.cc10
-rw-r--r--compiler/optimizing/code_generator_x86.cc14
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc14
-rw-r--r--compiler/optimizing/code_sinking.cc4
-rw-r--r--compiler/optimizing/codegen_test.cc87
-rw-r--r--compiler/optimizing/constant_folding_test.cc20
-rw-r--r--compiler/optimizing/dead_code_elimination.cc72
-rw-r--r--compiler/optimizing/dead_code_elimination.h1
-rw-r--r--compiler/optimizing/dead_code_elimination_test.cc8
-rw-r--r--compiler/optimizing/dominator_test.cc34
-rw-r--r--compiler/optimizing/find_loops_test.cc28
-rw-r--r--compiler/optimizing/graph_checker.cc10
-rw-r--r--compiler/optimizing/graph_checker_test.cc14
-rw-r--r--compiler/optimizing/inliner.cc57
-rw-r--r--compiler/optimizing/instruction_builder.cc2
-rw-r--r--compiler/optimizing/instruction_builder.h3
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc8
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc8
-rw-r--r--compiler/optimizing/intrinsics_mips.cc9
-rw-r--r--compiler/optimizing/intrinsics_mips64.cc9
-rw-r--r--compiler/optimizing/intrinsics_x86.cc8
-rw-r--r--compiler/optimizing/intrinsics_x86_64.cc8
-rw-r--r--compiler/optimizing/linearize_test.cc19
-rw-r--r--compiler/optimizing/live_ranges_test.cc16
-rw-r--r--compiler/optimizing/liveness_test.cc28
-rw-r--r--compiler/optimizing/load_store_elimination.cc378
-rw-r--r--compiler/optimizing/nodes.h13
-rw-r--r--compiler/optimizing/optimization.cc2
-rw-r--r--compiler/optimizing/optimizing_cfi_test.cc4
-rw-r--r--compiler/optimizing/optimizing_compiler.cc50
-rw-r--r--compiler/optimizing/optimizing_compiler_stats.h1
-rw-r--r--compiler/optimizing/optimizing_unit_test.h60
-rw-r--r--compiler/optimizing/pretty_printer_test.cc28
-rw-r--r--compiler/optimizing/register_allocator_test.cc18
-rw-r--r--compiler/optimizing/scheduler_arm.cc6
-rw-r--r--compiler/optimizing/scheduler_test.cc6
-rw-r--r--compiler/optimizing/ssa_test.cc30
-rw-r--r--compiler/optimizing/suspend_check_test.cc16
-rw-r--r--compiler/utils/arm/assembler_arm_shared.h7
-rw-r--r--compiler/utils/arm/assembler_arm_test.h555
-rw-r--r--compiler/utils/arm/assembler_arm_vixl.cc6
-rw-r--r--compiler/utils/arm/assembler_arm_vixl.h4
-rw-r--r--compiler/utils/arm/jni_macro_assembler_arm_vixl.cc4
-rw-r--r--compiler/utils/assembler_thumb_test_expected.cc.inc2
-rw-r--r--compiler/utils/test_dex_file_builder.h4
-rw-r--r--dex2oat/Android.bp11
-rw-r--r--dex2oat/dex2oat.cc19
-rw-r--r--dex2oat/dex2oat_image_test.cc14
-rw-r--r--dex2oat/dex2oat_options.cc3
-rw-r--r--dex2oat/dex2oat_test.cc57
-rw-r--r--dex2oat/linker/elf_writer.h13
-rw-r--r--dex2oat/linker/elf_writer_quick.cc59
-rw-r--r--dex2oat/linker/image_test.h5
-rw-r--r--dex2oat/linker/oat_writer.cc348
-rw-r--r--dex2oat/linker/oat_writer.h11
-rw-r--r--dex2oat/linker/oat_writer_test.cc13
-rw-r--r--dexdump/dexdump.cc13
-rw-r--r--dexdump/dexdump_cfg.cc2
-rw-r--r--dexlayout/Android.bp6
-rw-r--r--dexlayout/compact_dex_writer.cc307
-rw-r--r--dexlayout/compact_dex_writer.h65
-rw-r--r--dexlayout/dex_ir.cc83
-rw-r--r--dexlayout/dex_ir.h44
-rw-r--r--dexlayout/dex_writer.cc165
-rw-r--r--dexlayout/dex_writer.h26
-rw-r--r--dexlayout/dexlayout.cc36
-rw-r--r--dexlayout/dexlayout.h2
-rw-r--r--dexlayout/dexlayout_main.cc12
-rw-r--r--dexlayout/dexlayout_test.cc29
-rw-r--r--dexlist/dexlist.cc6
-rw-r--r--oatdump/oatdump.cc20
-rw-r--r--openjdkjvmti/Android.bp2
-rw-r--r--openjdkjvmti/OpenjdkJvmTi.cc179
-rw-r--r--openjdkjvmti/art_jvmti.h68
-rw-r--r--openjdkjvmti/deopt_manager.cc31
-rw-r--r--openjdkjvmti/deopt_manager.h7
-rw-r--r--openjdkjvmti/fixed_up_dex_file.cc36
-rw-r--r--openjdkjvmti/ti_class.cc14
-rw-r--r--openjdkjvmti/ti_redefine.cc14
-rw-r--r--openjdkjvmti/ti_search.cc4
-rw-r--r--patchoat/Android.bp2
-rw-r--r--patchoat/patchoat.cc289
-rw-r--r--patchoat/patchoat.h15
-rw-r--r--patchoat/patchoat_test.cc278
-rw-r--r--profman/profman.cc28
-rw-r--r--runtime/Android.bp73
-rw-r--r--runtime/arch/arm/entrypoints_init_arm.cc1
-rw-r--r--runtime/arch/arm/quick_entrypoints_arm.S23
-rw-r--r--runtime/arch/arm64/entrypoints_init_arm64.cc1
-rw-r--r--runtime/arch/arm64/quick_entrypoints_arm64.S5
-rw-r--r--runtime/arch/mips/entrypoints_init_mips.cc2
-rw-r--r--runtime/arch/mips64/entrypoints_init_mips64.cc1
-rw-r--r--runtime/arch/x86/entrypoints_init_x86.cc1
-rw-r--r--runtime/arch/x86/quick_entrypoints_x86.S7
-rw-r--r--runtime/arch/x86_64/entrypoints_init_x86_64.cc1
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S10
-rw-r--r--runtime/art_method-inl.h2
-rw-r--r--runtime/art_method.cc8
-rw-r--r--runtime/art_method.h7
-rw-r--r--runtime/asm_support.h2
-rw-r--r--runtime/base/bit_string.h38
-rw-r--r--runtime/base/bit_string_test.cc26
-rw-r--r--runtime/base/bit_utils.h16
-rw-r--r--runtime/base/mutex.h1
-rw-r--r--runtime/class_linker.cc17
-rw-r--r--runtime/class_loader_context.cc14
-rw-r--r--runtime/class_loader_context_test.cc38
-rw-r--r--runtime/common_runtime_test.cc15
-rw-r--r--runtime/common_runtime_test.h6
-rw-r--r--runtime/dex/art_dex_file_loader.cc472
-rw-r--r--runtime/dex/art_dex_file_loader.h127
-rw-r--r--runtime/dex/code_item_accessors-inl.h16
-rw-r--r--runtime/dex/code_item_accessors-no_art-inl.h94
-rw-r--r--runtime/dex/code_item_accessors.h27
-rw-r--r--runtime/dex/code_item_accessors_test.cc39
-rw-r--r--runtime/dex/compact_dex_debug_info.cc117
-rw-r--r--runtime/dex/compact_dex_debug_info.h65
-rw-r--r--runtime/dex/compact_dex_debug_info_test.cc95
-rw-r--r--runtime/dex/compact_dex_file.cc47
-rw-r--r--runtime/dex/compact_dex_file.h187
-rw-r--r--runtime/dex/compact_dex_file_test.cc61
-rw-r--r--runtime/dex/compact_dex_utils.h37
-rw-r--r--runtime/dex/dex_file-inl.h2
-rw-r--r--runtime/dex/dex_file.h59
-rw-r--r--runtime/dex/dex_file_loader.cc472
-rw-r--r--runtime/dex/dex_file_loader.h125
-rw-r--r--runtime/dex/dex_file_test.cc46
-rw-r--r--runtime/dex/dex_file_tracking_registrar.cc4
-rw-r--r--runtime/dex/dex_file_verifier.cc19
-rw-r--r--runtime/dex/dex_file_verifier_test.cc4
-rw-r--r--runtime/dex/standard_dex_file.cc33
-rw-r--r--runtime/dex/standard_dex_file.h25
-rw-r--r--runtime/dex2oat_environment_test.h28
-rw-r--r--runtime/dex_to_dex_decompiler.cc14
-rw-r--r--runtime/dexopt_test.cc4
-rw-r--r--runtime/elf.h5
-rw-r--r--runtime/entrypoints/quick/quick_entrypoints_list.h1
-rw-r--r--runtime/entrypoints_order_test.cc3
-rw-r--r--runtime/fault_handler.cc14
-rw-r--r--runtime/gc/collector/concurrent_copying.cc16
-rw-r--r--runtime/gc/collector/concurrent_copying.h5
-rw-r--r--runtime/gc/collector/semi_space.cc1
-rw-r--r--runtime/gc/gc_cause.cc1
-rw-r--r--runtime/gc/gc_cause.h3
-rw-r--r--runtime/gc/heap.cc73
-rw-r--r--runtime/gc/heap.h29
-rw-r--r--runtime/gc/space/image_space.cc4
-rw-r--r--runtime/gc/space/region_space-inl.h54
-rw-r--r--runtime/gc/space/region_space.cc53
-rw-r--r--runtime/gc/space/region_space.h20
-rw-r--r--runtime/globals.h6
-rw-r--r--runtime/hidden_api_access_flags.h152
-rw-r--r--runtime/image.cc2
-rw-r--r--runtime/interpreter/interpreter_intrinsics.cc1
-rw-r--r--runtime/intrinsics_list.h1
-rw-r--r--runtime/jit/debugger_interface.cc65
-rw-r--r--runtime/jit/debugger_interface.h35
-rw-r--r--runtime/jit/jit_code_cache.cc6
-rw-r--r--runtime/leb128.h2
-rw-r--r--runtime/mem_map.cc3
-rw-r--r--runtime/mirror/class.h4
-rw-r--r--runtime/modifiers.h17
-rw-r--r--runtime/native/dalvik_system_DexFile.cc14
-rw-r--r--runtime/native/dalvik_system_VMStack.cc10
-rw-r--r--runtime/native/dalvik_system_ZygoteHooks.cc9
-rw-r--r--runtime/native_stack_dump.cc22
-rw-r--r--runtime/oat.h4
-rw-r--r--runtime/oat_file.cc101
-rw-r--r--runtime/oat_file.h17
-rw-r--r--runtime/oat_file_assistant.cc10
-rw-r--r--runtime/oat_file_manager.cc25
-rw-r--r--runtime/oat_file_test.cc28
-rw-r--r--runtime/obj_ptr.h2
-rw-r--r--runtime/parsed_options.cc5
-rw-r--r--runtime/quicken_info.h96
-rw-r--r--runtime/runtime.cc57
-rw-r--r--runtime/runtime.h18
-rw-r--r--runtime/runtime_options.def2
-rw-r--r--runtime/subtype_check_bits.h30
-rw-r--r--runtime/subtype_check_bits_and_status.h38
-rw-r--r--runtime/subtype_check_info.h15
-rw-r--r--runtime/subtype_check_info_test.cc56
-rw-r--r--runtime/thread.cc193
-rw-r--r--runtime/thread.h3
-rw-r--r--runtime/ti/agent.cc7
-rw-r--r--runtime/utils.cc5
-rw-r--r--runtime/utils.h14
-rw-r--r--runtime/vdex_file.cc228
-rw-r--r--runtime/vdex_file.h104
-rw-r--r--runtime/verifier/method_verifier.cc13
-rw-r--r--runtime/verifier/method_verifier.h4
-rw-r--r--test/004-NativeAllocations/src-art/Main.java147
-rwxr-xr-xtest/004-ThreadStress/check7
-rwxr-xr-xtest/071-dexfile-get-static-size/build12
-rw-r--r--test/071-dexfile-get-static-size/res/test1.dex (renamed from test/071-dexfile-get-static-size/test1.dex)bin1864 -> 1864 bytes
-rw-r--r--test/071-dexfile-get-static-size/res/test2.dex (renamed from test/071-dexfile-get-static-size/test2.dex)bin1264 -> 1264 bytes
-rw-r--r--test/071-dexfile-get-static-size/src/Main.java20
-rw-r--r--test/137-cfi/cfi.cc29
-rw-r--r--test/168-vmstack-annotated/expected.txt0
-rw-r--r--test/168-vmstack-annotated/info.txt1
-rw-r--r--test/168-vmstack-annotated/run18
-rw-r--r--test/168-vmstack-annotated/src/Main.java225
-rw-r--r--test/305-other-fault-handler/expected.txt2
-rw-r--r--test/305-other-fault-handler/fault_handler.cc102
-rw-r--r--test/305-other-fault-handler/info.txt3
-rw-r--r--test/305-other-fault-handler/src/Main.java25
-rw-r--r--test/449-checker-bce/src/Main.java88
-rw-r--r--test/530-checker-lse/src/Main.java175
-rw-r--r--test/608-checker-unresolved-lse/src/Main.java1
-rw-r--r--test/623-checker-loop-regressions/expected.txt1
-rw-r--r--test/623-checker-loop-regressions/src/Main.java7
-rw-r--r--test/639-checker-code-sinking/expected.txt2
-rw-r--r--test/639-checker-code-sinking/src/Main.java3
-rw-r--r--test/672-checker-throw-method/expected.txt1
-rw-r--r--test/672-checker-throw-method/info.txt1
-rw-r--r--test/672-checker-throw-method/src/Main.java244
-rw-r--r--test/673-checker-throw-vmethod/expected.txt1
-rw-r--r--test/673-checker-throw-vmethod/info.txt1
-rw-r--r--test/673-checker-throw-vmethod/src/Main.java219
-rw-r--r--test/983-source-transform-verify/source_transform.cc20
-rw-r--r--test/Android.bp1
-rw-r--r--test/HiddenApi/Main.java26
-rw-r--r--test/README.md2
-rwxr-xr-xtest/etc/run-test-jar14
-rw-r--r--test/knownfailures.json19
-rw-r--r--test/testrunner/target_config.py8
-rw-r--r--tools/hiddenapi/Android.bp64
-rw-r--r--tools/hiddenapi/README.md54
-rw-r--r--tools/hiddenapi/hiddenapi.cc408
-rw-r--r--tools/hiddenapi/hiddenapi_test.cc601
-rwxr-xr-xtools/jfuzz/run_dex_fuzz_test.py4
-rwxr-xr-xtools/jfuzz/run_jfuzz_test.py2
-rwxr-xr-xtools/run-jdwp-tests.sh12
-rwxr-xr-xtools/run-libcore-tests.sh14
273 files changed, 8754 insertions, 3463 deletions
diff --git a/Android.bp b/Android.bp
index 197860694b..caf4f9a325 100644
--- a/Android.bp
+++ b/Android.bp
@@ -47,6 +47,7 @@ subdirs = [
"tools/breakpoint-logger",
"tools/cpp-define-generator",
"tools/dmtracedump",
+ "tools/hiddenapi",
"tools/titrace",
"tools/wrapagentproperties",
]
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index a5c885a933..80cfc83d3c 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -63,9 +63,7 @@ static constexpr int kControlSockSendTimeout = 10;
static AdbConnectionState* gState;
static bool IsDebuggingPossible() {
- // TODO We need to do this on IsJdwpAllowed not IsDebuggable in order to support userdebug
- // workloads. For now we will only allow it when we are debuggable so that testing is easier.
- return art::Runtime::Current()->IsJavaDebuggable() && art::Dbg::IsJdwpAllowed();
+ return art::Dbg::IsJdwpAllowed();
}
// Begin running the debugger.
@@ -581,11 +579,14 @@ void AdbConnectionState::RunPollLoop(art::Thread* self) {
DCHECK(!agent_has_socket_);
if (!agent_loaded_) {
DCHECK(!agent_listening_);
+ // TODO Should we check in some other way if we are userdebug/eng?
+ CHECK(art::Dbg::IsJdwpAllowed());
// Load the agent now!
self->AssertNoPendingException();
art::Runtime::Current()->AttachAgent(/* JNIEnv* */ nullptr,
MakeAgentArg(),
- /* classloader */ nullptr);
+ /* classloader */ nullptr,
+ /*allow_non_debuggable_tooling*/ true);
if (self->IsExceptionPending()) {
LOG(ERROR) << "Failed to load agent " << agent_name_;
art::ScopedObjectAccess soa(self);
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 3d1f4343f1..7fae7f6200 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -23,10 +23,10 @@ include art/build/Android.common_path.mk
ifneq ($(TMPDIR),)
ART_HOST_TEST_DIR := $(TMPDIR)/test-art-$(shell echo $$PPID)
else
-# Use a BSD checksum calculated from PPID and USER as one of the path
+# Use a BSD checksum calculated from CWD and USER as one of the path
# components for the test output. This should allow us to run tests from
# multiple repositories at the same time.
-ART_HOST_TEST_DIR := /tmp/test-art-$(shell echo $$PPID-${USER} | sum | cut -d ' ' -f1)
+ART_HOST_TEST_DIR := /tmp/test-art-$(shell echo $$CWD-${USER} | sum | cut -d ' ' -f1)
endif
# List of known broken tests that we won't attempt to execute. The test name must be the full
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1f36cb4e46..4f5df03c19 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -36,6 +36,7 @@ GTEST_DEX_DIRECTORIES := \
ForClassLoaderD \
ExceptionHandle \
GetMethodSignature \
+ HiddenApi \
ImageLayoutA \
ImageLayoutB \
IMTA \
@@ -113,6 +114,7 @@ ART_GTEST_dexlayout_test_DEX_DEPS := ManyMethods
ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps
ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
+ART_GTEST_hiddenapi_test_DEX_DEPS := HiddenApi
ART_GTEST_image_test_DEX_DEPS := ImageLayoutA ImageLayoutB DefaultMethods
ART_GTEST_imtable_test_DEX_DEPS := IMTA IMTB
ART_GTEST_instrumentation_test_DEX_DEPS := Instrumentation
@@ -155,6 +157,11 @@ ART_GTEST_dex2oat_environment_tests_TARGET_DEPS := \
$(TARGET_CORE_IMAGE_interpreter_32) \
patchoatd-target
+ART_GTEST_oat_file_test_HOST_DEPS := \
+ $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
+ART_GTEST_oat_file_test_TARGET_DEPS := \
+ $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS)
+
ART_GTEST_oat_file_assistant_test_HOST_DEPS := \
$(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
ART_GTEST_oat_file_assistant_test_TARGET_DEPS := \
@@ -261,6 +268,11 @@ ART_GTEST_patchoat_test_TARGET_DEPS := \
ART_GTEST_profile_assistant_test_HOST_DEPS := profmand-host
ART_GTEST_profile_assistant_test_TARGET_DEPS := profmand-target
+ART_GTEST_hiddenapi_test_HOST_DEPS := \
+ $(HOST_CORE_IMAGE_DEFAULT_64) \
+ $(HOST_CORE_IMAGE_DEFAULT_32) \
+ hiddenapid-host
+
# The path for which all the source files are relative, not actually the current directory.
LOCAL_PATH := art
@@ -274,6 +286,7 @@ ART_TEST_MODULES := \
art_dexlayout_tests \
art_dexlist_tests \
art_dexoptanalyzer_tests \
+ art_hiddenapi_tests \
art_imgdiag_tests \
art_oatdump_tests \
art_patchoat_tests \
diff --git a/build/art.go b/build/art.go
index bf6eee6c41..59480a0d0f 100644
--- a/build/art.go
+++ b/build/art.go
@@ -66,7 +66,7 @@ func globalFlags(ctx android.BaseContext) ([]string, []string) {
"-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
}
- cdexLevel := envDefault(ctx, "ART_DEFAULT_COMPACT_DEX_LEVEL", "none")
+ cdexLevel := envDefault(ctx, "ART_DEFAULT_COMPACT_DEX_LEVEL", "fast")
cflags = append(cflags, "-DART_DEFAULT_COMPACT_DEX_LEVEL="+cdexLevel)
// We need larger stack overflow guards for ASAN, as the compiled code will have
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 5d672061df..70cc07eff0 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -375,7 +375,7 @@ TEST_F(CmdlineParserTest, TestJdwpProviderEmpty) {
TEST_F(CmdlineParserTest, TestJdwpProviderDefault) {
const char* opt_args = "-XjdwpProvider:default";
- EXPECT_SINGLE_PARSE_VALUE(JdwpProvider::kInternal, opt_args, M::JdwpProvider);
+ EXPECT_SINGLE_PARSE_VALUE(JdwpProvider::kAdbConnection, opt_args, M::JdwpProvider);
} // TEST_F
TEST_F(CmdlineParserTest, TestJdwpProviderInternal) {
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index d0d6bfd3ce..2bc7409bb6 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -77,10 +77,10 @@ struct CmdlineType<JdwpProvider> : CmdlineTypeParser<JdwpProvider> {
"Example: -XjdwpProvider:internal for internal jdwp implementation\n"
"Example: -XjdwpProvider:adbconnection for adb connection mediated jdwp implementation\n"
"Example: -XjdwpProvider:default for the default jdwp implementation"
- " (currently internal)\n");
- } else if (option == "internal" || option == "default") {
+ " (currently adbconnection)\n");
+ } else if (option == "internal") {
return Result::Success(JdwpProvider::kInternal);
- } else if (option == "adbconnection") {
+ } else if (option == "adbconnection" || option == "default") {
return Result::Success(JdwpProvider::kAdbConnection);
} else if (option == "none") {
return Result::Success(JdwpProvider::kNone);
diff --git a/compiler/Android.bp b/compiler/Android.bp
index a76539d71a..2e60e7d658 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -250,6 +250,12 @@ art_cc_library {
shared_libs: [
"libart",
],
+
+ pgo: {
+ instrumentation: true,
+ profile_file: "art/dex2oat.profdata",
+ benchmarks: ["dex2oat"],
+ }
}
art_cc_library {
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index e41371855d..0f69dbab94 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -159,4 +159,10 @@ CompiledMethod::~CompiledMethod() {
storage->ReleaseMethodInfo(method_info_);
}
+void CompiledMethod::ReleaseVMapTable() {
+ CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage();
+ storage->ReleaseVMapTable(vmap_table_);
+ vmap_table_ = nullptr;
+}
+
} // namespace art
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index acdce260e5..4e8f3efe5a 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -168,6 +168,10 @@ class CompiledMethod FINAL : public CompiledCode {
ArrayRef<const linker::LinkerPatch> GetPatches() const;
+ // The compiler sometimes unquickens shared code items. In that case, we need to clear the vmap
+ // table to avoid writing the quicken info to the vdex file.
+ void ReleaseVMapTable();
+
private:
static constexpr size_t kIsIntrinsicLsb = kNumberOfCompiledCodePackedBits;
static constexpr size_t kIsIntrinsicSize = 1u;
@@ -186,7 +190,7 @@ class CompiledMethod FINAL : public CompiledCode {
// For quick code, method specific information that is not very dedupe friendly (method indices).
const LengthPrefixedArray<uint8_t>* const method_info_;
// For quick code, holds code infos which contain stack maps, inline information, and etc.
- const LengthPrefixedArray<uint8_t>* const vmap_table_;
+ const LengthPrefixedArray<uint8_t>* vmap_table_;
// For quick code, a FDE entry for the debug_frame section.
const LengthPrefixedArray<uint8_t>* const cfi_info_;
// For quick code, linker patches needed by the method.
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index 60977b6bd5..7c7ae71d77 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -47,7 +47,7 @@ bool Compiler::IsPathologicalCase(const DexFile::CodeItem& code_item,
* Dalvik uses 16-bit uints for instruction and register counts. We'll limit to a quarter
* of that, which also guarantees we cannot overflow our 16-bit internal Quick SSA name space.
*/
- CodeItemDataAccessor accessor(&dex_file, &code_item);
+ CodeItemDataAccessor accessor(dex_file, &code_item);
if (accessor.InsnsSizeInCodeUnits() >= UINT16_MAX / 4) {
LOG(INFO) << "Method exceeds compiler instruction limit: "
<< accessor.InsnsSizeInCodeUnits()
diff --git a/compiler/debug/debug_info.h b/compiler/debug/debug_info.h
new file mode 100644
index 0000000000..04c6991ea3
--- /dev/null
+++ b/compiler/debug/debug_info.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEBUG_DEBUG_INFO_H_
+#define ART_COMPILER_DEBUG_DEBUG_INFO_H_
+
+#include <map>
+
+#include "base/array_ref.h"
+#include "method_debug_info.h"
+
+namespace art {
+class DexFile;
+
+namespace debug {
+
+// References inputs for all debug information which can be written into the ELF file.
+struct DebugInfo {
+ // Describes compiled code in the .text section.
+ ArrayRef<const MethodDebugInfo> compiled_methods;
+
+ // Describes dex-files in the .dex section.
+ std::map<uint32_t, const DexFile*> dex_files; // Offset in section -> dex file content.
+
+ bool Empty() const {
+ return compiled_methods.empty() && dex_files.empty();
+ }
+};
+
+} // namespace debug
+} // namespace art
+
+#endif // ART_COMPILER_DEBUG_DEBUG_INFO_H_
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index e2bea8e096..893cad288b 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -49,7 +49,7 @@ static void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) {
static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) {
std::vector<const char*> names;
- CodeItemDebugInfoAccessor accessor(mi->dex_file, mi->code_item);
+ CodeItemDebugInfoAccessor accessor(*mi->dex_file, mi->code_item, mi->dex_method_index);
if (accessor.HasCodeItem()) {
DCHECK(mi->dex_file != nullptr);
const uint8_t* stream = mi->dex_file->GetDebugInfoStream(accessor.DebugInfoOffset());
@@ -163,7 +163,7 @@ class ElfCompilationUnitWriter {
for (auto mi : compilation_unit.methods) {
DCHECK(mi->dex_file != nullptr);
const DexFile* dex = mi->dex_file;
- CodeItemDebugInfoAccessor accessor(dex, mi->code_item);
+ CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index);
const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method);
const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto);
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 9910e7a4ce..44504c1efb 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -159,7 +159,7 @@ class ElfDebugLineWriter {
PositionInfos dex2line_map;
DCHECK(mi->dex_file != nullptr);
const DexFile* dex = mi->dex_file;
- CodeItemDebugInfoAccessor accessor(dex, mi->code_item);
+ CodeItemDebugInfoAccessor accessor(*dex, mi->code_item, mi->dex_method_index);
const uint32_t debug_info_offset = accessor.DebugInfoOffset();
if (!dex->DecodeDebugPositionInfo(debug_info_offset, PositionInfoCallback, &dex2line_map)) {
continue;
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index 34c2919a21..9ea9f01cd9 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -149,7 +149,7 @@ static std::vector<VariableLocation> GetVariableLocations(
DCHECK_LT(stack_map_index, dex_register_maps.size());
DexRegisterMap dex_register_map = dex_register_maps[stack_map_index];
DCHECK(dex_register_map.IsValid());
- CodeItemDataAccessor accessor(method_info->dex_file, method_info->code_item);
+ CodeItemDataAccessor accessor(*method_info->dex_file, method_info->code_item);
reg_lo = dex_register_map.GetDexRegisterLocation(
vreg, accessor.RegistersSize(), code_info, encoding);
if (is64bitValue) {
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index a6267292bf..df5bb37358 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -38,18 +38,18 @@ namespace debug {
template <typename ElfTypes>
void WriteDebugInfo(linker::ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
+ const DebugInfo& debug_info,
dwarf::CFIFormat cfi_format,
bool write_oat_patches) {
// Write .strtab and .symtab.
- WriteDebugSymbols(builder, method_infos, true /* with_signature */);
+ WriteDebugSymbols(builder, false /* mini-debug-info */, debug_info);
// Write .debug_frame.
- WriteCFISection(builder, method_infos, cfi_format, write_oat_patches);
+ WriteCFISection(builder, debug_info.compiled_methods, cfi_format, write_oat_patches);
// Group the methods into compilation units based on class.
std::unordered_map<const DexFile::ClassDef*, ElfCompilationUnit> class_to_compilation_unit;
- for (const MethodDebugInfo& mi : method_infos) {
+ for (const MethodDebugInfo& mi : debug_info.compiled_methods) {
if (mi.dex_file != nullptr) {
auto& dex_class_def = mi.dex_file->GetClassDef(mi.class_def_index);
ElfCompilationUnit& cu = class_to_compilation_unit[&dex_class_def];
@@ -108,21 +108,27 @@ void WriteDebugInfo(linker::ElfBuilder<ElfTypes>* builder,
std::vector<uint8_t> MakeMiniDebugInfo(
InstructionSet isa,
const InstructionSetFeatures* features,
- uint64_t text_address,
- size_t text_size,
- const ArrayRef<const MethodDebugInfo>& method_infos) {
+ uint64_t text_section_address,
+ size_t text_section_size,
+ uint64_t dex_section_address,
+ size_t dex_section_size,
+ const DebugInfo& debug_info) {
if (Is64BitInstructionSet(isa)) {
return MakeMiniDebugInfoInternal<ElfTypes64>(isa,
features,
- text_address,
- text_size,
- method_infos);
+ text_section_address,
+ text_section_size,
+ dex_section_address,
+ dex_section_size,
+ debug_info);
} else {
return MakeMiniDebugInfoInternal<ElfTypes32>(isa,
features,
- text_address,
- text_size,
- method_infos);
+ text_section_address,
+ text_section_size,
+ dex_section_address,
+ dex_section_size,
+ debug_info);
}
}
@@ -131,9 +137,17 @@ static std::vector<uint8_t> MakeElfFileForJITInternal(
InstructionSet isa,
const InstructionSetFeatures* features,
bool mini_debug_info,
- const MethodDebugInfo& mi) {
- CHECK_EQ(mi.is_code_address_text_relative, false);
- ArrayRef<const MethodDebugInfo> method_infos(&mi, 1);
+ ArrayRef<const MethodDebugInfo> method_infos) {
+ CHECK_GT(method_infos.size(), 0u);
+ uint64_t min_address = std::numeric_limits<uint64_t>::max();
+ uint64_t max_address = 0;
+ for (const MethodDebugInfo& mi : method_infos) {
+ CHECK_EQ(mi.is_code_address_text_relative, false);
+ min_address = std::min(min_address, mi.code_address);
+ max_address = std::max(max_address, mi.code_address + mi.code_size);
+ }
+ DebugInfo debug_info{};
+ debug_info.compiled_methods = method_infos;
std::vector<uint8_t> buffer;
buffer.reserve(KB);
linker::VectorOutputStream out("Debug ELF file", &buffer);
@@ -144,14 +158,16 @@ static std::vector<uint8_t> MakeElfFileForJITInternal(
if (mini_debug_info) {
std::vector<uint8_t> mdi = MakeMiniDebugInfo(isa,
features,
- mi.code_address,
- mi.code_size,
- method_infos);
+ min_address,
+ max_address - min_address,
+ /* dex_section_address */ 0,
+ /* dex_section_size */ 0,
+ debug_info);
builder->WriteSection(".gnu_debugdata", &mdi);
} else {
- builder->GetText()->AllocateVirtualMemory(mi.code_address, mi.code_size);
+ builder->GetText()->AllocateVirtualMemory(min_address, max_address - min_address);
WriteDebugInfo(builder.get(),
- method_infos,
+ debug_info,
dwarf::DW_DEBUG_FRAME_FORMAT,
false /* write_oat_patches */);
}
@@ -164,11 +180,11 @@ std::vector<uint8_t> MakeElfFileForJIT(
InstructionSet isa,
const InstructionSetFeatures* features,
bool mini_debug_info,
- const MethodDebugInfo& method_info) {
+ ArrayRef<const MethodDebugInfo> method_infos) {
if (Is64BitInstructionSet(isa)) {
- return MakeElfFileForJITInternal<ElfTypes64>(isa, features, mini_debug_info, method_info);
+ return MakeElfFileForJITInternal<ElfTypes64>(isa, features, mini_debug_info, method_infos);
} else {
- return MakeElfFileForJITInternal<ElfTypes32>(isa, features, mini_debug_info, method_info);
+ return MakeElfFileForJITInternal<ElfTypes32>(isa, features, mini_debug_info, method_infos);
}
}
@@ -209,12 +225,12 @@ std::vector<uint8_t> WriteDebugElfFileForClasses(InstructionSet isa,
// Explicit instantiations
template void WriteDebugInfo<ElfTypes32>(
linker::ElfBuilder<ElfTypes32>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
+ const DebugInfo& debug_info,
dwarf::CFIFormat cfi_format,
bool write_oat_patches);
template void WriteDebugInfo<ElfTypes64>(
linker::ElfBuilder<ElfTypes64>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
+ const DebugInfo& debug_info,
dwarf::CFIFormat cfi_format,
bool write_oat_patches);
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index a47bf076b9..e442e0016c 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -23,6 +23,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "debug/dwarf/dwarf_constants.h"
+#include "debug/debug_info.h"
#include "linker/elf_builder.h"
namespace art {
@@ -36,7 +37,7 @@ struct MethodDebugInfo;
template <typename ElfTypes>
void WriteDebugInfo(
linker::ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
+ const DebugInfo& debug_info,
dwarf::CFIFormat cfi_format,
bool write_oat_patches);
@@ -45,13 +46,15 @@ std::vector<uint8_t> MakeMiniDebugInfo(
const InstructionSetFeatures* features,
uint64_t text_section_address,
size_t text_section_size,
- const ArrayRef<const MethodDebugInfo>& method_infos);
+ uint64_t dex_section_address,
+ size_t dex_section_size,
+ const DebugInfo& debug_info);
std::vector<uint8_t> MakeElfFileForJIT(
InstructionSet isa,
const InstructionSetFeatures* features,
bool mini_debug_info,
- const MethodDebugInfo& method_info);
+ ArrayRef<const MethodDebugInfo> method_infos);
std::vector<uint8_t> WriteDebugElfFileForClasses(
InstructionSet isa,
diff --git a/compiler/debug/elf_gnu_debugdata_writer.h b/compiler/debug/elf_gnu_debugdata_writer.h
index 78b8e2780c..a88c5cb213 100644
--- a/compiler/debug/elf_gnu_debugdata_writer.h
+++ b/compiler/debug/elf_gnu_debugdata_writer.h
@@ -82,18 +82,23 @@ static std::vector<uint8_t> MakeMiniDebugInfoInternal(
const InstructionSetFeatures* features,
typename ElfTypes::Addr text_section_address,
size_t text_section_size,
- const ArrayRef<const MethodDebugInfo>& method_infos) {
+ typename ElfTypes::Addr dex_section_address,
+ size_t dex_section_size,
+ const DebugInfo& debug_info) {
std::vector<uint8_t> buffer;
buffer.reserve(KB);
linker::VectorOutputStream out("Mini-debug-info ELF file", &buffer);
std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder(
new linker::ElfBuilder<ElfTypes>(isa, features, &out));
builder->Start(false /* write_program_headers */);
- // Mirror .text as NOBITS section since the added symbols will reference it.
+ // Mirror ELF sections as NOBITS since the added symbols will reference them.
builder->GetText()->AllocateVirtualMemory(text_section_address, text_section_size);
- WriteDebugSymbols(builder.get(), method_infos, false /* with_signature */);
+ if (dex_section_size != 0) {
+ builder->GetDex()->AllocateVirtualMemory(dex_section_address, dex_section_size);
+ }
+ WriteDebugSymbols(builder.get(), true /* mini-debug-info */, debug_info);
WriteCFISection(builder.get(),
- method_infos,
+ debug_info.compiled_methods,
dwarf::DW_DEBUG_FRAME_FORMAT,
false /* write_oat_paches */);
builder->End();
diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h
index 57e010f232..9c9e8b35b8 100644
--- a/compiler/debug/elf_symtab_writer.h
+++ b/compiler/debug/elf_symtab_writer.h
@@ -17,9 +17,13 @@
#ifndef ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_
#define ART_COMPILER_DEBUG_ELF_SYMTAB_WRITER_H_
+#include <map>
#include <unordered_set>
+#include "debug/debug_info.h"
#include "debug/method_debug_info.h"
+#include "dex/dex_file-inl.h"
+#include "dex/code_item_accessors.h"
#include "linker/elf_builder.h"
#include "utils.h"
@@ -35,22 +39,26 @@ namespace debug {
// one symbol which marks the whole .text section as code.
constexpr bool kGenerateSingleArmMappingSymbol = true;
+// Magic name for .symtab symbols which enumerate dex files used
+// by this ELF file (currently mmapped inside the .dex section).
+constexpr const char* kDexFileSymbolName = "$dexfile";
+
template <typename ElfTypes>
static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder,
- const ArrayRef<const MethodDebugInfo>& method_infos,
- bool with_signature) {
+ bool mini_debug_info,
+ const DebugInfo& debug_info) {
uint64_t mapping_symbol_address = std::numeric_limits<uint64_t>::max();
auto* strtab = builder->GetStrTab();
auto* symtab = builder->GetSymTab();
- if (method_infos.empty()) {
+ if (debug_info.Empty()) {
return;
}
// Find all addresses which contain deduped methods.
// The first instance of method is not marked deduped_, but the rest is.
std::unordered_set<uint64_t> deduped_addresses;
- for (const MethodDebugInfo& info : method_infos) {
+ for (const MethodDebugInfo& info : debug_info.compiled_methods) {
if (info.deduped) {
deduped_addresses.insert(info.code_address);
}
@@ -58,25 +66,21 @@ static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder,
strtab->Start();
strtab->Write(""); // strtab should start with empty string.
- std::string last_name;
- size_t last_name_offset = 0;
- for (const MethodDebugInfo& info : method_infos) {
+ // Add symbols for compiled methods.
+ for (const MethodDebugInfo& info : debug_info.compiled_methods) {
if (info.deduped) {
continue; // Add symbol only for the first instance.
}
size_t name_offset;
- if (!info.trampoline_name.empty()) {
- name_offset = strtab->Write(info.trampoline_name);
+ if (!info.custom_name.empty()) {
+ name_offset = strtab->Write(info.custom_name);
} else {
DCHECK(info.dex_file != nullptr);
- std::string name = info.dex_file->PrettyMethod(info.dex_method_index, with_signature);
+ std::string name = info.dex_file->PrettyMethod(info.dex_method_index, !mini_debug_info);
if (deduped_addresses.find(info.code_address) != deduped_addresses.end()) {
name += " [DEDUPED]";
}
- // If we write method names without signature, we might see the same name multiple times.
- name_offset = (name == last_name ? last_name_offset : strtab->Write(name));
- last_name = std::move(name);
- last_name_offset = name_offset;
+ name_offset = strtab->Write(name);
}
const auto* text = builder->GetText();
@@ -97,13 +101,47 @@ static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder,
}
}
}
+ // Add symbols for interpreted methods (with address range of the method's bytecode).
+ if (!debug_info.dex_files.empty() && builder->GetDex()->Exists()) {
+ auto dex = builder->GetDex();
+ for (auto it : debug_info.dex_files) {
+ uint64_t dex_address = dex->GetAddress() + it.first /* offset within the section */;
+ const DexFile* dex_file = it.second;
+ typename ElfTypes::Word dex_name = strtab->Write(kDexFileSymbolName);
+ symtab->Add(dex_name, dex, dex_address, dex_file->Size(), STB_GLOBAL, STT_FUNC);
+ if (mini_debug_info) {
+ continue; // Don't add interpreter method names to mini-debug-info for now.
+ }
+ for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
+ if (class_data == nullptr) {
+ continue;
+ }
+ for (ClassDataItemIterator item(*dex_file, class_data); item.HasNext(); item.Next()) {
+ if (!item.IsAtMethod()) {
+ continue;
+ }
+ const DexFile::CodeItem* code_item = item.GetMethodCodeItem();
+ if (code_item == nullptr) {
+ continue;
+ }
+ CodeItemInstructionAccessor code(*dex_file, code_item);
+ DCHECK(code.HasCodeItem());
+ std::string name = dex_file->PrettyMethod(item.GetMemberIndex(), !mini_debug_info);
+ size_t name_offset = strtab->Write(name);
+ uint64_t offset = reinterpret_cast<const uint8_t*>(code.Insns()) - dex_file->Begin();
+ uint64_t address = dex_address + offset;
+ size_t size = code.InsnsSizeInCodeUnits() * sizeof(uint16_t);
+ symtab->Add(name_offset, dex, address, size, STB_GLOBAL, STT_FUNC);
+ }
+ }
+ }
+ }
strtab->End();
// Symbols are buffered and written after names (because they are smaller).
- // We could also do two passes in this function to avoid the buffering.
- symtab->Start();
- symtab->Write();
- symtab->End();
+ symtab->WriteCachedSection();
}
} // namespace debug
diff --git a/compiler/debug/method_debug_info.h b/compiler/debug/method_debug_info.h
index 43c8de26aa..d0b03ec441 100644
--- a/compiler/debug/method_debug_info.h
+++ b/compiler/debug/method_debug_info.h
@@ -27,7 +27,7 @@ namespace art {
namespace debug {
struct MethodDebugInfo {
- std::string trampoline_name;
+ std::string custom_name;
const DexFile* dex_file; // Native methods (trampolines) do not reference dex file.
size_t class_def_index;
uint32_t dex_method_index;
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 52cb217980..28c7fe2c34 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -28,6 +28,7 @@
#include "compiled_method.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_instruction-inl.h"
+#include "dex_to_dex_decompiler.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "mirror/dex_cache.h"
@@ -44,81 +45,106 @@ const bool kEnableQuickening = true;
// Control check-cast elision.
const bool kEnableCheckCastEllision = true;
-struct QuickenedInfo {
- QuickenedInfo(uint32_t pc, uint16_t index) : dex_pc(pc), dex_member_index(index) {}
+DexToDexCompiler::DexToDexCompiler(CompilerDriver* driver)
+ : driver_(driver),
+ lock_("Quicken lock", kDexToDexCompilerLock) {
+ DCHECK(driver != nullptr);
+}
- uint32_t dex_pc;
- uint16_t dex_member_index;
-};
+void DexToDexCompiler::ClearState() {
+ MutexLock lock(Thread::Current(), lock_);
+ active_dex_file_ = nullptr;
+ active_bit_vector_ = nullptr;
+ seen_code_items_.clear();
+ should_quicken_.clear();
+ shared_code_items_.clear();
+ blacklisted_code_items_.clear();
+ shared_code_item_quicken_info_.clear();
+}
-class DexCompiler {
- public:
- DexCompiler(art::CompilerDriver& compiler,
- const DexCompilationUnit& unit,
- DexToDexCompilationLevel dex_to_dex_compilation_level)
- : driver_(compiler),
- unit_(unit),
- dex_to_dex_compilation_level_(dex_to_dex_compilation_level) {}
+size_t DexToDexCompiler::NumUniqueCodeItems(Thread* self) const {
+ MutexLock lock(self, lock_);
+ return seen_code_items_.size();
+}
- ~DexCompiler() {}
+BitVector* DexToDexCompiler::GetOrAddBitVectorForDex(const DexFile* dex_file) {
+ if (active_dex_file_ != dex_file) {
+ active_dex_file_ = dex_file;
+ auto inserted = should_quicken_.emplace(dex_file,
+ BitVector(dex_file->NumMethodIds(),
+ /*expandable*/ false,
+ Allocator::GetMallocAllocator()));
+ active_bit_vector_ = &inserted.first->second;
+ }
+ return active_bit_vector_;
+}
- void Compile();
+void DexToDexCompiler::MarkForCompilation(Thread* self,
+ const MethodReference& method_ref,
+ const DexFile::CodeItem* code_item) {
+ MutexLock lock(self, lock_);
+ BitVector* const bitmap = GetOrAddBitVectorForDex(method_ref.dex_file);
+ DCHECK(bitmap != nullptr);
+ DCHECK(!bitmap->IsBitSet(method_ref.index));
+ bitmap->SetBit(method_ref.index);
+ // Detect the shared code items.
+ if (!seen_code_items_.insert(code_item).second) {
+ shared_code_items_.insert(code_item);
+ }
+}
- const std::vector<QuickenedInfo>& GetQuickenedInfo() const {
- return quickened_info_;
+DexToDexCompiler::CompilationState::CompilationState(DexToDexCompiler* compiler,
+ const DexCompilationUnit& unit,
+ const CompilationLevel compilation_level,
+ const std::vector<uint8_t>* quicken_data)
+ : compiler_(compiler),
+ driver_(*compiler->GetDriver()),
+ unit_(unit),
+ compilation_level_(compilation_level),
+ already_quickened_(quicken_data != nullptr),
+ existing_quicken_info_(already_quickened_
+ ? ArrayRef<const uint8_t>(*quicken_data) : ArrayRef<const uint8_t>()) {}
+
+uint16_t DexToDexCompiler::CompilationState::NextIndex() {
+ DCHECK(already_quickened_);
+ if (kIsDebugBuild && quicken_index_ >= existing_quicken_info_.NumIndices()) {
+ for (const DexInstructionPcPair& pair : unit_.GetCodeItemAccessor()) {
+ LOG(ERROR) << pair->DumpString(nullptr);
+ }
+ LOG(FATAL) << "Mismatched number of quicken slots.";
}
+ const uint16_t ret = existing_quicken_info_.GetData(quicken_index_);
+ quicken_index_++;
+ return ret;
+}
- private:
- const DexFile& GetDexFile() const {
- return *unit_.GetDexFile();
+uint16_t DexToDexCompiler::CompilationState::GetIndexForInstruction(const Instruction* inst,
+ uint32_t index) {
+ if (UNLIKELY(already_quickened_)) {
+ return inst->IsQuickened() ? NextIndex() : index;
}
+ DCHECK(!inst->IsQuickened());
+ return index;
+}
+
+bool DexToDexCompiler::ShouldCompileMethod(const MethodReference& ref) {
+ // TODO: It's probably safe to avoid the lock here if the active_dex_file_ matches since we only
+ // call ShouldCompileMethod on one dex at a time.
+ MutexLock lock(Thread::Current(), lock_);
+ return GetOrAddBitVectorForDex(ref.dex_file)->IsBitSet(ref.index);
+}
- // Compiles a RETURN-VOID into a RETURN-VOID-BARRIER within a constructor where
- // a barrier is required.
- void CompileReturnVoid(Instruction* inst, uint32_t dex_pc);
-
- // Compiles a CHECK-CAST into 2 NOP instructions if it is known to be safe. In
- // this case, returns the second NOP instruction pointer. Otherwise, returns
- // the given "inst".
- Instruction* CompileCheckCast(Instruction* inst, uint32_t dex_pc);
-
- // Compiles a field access into a quick field access.
- // The field index is replaced by an offset within an Object where we can read
- // from / write to this field. Therefore, this does not involve any resolution
- // at runtime.
- // Since the field index is encoded with 16 bits, we can replace it only if the
- // field offset can be encoded with 16 bits too.
- void CompileInstanceFieldAccess(Instruction* inst, uint32_t dex_pc,
- Instruction::Code new_opcode, bool is_put);
-
- // Compiles a virtual method invocation into a quick virtual method invocation.
- // The method index is replaced by the vtable index where the corresponding
- // Executable can be found. Therefore, this does not involve any resolution
- // at runtime.
- // Since the method index is encoded with 16 bits, we can replace it only if the
- // vtable index can be encoded with 16 bits too.
- void CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
- Instruction::Code new_opcode, bool is_range);
-
- CompilerDriver& driver_;
- const DexCompilationUnit& unit_;
- const DexToDexCompilationLevel dex_to_dex_compilation_level_;
-
- // Filled by the compiler when quickening, in order to encode that information
- // in the .oat file. The runtime will use that information to get to the original
- // opcodes.
- std::vector<QuickenedInfo> quickened_info_;
-
- DISALLOW_COPY_AND_ASSIGN(DexCompiler);
-};
-
-void DexCompiler::Compile() {
- DCHECK_EQ(dex_to_dex_compilation_level_, DexToDexCompilationLevel::kOptimize);
- IterationRange<DexInstructionIterator> instructions(unit_.GetCodeItemAccessor().begin(),
- unit_.GetCodeItemAccessor().end());
+std::vector<uint8_t> DexToDexCompiler::CompilationState::Compile() {
+ DCHECK_EQ(compilation_level_, CompilationLevel::kOptimize);
+ const CodeItemDataAccessor& instructions = unit_.GetCodeItemAccessor();
for (DexInstructionIterator it = instructions.begin(); it != instructions.end(); ++it) {
const uint32_t dex_pc = it.DexPc();
Instruction* inst = const_cast<Instruction*>(&it.Inst());
+
+ if (!already_quickened_) {
+ DCHECK(!inst->IsQuickened());
+ }
+
switch (inst->Opcode()) {
case Instruction::RETURN_VOID:
CompileReturnVoid(inst, dex_pc);
@@ -134,84 +160,147 @@ void DexCompiler::Compile() {
break;
case Instruction::IGET:
+ case Instruction::IGET_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_QUICK, false);
break;
case Instruction::IGET_WIDE:
+ case Instruction::IGET_WIDE_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_WIDE_QUICK, false);
break;
case Instruction::IGET_OBJECT:
+ case Instruction::IGET_OBJECT_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_OBJECT_QUICK, false);
break;
case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BOOLEAN_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BOOLEAN_QUICK, false);
break;
case Instruction::IGET_BYTE:
+ case Instruction::IGET_BYTE_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_BYTE_QUICK, false);
break;
case Instruction::IGET_CHAR:
+ case Instruction::IGET_CHAR_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_CHAR_QUICK, false);
break;
case Instruction::IGET_SHORT:
+ case Instruction::IGET_SHORT_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IGET_SHORT_QUICK, false);
break;
case Instruction::IPUT:
+ case Instruction::IPUT_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_QUICK, true);
break;
case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BOOLEAN_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BOOLEAN_QUICK, true);
break;
case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_BYTE_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BYTE_QUICK, true);
break;
case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_CHAR_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_CHAR_QUICK, true);
break;
case Instruction::IPUT_SHORT:
+ case Instruction::IPUT_SHORT_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_SHORT_QUICK, true);
break;
case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_WIDE_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_WIDE_QUICK, true);
break;
case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_OBJECT_QUICK:
CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_OBJECT_QUICK, true);
break;
case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_QUICK:
CompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_QUICK, false);
break;
case Instruction::INVOKE_VIRTUAL_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
CompileInvokeVirtual(inst, dex_pc, Instruction::INVOKE_VIRTUAL_RANGE_QUICK, true);
break;
case Instruction::NOP:
- // We need to differentiate between check cast inserted NOP and normal NOP, put an invalid
- // index in the map for normal nops. This should be rare in real code.
- quickened_info_.push_back(QuickenedInfo(dex_pc, DexFile::kDexNoIndex16));
+ if (already_quickened_) {
+ const uint16_t reference_index = NextIndex();
+ quickened_info_.push_back(QuickenedInfo(dex_pc, reference_index));
+ if (reference_index == DexFile::kDexNoIndex16) {
+ // This means it was a normal nop and not a check-cast.
+ break;
+ }
+ const uint16_t type_index = NextIndex();
+ if (driver_.IsSafeCast(&unit_, dex_pc)) {
+ quickened_info_.push_back(QuickenedInfo(dex_pc, type_index));
+ }
+ ++it;
+ } else {
+ // We need to differentiate between check cast inserted NOP and normal NOP, put an invalid
+ // index in the map for normal nops. This should be rare in real code.
+ quickened_info_.push_back(QuickenedInfo(dex_pc, DexFile::kDexNoIndex16));
+ }
break;
default:
- DCHECK(!inst->IsQuickened());
// Nothing to do.
break;
}
}
+
+ if (already_quickened_) {
+ DCHECK_EQ(quicken_index_, existing_quicken_info_.NumIndices());
+ }
+
+ if (GetQuickenedInfo().empty()) {
+ // No need to create a CompiledMethod if there are no quickened opcodes.
+ return std::vector<uint8_t>();
+ }
+
+ std::vector<uint8_t> quicken_data;
+ if (kIsDebugBuild) {
+ // Double check that the counts line up with the size of the quicken info.
+ size_t quicken_count = 0;
+ for (const DexInstructionPcPair& pair : instructions) {
+ if (QuickenInfoTable::NeedsIndexForInstruction(&pair.Inst())) {
+ ++quicken_count;
+ }
+ }
+ CHECK_EQ(quicken_count, GetQuickenedInfo().size());
+ }
+
+ QuickenInfoTable::Builder builder(&quicken_data, GetQuickenedInfo().size());
+ // Length is encoded by the constructor.
+ for (const CompilationState::QuickenedInfo& info : GetQuickenedInfo()) {
+ // Dex pc is not serialized, only used for checking the instructions. Since we access the
+ // array based on the index of the quickened instruction, the indexes must line up perfectly.
+ // The reader side uses the NeedsIndexForInstruction function too.
+ const Instruction& inst = instructions.InstructionAt(info.dex_pc);
+ CHECK(QuickenInfoTable::NeedsIndexForInstruction(&inst)) << inst.Opcode();
+ builder.AddIndex(info.dex_member_index);
+ }
+ DCHECK(!quicken_data.empty());
+ return quicken_data;
}
-void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
+void DexToDexCompiler::CompilationState::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
DCHECK_EQ(inst->Opcode(), Instruction::RETURN_VOID);
if (unit_.IsConstructor()) {
// Are we compiling a non clinit constructor which needs a barrier ?
@@ -229,7 +318,8 @@ void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
inst->SetOpcode(Instruction::RETURN_VOID_NO_BARRIER);
}
-Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) {
+Instruction* DexToDexCompiler::CompilationState::CompileCheckCast(Instruction* inst,
+ uint32_t dex_pc) {
if (!kEnableCheckCastEllision) {
return inst;
}
@@ -246,27 +336,30 @@ Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) {
<< " by replacing it with 2 NOPs at dex pc "
<< StringPrintf("0x%x", dex_pc) << " in method "
<< GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
- quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegA_21c()));
- quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegB_21c()));
- // We are modifying 4 consecutive bytes.
- inst->SetOpcode(Instruction::NOP);
- inst->SetVRegA_10x(0u); // keep compliant with verifier.
- // Get to next instruction which is the second half of check-cast and replace
- // it by a NOP.
- inst = const_cast<Instruction*>(inst->Next());
- inst->SetOpcode(Instruction::NOP);
- inst->SetVRegA_10x(0u); // keep compliant with verifier.
+ if (!already_quickened_) {
+ quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegA_21c()));
+ quickened_info_.push_back(QuickenedInfo(dex_pc, inst->VRegB_21c()));
+
+ // We are modifying 4 consecutive bytes.
+ inst->SetOpcode(Instruction::NOP);
+ inst->SetVRegA_10x(0u); // keep compliant with verifier.
+ // Get to next instruction which is the second half of check-cast and replace
+ // it by a NOP.
+ inst = const_cast<Instruction*>(inst->Next());
+ inst->SetOpcode(Instruction::NOP);
+ inst->SetVRegA_10x(0u); // keep compliant with verifier.
+ }
return inst;
}
-void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
- uint32_t dex_pc,
- Instruction::Code new_opcode,
- bool is_put) {
+void DexToDexCompiler::CompilationState::CompileInstanceFieldAccess(Instruction* inst,
+ uint32_t dex_pc,
+ Instruction::Code new_opcode,
+ bool is_put) {
if (!kEnableQuickening) {
return;
}
- uint32_t field_idx = inst->VRegC_22c();
+ uint32_t field_idx = GetIndexForInstruction(inst, inst->VRegC_22c());
MemberOffset field_offset(0u);
bool is_volatile;
bool fast_path = driver_.ComputeInstanceFieldInfo(field_idx, &unit_, is_put,
@@ -278,20 +371,29 @@ void DexCompiler::CompileInstanceFieldAccess(Instruction* inst,
<< " by field offset " << field_offset.Int32Value()
<< " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
<< GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
- // We are modifying 4 consecutive bytes.
- inst->SetOpcode(new_opcode);
- // Replace field index by field offset.
- inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
+ if (!already_quickened_) {
+ // We are modifying 4 consecutive bytes.
+ inst->SetOpcode(new_opcode);
+ // Replace field index by field offset.
+ inst->SetVRegC_22c(static_cast<uint16_t>(field_offset.Int32Value()));
+ }
quickened_info_.push_back(QuickenedInfo(dex_pc, field_idx));
}
}
-void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
- Instruction::Code new_opcode, bool is_range) {
+const DexFile& DexToDexCompiler::CompilationState::GetDexFile() const {
+ return *unit_.GetDexFile();
+}
+
+void DexToDexCompiler::CompilationState::CompileInvokeVirtual(Instruction* inst,
+ uint32_t dex_pc,
+ Instruction::Code new_opcode,
+ bool is_range) {
if (!kEnableQuickening) {
return;
}
- uint32_t method_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+ uint32_t method_idx = GetIndexForInstruction(inst,
+ is_range ? inst->VRegB_3rc() : inst->VRegB_35c());
ScopedObjectAccess soa(Thread::Current());
ClassLinker* class_linker = unit_.GetClassLinker();
@@ -318,19 +420,20 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
<< " by vtable index " << vtable_idx
<< " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
<< GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
- // We are modifying 4 consecutive bytes.
- inst->SetOpcode(new_opcode);
- // Replace method index by vtable index.
- if (is_range) {
- inst->SetVRegB_3rc(static_cast<uint16_t>(vtable_idx));
- } else {
- inst->SetVRegB_35c(static_cast<uint16_t>(vtable_idx));
+ if (!already_quickened_) {
+ // We are modifying 4 consecutive bytes.
+ inst->SetOpcode(new_opcode);
+ // Replace method index by vtable index.
+ if (is_range) {
+ inst->SetVRegB_3rc(static_cast<uint16_t>(vtable_idx));
+ } else {
+ inst->SetVRegB_35c(static_cast<uint16_t>(vtable_idx));
+ }
}
quickened_info_.push_back(QuickenedInfo(dex_pc, method_idx));
}
-CompiledMethod* ArtCompileDEX(
- CompilerDriver* driver,
+CompiledMethod* DexToDexCompiler::CompileMethod(
const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type ATTRIBUTE_UNUSED,
@@ -338,69 +441,122 @@ CompiledMethod* ArtCompileDEX(
uint32_t method_idx,
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level) {
- DCHECK(driver != nullptr);
- if (dex_to_dex_compilation_level != DexToDexCompilationLevel::kDontDexToDexCompile) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScope<1> hs(soa.Self());
- ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- art::DexCompilationUnit unit(
- class_loader,
- class_linker,
- dex_file,
- code_item,
- class_def_idx,
- method_idx,
- access_flags,
- driver->GetVerifiedMethod(&dex_file, method_idx),
- hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)));
- art::optimizer::DexCompiler dex_compiler(*driver, unit, dex_to_dex_compilation_level);
- dex_compiler.Compile();
- if (dex_compiler.GetQuickenedInfo().empty()) {
- // No need to create a CompiledMethod if there are no quickened opcodes.
+ CompilationLevel compilation_level) {
+ if (compilation_level == CompilationLevel::kDontDexToDexCompile) {
+ return nullptr;
+ }
+
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScope<1> hs(soa.Self());
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ art::DexCompilationUnit unit(
+ class_loader,
+ class_linker,
+ dex_file,
+ code_item,
+ class_def_idx,
+ method_idx,
+ access_flags,
+ driver_->GetVerifiedMethod(&dex_file, method_idx),
+ hs.NewHandle(class_linker->FindDexCache(soa.Self(), dex_file)));
+
+ std::vector<uint8_t> quicken_data;
+ // If the code item is shared with multiple different method ids, make sure that we quicken only
+ // once and verify that all the dequicken maps match.
+ if (UNLIKELY(shared_code_items_.find(code_item) != shared_code_items_.end())) {
+ // For shared code items, use a lock to prevent races.
+ MutexLock mu(soa.Self(), lock_);
+ // Blacklisted means there was a quickening conflict previously, bail early.
+ if (blacklisted_code_items_.find(code_item) != blacklisted_code_items_.end()) {
return nullptr;
}
+ auto existing = shared_code_item_quicken_info_.find(code_item);
+ const bool already_quickened = existing != shared_code_item_quicken_info_.end();
+ {
+ CompilationState state(this,
+ unit,
+ compilation_level,
+ already_quickened ? &existing->second.quicken_data_ : nullptr);
+ quicken_data = state.Compile();
+ }
- // Create a `CompiledMethod`, with the quickened information in the vmap table.
- if (kIsDebugBuild) {
- // Double check that the counts line up with the size of the quicken info.
- size_t quicken_count = 0;
- for (const DexInstructionPcPair& pair : unit.GetCodeItemAccessor()) {
- if (QuickenInfoTable::NeedsIndexForInstruction(&pair.Inst())) {
- ++quicken_count;
+ // Already quickened, check that the data matches what was previously seen.
+ MethodReference method_ref(&dex_file, method_idx);
+ if (already_quickened) {
+ QuickenState* const existing_data = &existing->second;
+ if (existing_data->quicken_data_ != quicken_data) {
+ VLOG(compiler) << "Quicken data mismatch, dequickening method "
+ << dex_file.PrettyMethod(method_idx);
+ // Unquicken using the existing quicken data.
+ optimizer::ArtDecompileDEX(dex_file,
+ *code_item,
+ ArrayRef<const uint8_t>(existing_data->quicken_data_),
+ /* decompile_return_instruction */ false);
+ // Go clear the vmaps for all the methods that were already quickened to avoid writing them
+ // out during oat writing.
+ for (const MethodReference& ref : existing_data->methods_) {
+ CompiledMethod* method = driver_->GetCompiledMethod(ref);
+ DCHECK(method != nullptr);
+ method->ReleaseVMapTable();
}
+ // Blacklist the method to never attempt to quicken it in the future.
+ blacklisted_code_items_.insert(code_item);
+ shared_code_item_quicken_info_.erase(existing);
+ return nullptr;
}
- CHECK_EQ(quicken_count, dex_compiler.GetQuickenedInfo().size());
+ existing_data->methods_.push_back(method_ref);
+ } else {
+ QuickenState new_state;
+ new_state.methods_.push_back(method_ref);
+ new_state.quicken_data_ = quicken_data;
+ bool inserted = shared_code_item_quicken_info_.emplace(code_item, new_state).second;
+ CHECK(inserted) << "Failed to insert " << dex_file.PrettyMethod(method_idx);
}
- std::vector<uint8_t> quicken_data;
- for (QuickenedInfo info : dex_compiler.GetQuickenedInfo()) {
- // Dex pc is not serialized, only used for checking the instructions. Since we access the
- // array based on the index of the quickened instruction, the indexes must line up perfectly.
- // The reader side uses the NeedsIndexForInstruction function too.
- const Instruction& inst = unit.GetCodeItemAccessor().InstructionAt(info.dex_pc);
- CHECK(QuickenInfoTable::NeedsIndexForInstruction(&inst)) << inst.Opcode();
- // Add the index.
- quicken_data.push_back(static_cast<uint8_t>(info.dex_member_index >> 0));
- quicken_data.push_back(static_cast<uint8_t>(info.dex_member_index >> 8));
+
+ // Sanity check: re-quicken using the newly produced quicken data and
+ // verify that the result matches.
+ // Note that this needs to be behind the lock for this case since we may unquicken in another
+ // thread.
+ if (kIsDebugBuild) {
+ CompilationState state2(this, unit, compilation_level, &quicken_data);
+ std::vector<uint8_t> new_data = state2.Compile();
+ CHECK(new_data == quicken_data) << "Mismatch producing new quicken data";
}
- InstructionSet instruction_set = driver->GetInstructionSet();
- if (instruction_set == InstructionSet::kThumb2) {
- // Don't use the thumb2 instruction set to avoid the one off code delta.
- instruction_set = InstructionSet::kArm;
+ } else {
+ CompilationState state(this, unit, compilation_level, /*quicken_data*/ nullptr);
+ quicken_data = state.Compile();
+
+ // Sanity check: re-quicken using the newly produced quicken data and
+ // verify that the result matches.
+ if (kIsDebugBuild) {
+ CompilationState state2(this, unit, compilation_level, &quicken_data);
+ std::vector<uint8_t> new_data = state2.Compile();
+ CHECK(new_data == quicken_data) << "Mismatch producing new quicken data";
}
- return CompiledMethod::SwapAllocCompiledMethod(
- driver,
- instruction_set,
- ArrayRef<const uint8_t>(), // no code
- 0,
- 0,
- 0,
- ArrayRef<const uint8_t>(), // method_info
- ArrayRef<const uint8_t>(quicken_data), // vmap_table
- ArrayRef<const uint8_t>(), // cfi data
- ArrayRef<const linker::LinkerPatch>());
}
- return nullptr;
+
+ if (quicken_data.empty()) {
+ return nullptr;
+ }
+
+ // Create a `CompiledMethod`, with the quickened information in the vmap table.
+ InstructionSet instruction_set = driver_->GetInstructionSet();
+ if (instruction_set == InstructionSet::kThumb2) {
+ // Don't use the thumb2 instruction set to avoid the one off code delta.
+ instruction_set = InstructionSet::kArm;
+ }
+ CompiledMethod* ret = CompiledMethod::SwapAllocCompiledMethod(
+ driver_,
+ instruction_set,
+ ArrayRef<const uint8_t>(), // no code
+ 0,
+ 0,
+ 0,
+ ArrayRef<const uint8_t>(), // method_info
+ ArrayRef<const uint8_t>(quicken_data), // vmap_table
+ ArrayRef<const uint8_t>(), // cfi data
+ ArrayRef<const linker::LinkerPatch>());
+ return ret;
}
} // namespace optimizer
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 80b94d2dc3..abd048167c 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -17,14 +17,22 @@
#ifndef ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
#define ART_COMPILER_DEX_DEX_TO_DEX_COMPILER_H_
+#include <set>
+#include <unordered_map>
+#include <unordered_set>
+
+#include "base/bit_vector.h"
#include "dex/dex_file.h"
#include "handle.h"
#include "invoke_type.h"
+#include "method_reference.h"
+#include "quicken_info.h"
namespace art {
class CompiledMethod;
class CompilerDriver;
+class DexCompilationUnit;
namespace mirror {
class ClassLoader;
@@ -32,21 +40,144 @@ class ClassLoader;
namespace optimizer {
-enum class DexToDexCompilationLevel {
- kDontDexToDexCompile, // Only meaning wrt image time interpretation.
- kOptimize // Perform peep-hole optimizations.
+class DexToDexCompiler {
+ public:
+ enum class CompilationLevel {
+ kDontDexToDexCompile, // Only meaning wrt image time interpretation.
+ kOptimize // Perform peep-hole optimizations.
+ };
+
+ explicit DexToDexCompiler(CompilerDriver* driver);
+
+ CompiledMethod* CompileMethod(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ const CompilationLevel compilation_level) WARN_UNUSED;
+
+ void MarkForCompilation(Thread* self,
+ const MethodReference& method_ref,
+ const DexFile::CodeItem* code_item);
+
+ void ClearState();
+
+ CompilerDriver* GetDriver() {
+ return driver_;
+ }
+
+ bool ShouldCompileMethod(const MethodReference& ref);
+
+ size_t NumUniqueCodeItems(Thread* self) const;
+
+ private:
+ // Holds the state for compiling a single method.
+ struct CompilationState {
+ struct QuickenedInfo {
+ QuickenedInfo(uint32_t pc, uint16_t index) : dex_pc(pc), dex_member_index(index) {}
+
+ uint32_t dex_pc;
+ uint16_t dex_member_index;
+ };
+
+ CompilationState(DexToDexCompiler* compiler,
+ const DexCompilationUnit& unit,
+ const CompilationLevel compilation_level,
+ const std::vector<uint8_t>* quicken_data);
+
+ const std::vector<QuickenedInfo>& GetQuickenedInfo() const {
+ return quickened_info_;
+ }
+
+ // Returns the quickening info, or an empty array if it was not quickened.
+ // If already_quickened is true, then don't change anything but still return what the quicken
+ // data would have been.
+ std::vector<uint8_t> Compile();
+
+ const DexFile& GetDexFile() const;
+
+ // Compiles a RETURN-VOID into a RETURN-VOID-BARRIER within a constructor where
+ // a barrier is required.
+ void CompileReturnVoid(Instruction* inst, uint32_t dex_pc);
+
+ // Compiles a CHECK-CAST into 2 NOP instructions if it is known to be safe. In
+ // this case, returns the second NOP instruction pointer. Otherwise, returns
+ // the given "inst".
+ Instruction* CompileCheckCast(Instruction* inst, uint32_t dex_pc);
+
+ // Compiles a field access into a quick field access.
+ // The field index is replaced by an offset within an Object where we can read
+ // from / write to this field. Therefore, this does not involve any resolution
+ // at runtime.
+ // Since the field index is encoded with 16 bits, we can replace it only if the
+ // field offset can be encoded with 16 bits too.
+ void CompileInstanceFieldAccess(Instruction* inst, uint32_t dex_pc,
+ Instruction::Code new_opcode, bool is_put);
+
+ // Compiles a virtual method invocation into a quick virtual method invocation.
+ // The method index is replaced by the vtable index where the corresponding
+ // executable can be found. Therefore, this does not involve any resolution
+ // at runtime.
+ // Since the method index is encoded with 16 bits, we can replace it only if the
+ // vtable index can be encoded with 16 bits too.
+ void CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
+ Instruction::Code new_opcode, bool is_range);
+
+ // Return the next index.
+ uint16_t NextIndex();
+
+ // Returns the dequickened index if an instruction is quickened, otherwise return index.
+ uint16_t GetIndexForInstruction(const Instruction* inst, uint32_t index);
+
+ DexToDexCompiler* const compiler_;
+ CompilerDriver& driver_;
+ const DexCompilationUnit& unit_;
+ const CompilationLevel compilation_level_;
+
+ // Filled by the compiler when quickening, in order to encode that information
+ // in the .oat file. The runtime will use that information to get to the original
+ // opcodes.
+ std::vector<QuickenedInfo> quickened_info_;
+
+ // If the code item was already quickened previously.
+ const bool already_quickened_;
+ const QuickenInfoTable existing_quicken_info_;
+ uint32_t quicken_index_ = 0u;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationState);
+ };
+
+ struct QuickenState {
+ std::vector<MethodReference> methods_;
+ std::vector<uint8_t> quicken_data_;
+ };
+
+ BitVector* GetOrAddBitVectorForDex(const DexFile* dex_file) REQUIRES(lock_);
+
+ CompilerDriver* const driver_;
+
+ // State for adding methods (should this be in its own class?).
+ const DexFile* active_dex_file_ = nullptr;
+ BitVector* active_bit_vector_ = nullptr;
+
+ // Lock that guards duplicate code items and the bitmap.
+ mutable Mutex lock_;
+ // Record what method references are going to get quickened.
+ std::unordered_map<const DexFile*, BitVector> should_quicken_;
+ // Record what code items are already seen to detect when multiple methods have the same code
+ // item.
+ std::unordered_set<const DexFile::CodeItem*> seen_code_items_ GUARDED_BY(lock_);
+ // Guarded by lock_ during writing, accessed without a lock during quickening.
+ // This is safe because no thread is adding to the shared code items during the quickening phase.
+ std::unordered_set<const DexFile::CodeItem*> shared_code_items_;
+ std::unordered_set<const DexFile::CodeItem*> blacklisted_code_items_ GUARDED_BY(lock_);
+ std::unordered_map<const DexFile::CodeItem*, QuickenState> shared_code_item_quicken_info_
+ GUARDED_BY(lock_);
};
-std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs);
-
-CompiledMethod* ArtCompileDEX(CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- Handle<mirror::ClassLoader> class_loader,
- const DexFile& dex_file,
- DexToDexCompilationLevel dex_to_dex_compilation_level);
+
+std::ostream& operator<<(std::ostream& os, const DexToDexCompiler::CompilationLevel& rhs);
} // namespace optimizer
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index c8c2b6998f..48477abe5b 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -137,16 +137,7 @@ class CompiledMethodStorage::DedupeHashFunc {
return hash;
} else {
- size_t hash = 0x811c9dc5;
- for (uint32_t i = 0; i < len; ++i) {
- hash = (hash * 16777619) ^ data[i];
- }
- hash += hash << 13;
- hash ^= hash >> 7;
- hash += hash << 3;
- hash ^= hash >> 17;
- hash += hash << 5;
- return hash;
+ return HashBytes(data, len);
}
}
};
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index fe83a66d0f..6c5cc50269 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -255,24 +255,6 @@ class CompilerDriver::AOTCompilationStats {
DISALLOW_COPY_AND_ASSIGN(AOTCompilationStats);
};
-class CompilerDriver::DexFileMethodSet {
- public:
- explicit DexFileMethodSet(const DexFile& dex_file)
- : dex_file_(dex_file),
- method_indexes_(dex_file.NumMethodIds(), false, Allocator::GetMallocAllocator()) {
- }
- DexFileMethodSet(DexFileMethodSet&& other) = default;
-
- const DexFile& GetDexFile() const { return dex_file_; }
-
- BitVector& GetMethodIndexes() { return method_indexes_; }
- const BitVector& GetMethodIndexes() const { return method_indexes_; }
-
- private:
- const DexFile& dex_file_;
- BitVector method_indexes_;
-};
-
CompilerDriver::CompilerDriver(
const CompilerOptions* compiler_options,
VerificationResults* verification_results,
@@ -306,9 +288,8 @@ CompilerDriver::CompilerDriver(
compiled_method_storage_(swap_fd),
profile_compilation_info_(profile_compilation_info),
max_arena_alloc_(0),
- dex_to_dex_references_lock_("dex-to-dex references lock"),
- dex_to_dex_references_(),
- current_dex_to_dex_methods_(nullptr) {
+ compiling_dex_to_dex_(false),
+ dex_to_dex_compiler_(this) {
DCHECK(compiler_options_ != nullptr);
compiler_->Init();
@@ -398,7 +379,7 @@ void CompilerDriver::CompileAll(jobject class_loader,
FreeThreadPools();
}
-static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
+static optimizer::DexToDexCompiler::CompilationLevel GetDexToDexCompilationLevel(
Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file, const DexFile::ClassDef& class_def)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -410,7 +391,7 @@ static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
if (klass == nullptr) {
CHECK(self->IsExceptionPending());
self->ClearException();
- return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
+ return optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile;
}
// DexToDex at the kOptimize level may introduce quickened opcodes, which replace symbolic
// references with actual offsets. We cannot re-verify such instructions.
@@ -418,26 +399,23 @@ static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
// We store the verification information in the class status in the oat file, which the linker
// can validate (checksums) and use to skip load-time verification. It is thus safe to
// optimize when a class has been fully verified before.
- optimizer::DexToDexCompilationLevel max_level = optimizer::DexToDexCompilationLevel::kOptimize;
+ optimizer::DexToDexCompiler::CompilationLevel max_level =
+ optimizer::DexToDexCompiler::CompilationLevel::kOptimize;
if (driver.GetCompilerOptions().GetDebuggable()) {
// We are debuggable so definitions of classes might be changed. We don't want to do any
// optimizations that could break that.
- max_level = optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
- }
- if (!VdexFile::CanEncodeQuickenedData(dex_file)) {
- // Don't do any dex level optimizations if we cannot encode the quickening.
- return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
+ max_level = optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile;
}
if (klass->IsVerified()) {
// Class is verified so we can enable DEX-to-DEX compilation for performance.
return max_level;
} else {
// Class verification has failed: do not run DEX-to-DEX optimizations.
- return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile;
+ return optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile;
}
}
-static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
+static optimizer::DexToDexCompiler::CompilationLevel GetDexToDexCompilationLevel(
Thread* self,
const CompilerDriver& driver,
jobject jclass_loader,
@@ -474,7 +452,7 @@ static void CompileMethod(Thread* self,
uint32_t method_idx,
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
- optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
bool compilation_enabled,
Handle<mirror::DexCache> dex_cache) {
DCHECK(driver != nullptr);
@@ -482,18 +460,18 @@ static void CompileMethod(Thread* self,
uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
MethodReference method_ref(&dex_file, method_idx);
- if (driver->GetCurrentDexToDexMethods() != nullptr) {
+ if (driver->GetCompilingDexToDex()) {
+ optimizer::DexToDexCompiler* const compiler = &driver->GetDexToDexCompiler();
// This is the second pass when we dex-to-dex compile previously marked methods.
// TODO: Refactor the compilation to avoid having to distinguish the two passes
// here. That should be done on a higher level. http://b/29089975
- if (driver->GetCurrentDexToDexMethods()->IsBitSet(method_idx)) {
+ if (compiler->ShouldCompileMethod(method_ref)) {
VerificationResults* results = driver->GetVerificationResults();
DCHECK(results != nullptr);
const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
// Do not optimize if a VerifiedMethod is missing. SafeCast elision,
// for example, relies on it.
- compiled_method = optimizer::ArtCompileDEX(
- driver,
+ compiled_method = compiler->CompileMethod(
code_item,
access_flags,
invoke_type,
@@ -503,7 +481,7 @@ static void CompileMethod(Thread* self,
dex_file,
(verified_method != nullptr)
? dex_to_dex_compilation_level
- : optimizer::DexToDexCompilationLevel::kDontDexToDexCompile);
+ : optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile);
}
} else if ((access_flags & kAccNative) != 0) {
// Are we extracting only and have support for generic JNI down calls?
@@ -528,7 +506,7 @@ static void CompileMethod(Thread* self,
bool compile = compilation_enabled &&
// Basic checks, e.g., not <clinit>.
results->IsCandidateForCompilation(method_ref, access_flags) &&
- // Did not fail to create VerifiedMethod metadata.
+ // Did not fail to create VerifiedMethod metadata.
verified_method != nullptr &&
// Do not have failures that should punt to the interpreter.
!verified_method->HasRuntimeThrow() &&
@@ -550,10 +528,12 @@ static void CompileMethod(Thread* self,
dex_cache);
}
if (compiled_method == nullptr &&
- dex_to_dex_compilation_level != optimizer::DexToDexCompilationLevel::kDontDexToDexCompile) {
+ dex_to_dex_compilation_level !=
+ optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile) {
DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!driver->GetCompilingDexToDex());
// TODO: add a command-line option to disable DEX-to-DEX compilation ?
- driver->MarkForDexToDexCompilation(self, method_ref);
+ driver->GetDexToDexCompiler().MarkForCompilation(self, method_ref, code_item);
}
}
if (kTimeCompileMethod) {
@@ -620,14 +600,14 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
PreCompile(jclass_loader, dex_files, timings);
// Can we run DEX-to-DEX compiler on this class ?
- optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level =
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level =
GetDexToDexCompilationLevel(self,
*this,
jclass_loader,
*dex_file,
dex_file->GetClassDef(class_def_idx));
- DCHECK(current_dex_to_dex_methods_ == nullptr);
+ DCHECK(!compiling_dex_to_dex_);
CompileMethod(self,
this,
code_item,
@@ -641,19 +621,10 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
true,
dex_cache);
- ArrayRef<DexFileMethodSet> dex_to_dex_references;
- {
- // From this point on, we shall not modify dex_to_dex_references_, so
- // just grab a reference to it that we use without holding the mutex.
- MutexLock lock(Thread::Current(), dex_to_dex_references_lock_);
- dex_to_dex_references = ArrayRef<DexFileMethodSet>(dex_to_dex_references_);
- }
- if (!dex_to_dex_references.empty()) {
- DCHECK_EQ(dex_to_dex_references.size(), 1u);
- DCHECK(&dex_to_dex_references[0].GetDexFile() == dex_file);
- current_dex_to_dex_methods_ = &dex_to_dex_references.front().GetMethodIndexes();
- DCHECK(current_dex_to_dex_methods_->IsBitSet(method_idx));
- DCHECK_EQ(current_dex_to_dex_methods_->NumSetBits(), 1u);
+ const size_t num_methods = dex_to_dex_compiler_.NumUniqueCodeItems(self);
+ if (num_methods != 0) {
+ DCHECK_EQ(num_methods, 1u);
+ compiling_dex_to_dex_ = true;
CompileMethod(self,
this,
code_item,
@@ -666,7 +637,8 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
dex_to_dex_compilation_level,
true,
dex_cache);
- current_dex_to_dex_methods_ = nullptr;
+ compiling_dex_to_dex_ = false;
+ dex_to_dex_compiler_.ClearState();
}
FreeThreadPools();
@@ -711,7 +683,7 @@ static void ResolveConstStrings(Handle<mirror::DexCache> dex_cache,
}
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- for (const DexInstructionPcPair& inst : CodeItemInstructionAccessor(&dex_file, code_item)) {
+ for (const DexInstructionPcPair& inst : CodeItemInstructionAccessor(dex_file, code_item)) {
switch (inst->Opcode()) {
case Instruction::CONST_STRING:
case Instruction::CONST_STRING_JUMBO: {
@@ -1284,17 +1256,6 @@ bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) {
return IsImageClass(descriptor);
}
-void CompilerDriver::MarkForDexToDexCompilation(Thread* self, const MethodReference& method_ref) {
- MutexLock lock(self, dex_to_dex_references_lock_);
- // Since we're compiling one dex file at a time, we need to look for the
- // current dex file entry only at the end of dex_to_dex_references_.
- if (dex_to_dex_references_.empty() ||
- &dex_to_dex_references_.back().GetDexFile() != method_ref.dex_file) {
- dex_to_dex_references_.emplace_back(*method_ref.dex_file);
- }
- dex_to_dex_references_.back().GetMethodIndexes().SetBit(method_ref.index);
-}
-
bool CompilerDriver::CanAccessTypeWithoutChecks(ObjPtr<mirror::Class> referrer_class,
ObjPtr<mirror::Class> resolved_class) {
if (resolved_class == nullptr) {
@@ -2616,14 +2577,8 @@ void CompilerDriver::Compile(jobject class_loader,
: profile_compilation_info_->DumpInfo(&dex_files));
}
- current_dex_to_dex_methods_ = nullptr;
- Thread* const self = Thread::Current();
- {
- // Clear in case we aren't the first call to Compile.
- MutexLock mu(self, dex_to_dex_references_lock_);
- dex_to_dex_references_.clear();
- }
-
+ dex_to_dex_compiler_.ClearState();
+ compiling_dex_to_dex_ = false;
for (const DexFile* dex_file : dex_files) {
CHECK(dex_file != nullptr);
CompileDexFile(class_loader,
@@ -2638,23 +2593,21 @@ void CompilerDriver::Compile(jobject class_loader,
Runtime::Current()->ReclaimArenaPoolMemory();
}
- ArrayRef<DexFileMethodSet> dex_to_dex_references;
- {
- // From this point on, we shall not modify dex_to_dex_references_, so
- // just grab a reference to it that we use without holding the mutex.
- MutexLock lock(self, dex_to_dex_references_lock_);
- dex_to_dex_references = ArrayRef<DexFileMethodSet>(dex_to_dex_references_);
- }
- for (const auto& method_set : dex_to_dex_references) {
- current_dex_to_dex_methods_ = &method_set.GetMethodIndexes();
- CompileDexFile(class_loader,
- method_set.GetDexFile(),
- dex_files,
- parallel_thread_pool_.get(),
- parallel_thread_count_,
- timings);
+ if (dex_to_dex_compiler_.NumUniqueCodeItems(Thread::Current()) > 0u) {
+ compiling_dex_to_dex_ = true;
+ // TODO: Do not visit all of the dex files; it's probably rare that only one would have quickened
+ // methods though.
+ for (const DexFile* dex_file : dex_files) {
+ CompileDexFile(class_loader,
+ *dex_file,
+ dex_files,
+ parallel_thread_pool_.get(),
+ parallel_thread_count_,
+ timings);
+ }
+ dex_to_dex_compiler_.ClearState();
+ compiling_dex_to_dex_ = false;
}
- current_dex_to_dex_methods_ = nullptr;
VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
}
@@ -2705,7 +2658,7 @@ class CompileClassVisitor : public CompilationVisitor {
CompilerDriver* const driver = manager_->GetCompiler();
// Can we run DEX-to-DEX compiler on this class ?
- optimizer::DexToDexCompilationLevel dex_to_dex_compilation_level =
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level =
GetDexToDexCompilationLevel(soa.Self(), *driver, jclass_loader, dex_file, class_def);
ClassDataItemIterator it(dex_file, class_data);
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index ef16212fb7..87a8a186c1 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -35,6 +35,7 @@
#include "compiler.h"
#include "dex/dex_file.h"
#include "dex/dex_file_types.h"
+#include "dex/dex_to_dex_compiler.h"
#include "driver/compiled_method_storage.h"
#include "jit/profile_compilation_info.h"
#include "method_reference.h"
@@ -120,12 +121,11 @@ class CompilerDriver {
void CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings)
- REQUIRES(!Locks::mutator_lock_, !dex_to_dex_references_lock_);
+ REQUIRES(!Locks::mutator_lock_);
// Compile a single Method.
void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!dex_to_dex_references_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
VerificationResults* GetVerificationResults() const;
@@ -362,13 +362,6 @@ class CompilerDriver {
return true;
}
- void MarkForDexToDexCompilation(Thread* self, const MethodReference& method_ref)
- REQUIRES(!dex_to_dex_references_lock_);
-
- const BitVector* GetCurrentDexToDexMethods() const {
- return current_dex_to_dex_methods_;
- }
-
const ProfileCompilationInfo* GetProfileCompilationInfo() const {
return profile_compilation_info_;
}
@@ -381,6 +374,14 @@ class CompilerDriver {
|| android::base::EndsWith(boot_image_filename, "core-optimizing.art");
}
+ bool GetCompilingDexToDex() const {
+ return compiling_dex_to_dex_;
+ }
+
+ optimizer::DexToDexCompiler& GetDexToDexCompiler() {
+ return dex_to_dex_compiler_;
+ }
+
private:
void PreCompile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
@@ -447,7 +448,7 @@ class CompilerDriver {
void Compile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) REQUIRES(!dex_to_dex_references_lock_);
+ TimingLogger* timings);
void CompileDexFile(jobject class_loader,
const DexFile& dex_file,
const std::vector<const DexFile*>& dex_files,
@@ -539,14 +540,9 @@ class CompilerDriver {
size_t max_arena_alloc_;
- // Data for delaying dex-to-dex compilation.
- Mutex dex_to_dex_references_lock_;
- // In the first phase, dex_to_dex_references_ collects methods for dex-to-dex compilation.
- class DexFileMethodSet;
- std::vector<DexFileMethodSet> dex_to_dex_references_ GUARDED_BY(dex_to_dex_references_lock_);
- // In the second phase, current_dex_to_dex_methods_ points to the BitVector with method
- // indexes for dex-to-dex compilation in the current dex file.
- const BitVector* current_dex_to_dex_methods_;
+ // Compiler for dex to dex (quickening).
+ bool compiling_dex_to_dex_;
+ optimizer::DexToDexCompiler dex_to_dex_compiler_;
friend class CompileClassVisitor;
friend class DexToDexDecompilerTest;
diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc
index 1fe30de355..28e68c94df 100644
--- a/compiler/driver/dex_compilation_unit.cc
+++ b/compiler/driver/dex_compilation_unit.cc
@@ -40,8 +40,7 @@ DexCompilationUnit::DexCompilationUnit(Handle<mirror::ClassLoader> class_loader,
access_flags_(access_flags),
verified_method_(verified_method),
dex_cache_(dex_cache),
- code_item_accessor_(&dex_file, code_item) {
-}
+ code_item_accessor_(dex_file, code_item) {}
const std::string& DexCompilationUnit::GetSymbol() {
if (symbol_.empty()) {
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index 8f7ab05791..7bacacf91d 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -130,7 +130,7 @@ class ExceptionTest : public CommonRuntimeTest {
TEST_F(ExceptionTest, FindCatchHandler) {
ScopedObjectAccess soa(Thread::Current());
- CodeItemDataAccessor accessor(dex_, dex_->GetCodeItem(method_f_->GetCodeItemOffset()));
+ CodeItemDataAccessor accessor(*dex_, dex_->GetCodeItem(method_f_->GetCodeItemOffset()));
ASSERT_TRUE(accessor.HasCodeItem());
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 88e3e5b230..2c62095458 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -76,6 +76,7 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou
const ArrayRef<mirror::Class*> types_array(types, count);
std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array);
+ MutexLock mu(Thread::Current(), g_jit_debug_mutex);
CreateJITCodeEntry(std::move(elf_file));
}
}
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index cedbe5d97f..6e0286afac 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -250,12 +250,12 @@ std::vector<debug::MethodDebugInfo> ArmBaseRelativePatcher::GenerateThunkDebugIn
for (size_t i = start, num = data.NumberOfThunks(); i != num; ++i) {
debug::MethodDebugInfo info = {};
if (i == 0u) {
- info.trampoline_name = base_name;
+ info.custom_name = base_name;
} else {
// Add a disambiguating tag for subsequent identical thunks. Since the `thunks_`
// keeps records also for thunks in previous oat files, names based on the thunk
// index shall be unique across the whole multi-oat output.
- info.trampoline_name = base_name + "_" + std::to_string(i);
+ info.custom_name = base_name + "_" + std::to_string(i);
}
info.isa = instruction_set_;
info.is_code_address_text_relative = true;
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index aa3cd98595..1c875189c5 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_LINKER_ELF_BUILDER_H_
#include <vector>
+#include <unordered_map>
#include "arch/instruction_set.h"
#include "arch/mips/instruction_set_features_mips.h"
@@ -38,9 +39,10 @@ namespace linker {
// Elf_Ehdr - The ELF header.
// Elf_Phdr[] - Program headers for the linker.
// .note.gnu.build-id - Optional build ID section (SHA-1 digest).
-// .rodata - DEX files and oat metadata.
+// .rodata - Oat metadata.
// .text - Compiled code.
// .bss - Zero-initialized writeable section.
+// .dex - Reserved NOBITS space for dex-related data.
// .MIPS.abiflags - MIPS specific section.
// .dynstr - Names for .dynsym.
// .dynsym - A few oat-specific dynamic symbols.
@@ -195,6 +197,11 @@ class ElfBuilder FINAL {
return section_index_;
}
+ // Returns true if this section has been added.
+ bool Exists() const {
+ return section_index_ != 0;
+ }
+
private:
// Add this section to the list of generated ELF sections (if not there already).
// It also ensures the alignment is sufficient to generate valid program headers,
@@ -310,35 +317,36 @@ class ElfBuilder FINAL {
if (current_offset_ == 0) {
DCHECK(name.empty());
}
- Elf_Word offset = current_offset_;
- this->WriteFully(name.c_str(), name.length() + 1);
- current_offset_ += name.length() + 1;
- return offset;
+ auto res = written_names_.emplace(name, current_offset_);
+ if (res.second) { // Inserted.
+ this->WriteFully(name.c_str(), name.length() + 1);
+ current_offset_ += name.length() + 1;
+ }
+ return res.first->second; // Offset.
}
private:
Elf_Word current_offset_;
+ std::unordered_map<std::string, Elf_Word> written_names_; // Dedup strings.
};
// Writer of .dynsym and .symtab sections.
- class SymbolSection FINAL : public CachedSection {
+ class SymbolSection FINAL : public Section {
public:
SymbolSection(ElfBuilder<ElfTypes>* owner,
const std::string& name,
Elf_Word type,
Elf_Word flags,
Section* strtab)
- : CachedSection(owner,
- name,
- type,
- flags,
- strtab,
- /* info */ 0,
- sizeof(Elf_Off),
- sizeof(Elf_Sym)) {
- // The symbol table always has to start with NULL symbol.
- Elf_Sym null_symbol = Elf_Sym();
- CachedSection::Add(&null_symbol, sizeof(null_symbol));
+ : Section(owner,
+ name,
+ type,
+ flags,
+ strtab,
+ /* info */ 0,
+ sizeof(Elf_Off),
+ sizeof(Elf_Sym)) {
+ syms_.push_back(Elf_Sym()); // The symbol table always has to start with NULL symbol.
}
// Buffer symbol for this section. It will be written later.
@@ -361,6 +369,7 @@ class ElfBuilder FINAL {
Add(name, section_index, addr, size, binding, type);
}
+ // Buffer symbol for this section. It will be written later.
void Add(Elf_Word name,
Elf_Word section_index,
Elf_Addr addr,
@@ -374,8 +383,19 @@ class ElfBuilder FINAL {
sym.st_other = 0;
sym.st_shndx = section_index;
sym.st_info = (binding << 4) + (type & 0xf);
- CachedSection::Add(&sym, sizeof(sym));
+ syms_.push_back(sym);
+ }
+
+ Elf_Word GetCacheSize() { return syms_.size() * sizeof(Elf_Sym); }
+
+ void WriteCachedSection() {
+ this->Start();
+ this->WriteFully(syms_.data(), syms_.size() * sizeof(Elf_Sym));
+ this->End();
}
+
+ private:
+ std::vector<Elf_Sym> syms_; // Buffered/cached content of the whole section.
};
class AbiflagsSection FINAL : public Section {
@@ -503,6 +523,7 @@ class ElfBuilder FINAL {
rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0, kPageSize, 0),
bss_(this, ".bss", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
+ dex_(this, ".dex", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
dynstr_(this, ".dynstr", SHF_ALLOC, kPageSize),
dynsym_(this, ".dynsym", SHT_DYNSYM, SHF_ALLOC, &dynstr_),
hash_(this, ".hash", SHT_HASH, SHF_ALLOC, &dynsym_, 0, sizeof(Elf_Word), sizeof(Elf_Word)),
@@ -525,6 +546,7 @@ class ElfBuilder FINAL {
virtual_address_(0) {
text_.phdr_flags_ = PF_R | PF_X;
bss_.phdr_flags_ = PF_R | PF_W;
+ dex_.phdr_flags_ = PF_R;
dynamic_.phdr_flags_ = PF_R | PF_W;
dynamic_.phdr_type_ = PT_DYNAMIC;
eh_frame_hdr_.phdr_type_ = PT_GNU_EH_FRAME;
@@ -538,6 +560,7 @@ class ElfBuilder FINAL {
Section* GetRoData() { return &rodata_; }
Section* GetText() { return &text_; }
Section* GetBss() { return &bss_; }
+ Section* GetDex() { return &dex_; }
StringSection* GetStrTab() { return &strtab_; }
SymbolSection* GetSymTab() { return &symtab_; }
Section* GetEhFrame() { return &eh_frame_; }
@@ -666,7 +689,8 @@ class ElfBuilder FINAL {
Elf_Word text_size,
Elf_Word bss_size,
Elf_Word bss_methods_offset,
- Elf_Word bss_roots_offset) {
+ Elf_Word bss_roots_offset,
+ Elf_Word dex_size) {
std::string soname(elf_file_path);
size_t directory_separator_pos = soname.rfind('/');
if (directory_separator_pos != std::string::npos) {
@@ -679,6 +703,9 @@ class ElfBuilder FINAL {
if (bss_size != 0) {
bss_.AllocateVirtualMemory(bss_size);
}
+ if (dex_size != 0) {
+ dex_.AllocateVirtualMemory(dex_size);
+ }
if (isa_ == InstructionSet::kMips || isa_ == InstructionSet::kMips64) {
abiflags_.AllocateVirtualMemory(abiflags_.GetSize());
}
@@ -725,6 +752,14 @@ class ElfBuilder FINAL {
Elf_Word bsslastword_address = bss_.GetAddress() + bss_size - 4;
dynsym_.Add(oatbsslastword, &bss_, bsslastword_address, 4, STB_GLOBAL, STT_OBJECT);
}
+ if (dex_size != 0u) {
+ Elf_Word oatdex = dynstr_.Add("oatdex");
+ dynsym_.Add(oatdex, &dex_, dex_.GetAddress(), dex_size, STB_GLOBAL, STT_OBJECT);
+ Elf_Word oatdexlastword = dynstr_.Add("oatdexlastword");
+ Elf_Word oatdexlastword_address = dex_.GetAddress() + dex_size - 4;
+ dynsym_.Add(oatdexlastword, &dex_, oatdexlastword_address, 4, STB_GLOBAL, STT_OBJECT);
+ }
+
Elf_Word soname_offset = dynstr_.Add(soname);
// We do not really need a hash-table since there is so few entries.
@@ -967,6 +1002,7 @@ class ElfBuilder FINAL {
Section rodata_;
Section text_;
Section bss_;
+ Section dex_;
CachedStringSection dynstr_;
SymbolSection dynsym_;
CachedSection hash_;
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 9c2068ec5e..147df1e3e8 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -302,7 +302,7 @@ class ValueRange : public ArenaObject<kArenaAllocBoundsCheckElimination> {
ValueBound GetLower() const { return lower_; }
ValueBound GetUpper() const { return upper_; }
- bool IsConstantValueRange() { return lower_.IsConstant() && upper_.IsConstant(); }
+ bool IsConstantValueRange() const { return lower_.IsConstant() && upper_.IsConstant(); }
// If it's certain that this value range fits in other_range.
virtual bool FitsIn(ValueRange* other_range) const {
@@ -789,24 +789,33 @@ class BCEVisitor : public HGraphVisitor {
ApplyRangeFromComparison(left, block, false_successor, new_range);
}
} else if (cond == kCondNE || cond == kCondEQ) {
- if (left->IsArrayLength() && lower.IsConstant() && upper.IsConstant()) {
- // Special case:
- // length == [c,d] yields [c, d] along true
- // length != [c,d] yields [c, d] along false
- if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) {
- ValueRange* new_range = new (&allocator_) ValueRange(&allocator_, lower, upper);
- ApplyRangeFromComparison(
- left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
- }
- // In addition:
- // length == 0 yields [1, max] along false
- // length != 0 yields [1, max] along true
- if (lower.GetConstant() == 0 && upper.GetConstant() == 0) {
- ValueRange* new_range = new (&allocator_) ValueRange(
- &allocator_, ValueBound(nullptr, 1), ValueBound::Max());
- ApplyRangeFromComparison(
- left, block, cond == kCondEQ ? false_successor : true_successor, new_range);
+ if (left->IsArrayLength()) {
+ if (lower.IsConstant() && upper.IsConstant()) {
+ // Special case:
+ // length == [c,d] yields [c, d] along true
+ // length != [c,d] yields [c, d] along false
+ if (!lower.Equals(ValueBound::Min()) || !upper.Equals(ValueBound::Max())) {
+ ValueRange* new_range = new (&allocator_) ValueRange(&allocator_, lower, upper);
+ ApplyRangeFromComparison(
+ left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
+ }
+ // In addition:
+ // length == 0 yields [1, max] along false
+ // length != 0 yields [1, max] along true
+ if (lower.GetConstant() == 0 && upper.GetConstant() == 0) {
+ ValueRange* new_range = new (&allocator_) ValueRange(
+ &allocator_, ValueBound(nullptr, 1), ValueBound::Max());
+ ApplyRangeFromComparison(
+ left, block, cond == kCondEQ ? false_successor : true_successor, new_range);
+ }
}
+ } else if (lower.IsRelatedToArrayLength() && lower.Equals(upper)) {
+ // Special aliasing case, with x not array length itself:
+ // x == [length,length] yields x == length along true
+ // x != [length,length] yields x == length along false
+ ValueRange* new_range = new (&allocator_) ValueRange(&allocator_, lower, upper);
+ ApplyRangeFromComparison(
+ left, block, cond == kCondEQ ? true_successor : false_successor, new_range);
}
}
}
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index af537dd653..a1a5692ef6 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -43,7 +43,7 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
- const uint8_t* interpreter_metadata,
+ ArrayRef<const uint8_t> interpreter_metadata,
VariableSizedHandleScope* handles)
: graph_(graph),
dex_file_(&graph->GetDexFile()),
@@ -70,7 +70,6 @@ HGraphBuilder::HGraphBuilder(HGraph* graph,
compiler_driver_(nullptr),
code_generator_(nullptr),
compilation_stats_(nullptr),
- interpreter_metadata_(nullptr),
handles_(handles),
return_type_(return_type) {}
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index c16a3a928d..5a1914ce08 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_OPTIMIZING_BUILDER_H_
#include "base/arena_object.h"
+#include "base/array_ref.h"
#include "dex/code_item_accessors.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file.h"
@@ -40,7 +41,7 @@ class HGraphBuilder : public ValueObject {
CompilerDriver* driver,
CodeGenerator* code_generator,
OptimizingCompilerStats* compiler_stats,
- const uint8_t* interpreter_metadata,
+ ArrayRef<const uint8_t> interpreter_metadata,
VariableSizedHandleScope* handles);
// Only for unit testing.
@@ -73,7 +74,7 @@ class HGraphBuilder : public ValueObject {
CodeGenerator* const code_generator_;
OptimizingCompilerStats* const compilation_stats_;
- const uint8_t* const interpreter_metadata_;
+ const ArrayRef<const uint8_t> interpreter_metadata_;
VariableSizedHandleScope* const handles_;
const DataType::Type return_type_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 07894fd1b1..01155dcd37 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -911,7 +911,7 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph,
}
ArenaVector<size_t> covered(
loop_headers.size(), 0, graph.GetAllocator()->Adapter(kArenaAllocMisc));
- for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(&graph.GetDexFile(),
+ for (const DexInstructionPcPair& pair : CodeItemInstructionAccessor(graph.GetDexFile(),
&code_item)) {
const uint32_t dex_pc = pair.DexPc();
const Instruction& instruction = pair.Inst();
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 13886b32b3..13bbffa1e3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2097,13 +2097,17 @@ void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCod
Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
Register temp = temps.AcquireW();
- size_t status_offset = mirror::Class::StatusOffset().SizeValue();
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
// Even if the initialized flag is set, we need to ensure consistent memory ordering.
// TODO(vixl): Let the MacroAssembler handle MemOperand.
- __ Add(temp, class_reg, status_offset);
+ __ Add(temp, class_reg, status_byte_offset);
__ Ldarb(temp, HeapOperand(temp));
- __ Cmp(temp, enum_cast<>(ClassStatus::kInitialized));
+ __ Cmp(temp, shifted_initialized_value);
__ B(lo, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
}
@@ -3487,7 +3491,11 @@ void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant
}
void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
+
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 7f8353312f..577fe00dcd 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2776,7 +2776,11 @@ void CodeGeneratorARMVIXL::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_poi
}
void InstructionCodeGeneratorARMVIXL::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
+
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
@@ -5597,42 +5601,13 @@ Location LocationsBuilderARMVIXL::ArmEncodableConstantOrRegister(HInstruction* c
return Location::RequiresRegister();
}
-bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(HConstant* input_cst,
- Opcode opcode) {
- uint64_t value = static_cast<uint64_t>(Int64FromConstant(input_cst));
- if (DataType::Is64BitType(input_cst->GetType())) {
- Opcode high_opcode = opcode;
- SetCc low_set_cc = kCcDontCare;
- switch (opcode) {
- case SUB:
- // Flip the operation to an ADD.
- value = -value;
- opcode = ADD;
- FALLTHROUGH_INTENDED;
- case ADD:
- if (Low32Bits(value) == 0u) {
- return CanEncodeConstantAsImmediate(High32Bits(value), opcode, kCcDontCare);
- }
- high_opcode = ADC;
- low_set_cc = kCcSet;
- break;
- default:
- break;
- }
- return CanEncodeConstantAsImmediate(Low32Bits(value), opcode, low_set_cc) &&
- CanEncodeConstantAsImmediate(High32Bits(value), high_opcode, kCcDontCare);
- } else {
- return CanEncodeConstantAsImmediate(Low32Bits(value), opcode);
- }
-}
-
-// TODO(VIXL): Replace art::arm::SetCc` with `vixl32::FlagsUpdate after flags set optimization
-// enabled.
-bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(uint32_t value,
- Opcode opcode,
- SetCc set_cc) {
- ArmVIXLAssembler* assembler = codegen_->GetAssembler();
- if (assembler->ShifterOperandCanHold(opcode, value, set_cc)) {
+static bool CanEncode32BitConstantAsImmediate(
+ CodeGeneratorARMVIXL* codegen,
+ uint32_t value,
+ Opcode opcode,
+ vixl32::FlagsUpdate flags_update = vixl32::FlagsUpdate::DontCare) {
+ ArmVIXLAssembler* assembler = codegen->GetAssembler();
+ if (assembler->ShifterOperandCanHold(opcode, value, flags_update)) {
return true;
}
Opcode neg_opcode = kNoOperand;
@@ -5649,13 +5624,41 @@ bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(uint32_t value,
return false;
}
- if (assembler->ShifterOperandCanHold(neg_opcode, neg_value, set_cc)) {
+ if (assembler->ShifterOperandCanHold(neg_opcode, neg_value, flags_update)) {
return true;
}
return opcode == AND && IsPowerOfTwo(value + 1);
}
+bool LocationsBuilderARMVIXL::CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode) {
+ uint64_t value = static_cast<uint64_t>(Int64FromConstant(input_cst));
+ if (DataType::Is64BitType(input_cst->GetType())) {
+ Opcode high_opcode = opcode;
+ vixl32::FlagsUpdate low_flags_update = vixl32::FlagsUpdate::DontCare;
+ switch (opcode) {
+ case SUB:
+ // Flip the operation to an ADD.
+ value = -value;
+ opcode = ADD;
+ FALLTHROUGH_INTENDED;
+ case ADD:
+ if (Low32Bits(value) == 0u) {
+ return CanEncode32BitConstantAsImmediate(codegen_, High32Bits(value), opcode);
+ }
+ high_opcode = ADC;
+ low_flags_update = vixl32::FlagsUpdate::SetFlags;
+ break;
+ default:
+ break;
+ }
+ return CanEncode32BitConstantAsImmediate(codegen_, High32Bits(value), high_opcode) &&
+ CanEncode32BitConstantAsImmediate(codegen_, Low32Bits(value), opcode, low_flags_update);
+ } else {
+ return CanEncode32BitConstantAsImmediate(codegen_, Low32Bits(value), opcode);
+ }
+}
+
void InstructionCodeGeneratorARMVIXL::HandleFieldGet(HInstruction* instruction,
const FieldInfo& field_info) {
DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
@@ -7173,11 +7176,14 @@ void InstructionCodeGeneratorARMVIXL::GenerateClassInitializationCheck(
LoadClassSlowPathARMVIXL* slow_path, vixl32::Register class_reg) {
UseScratchRegisterScope temps(GetVIXLAssembler());
vixl32::Register temp = temps.Acquire();
- GetAssembler()->LoadFromOffset(kLoadUnsignedByte,
- temp,
- class_reg,
- mirror::Class::StatusOffset().Int32Value());
- __ Cmp(temp, enum_cast<>(ClassStatus::kInitialized));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ GetAssembler()->LoadFromOffset(kLoadUnsignedByte, temp, class_reg, status_byte_offset);
+ __ Cmp(temp, shifted_initialized_value);
__ B(lo, slow_path->GetEntryLabel());
// Even if the initialized flag is set, we may be in a situation where caches are not synced
// properly. Therefore, we do a memory fence.
@@ -8131,13 +8137,11 @@ void InstructionCodeGeneratorARMVIXL::GenerateAddLongConst(Location out,
return;
}
__ Adds(out_low, first_low, value_low);
- if (GetAssembler()->ShifterOperandCanHold(ADC, value_high, kCcDontCare)) {
+ if (GetAssembler()->ShifterOperandCanHold(ADC, value_high)) {
__ Adc(out_high, first_high, value_high);
- } else if (GetAssembler()->ShifterOperandCanHold(SBC, ~value_high, kCcDontCare)) {
- __ Sbc(out_high, first_high, ~value_high);
} else {
- LOG(FATAL) << "Unexpected constant " << value_high;
- UNREACHABLE();
+ DCHECK(GetAssembler()->ShifterOperandCanHold(SBC, ~value_high));
+ __ Sbc(out_high, first_high, ~value_high);
}
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index c46d17ccec..38570bb0fe 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -287,7 +287,6 @@ class LocationsBuilderARMVIXL : public HGraphVisitor {
Location ArithmeticZeroOrFpuRegister(HInstruction* input);
Location ArmEncodableConstantOrRegister(HInstruction* constant, Opcode opcode);
bool CanEncodeConstantAsImmediate(HConstant* input_cst, Opcode opcode);
- bool CanEncodeConstantAsImmediate(uint32_t value, Opcode opcode, SetCc set_cc = kCcDontCare);
CodeGeneratorARMVIXL* const codegen_;
InvokeDexCallingConventionVisitorARMVIXL parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ebe252a9c8..5c8e46ed19 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1915,8 +1915,14 @@ void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool d
void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
Register class_reg) {
- __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
- __ LoadConst32(AT, enum_cast<>(ClassStatus::kInitialized));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, status_byte_offset);
+ __ LoadConst32(AT, shifted_initialized_value);
__ Bltu(TMP, AT, slow_path->GetEntryLabel());
// Even if the initialized flag is set, we need to ensure consistent memory ordering.
__ Sync(0);
@@ -4028,7 +4034,11 @@ void LocationsBuilderMIPS::VisitGoto(HGoto* got) {
}
void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
+
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 3ea7b827bb..bcfe051c90 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1761,8 +1761,14 @@ void CodeGeneratorMIPS64::GenerateInvokeRuntime(int32_t entry_point_offset) {
void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
GpuRegister class_reg) {
- __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
- __ LoadConst32(AT, enum_cast<>(ClassStatus::kInitialized));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, status_byte_offset);
+ __ LoadConst32(AT, shifted_initialized_value);
__ Bltuc(TMP, AT, slow_path->GetEntryLabel());
// Even if the initialized flag is set, we need to ensure consistent memory ordering.
__ Sync(0);
@@ -3556,7 +3562,11 @@ void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant
}
void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
+
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
HLoopInformation* info = block->GetLoopInformation();
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index ad8128a5b1..7b4b85d2fe 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -92,8 +92,8 @@ void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* i
__ pshufd(dst, dst, Immediate(0));
break;
case DataType::Type::kInt64: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
DCHECK_EQ(2u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
__ movd(dst, locations->InAt(0).AsRegisterPairLow<Register>());
__ movd(tmp, locations->InAt(0).AsRegisterPairHigh<Register>());
__ punpckldq(dst, tmp);
@@ -101,13 +101,13 @@ void InstructionCodeGeneratorX86::VisitVecReplicateScalar(HVecReplicateScalar* i
break;
}
case DataType::Type::kFloat32:
- DCHECK(locations->InAt(0).Equals(locations->Out()));
DCHECK_EQ(4u, instruction->GetVectorLength());
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
__ shufps(dst, dst, Immediate(0));
break;
case DataType::Type::kFloat64:
- DCHECK(locations->InAt(0).Equals(locations->Out()));
DCHECK_EQ(2u, instruction->GetVectorLength());
+ DCHECK(locations->InAt(0).Equals(locations->Out()));
__ shufpd(dst, dst, Immediate(0));
break;
default:
@@ -160,8 +160,8 @@ void InstructionCodeGeneratorX86::VisitVecExtractScalar(HVecExtractScalar* instr
__ movd(locations->Out().AsRegister<Register>(), src);
break;
case DataType::Type::kInt64: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
DCHECK_EQ(2u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
__ movd(locations->Out().AsRegisterPairLow<Register>(), src);
__ pshufd(tmp, src, Immediate(1));
__ movd(locations->Out().AsRegisterPairHigh<Register>(), tmp);
@@ -1022,8 +1022,8 @@ void InstructionCodeGeneratorX86::VisitVecSetScalars(HVecSetScalars* instruction
__ movd(dst, locations->InAt(0).AsRegister<Register>());
break;
case DataType::Type::kInt64: {
- XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
DCHECK_EQ(2u, instruction->GetVectorLength());
+ XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
__ xorps(tmp, tmp);
__ movd(dst, locations->InAt(0).AsRegisterPairLow<Register>());
__ movd(tmp, locations->InAt(0).AsRegisterPairHigh<Register>());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 68532386e1..cbe9e0a35c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1347,7 +1347,10 @@ void CodeGeneratorX86::AddLocationAsTemp(Location location, LocationSummary* loc
}
void InstructionCodeGeneratorX86::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
@@ -6219,8 +6222,13 @@ void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
SlowPathCode* slow_path, Register class_reg) {
- __ cmpb(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
- Immediate(enum_cast<>(ClassStatus::kInitialized)));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ __ cmpb(Address(class_reg, status_byte_offset), Immediate(shifted_initialized_value));
__ j(kBelow, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
// No need for memory fence, thanks to the X86 memory model.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 1f8d822507..510eec4f30 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1449,7 +1449,10 @@ void CodeGeneratorX86_64::AddLocationAsTemp(Location location, LocationSummary*
}
void InstructionCodeGeneratorX86_64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
- DCHECK(!successor->IsExitBlock());
+ if (successor->IsExitBlock()) {
+ DCHECK(got->GetPrevious()->AlwaysThrows());
+ return; // no code needed
+ }
HBasicBlock* block = got->GetBlock();
HInstruction* previous = got->GetPrevious();
@@ -5425,8 +5428,13 @@ void ParallelMoveResolverX86_64::RestoreScratch(int reg) {
void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
SlowPathCode* slow_path, CpuRegister class_reg) {
- __ cmpb(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
- Immediate(enum_cast<>(ClassStatus::kInitialized)));
+ constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
+ const size_t status_byte_offset =
+ mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
+ constexpr uint32_t shifted_initialized_value =
+ enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+
+ __ cmpb(Address(class_reg, status_byte_offset), Immediate(shifted_initialized_value));
__ j(kBelow, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
// No need for memory fence, thanks to the x86-64 memory model.
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index d8ebac95a8..f4760d661f 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -34,7 +34,9 @@ void CodeSinking::Run() {
// TODO(ngeoffray): we do not profile branches yet, so use throw instructions
// as an indicator of an uncommon branch.
for (HBasicBlock* exit_predecessor : exit->GetPredecessors()) {
- if (exit_predecessor->GetLastInstruction()->IsThrow()) {
+ HInstruction* last = exit_predecessor->GetLastInstruction();
+ // Any predecessor of the exit that does not return, throws an exception.
+ if (!last->IsReturn() && !last->IsReturnVoid()) {
SinkCodeToUncommonBranch(exit_predecessor);
}
}
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 6eda289861..ba4040acad 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -74,8 +74,8 @@ static ::std::vector<CodegenTargetConfig> GetTargetConfigs() {
class CodegenTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0);
- void TestCodeLong(const uint16_t* data, bool has_result, int64_t expected);
+ void TestCode(const std::vector<uint16_t>& data, bool has_result = false, int32_t expected = 0);
+ void TestCodeLong(const std::vector<uint16_t>& data, bool has_result, int64_t expected);
void TestComparison(IfCondition condition,
int64_t i,
int64_t j,
@@ -83,7 +83,7 @@ class CodegenTest : public OptimizingUnitTest {
const CodegenTargetConfig target_config);
};
-void CodegenTest::TestCode(const uint16_t* data, bool has_result, int32_t expected) {
+void CodegenTest::TestCode(const std::vector<uint16_t>& data, bool has_result, int32_t expected) {
for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
ResetPoolAndAllocator();
HGraph* graph = CreateCFG(data);
@@ -93,7 +93,8 @@ void CodegenTest::TestCode(const uint16_t* data, bool has_result, int32_t expect
}
}
-void CodegenTest::TestCodeLong(const uint16_t* data, bool has_result, int64_t expected) {
+void CodegenTest::TestCodeLong(const std::vector<uint16_t>& data,
+ bool has_result, int64_t expected) {
for (const CodegenTargetConfig& target_config : GetTargetConfigs()) {
ResetPoolAndAllocator();
HGraph* graph = CreateCFG(data, DataType::Type::kInt64);
@@ -104,12 +105,12 @@ void CodegenTest::TestCodeLong(const uint16_t* data, bool has_result, int64_t ex
}
TEST_F(CodegenTest, ReturnVoid) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
TestCode(data);
}
TEST_F(CodegenTest, CFG1) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -117,7 +118,7 @@ TEST_F(CodegenTest, CFG1) {
}
TEST_F(CodegenTest, CFG2) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -126,21 +127,21 @@ TEST_F(CodegenTest, CFG2) {
}
TEST_F(CodegenTest, CFG3) {
- const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200,
Instruction::RETURN_VOID,
Instruction::GOTO | 0xFF00);
TestCode(data1);
- const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_16, 3,
Instruction::RETURN_VOID,
Instruction::GOTO_16, 0xFFFF);
TestCode(data2);
- const uint16_t data3[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data3 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 4, 0,
Instruction::RETURN_VOID,
Instruction::GOTO_32, 0xFFFF, 0xFFFF);
@@ -149,7 +150,7 @@ TEST_F(CodegenTest, CFG3) {
}
TEST_F(CodegenTest, CFG4) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID,
Instruction::GOTO | 0x100,
Instruction::GOTO | 0xFE00);
@@ -158,7 +159,7 @@ TEST_F(CodegenTest, CFG4) {
}
TEST_F(CodegenTest, CFG5) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -168,7 +169,7 @@ TEST_F(CodegenTest, CFG5) {
}
TEST_F(CodegenTest, IntConstant) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
@@ -176,7 +177,7 @@ TEST_F(CodegenTest, IntConstant) {
}
TEST_F(CodegenTest, Return1) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN | 0);
@@ -184,7 +185,7 @@ TEST_F(CodegenTest, Return1) {
}
TEST_F(CodegenTest, Return2) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 0 | 1 << 8,
Instruction::RETURN | 1 << 8);
@@ -193,7 +194,7 @@ TEST_F(CodegenTest, Return2) {
}
TEST_F(CodegenTest, Return3) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::RETURN | 1 << 8);
@@ -202,7 +203,7 @@ TEST_F(CodegenTest, Return3) {
}
TEST_F(CodegenTest, ReturnIf1) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::IF_EQ, 3,
@@ -213,7 +214,7 @@ TEST_F(CodegenTest, ReturnIf1) {
}
TEST_F(CodegenTest, ReturnIf2) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::IF_EQ | 0 << 4 | 1 << 8, 3,
@@ -224,17 +225,17 @@ TEST_F(CodegenTest, ReturnIf2) {
}
// Exercise bit-wise (one's complement) not-int instruction.
-#define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
-TEST_F(CodegenTest, TEST_NAME) { \
- const int32_t input = INPUT; \
- const uint16_t input_lo = Low16Bits(input); \
- const uint16_t input_hi = High16Bits(input); \
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM( \
- Instruction::CONST | 0 << 8, input_lo, input_hi, \
- Instruction::NOT_INT | 1 << 8 | 0 << 12 , \
- Instruction::RETURN | 1 << 8); \
- \
- TestCode(data, true, EXPECTED_OUTPUT); \
+#define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
+TEST_F(CodegenTest, TEST_NAME) { \
+ const int32_t input = INPUT; \
+ const uint16_t input_lo = Low16Bits(input); \
+ const uint16_t input_hi = High16Bits(input); \
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM( \
+ Instruction::CONST | 0 << 8, input_lo, input_hi, \
+ Instruction::NOT_INT | 1 << 8 | 0 << 12 , \
+ Instruction::RETURN | 1 << 8); \
+ \
+ TestCode(data, true, EXPECTED_OUTPUT); \
}
NOT_INT_TEST(ReturnNotIntMinus2, -2, 1)
@@ -256,7 +257,7 @@ TEST_F(CodegenTest, TEST_NAME) { \
const uint16_t word1 = High16Bits(Low32Bits(input)); \
const uint16_t word2 = Low16Bits(High32Bits(input)); \
const uint16_t word3 = High16Bits(High32Bits(input)); /* MSW. */ \
- const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM( \
+ const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM( \
Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3, \
Instruction::NOT_LONG | 2 << 8 | 0 << 12, \
Instruction::RETURN_WIDE | 2 << 8); \
@@ -306,7 +307,7 @@ TEST_F(CodegenTest, IntToLongOfLongToInt) {
const uint16_t word1 = High16Bits(Low32Bits(input));
const uint16_t word2 = Low16Bits(High32Bits(input));
const uint16_t word3 = High16Bits(High32Bits(input)); // MSW.
- const uint16_t data[] = FIVE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = FIVE_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3,
Instruction::CONST_WIDE | 2 << 8, 1, 0, 0, 0,
Instruction::ADD_LONG | 0, 0 << 8 | 2, // v0 <- 2^32 + 1
@@ -318,7 +319,7 @@ TEST_F(CodegenTest, IntToLongOfLongToInt) {
}
TEST_F(CodegenTest, ReturnAdd1) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT, 1 << 8 | 0,
@@ -328,7 +329,7 @@ TEST_F(CodegenTest, ReturnAdd1) {
}
TEST_F(CodegenTest, ReturnAdd2) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT_2ADDR | 1 << 12,
@@ -338,7 +339,7 @@ TEST_F(CodegenTest, ReturnAdd2) {
}
TEST_F(CodegenTest, ReturnAdd3) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::ADD_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
@@ -347,7 +348,7 @@ TEST_F(CodegenTest, ReturnAdd3) {
}
TEST_F(CodegenTest, ReturnAdd4) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::ADD_INT_LIT16, 3,
Instruction::RETURN);
@@ -356,7 +357,7 @@ TEST_F(CodegenTest, ReturnAdd4) {
}
TEST_F(CodegenTest, ReturnMulInt) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::MUL_INT, 1 << 8 | 0,
@@ -366,7 +367,7 @@ TEST_F(CodegenTest, ReturnMulInt) {
}
TEST_F(CodegenTest, ReturnMulInt2addr) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::MUL_INT_2ADDR | 1 << 12,
@@ -376,7 +377,7 @@ TEST_F(CodegenTest, ReturnMulInt2addr) {
}
TEST_F(CodegenTest, ReturnMulLong) {
- const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE | 0 << 8, 3, 0, 0, 0,
Instruction::CONST_WIDE | 2 << 8, 4, 0, 0, 0,
Instruction::MUL_LONG, 2 << 8 | 0,
@@ -386,7 +387,7 @@ TEST_F(CodegenTest, ReturnMulLong) {
}
TEST_F(CodegenTest, ReturnMulLong2addr) {
- const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE | 0 << 8, 3, 0, 0, 0,
Instruction::CONST_WIDE | 2 << 8, 4, 0, 0, 0,
Instruction::MUL_LONG_2ADDR | 2 << 12,
@@ -396,7 +397,7 @@ TEST_F(CodegenTest, ReturnMulLong2addr) {
}
TEST_F(CodegenTest, ReturnMulIntLit8) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
@@ -405,7 +406,7 @@ TEST_F(CodegenTest, ReturnMulIntLit8) {
}
TEST_F(CodegenTest, ReturnMulIntLit16) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT16, 3,
Instruction::RETURN);
@@ -578,7 +579,7 @@ TEST_F(CodegenTest, MaterializedCondition2) {
}
TEST_F(CodegenTest, ReturnDivIntLit8) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::DIV_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
@@ -587,7 +588,7 @@ TEST_F(CodegenTest, ReturnDivIntLit8) {
}
TEST_F(CodegenTest, ReturnDivInt2Addr) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::CONST_4 | 2 << 12 | 1 << 8,
Instruction::DIV_INT_2ADDR | 1 << 12,
diff --git a/compiler/optimizing/constant_folding_test.cc b/compiler/optimizing/constant_folding_test.cc
index e1980e080e..d27104752b 100644
--- a/compiler/optimizing/constant_folding_test.cc
+++ b/compiler/optimizing/constant_folding_test.cc
@@ -36,7 +36,7 @@ class ConstantFoldingTest : public OptimizingUnitTest {
public:
ConstantFoldingTest() : graph_(nullptr) { }
- void TestCode(const uint16_t* data,
+ void TestCode(const std::vector<uint16_t>& data,
const std::string& expected_before,
const std::string& expected_after_cf,
const std::string& expected_after_dce,
@@ -100,7 +100,7 @@ class ConstantFoldingTest : public OptimizingUnitTest {
* return v1 2. return v1
*/
TEST_F(ConstantFoldingTest, IntConstantFoldingNegation) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::NEG_INT | 1 << 8 | 0 << 12,
Instruction::RETURN | 1 << 8);
@@ -161,7 +161,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingNegation) {
const uint16_t word1 = High16Bits(Low32Bits(input));
const uint16_t word2 = Low16Bits(High32Bits(input));
const uint16_t word3 = High16Bits(High32Bits(input)); // MSW.
- const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = FOUR_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE | 0 << 8, word0, word1, word2, word3,
Instruction::NEG_LONG | 2 << 8 | 0 << 12,
Instruction::RETURN_WIDE | 2 << 8);
@@ -219,7 +219,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingNegation) {
* return v2 4. return v2
*/
TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition1) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
@@ -284,7 +284,7 @@ TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition1) {
* return v2 8. return v2
*/
TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition2) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
Instruction::ADD_INT_2ADDR | 0 << 8 | 1 << 12,
@@ -369,7 +369,7 @@ TEST_F(ConstantFoldingTest, IntConstantFoldingOnAddition2) {
* return v2 4. return v2
*/
TEST_F(ConstantFoldingTest, IntConstantFoldingOnSubtraction) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 3 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
Instruction::SUB_INT | 2 << 8, 0 | 1 << 8,
@@ -432,7 +432,7 @@ TEST_F(ConstantFoldingTest, IntConstantFoldingOnSubtraction) {
* return (v4, v5) 6. return-wide v4
*/
TEST_F(ConstantFoldingTest, LongConstantFoldingOnAddition) {
- const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = SIX_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE_16 | 0 << 8, 1,
Instruction::CONST_WIDE_16 | 2 << 8, 2,
Instruction::ADD_LONG | 4 << 8, 0 | 2 << 8,
@@ -496,7 +496,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingOnAddition) {
* return (v4, v5) 6. return-wide v4
*/
TEST_F(ConstantFoldingTest, LongConstantFoldingOnSubtraction) {
- const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = SIX_REGISTERS_CODE_ITEM(
Instruction::CONST_WIDE_16 | 0 << 8, 3,
Instruction::CONST_WIDE_16 | 2 << 8, 2,
Instruction::SUB_LONG | 4 << 8, 0 | 2 << 8,
@@ -569,7 +569,7 @@ TEST_F(ConstantFoldingTest, LongConstantFoldingOnSubtraction) {
* return v2 13. return v2
*/
TEST_F(ConstantFoldingTest, IntConstantFoldingAndJumps) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 1 << 12,
Instruction::CONST_4 | 1 << 8 | 2 << 12,
Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
@@ -672,7 +672,7 @@ TEST_F(ConstantFoldingTest, IntConstantFoldingAndJumps) {
* return-void 7. return
*/
TEST_F(ConstantFoldingTest, ConstantCondition) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::CONST_4 | 0 << 8 | 0 << 12,
Instruction::IF_GEZ | 1 << 8, 3,
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 3cc7b0e78d..cca1055ac8 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -148,6 +148,77 @@ static HConstant* Evaluate(HCondition* condition, HInstruction* left, HInstructi
// Simplify the pattern:
//
+// B1
+// / \
+// | foo() // always throws
+// \ goto B2
+// \ /
+// B2
+//
+// Into:
+//
+// B1
+// / \
+// | foo()
+// | goto Exit
+// | |
+// B2 Exit
+//
+// Rationale:
+// Removal of the never taken edge to B2 may expose
+// other optimization opportunities, such as code sinking.
+bool HDeadCodeElimination::SimplifyAlwaysThrows() {
+ // Make sure exceptions go to exit.
+ if (graph_->HasTryCatch()) {
+ return false;
+ }
+ HBasicBlock* exit = graph_->GetExitBlock();
+ if (exit == nullptr) {
+ return false;
+ }
+
+ bool rerun_dominance_and_loop_analysis = false;
+
+ // Order does not matter, just pick one.
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ HInstruction* first = block->GetFirstInstruction();
+ HInstruction* last = block->GetLastInstruction();
+ // Ensure only one throwing instruction appears before goto.
+ if (first->AlwaysThrows() &&
+ first->GetNext() == last &&
+ last->IsGoto() &&
+ block->GetPhis().IsEmpty() &&
+ block->GetPredecessors().size() == 1u) {
+ DCHECK_EQ(block->GetSuccessors().size(), 1u);
+ HBasicBlock* pred = block->GetSinglePredecessor();
+ HBasicBlock* succ = block->GetSingleSuccessor();
+ // Ensure no computations are merged through throwing block.
+ // This does not prevent the optimization per se, but would
+ // require an elaborate clean up of the SSA graph.
+ if (succ != exit &&
+ !block->Dominates(pred) &&
+ pred->Dominates(succ) &&
+ succ->GetPredecessors().size() > 1u &&
+ succ->GetPhis().IsEmpty()) {
+ block->ReplaceSuccessor(succ, exit);
+ rerun_dominance_and_loop_analysis = true;
+ MaybeRecordStat(stats_, MethodCompilationStat::kSimplifyThrowingInvoke);
+ }
+ }
+ }
+
+ // We need to re-analyze the graph in order to run DCE afterwards.
+ if (rerun_dominance_and_loop_analysis) {
+ graph_->ClearLoopInformation();
+ graph_->ClearDominanceInformation();
+ graph_->BuildDominatorTree();
+ return true;
+ }
+ return false;
+}
+
+// Simplify the pattern:
+//
// B1 B2 ...
// goto goto goto
// \ | /
@@ -381,6 +452,7 @@ void HDeadCodeElimination::Run() {
// Simplify graph to generate more dead block patterns.
ConnectSuccessiveBlocks();
bool did_any_simplification = false;
+ did_any_simplification |= SimplifyAlwaysThrows();
did_any_simplification |= SimplifyIfs();
did_any_simplification |= RemoveDeadBlocks();
if (did_any_simplification) {
diff --git a/compiler/optimizing/dead_code_elimination.h b/compiler/optimizing/dead_code_elimination.h
index 84fd890eee..92a7f562e1 100644
--- a/compiler/optimizing/dead_code_elimination.h
+++ b/compiler/optimizing/dead_code_elimination.h
@@ -40,6 +40,7 @@ class HDeadCodeElimination : public HOptimization {
void MaybeRecordSimplifyIf();
bool RemoveDeadBlocks();
void RemoveDeadInstructions();
+ bool SimplifyAlwaysThrows();
bool SimplifyIfs();
void ConnectSuccessiveBlocks();
diff --git a/compiler/optimizing/dead_code_elimination_test.cc b/compiler/optimizing/dead_code_elimination_test.cc
index 929572ee3b..adb6ce1187 100644
--- a/compiler/optimizing/dead_code_elimination_test.cc
+++ b/compiler/optimizing/dead_code_elimination_test.cc
@@ -29,12 +29,12 @@ namespace art {
class DeadCodeEliminationTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data,
+ void TestCode(const std::vector<uint16_t>& data,
const std::string& expected_before,
const std::string& expected_after);
};
-void DeadCodeEliminationTest::TestCode(const uint16_t* data,
+void DeadCodeEliminationTest::TestCode(const std::vector<uint16_t>& data,
const std::string& expected_before,
const std::string& expected_after) {
HGraph* graph = CreateCFG(data);
@@ -73,7 +73,7 @@ void DeadCodeEliminationTest::TestCode(const uint16_t* data,
* return-void 7. return
*/
TEST_F(DeadCodeEliminationTest, AdditionAndConditionalJump) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::CONST_4 | 0 << 8 | 0 << 12,
Instruction::IF_GEZ | 1 << 8, 3,
@@ -135,7 +135,7 @@ TEST_F(DeadCodeEliminationTest, AdditionAndConditionalJump) {
* return 13. return-void
*/
TEST_F(DeadCodeEliminationTest, AdditionsAndInconditionalJumps) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 8 | 0 << 12,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::ADD_INT | 2 << 8, 0 | 1 << 8,
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index 572466eec8..1d72ba116e 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -26,10 +26,12 @@ namespace art {
class OptimizerTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length);
+ void TestCode(const std::vector<uint16_t>& data, const uint32_t* blocks, size_t blocks_length);
};
-void OptimizerTest::TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) {
+void OptimizerTest::TestCode(const std::vector<uint16_t>& data,
+ const uint32_t* blocks,
+ size_t blocks_length) {
HGraph* graph = CreateCFG(data);
ASSERT_EQ(graph->GetBlocks().size(), blocks_length);
for (size_t i = 0, e = blocks_length; i < e; ++i) {
@@ -49,7 +51,7 @@ void OptimizerTest::TestCode(const uint16_t* data, const uint32_t* blocks, size_
}
TEST_F(OptimizerTest, ReturnVoid) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID); // Block number 1
const uint32_t dominators[] = {
@@ -62,7 +64,7 @@ TEST_F(OptimizerTest, ReturnVoid) {
}
TEST_F(OptimizerTest, CFG1) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100, // Block number 1
Instruction::RETURN_VOID); // Block number 2
@@ -77,7 +79,7 @@ TEST_F(OptimizerTest, CFG1) {
}
TEST_F(OptimizerTest, CFG2) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100, // Block number 1
Instruction::GOTO | 0x100, // Block number 2
Instruction::RETURN_VOID); // Block number 3
@@ -94,7 +96,7 @@ TEST_F(OptimizerTest, CFG2) {
}
TEST_F(OptimizerTest, CFG3) {
- const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200, // Block number 1
Instruction::RETURN_VOID, // Block number 2
Instruction::GOTO | 0xFF00); // Block number 3
@@ -109,14 +111,14 @@ TEST_F(OptimizerTest, CFG3) {
TestCode(data1, dominators, sizeof(dominators) / sizeof(int));
- const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_16, 3,
Instruction::RETURN_VOID,
Instruction::GOTO_16, 0xFFFF);
TestCode(data2, dominators, sizeof(dominators) / sizeof(int));
- const uint16_t data3[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data3 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 4, 0,
Instruction::RETURN_VOID,
Instruction::GOTO_32, 0xFFFF, 0xFFFF);
@@ -125,7 +127,7 @@ TEST_F(OptimizerTest, CFG3) {
}
TEST_F(OptimizerTest, CFG4) {
- const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
Instruction::GOTO | 0xFF00);
@@ -138,14 +140,14 @@ TEST_F(OptimizerTest, CFG4) {
TestCode(data1, dominators, sizeof(dominators) / sizeof(int));
- const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 0, 0);
TestCode(data2, dominators, sizeof(dominators) / sizeof(int));
}
TEST_F(OptimizerTest, CFG5) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID, // Block number 1
Instruction::GOTO | 0x100, // Dead block
Instruction::GOTO | 0xFE00); // Block number 2
@@ -162,7 +164,7 @@ TEST_F(OptimizerTest, CFG5) {
}
TEST_F(OptimizerTest, CFG6) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -181,7 +183,7 @@ TEST_F(OptimizerTest, CFG6) {
}
TEST_F(OptimizerTest, CFG7) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3, // Block number 1
Instruction::GOTO | 0x100, // Block number 2
@@ -201,7 +203,7 @@ TEST_F(OptimizerTest, CFG7) {
}
TEST_F(OptimizerTest, CFG8) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3, // Block number 1
Instruction::GOTO | 0x200, // Block number 2
@@ -222,7 +224,7 @@ TEST_F(OptimizerTest, CFG8) {
}
TEST_F(OptimizerTest, CFG9) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3, // Block number 1
Instruction::GOTO | 0x200, // Block number 2
@@ -243,7 +245,7 @@ TEST_F(OptimizerTest, CFG9) {
}
TEST_F(OptimizerTest, CFG10) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6, // Block number 1
Instruction::IF_EQ, 3, // Block number 2
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index b799fb4688..75b8e9609e 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -31,7 +31,7 @@ class FindLoopsTest : public OptimizingUnitTest {};
TEST_F(FindLoopsTest, CFG1) {
// Constant is not used.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
@@ -42,7 +42,7 @@ TEST_F(FindLoopsTest, CFG1) {
}
TEST_F(FindLoopsTest, CFG2) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
@@ -53,7 +53,7 @@ TEST_F(FindLoopsTest, CFG2) {
}
TEST_F(FindLoopsTest, CFG3) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT_2ADDR | 1 << 12,
@@ -67,7 +67,7 @@ TEST_F(FindLoopsTest, CFG3) {
}
TEST_F(FindLoopsTest, CFG4) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -82,7 +82,7 @@ TEST_F(FindLoopsTest, CFG4) {
}
TEST_F(FindLoopsTest, CFG5) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -126,7 +126,7 @@ TEST_F(FindLoopsTest, Loop1) {
// while (a == a) {
// }
// return;
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0xFE00,
@@ -150,7 +150,7 @@ TEST_F(FindLoopsTest, Loop2) {
// while (a == a) {
// }
// return a;
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x400,
Instruction::IF_EQ, 4,
@@ -173,7 +173,7 @@ TEST_F(FindLoopsTest, Loop2) {
TEST_F(FindLoopsTest, Loop3) {
// Make sure we create a preheader of a loop when a header originally has two
// incoming blocks and one back edge.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -197,7 +197,7 @@ TEST_F(FindLoopsTest, Loop3) {
TEST_F(FindLoopsTest, Loop4) {
// Test loop with originally two back edges.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
Instruction::IF_EQ, 3,
@@ -221,7 +221,7 @@ TEST_F(FindLoopsTest, Loop4) {
TEST_F(FindLoopsTest, Loop5) {
// Test loop with two exit edges.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
Instruction::IF_EQ, 3,
@@ -244,7 +244,7 @@ TEST_F(FindLoopsTest, Loop5) {
}
TEST_F(FindLoopsTest, InnerLoop) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
Instruction::IF_EQ, 3,
@@ -273,7 +273,7 @@ TEST_F(FindLoopsTest, InnerLoop) {
}
TEST_F(FindLoopsTest, TwoLoops) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0xFE00, // first loop
@@ -301,7 +301,7 @@ TEST_F(FindLoopsTest, TwoLoops) {
}
TEST_F(FindLoopsTest, NonNaturalLoop) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x0100,
@@ -317,7 +317,7 @@ TEST_F(FindLoopsTest, NonNaturalLoop) {
}
TEST_F(FindLoopsTest, DoWhileLoop) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x0100,
Instruction::IF_EQ, 0xFFFF,
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index b1ac027a68..c88baa8610 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -31,7 +31,15 @@ namespace art {
using android::base::StringPrintf;
static bool IsAllowedToJumpToExitBlock(HInstruction* instruction) {
- return instruction->IsThrow() || instruction->IsReturn() || instruction->IsReturnVoid();
+ // Anything that returns is allowed to jump into the exit block.
+ if (instruction->IsReturn() || instruction->IsReturnVoid()) {
+ return true;
+ }
+ // Anything that always throws is allowed to jump into the exit block.
+ if (instruction->IsGoto() && instruction->GetPrevious() != nullptr) {
+ instruction = instruction->GetPrevious();
+ }
+ return instruction->AlwaysThrows();
}
static bool IsExitTryBoundaryIntoExitBlock(HBasicBlock* block) {
diff --git a/compiler/optimizing/graph_checker_test.cc b/compiler/optimizing/graph_checker_test.cc
index 9ca3e4953a..08bfa5d80f 100644
--- a/compiler/optimizing/graph_checker_test.cc
+++ b/compiler/optimizing/graph_checker_test.cc
@@ -22,7 +22,7 @@ namespace art {
class GraphCheckerTest : public OptimizingUnitTest {
protected:
HGraph* CreateSimpleCFG();
- void TestCode(const uint16_t* data);
+ void TestCode(const std::vector<uint16_t>& data);
};
/**
@@ -48,7 +48,7 @@ HGraph* GraphCheckerTest::CreateSimpleCFG() {
return graph;
}
-void GraphCheckerTest::TestCode(const uint16_t* data) {
+void GraphCheckerTest::TestCode(const std::vector<uint16_t>& data) {
HGraph* graph = CreateCFG(data);
ASSERT_NE(graph, nullptr);
@@ -58,14 +58,14 @@ void GraphCheckerTest::TestCode(const uint16_t* data) {
}
TEST_F(GraphCheckerTest, ReturnVoid) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
TestCode(data);
}
TEST_F(GraphCheckerTest, CFG1) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -73,7 +73,7 @@ TEST_F(GraphCheckerTest, CFG1) {
}
TEST_F(GraphCheckerTest, CFG2) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -83,7 +83,7 @@ TEST_F(GraphCheckerTest, CFG2) {
}
TEST_F(GraphCheckerTest, CFG3) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -128,7 +128,7 @@ TEST_F(GraphCheckerTest, BlockEndingWithNonBranchInstruction) {
TEST_F(GraphCheckerTest, SSAPhi) {
// This code creates one Phi function during the conversion to SSA form.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 0,
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 7a66d807cf..452be6feae 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -392,6 +392,34 @@ ArtMethod* HInliner::TryCHADevirtualization(ArtMethod* resolved_method) {
return single_impl;
}
+static bool AlwaysThrows(ArtMethod* method) {
+ CodeItemDataAccessor accessor(method);
+ // Skip native methods, methods with try blocks, and methods that are too large.
+ if (!accessor.HasCodeItem() ||
+ accessor.TriesSize() != 0 ||
+ accessor.InsnsSizeInCodeUnits() > kMaximumNumberOfTotalInstructions) {
+ return false;
+ }
+ // Scan for exits.
+ bool throw_seen = false;
+ for (const DexInstructionPcPair& pair : accessor) {
+ switch (pair.Inst().Opcode()) {
+ case Instruction::RETURN:
+ case Instruction::RETURN_VOID:
+ case Instruction::RETURN_WIDE:
+ case Instruction::RETURN_OBJECT:
+ case Instruction::RETURN_VOID_NO_BARRIER:
+ return false; // found regular control flow back
+ case Instruction::THROW:
+ throw_seen = true;
+ break;
+ default:
+ break;
+ }
+ }
+ return throw_seen;
+}
+
bool HInliner::TryInline(HInvoke* invoke_instruction) {
if (invoke_instruction->IsInvokeUnresolved() ||
invoke_instruction->IsInvokePolymorphic()) {
@@ -431,20 +459,29 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
}
if (actual_method != nullptr) {
+ // Single target.
bool result = TryInlineAndReplace(invoke_instruction,
actual_method,
ReferenceTypeInfo::CreateInvalid(),
/* do_rtp */ true,
cha_devirtualize);
- if (result && !invoke_instruction->IsInvokeStaticOrDirect()) {
- if (cha_devirtualize) {
- // Add dependency due to devirtulization. We've assumed resolved_method
- // has single implementation.
- outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
- MaybeRecordStat(stats_, MethodCompilationStat::kCHAInline);
- } else {
- MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
+ if (result) {
+ // Successfully inlined.
+ if (!invoke_instruction->IsInvokeStaticOrDirect()) {
+ if (cha_devirtualize) {
+ // Add dependency due to devirtualization. We've assumed resolved_method
+ // has single implementation.
+ outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
+ MaybeRecordStat(stats_, MethodCompilationStat::kCHAInline);
+ } else {
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
+ }
}
+ } else if (!cha_devirtualize && AlwaysThrows(actual_method)) {
+ // Set always throws property for non-inlined method call with single target
+ // (unless it was obtained through CHA, because that would imply we have
+ // to add the CHA dependency, which seems not worth it).
+ invoke_instruction->SetAlwaysThrows(true);
}
return result;
}
@@ -1660,7 +1697,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
const DexFile::CodeItem* code_item = resolved_method->GetCodeItem();
const DexFile& callee_dex_file = *resolved_method->GetDexFile();
uint32_t method_index = resolved_method->GetDexMethodIndex();
- CodeItemDebugInfoAccessor code_item_accessor(&callee_dex_file, code_item);
+ CodeItemDebugInfoAccessor code_item_accessor(resolved_method);
ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
Handle<mirror::DexCache> dex_cache = NewHandleIfDifferent(resolved_method->GetDexCache(),
caller_compilation_unit_.GetDexCache(),
@@ -1968,7 +2005,7 @@ void HInliner::RunOptimizations(HGraph* callee_graph,
return;
}
- CodeItemDataAccessor accessor(&callee_graph->GetDexFile(), code_item);
+ CodeItemDataAccessor accessor(callee_graph->GetDexFile(), code_item);
HInliner inliner(callee_graph,
outermost_graph_,
codegen_,
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 72a93c1f77..64a1eccf60 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -49,7 +49,7 @@ HInstructionBuilder::HInstructionBuilder(HGraph* graph,
const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
- const uint8_t* interpreter_metadata,
+ ArrayRef<const uint8_t> interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
VariableSizedHandleScope* handles,
ScopedArenaAllocator* local_allocator)
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 708a09711a..4428c53277 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_INSTRUCTION_BUILDER_H_
+#include "base/array_ref.h"
#include "base/scoped_arena_allocator.h"
#include "base/scoped_arena_containers.h"
#include "data_type.h"
@@ -57,7 +58,7 @@ class HInstructionBuilder : public ValueObject {
const DexCompilationUnit* outer_compilation_unit,
CompilerDriver* compiler_driver,
CodeGenerator* code_generator,
- const uint8_t* interpreter_metadata,
+ ArrayRef<const uint8_t> interpreter_metadata,
OptimizingCompilerStats* compiler_stats,
VariableSizedHandleScope* handles,
ScopedArenaAllocator* local_allocator);
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index ca1b451e6b..2f8e33f941 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2011,6 +2011,14 @@ void IntrinsicCodeGeneratorARM64::VisitMathAtan2(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickAtan2);
}
+void IntrinsicLocationsBuilderARM64::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitMathPow(HInvoke* invoke) {
+ GenFPToFPCall(invoke, codegen_, kQuickPow);
+}
+
void IntrinsicLocationsBuilderARM64::VisitMathHypot(HInvoke* invoke) {
CreateFPFPToFPCallLocations(allocator_, invoke);
}
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index 99b8b5df74..830d0403e4 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -2811,6 +2811,14 @@ void IntrinsicCodeGeneratorARMVIXL::VisitMathAtan2(HInvoke* invoke) {
GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickAtan2);
}
+void IntrinsicLocationsBuilderARMVIXL::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorARMVIXL::VisitMathPow(HInvoke* invoke) {
+ GenFPFPToFPCall(invoke, GetAssembler(), codegen_, kQuickPow);
+}
+
void IntrinsicLocationsBuilderARMVIXL::VisitMathHypot(HInvoke* invoke) {
CreateFPFPToFPCallLocations(allocator_, invoke);
}
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 113c9de5a2..cafa5228d9 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -2835,6 +2835,15 @@ void IntrinsicCodeGeneratorMIPS::VisitMathAtan2(HInvoke* invoke) {
GenFPFPToFPCall(invoke, codegen_, kQuickAtan2);
}
+// static double java.lang.Math.pow(double a, double b)
+void IntrinsicLocationsBuilderMIPS::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMathPow(HInvoke* invoke) {
+ GenFPFPToFPCall(invoke, codegen_, kQuickPow);
+}
+
// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS::VisitMathCbrt(HInvoke* invoke) {
CreateFPToFPCallLocations(allocator_, invoke);
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 521bad27e2..89f1818be2 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -2416,6 +2416,15 @@ void IntrinsicCodeGeneratorMIPS64::VisitMathAtan2(HInvoke* invoke) {
GenFPFPToFPCall(invoke, codegen_, kQuickAtan2);
}
+// static double java.lang.Math.pow(double a, double b)
+void IntrinsicLocationsBuilderMIPS64::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitMathPow(HInvoke* invoke) {
+ GenFPFPToFPCall(invoke, codegen_, kQuickPow);
+}
+
// static double java.lang.Math.cbrt(double a)
void IntrinsicLocationsBuilderMIPS64::VisitMathCbrt(HInvoke* invoke) {
CreateFPToFPCallLocations(allocator_, invoke);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index baa410b884..46b7f3f1ce 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -1105,6 +1105,14 @@ void IntrinsicCodeGeneratorX86::VisitMathAtan2(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickAtan2);
}
+void IntrinsicLocationsBuilderX86::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86::VisitMathPow(HInvoke* invoke) {
+ GenFPToFPCall(invoke, codegen_, kQuickPow);
+}
+
void IntrinsicLocationsBuilderX86::VisitMathHypot(HInvoke* invoke) {
CreateFPFPToFPCallLocations(allocator_, invoke);
}
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 6dd8b8e1f5..6483b7cb2a 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -897,6 +897,14 @@ void IntrinsicCodeGeneratorX86_64::VisitMathAtan2(HInvoke* invoke) {
GenFPToFPCall(invoke, codegen_, kQuickAtan2);
}
+void IntrinsicLocationsBuilderX86_64::VisitMathPow(HInvoke* invoke) {
+ CreateFPFPToFPCallLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitMathPow(HInvoke* invoke) {
+ GenFPToFPCall(invoke, codegen_, kQuickPow);
+}
+
void IntrinsicLocationsBuilderX86_64::VisitMathHypot(HInvoke* invoke) {
CreateFPFPToFPCallLocations(allocator_, invoke);
}
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 43b63a73ef..9fa5b74c62 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -35,11 +35,12 @@ namespace art {
class LinearizeTest : public OptimizingUnitTest {
protected:
template <size_t number_of_blocks>
- void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]);
+ void TestCode(const std::vector<uint16_t>& data,
+ const uint32_t (&expected_order)[number_of_blocks]);
};
template <size_t number_of_blocks>
-void LinearizeTest::TestCode(const uint16_t* data,
+void LinearizeTest::TestCode(const std::vector<uint16_t>& data,
const uint32_t (&expected_order)[number_of_blocks]) {
HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
@@ -68,7 +69,7 @@ TEST_F(LinearizeTest, CFG1) {
// + / \ +
// Block4 Block8
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 5,
Instruction::IF_EQ, 0xFFFE,
@@ -93,7 +94,7 @@ TEST_F(LinearizeTest, CFG2) {
// + / \ +
// Block5 Block8
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::RETURN_VOID,
@@ -119,7 +120,7 @@ TEST_F(LinearizeTest, CFG3) {
// Block6 + Block9
// | +
// Block4 ++
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::RETURN_VOID,
@@ -149,7 +150,7 @@ TEST_F(LinearizeTest, CFG4) {
// + / \ +
// Block5 Block11
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 7,
Instruction::IF_EQ, 0xFFFE,
@@ -179,7 +180,7 @@ TEST_F(LinearizeTest, CFG5) {
// +/ \ +
// Block6 Block11
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::RETURN_VOID,
@@ -205,7 +206,7 @@ TEST_F(LinearizeTest, CFG6) {
// Block5 <- Block9 Block6 +
// |
// Block7
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x0100,
Instruction::IF_EQ, 0x0004,
@@ -233,7 +234,7 @@ TEST_F(LinearizeTest, CFG7) {
// |
// Block7
//
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x0100,
Instruction::IF_EQ, 0x0005,
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index e45d7c820c..66660662e4 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -31,10 +31,10 @@ namespace art {
class LiveRangesTest : public OptimizingUnitTest {
public:
- HGraph* BuildGraph(const uint16_t* data);
+ HGraph* BuildGraph(const std::vector<uint16_t>& data);
};
-HGraph* LiveRangesTest::BuildGraph(const uint16_t* data) {
+HGraph* LiveRangesTest::BuildGraph(const std::vector<uint16_t>& data) {
HGraph* graph = CreateCFG(data);
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
@@ -57,7 +57,7 @@ TEST_F(LiveRangesTest, CFG1) {
* |
* 12: exit
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
@@ -102,7 +102,7 @@ TEST_F(LiveRangesTest, CFG2) {
* |
* 26: exit
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -151,7 +151,7 @@ TEST_F(LiveRangesTest, CFG3) {
* |
* 28: exit
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -225,7 +225,7 @@ TEST_F(LiveRangesTest, Loop1) {
* 30: exit
*/
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -304,7 +304,7 @@ TEST_F(LiveRangesTest, Loop2) {
* We want to make sure the phi at 10 has a lifetime hole after the add at 20.
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
Instruction::ADD_INT, 0, 0,
@@ -378,7 +378,7 @@ TEST_F(LiveRangesTest, CFG4) {
*
* We want to make sure the constant0 has a lifetime hole after the 16: add.
*/
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::IF_EQ, 5,
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 35bc4ff8b3..6621a03568 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -31,7 +31,7 @@ namespace art {
class LivenessTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data, const char* expected);
+ void TestCode(const std::vector<uint16_t>& data, const char* expected);
};
static void DumpBitVector(BitVector* vector,
@@ -46,7 +46,7 @@ static void DumpBitVector(BitVector* vector,
buffer << ")\n";
}
-void LivenessTest::TestCode(const uint16_t* data, const char* expected) {
+void LivenessTest::TestCode(const std::vector<uint16_t>& data, const char* expected) {
HGraph* graph = CreateCFG(data);
// `Inline` conditions into ifs.
PrepareForRegisterAllocation(graph).Run();
@@ -86,7 +86,7 @@ TEST_F(LivenessTest, CFG1) {
" kill: (0)\n";
// Constant is not used.
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
@@ -108,7 +108,7 @@ TEST_F(LivenessTest, CFG2) {
" live out: (0)\n"
" kill: (0)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
@@ -134,7 +134,7 @@ TEST_F(LivenessTest, CFG3) {
" live out: (000)\n"
" kill: (000)\n";
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT_2ADDR | 1 << 12,
@@ -181,7 +181,7 @@ TEST_F(LivenessTest, CFG4) {
" live out: (0000)\n"
" kill: (0000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -228,7 +228,7 @@ TEST_F(LivenessTest, CFG5) {
" live out: (000)\n"
" kill: (000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -273,7 +273,7 @@ TEST_F(LivenessTest, Loop1) {
" kill: (000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -318,7 +318,7 @@ TEST_F(LivenessTest, Loop3) {
" live out: (0000)\n"
" kill: (0000)\n";
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -370,7 +370,7 @@ TEST_F(LivenessTest, Loop4) {
" live out: (000)\n"
" kill: (000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x500,
Instruction::IF_EQ, 5,
@@ -425,7 +425,7 @@ TEST_F(LivenessTest, Loop5) {
" live out: (0001)\n"
" kill: (0001)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -475,7 +475,7 @@ TEST_F(LivenessTest, Loop6) {
" live out: (0000)\n"
" kill: (0000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 8,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -530,7 +530,7 @@ TEST_F(LivenessTest, Loop7) {
" live out: (00000)\n"
" kill: (00000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 8,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -580,7 +580,7 @@ TEST_F(LivenessTest, Loop8) {
" live out: (000)\n"
" kill: (000)\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 6,
Instruction::ADD_INT, 0, 0,
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 88326d321b..aae94b227c 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -25,6 +25,51 @@
#include <iostream>
+/**
+ * The general algorithm of load-store elimination (LSE).
+ * Load-store analysis in the previous pass collects a list of heap locations
+ * and does alias analysis of those heap locations.
+ * LSE keeps track of a list of heap values corresponding to the heap
+ * locations. It visits basic blocks in reverse post order and for
+ * each basic block, visits instructions sequentially, and processes
+ * instructions as follows:
+ * - If the instruction is a load, and the heap location for that load has a
+ * valid heap value, the load can be eliminated. In order to maintain the
+ * validity of all heap locations during the optimization phase, the real
+ * elimination is delayed till the end of LSE.
+ * - If the instruction is a store, it updates the heap value for the heap
+ * location of the store with the store instruction. The real heap value
+ * can be fetched from the store instruction. Heap values are invalidated
+ * for heap locations that may alias with the store instruction's heap
+ * location. The store instruction can be eliminated unless the value stored
+ * is later needed e.g. by a load from the same/aliased heap location or
+ * the heap location persists at method return/deoptimization.
+ * The store instruction is also needed if it's not used to track the heap
+ * value anymore, e.g. when it fails to merge with the heap values from other
+ * predecessors.
+ * - A store that stores the same value as the heap value is eliminated.
+ * - The list of heap values are merged at basic block entry from the basic
+ * block's predecessors. The algorithm is single-pass, so loop side-effects is
+ * used as best effort to decide if a heap location is stored inside the loop.
+ * - A special type of objects called singletons are instantiated in the method
+ * and have a single name, i.e. no aliases. Singletons have exclusive heap
+ * locations since they have no aliases. Singletons are helpful in narrowing
+ * down the life span of a heap location such that they do not always
+ * need to participate in merging heap values. Allocation of a singleton
+ * can be eliminated if that singleton is not used and does not persist
+ * at method return/deoptimization.
+ * - For newly instantiated instances, their heap values are initialized to
+ * language defined default values.
+ * - Some instructions such as invokes are treated as loading and invalidating
+ * all the heap values, depending on the instruction's side effects.
+ * - Finalizable objects are considered as persisting at method
+ * return/deoptimization.
+ * - Currently this LSE algorithm doesn't handle SIMD graph, e.g. with VecLoad
+ * and VecStore instructions.
+ * - Currently this LSE algorithm doesn't handle graph with try-catch, due to
+ * the special block merging structure.
+ */
+
namespace art {
// An unknown heap value. Loads with such a value in the heap location cannot be eliminated.
@@ -59,8 +104,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
removed_loads_(allocator_.Adapter(kArenaAllocLSE)),
substitute_instructions_for_loads_(allocator_.Adapter(kArenaAllocLSE)),
possibly_removed_stores_(allocator_.Adapter(kArenaAllocLSE)),
- singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)),
- singleton_new_arrays_(allocator_.Adapter(kArenaAllocLSE)) {
+ singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)) {
}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
@@ -88,19 +132,26 @@ class LSEVisitor : public HGraphDelegateVisitor {
return type_conversion;
}
- // Find an instruction's substitute if it should be removed.
+ // Find an instruction's substitute if it's a removed load.
// Return the same instruction if it should not be removed.
HInstruction* FindSubstitute(HInstruction* instruction) {
+ if (!IsLoad(instruction)) {
+ return instruction;
+ }
size_t size = removed_loads_.size();
for (size_t i = 0; i < size; i++) {
if (removed_loads_[i] == instruction) {
- return substitute_instructions_for_loads_[i];
+ HInstruction* substitute = substitute_instructions_for_loads_[i];
+ // The substitute list is a flat hierarchy.
+ DCHECK_EQ(FindSubstitute(substitute), substitute);
+ return substitute;
}
}
return instruction;
}
void AddRemovedLoad(HInstruction* load, HInstruction* heap_value) {
+ DCHECK(IsLoad(load));
DCHECK_EQ(FindSubstitute(heap_value), heap_value) <<
"Unexpected heap_value that has a substitute " << heap_value->DebugName();
removed_loads_.push_back(load);
@@ -207,28 +258,59 @@ class LSEVisitor : public HGraphDelegateVisitor {
new_instance->GetBlock()->RemoveInstruction(new_instance);
}
}
- for (HInstruction* new_array : singleton_new_arrays_) {
- size_t removed = HConstructorFence::RemoveConstructorFences(new_array);
- MaybeRecordStat(stats_,
- MethodCompilationStat::kConstructorFenceRemovedLSE,
- removed);
+ }
- if (!new_array->HasNonEnvironmentUses()) {
- new_array->RemoveEnvironmentUsers();
- new_array->GetBlock()->RemoveInstruction(new_array);
- }
+ private:
+ static bool IsLoad(HInstruction* instruction) {
+ if (instruction == kUnknownHeapValue || instruction == kDefaultHeapValue) {
+ return false;
}
+ // Unresolved load is not treated as a load.
+ return instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsArrayGet();
}
- private:
- // If heap_values[index] is an instance field store, need to keep the store.
- // This is necessary if a heap value is killed due to merging, or loop side
- // effects (which is essentially merging also), since a load later from the
- // location won't be eliminated.
+ static bool IsStore(HInstruction* instruction) {
+ if (instruction == kUnknownHeapValue || instruction == kDefaultHeapValue) {
+ return false;
+ }
+ // Unresolved store is not treated as a store.
+ return instruction->IsInstanceFieldSet() ||
+ instruction->IsArraySet() ||
+ instruction->IsStaticFieldSet();
+ }
+
+ // Returns the real heap value by finding its substitute or by "peeling"
+ // a store instruction.
+ HInstruction* GetRealHeapValue(HInstruction* heap_value) {
+ if (IsLoad(heap_value)) {
+ return FindSubstitute(heap_value);
+ }
+ if (!IsStore(heap_value)) {
+ return heap_value;
+ }
+
+ // We keep track of store instructions as the heap values which might be
+ // eliminated if the stores are later found not necessary. The real stored
+ // value needs to be fetched from the store instruction.
+ if (heap_value->IsInstanceFieldSet()) {
+ heap_value = heap_value->AsInstanceFieldSet()->GetValue();
+ } else if (heap_value->IsStaticFieldSet()) {
+ heap_value = heap_value->AsStaticFieldSet()->GetValue();
+ } else {
+ DCHECK(heap_value->IsArraySet());
+ heap_value = heap_value->AsArraySet()->GetValue();
+ }
+ // heap_value may already be a removed load.
+ return FindSubstitute(heap_value);
+ }
+
+ // If heap_value is a store, need to keep the store.
+ // This is necessary if a heap value is killed or replaced by another value,
+ // so that the store is no longer used to track heap value.
void KeepIfIsStore(HInstruction* heap_value) {
- if (heap_value == kDefaultHeapValue ||
- heap_value == kUnknownHeapValue ||
- !(heap_value->IsInstanceFieldSet() || heap_value->IsArraySet())) {
+ if (!IsStore(heap_value)) {
return;
}
auto idx = std::find(possibly_removed_stores_.begin(),
@@ -239,26 +321,41 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
+ // If a heap location X may alias with heap location at `loc_index`
+ // and heap_values of that heap location X holds a store, keep that store.
+ // It's needed for a dependent load that's not eliminated since any store
+ // that may put value into the load's heap location needs to be kept.
+ void KeepStoresIfAliasedToLocation(ScopedArenaVector<HInstruction*>& heap_values,
+ size_t loc_index) {
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ if ((i == loc_index) || heap_location_collector_.MayAlias(i, loc_index)) {
+ KeepIfIsStore(heap_values[i]);
+ }
+ }
+ }
+
void HandleLoopSideEffects(HBasicBlock* block) {
DCHECK(block->IsLoopHeader());
int block_id = block->GetBlockId();
ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[block_id];
+ HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
+ ScopedArenaVector<HInstruction*>& pre_header_heap_values =
+ heap_values_for_[pre_header->GetBlockId()];
- // Don't eliminate loads in irreducible loops. This is safe for singletons, because
- // they are always used by the non-eliminated loop-phi.
+ // Don't eliminate loads in irreducible loops.
+ // Also keep the stores before the loop.
if (block->GetLoopInformation()->IsIrreducible()) {
if (kIsDebugBuild) {
for (size_t i = 0; i < heap_values.size(); i++) {
DCHECK_EQ(heap_values[i], kUnknownHeapValue);
}
}
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ KeepIfIsStore(pre_header_heap_values[i]);
+ }
return;
}
- HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
- ScopedArenaVector<HInstruction*>& pre_header_heap_values =
- heap_values_for_[pre_header->GetBlockId()];
-
// Inherit the values from pre-header.
for (size_t i = 0; i < heap_values.size(); i++) {
heap_values[i] = pre_header_heap_values[i];
@@ -270,18 +367,17 @@ class LSEVisitor : public HGraphDelegateVisitor {
for (size_t i = 0; i < heap_values.size(); i++) {
HeapLocation* location = heap_location_collector_.GetHeapLocation(i);
ReferenceInfo* ref_info = location->GetReferenceInfo();
- if (ref_info->IsSingletonAndRemovable() &&
- !location->IsValueKilledByLoopSideEffects()) {
- // A removable singleton's field that's not stored into inside a loop is
+ if (ref_info->IsSingleton() && !location->IsValueKilledByLoopSideEffects()) {
+ // A singleton's field that's not stored into inside a loop is
// invariant throughout the loop. Nothing to do.
} else {
- // heap value is killed by loop side effects (stored into directly, or
- // due to aliasing). Or the heap value may be needed after method return
- // or deoptimization.
+ // heap value is killed by loop side effects.
KeepIfIsStore(pre_header_heap_values[i]);
heap_values[i] = kUnknownHeapValue;
}
}
+ } else {
+ // The loop doesn't kill any value.
}
}
@@ -300,45 +396,73 @@ class LSEVisitor : public HGraphDelegateVisitor {
ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
for (size_t i = 0; i < heap_values.size(); i++) {
HInstruction* merged_value = nullptr;
+ // If we can merge the store itself from the predecessors, we keep
+ // the store as the heap value as long as possible. In case we cannot
+ // merge the store, we try to merge the values of the stores.
+ HInstruction* merged_store_value = nullptr;
// Whether merged_value is a result that's merged from all predecessors.
bool from_all_predecessors = true;
ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
+ HInstruction* ref = ref_info->GetReference();
HInstruction* singleton_ref = nullptr;
if (ref_info->IsSingleton()) {
- // We do more analysis of liveness when merging heap values for such
- // cases since stores into such references may potentially be eliminated.
- singleton_ref = ref_info->GetReference();
+ // We do more analysis based on singleton's liveness when merging
+ // heap values for such cases.
+ singleton_ref = ref;
}
for (HBasicBlock* predecessor : predecessors) {
HInstruction* pred_value = heap_values_for_[predecessor->GetBlockId()][i];
+ if (!IsStore(pred_value)) {
+ pred_value = FindSubstitute(pred_value);
+ }
+ DCHECK(pred_value != nullptr);
+ HInstruction* pred_store_value = GetRealHeapValue(pred_value);
if ((singleton_ref != nullptr) &&
!singleton_ref->GetBlock()->Dominates(predecessor)) {
- // singleton_ref is not live in this predecessor. Skip this predecessor since
- // it does not really have the location.
+ // singleton_ref is not live in this predecessor. No need to merge
+ // since singleton_ref is not live at the beginning of this block.
DCHECK_EQ(pred_value, kUnknownHeapValue);
from_all_predecessors = false;
- continue;
+ break;
}
if (merged_value == nullptr) {
// First seen heap value.
+ DCHECK(pred_value != nullptr);
merged_value = pred_value;
} else if (pred_value != merged_value) {
// There are conflicting values.
merged_value = kUnknownHeapValue;
+ // We may still be able to merge store values.
+ }
+
+ // Conflicting stores may be storing the same value. We do another merge
+ // of real stored values.
+ if (merged_store_value == nullptr) {
+ // First seen store value.
+ DCHECK(pred_store_value != nullptr);
+ merged_store_value = pred_store_value;
+ } else if (pred_store_value != merged_store_value) {
+ // There are conflicting store values.
+ merged_store_value = kUnknownHeapValue;
+ // There must be conflicting stores also.
+ DCHECK_EQ(merged_value, kUnknownHeapValue);
+ // No need to merge anymore.
break;
}
}
- if (ref_info->IsSingleton()) {
- if (ref_info->IsSingletonAndNonRemovable() ||
- (merged_value == kUnknownHeapValue &&
- !block->IsSingleReturnOrReturnVoidAllowingPhis())) {
- // The heap value may be needed after method return or deoptimization,
- // or there are conflicting heap values from different predecessors and
- // this block is not a single return,
- // keep the last store in each predecessor since future loads may not
- // be eliminated.
+ if (merged_value == nullptr) {
+ DCHECK(!from_all_predecessors);
+ DCHECK(singleton_ref != nullptr);
+ }
+ if (from_all_predecessors) {
+ if (ref_info->IsSingletonAndRemovable() &&
+ block->IsSingleReturnOrReturnVoidAllowingPhis()) {
+ // Values in the singleton are not needed anymore.
+ } else if (!IsStore(merged_value)) {
+ // We don't track merged value as a store anymore. We have to
+ // hold the stores in predecessors live here.
for (HBasicBlock* predecessor : predecessors) {
ScopedArenaVector<HInstruction*>& pred_values =
heap_values_for_[predecessor->GetBlockId()];
@@ -346,18 +470,33 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
} else {
- // Currenctly we don't eliminate stores to non-singletons.
+ DCHECK(singleton_ref != nullptr);
+ // singleton_ref is non-existing at the beginning of the block. There is
+ // no need to keep the stores.
}
- if ((merged_value == nullptr) || !from_all_predecessors) {
+ if (!from_all_predecessors) {
DCHECK(singleton_ref != nullptr);
DCHECK((singleton_ref->GetBlock() == block) ||
- !singleton_ref->GetBlock()->Dominates(block));
+ !singleton_ref->GetBlock()->Dominates(block))
+ << "method: " << GetGraph()->GetMethodName();
// singleton_ref is not defined before block or defined only in some of its
// predecessors, so block doesn't really have the location at its entry.
heap_values[i] = kUnknownHeapValue;
- } else {
+ } else if (predecessors.size() == 1) {
+ // Inherit heap value from the single predecessor.
+ DCHECK_EQ(heap_values_for_[predecessors[0]->GetBlockId()][i], merged_value);
heap_values[i] = merged_value;
+ } else {
+ DCHECK(merged_value == kUnknownHeapValue ||
+ merged_value == kDefaultHeapValue ||
+ merged_value->GetBlock()->Dominates(block));
+ if (merged_value != kUnknownHeapValue) {
+ heap_values[i] = merged_value;
+ } else {
+ // Stores in different predecessors may be storing the same value.
+ heap_values[i] = merged_store_value;
+ }
}
}
}
@@ -423,23 +562,12 @@ class LSEVisitor : public HGraphDelegateVisitor {
heap_values[idx] = constant;
return;
}
- if (heap_value != kUnknownHeapValue) {
- if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
- HInstruction* store = heap_value;
- // This load must be from a singleton since it's from the same
- // field/element that a "removed" store puts the value. That store
- // must be to a singleton's field/element.
- DCHECK(ref_info->IsSingleton());
- // Get the real heap value of the store.
- heap_value = heap_value->IsInstanceFieldSet() ? store->InputAt(1) : store->InputAt(2);
- // heap_value may already have a substitute.
- heap_value = FindSubstitute(heap_value);
- }
- }
+ heap_value = GetRealHeapValue(heap_value);
if (heap_value == kUnknownHeapValue) {
// Load isn't eliminated. Put the load as the value into the HeapLocation.
// This acts like GVN but with better aliasing analysis.
heap_values[idx] = instruction;
+ KeepStoresIfAliasedToLocation(heap_values, idx);
} else {
if (DataType::Kind(heap_value->GetType()) != DataType::Kind(instruction->GetType())) {
// The only situation where the same heap location has different type is when
@@ -452,6 +580,10 @@ class LSEVisitor : public HGraphDelegateVisitor {
DCHECK(heap_value->IsArrayGet()) << heap_value->DebugName();
DCHECK(instruction->IsArrayGet()) << instruction->DebugName();
}
+ // Load isn't eliminated. Put the load as the value into the HeapLocation.
+ // This acts like GVN but with better aliasing analysis.
+ heap_values[idx] = instruction;
+ KeepStoresIfAliasedToLocation(heap_values, idx);
return;
}
AddRemovedLoad(instruction, heap_value);
@@ -460,12 +592,21 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
bool Equal(HInstruction* heap_value, HInstruction* value) {
+ DCHECK(!IsStore(value)) << value->DebugName();
+ if (heap_value == kUnknownHeapValue) {
+ // Don't compare kUnknownHeapValue with other values.
+ return false;
+ }
if (heap_value == value) {
return true;
}
if (heap_value == kDefaultHeapValue && GetDefaultValue(value->GetType()) == value) {
return true;
}
+ HInstruction* real_heap_value = GetRealHeapValue(heap_value);
+ if (real_heap_value != heap_value) {
+ return Equal(real_heap_value, value);
+ }
return false;
}
@@ -476,6 +617,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
size_t vector_length,
int16_t declaring_class_def_index,
HInstruction* value) {
+ DCHECK(!IsStore(value)) << value->DebugName();
// value may already have a substitute.
value = FindSubstitute(value);
HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
@@ -486,59 +628,47 @@ class LSEVisitor : public HGraphDelegateVisitor {
ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
HInstruction* heap_value = heap_values[idx];
- bool same_value = false;
bool possibly_redundant = false;
+
if (Equal(heap_value, value)) {
// Store into the heap location with the same value.
- same_value = true;
- } else if (index != nullptr &&
- heap_location_collector_.GetHeapLocation(idx)->HasAliasedLocations()) {
- // For array element, don't eliminate stores if the location can be aliased
- // (due to either ref or index aliasing).
- } else if (ref_info->IsSingleton()) {
- // Store into a field/element of a singleton. The value cannot be killed due to
- // aliasing/invocation. It can be redundant since future loads can
- // directly get the value set by this instruction. The value can still be killed due to
- // merging or loop side effects. Stores whose values are killed due to merging/loop side
- // effects later will be removed from possibly_removed_stores_ when that is detected.
- // Stores whose values may be needed after method return or deoptimization
- // are also removed from possibly_removed_stores_ when that is detected.
- possibly_redundant = true;
+ // This store can be eliminated right away.
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ } else {
HLoopInformation* loop_info = instruction->GetBlock()->GetLoopInformation();
- if (loop_info != nullptr) {
- // instruction is a store in the loop so the loop must does write.
+ if (loop_info == nullptr) {
+ // Store is not in a loop. We try to precisely track the heap value by
+ // the store.
+ possibly_redundant = true;
+ } else if (!loop_info->IsIrreducible()) {
+ // instruction is a store in the loop so the loop must do write.
DCHECK(side_effects_.GetLoopEffects(loop_info->GetHeader()).DoesAnyWrite());
-
- if (loop_info->IsDefinedOutOfTheLoop(original_ref)) {
- DCHECK(original_ref->GetBlock()->Dominates(loop_info->GetPreHeader()));
- // Keep the store since its value may be needed at the loop header.
- possibly_redundant = false;
- } else {
- // The singleton is created inside the loop. Value stored to it isn't needed at
+ if (ref_info->IsSingleton() && !loop_info->IsDefinedOutOfTheLoop(original_ref)) {
+ // original_ref is created inside the loop. Value stored to it isn't needed at
// the loop header. This is true for outer loops also.
+ possibly_redundant = true;
+ } else {
+ // Keep the store since its value may be needed at the loop header.
}
+ } else {
+ // Keep the store inside irreducible loops.
}
}
- if (same_value || possibly_redundant) {
+ if (possibly_redundant) {
possibly_removed_stores_.push_back(instruction);
}
- if (!same_value) {
- if (possibly_redundant) {
- DCHECK(instruction->IsInstanceFieldSet() || instruction->IsArraySet());
- // Put the store as the heap value. If the value is loaded from heap
- // by a load later, this store isn't really redundant.
- heap_values[idx] = instruction;
- } else {
- heap_values[idx] = value;
- }
- }
+ // Put the store as the heap value. If the value is loaded or needed after
+ // return/deoptimization later, this store isn't really redundant.
+ heap_values[idx] = instruction;
+
// This store may kill values in other heap locations due to aliasing.
for (size_t i = 0; i < heap_values.size(); i++) {
if (i == idx) {
continue;
}
- if (heap_values[i] == value) {
+ if (Equal(heap_values[i], value)) {
// Same value should be kept even if aliasing happens.
continue;
}
@@ -547,7 +677,9 @@ class LSEVisitor : public HGraphDelegateVisitor {
continue;
}
if (heap_location_collector_.MayAlias(i, idx)) {
- // Kill heap locations that may alias.
+ // Kill heap locations that may alias and as a result if the heap value
+ // is a store, the store needs to be kept.
+ KeepIfIsStore(heap_values[i]);
heap_values[i] = kUnknownHeapValue;
}
}
@@ -633,24 +765,35 @@ class LSEVisitor : public HGraphDelegateVisitor {
const ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
for (HInstruction* heap_value : heap_values) {
- // Filter out fake instructions before checking instruction kind below.
- if (heap_value == kUnknownHeapValue || heap_value == kDefaultHeapValue) {
- continue;
- }
// A store is kept as the heap value for possibly removed stores.
- if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
- // Check whether the reference for a store is used by an environment local of
- // HDeoptimize.
+ // The value stored is generally observable after deoptimization, except
+ // for singletons that don't escape after deoptimization.
+ if (IsStore(heap_value)) {
+ if (heap_value->IsStaticFieldSet()) {
+ KeepIfIsStore(heap_value);
+ continue;
+ }
HInstruction* reference = heap_value->InputAt(0);
- DCHECK(heap_location_collector_.FindReferenceInfoOf(reference)->IsSingleton());
- for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
- HEnvironment* user = use.GetUser();
- if (user->GetHolder() == instruction) {
- // The singleton for the store is visible at this deoptimization
- // point. Need to keep the store so that the heap value is
- // seen by the interpreter.
+ if (heap_location_collector_.FindReferenceInfoOf(reference)->IsSingleton()) {
+ if (reference->IsNewInstance() && reference->AsNewInstance()->IsFinalizable()) {
+ // Finalizable objects always escape.
KeepIfIsStore(heap_value);
+ continue;
}
+ // Check whether the reference for a store is used by an environment local of
+ // HDeoptimize. If not, the singleton is not observed after
+ // deoptimization.
+ for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
+ HEnvironment* user = use.GetUser();
+ if (user->GetHolder() == instruction) {
+ // The singleton for the store is visible at this deoptimization
+ // point. Need to keep the store so that the heap value is
+ // seen by the interpreter.
+ KeepIfIsStore(heap_value);
+ }
+ }
+ } else {
+ KeepIfIsStore(heap_value);
}
}
}
@@ -758,7 +901,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
return;
}
if (ref_info->IsSingletonAndRemovable()) {
- singleton_new_arrays_.push_back(new_array);
+ singleton_new_instances_.push_back(new_array);
}
ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[new_array->GetBlock()->GetBlockId()];
@@ -791,7 +934,6 @@ class LSEVisitor : public HGraphDelegateVisitor {
ScopedArenaVector<HInstruction*> possibly_removed_stores_;
ScopedArenaVector<HInstruction*> singleton_new_instances_;
- ScopedArenaVector<HInstruction*> singleton_new_arrays_;
DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
};
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 180893dc86..66fca36f91 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2040,6 +2040,10 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
// TODO: We should rename to CanVisiblyThrow, as some instructions (like HNewInstance),
// could throw OOME, but it is still OK to remove them if they are unused.
virtual bool CanThrow() const { return false; }
+
+ // Does the instruction always throw an exception unconditionally?
+ virtual bool AlwaysThrows() const { return false; }
+
bool CanThrowIntoCatchBlock() const { return CanThrow() && block_->IsTryBlock(); }
bool HasSideEffects() const { return side_effects_.HasSideEffects(); }
@@ -4191,6 +4195,10 @@ class HInvoke : public HVariableInputSizeInstruction {
bool CanThrow() const OVERRIDE { return GetPackedFlag<kFlagCanThrow>(); }
+ void SetAlwaysThrows(bool always_throws) { SetPackedFlag<kFlagAlwaysThrows>(always_throws); }
+
+ bool AlwaysThrows() const OVERRIDE { return GetPackedFlag<kFlagAlwaysThrows>(); }
+
bool CanBeMoved() const OVERRIDE { return IsIntrinsic() && !DoesAnyWrite(); }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
@@ -4221,7 +4229,8 @@ class HInvoke : public HVariableInputSizeInstruction {
static constexpr size_t kFieldReturnTypeSize =
MinimumBitsToStore(static_cast<size_t>(DataType::Type::kLast));
static constexpr size_t kFlagCanThrow = kFieldReturnType + kFieldReturnTypeSize;
- static constexpr size_t kNumberOfInvokePackedBits = kFlagCanThrow + 1;
+ static constexpr size_t kFlagAlwaysThrows = kFlagCanThrow + 1;
+ static constexpr size_t kNumberOfInvokePackedBits = kFlagAlwaysThrows + 1;
static_assert(kNumberOfInvokePackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using InvokeTypeField = BitField<InvokeType, kFieldInvokeType, kFieldInvokeTypeSize>;
using ReturnTypeField = BitField<DataType::Type, kFieldReturnType, kFieldReturnTypeSize>;
@@ -6597,6 +6606,8 @@ class HThrow FINAL : public HTemplateInstruction<1> {
bool CanThrow() const OVERRIDE { return true; }
+ bool AlwaysThrows() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(Throw);
protected:
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 92b427cafa..57db7a634c 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -242,7 +242,7 @@ ArenaVector<HOptimization*> ConstructOptimizations(
opt = new (allocator) HDeadCodeElimination(graph, stats, name);
break;
case OptimizationPass::kInliner: {
- CodeItemDataAccessor accessor(dex_compilation_unit.GetDexFile(),
+ CodeItemDataAccessor accessor(*dex_compilation_unit.GetDexFile(),
dex_compilation_unit.GetCodeItem());
opt = new (allocator) HInliner(graph, // outer_graph
graph, // outermost_graph
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index e2b2106f65..d20b681b49 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -41,7 +41,7 @@ namespace art {
// Run the tests only on host.
#ifndef ART_TARGET_ANDROID
-class OptimizingCFITest : public CFITest {
+class OptimizingCFITest : public CFITest, public OptimizingUnitTestHelper {
public:
// Enable this flag to generate the expected outputs.
static constexpr bool kGenerateExpected = false;
@@ -63,7 +63,7 @@ class OptimizingCFITest : public CFITest {
// Setup simple context.
std::string error;
isa_features_ = InstructionSetFeatures::FromVariant(isa, "default", &error);
- graph_ = CreateGraph(&pool_and_allocator_);
+ graph_ = CreateGraph();
// Generate simple frame with some spills.
code_gen_ = CodeGenerator::Create(graph_, isa, *isa_features_, opts_);
code_gen_->GetAssembler()->cfi().SetEnabled(true);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index b64f82caee..c35c490118 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -382,6 +382,8 @@ class OptimizingCompiler FINAL : public Compiler {
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const;
+ void GenerateJitDebugInfo(debug::MethodDebugInfo method_debug_info);
+
std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
std::unique_ptr<std::ostream> visualizer_output_;
@@ -766,13 +768,13 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
static constexpr size_t kSpaceFilterOptimizingThreshold = 128;
const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
- && (CodeItemInstructionAccessor(&dex_file, code_item).InsnsSizeInCodeUnits() >
+ && (CodeItemInstructionAccessor(dex_file, code_item).InsnsSizeInCodeUnits() >
kSpaceFilterOptimizingThreshold)) {
MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledSpaceFilter);
return nullptr;
}
- CodeItemDebugInfoAccessor code_item_accessor(&dex_file, code_item);
+ CodeItemDebugInfoAccessor code_item_accessor(dex_file, code_item, method_idx);
HGraph* graph = new (allocator) HGraph(
allocator,
arena_stack,
@@ -783,7 +785,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
compiler_driver->GetCompilerOptions().GetDebuggable(),
osr);
- const uint8_t* interpreter_metadata = nullptr;
+ ArrayRef<const uint8_t> interpreter_metadata;
// For AOT compilation, we may not get a method, for example if its class is erroneous.
// JIT should always have a method.
DCHECK(Runtime::Current()->IsAotCompiler() || method != nullptr);
@@ -940,7 +942,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
compiler_driver,
codegen.get(),
compilation_stats_.get(),
- /* interpreter_metadata */ nullptr,
+ /* interpreter_metadata */ ArrayRef<const uint8_t>(),
handles);
builder.BuildIntrinsicGraph(method);
}
@@ -1230,7 +1232,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
debug::MethodDebugInfo info = {};
- DCHECK(info.trampoline_name.empty());
+ DCHECK(info.custom_name.empty());
info.dex_file = dex_file;
info.class_def_index = class_def_idx;
info.dex_method_index = method_idx;
@@ -1246,14 +1248,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
info.code_info = nullptr;
info.cfi = jni_compiled_method.GetCfi();
- // If both flags are passed, generate full debug info.
- const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
- std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
- GetCompilerDriver()->GetInstructionSet(),
- GetCompilerDriver()->GetInstructionSetFeatures(),
- mini_debug_info,
- info);
- CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
+ GenerateJitDebugInfo(info);
}
Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
@@ -1361,7 +1356,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
debug::MethodDebugInfo info = {};
- DCHECK(info.trampoline_name.empty());
+ DCHECK(info.custom_name.empty());
info.dex_file = dex_file;
info.class_def_index = class_def_idx;
info.dex_method_index = method_idx;
@@ -1377,14 +1372,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
info.code_info = stack_map_size == 0 ? nullptr : stack_map_data;
info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
- // If both flags are passed, generate full debug info.
- const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
- std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
- GetCompilerDriver()->GetInstructionSet(),
- GetCompilerDriver()->GetInstructionSetFeatures(),
- mini_debug_info,
- info);
- CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
+ GenerateJitDebugInfo(info);
}
Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
@@ -1408,4 +1396,22 @@ bool OptimizingCompiler::JitCompile(Thread* self,
return true;
}
+void OptimizingCompiler::GenerateJitDebugInfo(debug::MethodDebugInfo info) {
+ const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
+ DCHECK(compiler_options.GenerateAnyDebugInfo());
+
+ // If both flags are passed, generate full debug info.
+ const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
+
+ // Create entry for the single method that we just compiled.
+ std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
+ GetCompilerDriver()->GetInstructionSet(),
+ GetCompilerDriver()->GetInstructionSetFeatures(),
+ mini_debug_info,
+ ArrayRef<const debug::MethodDebugInfo>(&info, 1));
+ MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ JITCodeEntry* entry = CreateJITCodeEntry(elf_file);
+ IncrementJITCodeEntryRefcount(entry, info.code_address);
+}
+
} // namespace art
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 32a94ab5e4..0023265e50 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -75,6 +75,7 @@ enum class MethodCompilationStat {
kImplicitNullCheckGenerated,
kExplicitNullCheckGenerated,
kSimplifyIf,
+ kSimplifyThrowingInvoke,
kInstructionSunk,
kNotInlinedUnresolvedEntrypoint,
kNotInlinedDexCache,
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 661abb125c..6dcbadba6e 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -17,12 +17,16 @@
#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
#define ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
+#include <memory>
+#include <vector>
+
#include "base/scoped_arena_allocator.h"
#include "builder.h"
#include "common_compiler_test.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_instruction.h"
+#include "dex/standard_dex_file.h"
#include "driver/dex_compilation_unit.h"
#include "handle_scope-inl.h"
#include "mirror/class_loader.h"
@@ -99,18 +103,11 @@ class ArenaPoolAndAllocator {
ScopedArenaAllocator scoped_allocator_;
};
-inline HGraph* CreateGraph(ArenaPoolAndAllocator* pool_and_allocator) {
- return new (pool_and_allocator->GetAllocator()) HGraph(
- pool_and_allocator->GetAllocator(),
- pool_and_allocator->GetArenaStack(),
- *reinterpret_cast<DexFile*>(pool_and_allocator->GetAllocator()->Alloc(sizeof(DexFile))),
- /*method_idx*/-1,
- kRuntimeISA);
-}
-
-class OptimizingUnitTest : public CommonCompilerTest {
- protected:
- OptimizingUnitTest() : pool_and_allocator_(new ArenaPoolAndAllocator()) { }
+// Have a separate helper so the OptimizingCFITest can inherit it without causing
+// multiple inheritance errors from having gtest as a parent twice.
+class OptimizingUnitTestHelper {
+ public:
+ OptimizingUnitTestHelper() : pool_and_allocator_(new ArenaPoolAndAllocator()) { }
ArenaAllocator* GetAllocator() { return pool_and_allocator_->GetAllocator(); }
ArenaStack* GetArenaStack() { return pool_and_allocator_->GetArenaStack(); }
@@ -122,14 +119,42 @@ class OptimizingUnitTest : public CommonCompilerTest {
}
HGraph* CreateGraph() {
- return art::CreateGraph(pool_and_allocator_.get());
+ ArenaAllocator* const allocator = pool_and_allocator_->GetAllocator();
+
+ // Reserve a big array of 0s so the dex file constructor can compute offsets from the header.
+ static constexpr size_t kDexDataSize = 4 * KB;
+ const uint8_t* dex_data = reinterpret_cast<uint8_t*>(allocator->Alloc(kDexDataSize));
+
+ // Create the dex file based on the fake data. Call the constructor so that we can use virtual
+ // functions. Don't use the arena for the StandardDexFile otherwise the dex location leaks.
+ dex_files_.emplace_back(new StandardDexFile(
+ dex_data,
+ sizeof(StandardDexFile::Header),
+ "no_location",
+ /*location_checksum*/ 0,
+ /*oat_dex_file*/ nullptr,
+ /*container*/ nullptr));
+
+ return new (allocator) HGraph(
+ allocator,
+ pool_and_allocator_->GetArenaStack(),
+ *dex_files_.back(),
+ /*method_idx*/-1,
+ kRuntimeISA);
}
// Create a control-flow graph from Dex instructions.
- HGraph* CreateCFG(const uint16_t* data, DataType::Type return_type = DataType::Type::kInt32) {
- const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ HGraph* CreateCFG(const std::vector<uint16_t>& data,
+ DataType::Type return_type = DataType::Type::kInt32) {
HGraph* graph = CreateGraph();
+ // The code item data might not be aligned to 4 bytes; copy it to ensure that.
+ const size_t code_item_size = data.size() * sizeof(data.front());
+ void* aligned_data = GetAllocator()->Alloc(code_item_size);
+ memcpy(aligned_data, &data[0], code_item_size);
+ CHECK_ALIGNED(aligned_data, StandardDexFile::CodeItem::kAlignment);
+ const DexFile::CodeItem* code_item = reinterpret_cast<const DexFile::CodeItem*>(aligned_data);
+
{
ScopedObjectAccess soa(Thread::Current());
if (handles_ == nullptr) {
@@ -146,7 +171,7 @@ class OptimizingUnitTest : public CommonCompilerTest {
/* access_flags */ 0u,
/* verified_method */ nullptr,
handles_->NewHandle<mirror::DexCache>(nullptr));
- CodeItemDebugInfoAccessor accessor(&graph->GetDexFile(), code_item);
+ CodeItemDebugInfoAccessor accessor(graph->GetDexFile(), code_item, /*dex_method_idx*/ 0u);
HGraphBuilder builder(graph, dex_compilation_unit, accessor, handles_.get(), return_type);
bool graph_built = (builder.BuildGraph() == kAnalysisSuccess);
return graph_built ? graph : nullptr;
@@ -154,10 +179,13 @@ class OptimizingUnitTest : public CommonCompilerTest {
}
private:
+ std::vector<std::unique_ptr<const StandardDexFile>> dex_files_;
std::unique_ptr<ArenaPoolAndAllocator> pool_and_allocator_;
std::unique_ptr<VariableSizedHandleScope> handles_;
};
+class OptimizingUnitTest : public CommonCompilerTest, public OptimizingUnitTestHelper {};
+
// Naive string diff data type.
typedef std::list<std::pair<std::string, std::string>> diff_t;
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 4fc7fe9427..6ef386b4a5 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -29,10 +29,10 @@ namespace art {
class PrettyPrinterTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data, const char* expected);
+ void TestCode(const std::vector<uint16_t>& data, const char* expected);
};
-void PrettyPrinterTest::TestCode(const uint16_t* data, const char* expected) {
+void PrettyPrinterTest::TestCode(const std::vector<uint16_t>& data, const char* expected) {
HGraph* graph = CreateCFG(data);
StringPrettyPrinter printer(graph);
printer.VisitInsertionOrder();
@@ -40,7 +40,7 @@ void PrettyPrinterTest::TestCode(const uint16_t* data, const char* expected) {
}
TEST_F(PrettyPrinterTest, ReturnVoid) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID);
const char* expected =
@@ -67,7 +67,7 @@ TEST_F(PrettyPrinterTest, CFG1) {
"BasicBlock 3, pred: 2\n"
" 4: Exit\n";
- const uint16_t data[] =
+ const std::vector<uint16_t> data =
ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -89,7 +89,7 @@ TEST_F(PrettyPrinterTest, CFG2) {
"BasicBlock 4, pred: 3\n"
" 5: Exit\n";
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -111,21 +111,21 @@ TEST_F(PrettyPrinterTest, CFG3) {
"BasicBlock 4, pred: 2\n"
" 5: Exit\n";
- const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200,
Instruction::RETURN_VOID,
Instruction::GOTO | 0xFF00);
TestCode(data1, expected);
- const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_16, 3,
Instruction::RETURN_VOID,
Instruction::GOTO_16, 0xFFFF);
TestCode(data2, expected);
- const uint16_t data3[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data3 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 4, 0,
Instruction::RETURN_VOID,
Instruction::GOTO_32, 0xFFFF, 0xFFFF);
@@ -144,13 +144,13 @@ TEST_F(PrettyPrinterTest, CFG4) {
"BasicBlock 3, pred: 0, succ: 1\n"
" 0: Goto 1\n";
- const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data1 = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
Instruction::GOTO | 0xFF00);
TestCode(data1, expected);
- const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data2 = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 0, 0);
TestCode(data2, expected);
@@ -166,7 +166,7 @@ TEST_F(PrettyPrinterTest, CFG5) {
"BasicBlock 3, pred: 1\n"
" 3: Exit\n";
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID,
Instruction::GOTO | 0x100,
Instruction::GOTO | 0xFE00);
@@ -192,7 +192,7 @@ TEST_F(PrettyPrinterTest, CFG6) {
"BasicBlock 5, pred: 1, succ: 3\n"
" 0: Goto 3\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -220,7 +220,7 @@ TEST_F(PrettyPrinterTest, CFG7) {
"BasicBlock 6, pred: 1, succ: 2\n"
" 1: Goto 2\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -240,7 +240,7 @@ TEST_F(PrettyPrinterTest, IntConstant) {
"BasicBlock 2, pred: 1\n"
" 4: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 3748d599a3..a70b0664dc 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -46,7 +46,7 @@ class RegisterAllocatorTest : public OptimizingUnitTest {
void ExpectedInRegisterHint(Strategy strategy);
// Helper functions that make use of the OptimizingUnitTest's members.
- bool Check(const uint16_t* data, Strategy strategy);
+ bool Check(const std::vector<uint16_t>& data, Strategy strategy);
void CFG1(Strategy strategy);
void Loop1(Strategy strategy);
void Loop2(Strategy strategy);
@@ -79,7 +79,7 @@ TEST_F(RegisterAllocatorTest, test_name##_GraphColor) {\
test_name(Strategy::kRegisterAllocatorGraphColor);\
}
-bool RegisterAllocatorTest::Check(const uint16_t* data, Strategy strategy) {
+bool RegisterAllocatorTest::Check(const std::vector<uint16_t>& data, Strategy strategy) {
HGraph* graph = CreateCFG(data);
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
@@ -185,7 +185,7 @@ void RegisterAllocatorTest::CFG1(Strategy strategy) {
* |
* exit
*/
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
@@ -222,7 +222,7 @@ void RegisterAllocatorTest::Loop1(Strategy strategy) {
* exit
*/
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -268,7 +268,7 @@ void RegisterAllocatorTest::Loop2(Strategy strategy) {
* exit
*/
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 8 << 12 | 1 << 8,
Instruction::IF_EQ | 1 << 8, 7,
@@ -314,7 +314,7 @@ void RegisterAllocatorTest::Loop3(Strategy strategy) {
* exit
*/
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::ADD_INT_LIT8 | 1 << 8, 1 << 8,
Instruction::CONST_4 | 5 << 12 | 2 << 8,
@@ -351,7 +351,7 @@ void RegisterAllocatorTest::Loop3(Strategy strategy) {
TEST_ALL_STRATEGIES(Loop3);
TEST_F(RegisterAllocatorTest, FirstRegisterUse) {
- const uint16_t data[] = THREE_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = THREE_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::XOR_INT_LIT8 | 1 << 8, 1 << 8,
Instruction::XOR_INT_LIT8 | 0 << 8, 1 << 8,
@@ -402,7 +402,7 @@ void RegisterAllocatorTest::DeadPhi(Strategy strategy) {
* } while (true);
*/
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 0,
Instruction::IF_NE | 1 << 8 | 1 << 12, 3,
@@ -432,7 +432,7 @@ TEST_ALL_STRATEGIES(DeadPhi);
* This test only applies to the linear scan allocator.
*/
TEST_F(RegisterAllocatorTest, FreeUntil) {
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN);
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index b3c8f105d1..8dcadaad2e 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -330,10 +330,12 @@ bool SchedulingLatencyVisitorARM::CanGenerateTest(HCondition* condition) {
}
} else if (c == kCondLE || c == kCondGT) {
if (value < std::numeric_limits<int64_t>::max() &&
- !codegen_->GetAssembler()->ShifterOperandCanHold(SBC, High32Bits(value + 1), kCcSet)) {
+ !codegen_->GetAssembler()->ShifterOperandCanHold(
+ SBC, High32Bits(value + 1), vixl32::FlagsUpdate::SetFlags)) {
return false;
}
- } else if (!codegen_->GetAssembler()->ShifterOperandCanHold(SBC, High32Bits(value), kCcSet)) {
+ } else if (!codegen_->GetAssembler()->ShifterOperandCanHold(
+ SBC, High32Bits(value), vixl32::FlagsUpdate::SetFlags)) {
return false;
}
}
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 104ebc79c2..fb15fc8975 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -182,7 +182,9 @@ class SchedulerTest : public OptimizingUnitTest {
scheduler->Schedule(graph_);
}
- void CompileWithRandomSchedulerAndRun(const uint16_t* data, bool has_result, int expected) {
+ void CompileWithRandomSchedulerAndRun(const std::vector<uint16_t>& data,
+ bool has_result,
+ int expected) {
for (CodegenTargetConfig target_config : GetTargetConfigs()) {
HGraph* graph = CreateCFG(data);
@@ -393,7 +395,7 @@ TEST_F(SchedulerTest, RandomScheduling) {
// }
// return result;
//
- const uint16_t data[] = SIX_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = SIX_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 << 12 | 2 << 8, // const/4 v2, #int 0
Instruction::CONST_HIGH16 | 0 << 8, 0x4120, // const/high16 v0, #float 10.0 // #41200000
Instruction::CONST_4 | 1 << 12 | 1 << 8, // const/4 v1, #int 1
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 77e70d733e..85ed06eb9b 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -31,7 +31,7 @@ namespace art {
class SsaTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data, const char* expected);
+ void TestCode(const std::vector<uint16_t>& data, const char* expected);
};
class SsaPrettyPrinter : public HPrettyPrinter {
@@ -80,7 +80,7 @@ static void ReNumberInstructions(HGraph* graph) {
}
}
-void SsaTest::TestCode(const uint16_t* data, const char* expected) {
+void SsaTest::TestCode(const std::vector<uint16_t>& data, const char* expected) {
HGraph* graph = CreateCFG(data);
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
@@ -119,7 +119,7 @@ TEST_F(SsaTest, CFG1) {
"BasicBlock 5, pred: 1, succ: 3\n"
" 7: Goto\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
@@ -150,7 +150,7 @@ TEST_F(SsaTest, CFG2) {
"BasicBlock 5, pred: 1, succ: 3\n"
" 9: Goto\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -181,7 +181,7 @@ TEST_F(SsaTest, CFG3) {
"BasicBlock 5, pred: 4\n"
" 10: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -214,7 +214,7 @@ TEST_F(SsaTest, Loop1) {
"BasicBlock 6, pred: 5\n"
" 10: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -245,7 +245,7 @@ TEST_F(SsaTest, Loop2) {
"BasicBlock 5, pred: 4\n"
" 9: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -276,7 +276,7 @@ TEST_F(SsaTest, Loop3) {
"BasicBlock 5, pred: 4\n"
" 10: Exit\n";
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -310,7 +310,7 @@ TEST_F(SsaTest, Loop4) {
"BasicBlock 6, pred: 5\n"
" 10: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::GOTO | 0x500,
Instruction::IF_EQ, 5,
@@ -351,7 +351,7 @@ TEST_F(SsaTest, Loop5) {
" 13: Phi(2, 1) [11, 8, 8]\n"
" 14: Goto\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 4,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -390,7 +390,7 @@ TEST_F(SsaTest, Loop6) {
"BasicBlock 7, pred: 6\n"
" 13: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 8,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -432,7 +432,7 @@ TEST_F(SsaTest, Loop7) {
"BasicBlock 8, pred: 2, succ: 6\n"
" 15: Goto\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 8,
Instruction::CONST_4 | 4 << 12 | 0,
@@ -456,7 +456,7 @@ TEST_F(SsaTest, DeadLocal) {
"BasicBlock 2, pred: 1\n"
" 3: Exit\n";
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
@@ -484,7 +484,7 @@ TEST_F(SsaTest, LocalInIf) {
"BasicBlock 5, pred: 1, succ: 3\n"
" 8: Goto\n";
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
@@ -520,7 +520,7 @@ TEST_F(SsaTest, MultiplePredecessors) {
"BasicBlock 7, pred: 3, succ: 5\n"
" 12: Goto\n";
- const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ const std::vector<uint16_t> data = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 5,
Instruction::ADD_INT_LIT8 | 1 << 8, 0 << 8,
diff --git a/compiler/optimizing/suspend_check_test.cc b/compiler/optimizing/suspend_check_test.cc
index 7e83f8ce5f..33823e2a11 100644
--- a/compiler/optimizing/suspend_check_test.cc
+++ b/compiler/optimizing/suspend_check_test.cc
@@ -30,10 +30,10 @@ namespace art {
class SuspendCheckTest : public OptimizingUnitTest {
protected:
- void TestCode(const uint16_t* data);
+ void TestCode(const std::vector<uint16_t>& data);
};
-void SuspendCheckTest::TestCode(const uint16_t* data) {
+void SuspendCheckTest::TestCode(const std::vector<uint16_t>& data) {
HGraph* graph = CreateCFG(data);
HBasicBlock* first_block = graph->GetEntryBlock()->GetSingleSuccessor();
HBasicBlock* loop_header = first_block->GetSingleSuccessor();
@@ -43,7 +43,7 @@ void SuspendCheckTest::TestCode(const uint16_t* data) {
}
TEST_F(SuspendCheckTest, CFG1) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::NOP,
Instruction::GOTO | 0xFF00);
@@ -51,14 +51,14 @@ TEST_F(SuspendCheckTest, CFG1) {
}
TEST_F(SuspendCheckTest, CFG2) {
- const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 0, 0);
TestCode(data);
}
TEST_F(SuspendCheckTest, CFG3) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 0xFFFF,
Instruction::RETURN_VOID);
@@ -67,7 +67,7 @@ TEST_F(SuspendCheckTest, CFG3) {
}
TEST_F(SuspendCheckTest, CFG4) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_NE, 0xFFFF,
Instruction::RETURN_VOID);
@@ -76,7 +76,7 @@ TEST_F(SuspendCheckTest, CFG4) {
}
TEST_F(SuspendCheckTest, CFG5) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQZ, 0xFFFF,
Instruction::RETURN_VOID);
@@ -85,7 +85,7 @@ TEST_F(SuspendCheckTest, CFG5) {
}
TEST_F(SuspendCheckTest, CFG6) {
- const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ const std::vector<uint16_t> data = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_NEZ, 0xFFFF,
Instruction::RETURN_VOID);
diff --git a/compiler/utils/arm/assembler_arm_shared.h b/compiler/utils/arm/assembler_arm_shared.h
index 21f13eeab7..7464052d93 100644
--- a/compiler/utils/arm/assembler_arm_shared.h
+++ b/compiler/utils/arm/assembler_arm_shared.h
@@ -40,13 +40,6 @@ enum StoreOperandType {
kStoreDWord
};
-// Set condition codes request.
-enum SetCc {
- kCcDontCare, // Allows prioritizing 16-bit instructions on Thumb2 whether they set CCs or not.
- kCcSet,
- kCcKeep,
-};
-
} // namespace arm
} // namespace art
diff --git a/compiler/utils/arm/assembler_arm_test.h b/compiler/utils/arm/assembler_arm_test.h
deleted file mode 100644
index 8c3a11f2cf..0000000000
--- a/compiler/utils/arm/assembler_arm_test.h
+++ /dev/null
@@ -1,555 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_TEST_H_
-#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_TEST_H_
-
-#include "utils/assembler_test.h"
-
-namespace art {
-
-template<typename Ass,
- typename Reg,
- typename FPReg,
- typename Imm,
- typename SOp,
- typename Cond,
- typename SetCc>
-class AssemblerArmTest : public AssemblerTest<Ass, Reg, FPReg, Imm> {
- public:
- typedef AssemblerTest<Ass, Reg, FPReg, Imm> Base;
-
- using Base::GetRegisters;
- using Base::GetRegName;
- using Base::CreateImmediate;
- using Base::WarnOnCombinations;
-
- static constexpr int64_t kFullImmRangeThreshold = 32;
-
- virtual void FillImmediates(std::vector<Imm>& immediates, int64_t imm_min, int64_t imm_max) {
- // Small range: do completely.
- if (imm_max - imm_min <= kFullImmRangeThreshold) {
- for (int64_t i = imm_min; i <= imm_max; ++i) {
- immediates.push_back(CreateImmediate(i));
- }
- } else {
- immediates.push_back(CreateImmediate(imm_min));
- immediates.push_back(CreateImmediate(imm_max));
- if (imm_min < imm_max - 1) {
- immediates.push_back(CreateImmediate(imm_min + 1));
- }
- if (imm_min < imm_max - 2) {
- immediates.push_back(CreateImmediate(imm_min + 2));
- }
- if (imm_min < imm_max - 3) {
- immediates.push_back(CreateImmediate(imm_max - 1));
- }
- if (imm_min < imm_max - 4) {
- immediates.push_back(CreateImmediate((imm_min + imm_max) / 2));
- }
- }
- }
-
- std::string RepeatRRIIC(void (Ass::*f)(Reg, Reg, Imm, Imm, Cond),
- int64_t imm1_min, int64_t imm1_max,
- int64_t imm2_min, int64_t imm2_max,
- std::string fmt) {
- return RepeatTemplatedRRIIC(f, GetRegisters(), GetRegisters(),
- &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
- &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
- imm1_min, imm1_max, imm2_min, imm2_max,
- fmt);
- }
-
- template <typename Reg1, typename Reg2>
- std::string RepeatTemplatedRRIIC(void (Ass::*f)(Reg1, Reg2, Imm, Imm, Cond),
- const std::vector<Reg1*> reg1_registers,
- const std::vector<Reg2*> reg2_registers,
- std::string (AssemblerArmTest::*GetName1)(const Reg1&),
- std::string (AssemblerArmTest::*GetName2)(const Reg2&),
- int64_t imm1_min, int64_t imm1_max,
- int64_t imm2_min, int64_t imm2_max,
- std::string fmt) {
- std::vector<Imm> immediates1;
- FillImmediates(immediates1, imm1_min, imm1_max);
- std::vector<Imm> immediates2;
- FillImmediates(immediates2, imm2_min, imm2_max);
-
- std::vector<Cond>& cond = GetConditions();
-
- WarnOnCombinations(cond.size() * immediates1.size() * immediates2.size() *
- reg1_registers.size() * reg2_registers.size());
-
- std::ostringstream oss;
- bool first = true;
- for (Cond& c : cond) {
- std::string after_cond = fmt;
-
- size_t cond_index = after_cond.find(COND_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
- }
-
- for (Imm i : immediates1) {
- std::string base = after_cond;
-
- size_t imm1_index = base.find(IMM1_TOKEN);
- if (imm1_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << i;
- std::string imm_string = sreg.str();
- base.replace(imm1_index, ConstexprStrLen(IMM1_TOKEN), imm_string);
- }
-
- for (Imm j : immediates2) {
- std::string base2 = base;
-
- size_t imm2_index = base2.find(IMM2_TOKEN);
- if (imm2_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << j;
- std::string imm_string = sreg.str();
- base2.replace(imm2_index, ConstexprStrLen(IMM2_TOKEN), imm_string);
- }
-
- for (auto reg1 : reg1_registers) {
- std::string base3 = base2;
-
- std::string reg1_string = (this->*GetName1)(*reg1);
- size_t reg1_index;
- while ((reg1_index = base3.find(Base::REG1_TOKEN)) != std::string::npos) {
- base3.replace(reg1_index, ConstexprStrLen(Base::REG1_TOKEN), reg1_string);
- }
-
- for (auto reg2 : reg2_registers) {
- std::string base4 = base3;
-
- std::string reg2_string = (this->*GetName2)(*reg2);
- size_t reg2_index;
- while ((reg2_index = base4.find(Base::REG2_TOKEN)) != std::string::npos) {
- base4.replace(reg2_index, ConstexprStrLen(Base::REG2_TOKEN), reg2_string);
- }
-
- if (first) {
- first = false;
- } else {
- oss << "\n";
- }
- oss << base4;
-
- (Base::GetAssembler()->*f)(*reg1, *reg2, i, j, c);
- }
- }
- }
- }
- }
- // Add a newline at the end.
- oss << "\n";
-
- return oss.str();
- }
-
- std::string RepeatRRiiC(void (Ass::*f)(Reg, Reg, Imm, Imm, Cond),
- std::vector<std::pair<Imm, Imm>>& immediates,
- std::string fmt) {
- return RepeatTemplatedRRiiC<Reg, Reg>(f, GetRegisters(), GetRegisters(),
- &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
- &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
- immediates, fmt);
- }
-
- template <typename Reg1, typename Reg2>
- std::string RepeatTemplatedRRiiC(void (Ass::*f)(Reg1, Reg2, Imm, Imm, Cond),
- const std::vector<Reg1*> reg1_registers,
- const std::vector<Reg2*> reg2_registers,
- std::string (AssemblerArmTest::*GetName1)(const Reg1&),
- std::string (AssemblerArmTest::*GetName2)(const Reg2&),
- std::vector<std::pair<Imm, Imm>>& immediates,
- std::string fmt) {
- std::vector<Cond>& cond = GetConditions();
-
- WarnOnCombinations(cond.size() * immediates.size() * reg1_registers.size() *
- reg2_registers.size());
-
- std::ostringstream oss;
- bool first = true;
- for (Cond& c : cond) {
- std::string after_cond = fmt;
-
- size_t cond_index = after_cond.find(COND_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
- }
-
- for (std::pair<Imm, Imm>& pair : immediates) {
- Imm i = pair.first;
- Imm j = pair.second;
- std::string after_imm1 = after_cond;
-
- size_t imm1_index = after_imm1.find(IMM1_TOKEN);
- if (imm1_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << i;
- std::string imm_string = sreg.str();
- after_imm1.replace(imm1_index, ConstexprStrLen(IMM1_TOKEN), imm_string);
- }
-
- std::string after_imm2 = after_imm1;
-
- size_t imm2_index = after_imm2.find(IMM2_TOKEN);
- if (imm2_index != std::string::npos) {
- std::ostringstream sreg;
- sreg << j;
- std::string imm_string = sreg.str();
- after_imm2.replace(imm2_index, ConstexprStrLen(IMM2_TOKEN), imm_string);
- }
-
- for (auto reg1 : reg1_registers) {
- std::string after_reg1 = after_imm2;
-
- std::string reg1_string = (this->*GetName1)(*reg1);
- size_t reg1_index;
- while ((reg1_index = after_reg1.find(Base::REG1_TOKEN)) != std::string::npos) {
- after_reg1.replace(reg1_index, ConstexprStrLen(Base::REG1_TOKEN), reg1_string);
- }
-
- for (auto reg2 : reg2_registers) {
- std::string after_reg2 = after_reg1;
-
- std::string reg2_string = (this->*GetName2)(*reg2);
- size_t reg2_index;
- while ((reg2_index = after_reg2.find(Base::REG2_TOKEN)) != std::string::npos) {
- after_reg2.replace(reg2_index, ConstexprStrLen(Base::REG2_TOKEN), reg2_string);
- }
-
- if (first) {
- first = false;
- } else {
- oss << "\n";
- }
- oss << after_reg2;
-
- (Base::GetAssembler()->*f)(*reg1, *reg2, i, j, c);
- }
- }
- }
- }
- // Add a newline at the end.
- oss << "\n";
-
- return oss.str();
- }
-
- std::string RepeatRRC(void (Ass::*f)(Reg, Reg, Cond), std::string fmt) {
- return RepeatTemplatedRRC(f, GetRegisters(), GetRegisters(), GetConditions(),
- &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
- &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
- fmt);
- }
-
- template <typename Reg1, typename Reg2>
- std::string RepeatTemplatedRRC(void (Ass::*f)(Reg1, Reg2, Cond),
- const std::vector<Reg1*>& reg1_registers,
- const std::vector<Reg2*>& reg2_registers,
- const std::vector<Cond>& cond,
- std::string (AssemblerArmTest::*GetName1)(const Reg1&),
- std::string (AssemblerArmTest::*GetName2)(const Reg2&),
- std::string fmt) {
- WarnOnCombinations(cond.size() * reg1_registers.size() * reg2_registers.size());
-
- std::ostringstream oss;
- bool first = true;
- for (const Cond& c : cond) {
- std::string after_cond = fmt;
-
- size_t cond_index = after_cond.find(COND_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
- }
-
- for (auto reg1 : reg1_registers) {
- std::string after_reg1 = after_cond;
-
- std::string reg1_string = (this->*GetName1)(*reg1);
- size_t reg1_index;
- while ((reg1_index = after_reg1.find(Base::REG1_TOKEN)) != std::string::npos) {
- after_reg1.replace(reg1_index, ConstexprStrLen(Base::REG1_TOKEN), reg1_string);
- }
-
- for (auto reg2 : reg2_registers) {
- std::string after_reg2 = after_reg1;
-
- std::string reg2_string = (this->*GetName2)(*reg2);
- size_t reg2_index;
- while ((reg2_index = after_reg2.find(Base::REG2_TOKEN)) != std::string::npos) {
- after_reg2.replace(reg2_index, ConstexprStrLen(Base::REG2_TOKEN), reg2_string);
- }
-
- if (first) {
- first = false;
- } else {
- oss << "\n";
- }
- oss << after_reg2;
-
- (Base::GetAssembler()->*f)(*reg1, *reg2, c);
- }
- }
- }
- // Add a newline at the end.
- oss << "\n";
-
- return oss.str();
- }
-
- std::string RepeatRRRC(void (Ass::*f)(Reg, Reg, Reg, Cond), std::string fmt) {
- return RepeatTemplatedRRRC(f, GetRegisters(), GetRegisters(), GetRegisters(), GetConditions(),
- &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
- &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
- &AssemblerArmTest::template GetRegName<RegisterView::kUsePrimaryName>,
- fmt);
- }
-
- template <typename Reg1, typename Reg2, typename Reg3>
- std::string RepeatTemplatedRRRC(void (Ass::*f)(Reg1, Reg2, Reg3, Cond),
- const std::vector<Reg1*>& reg1_registers,
- const std::vector<Reg2*>& reg2_registers,
- const std::vector<Reg3*>& reg3_registers,
- const std::vector<Cond>& cond,
- std::string (AssemblerArmTest::*GetName1)(const Reg1&),
- std::string (AssemblerArmTest::*GetName2)(const Reg2&),
- std::string (AssemblerArmTest::*GetName3)(const Reg3&),
- std::string fmt) {
- WarnOnCombinations(cond.size() * reg1_registers.size() * reg2_registers.size() *
- reg3_registers.size());
-
- std::ostringstream oss;
- bool first = true;
- for (const Cond& c : cond) {
- std::string after_cond = fmt;
-
- size_t cond_index = after_cond.find(COND_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
- }
-
- for (auto reg1 : reg1_registers) {
- std::string after_reg1 = after_cond;
-
- std::string reg1_string = (this->*GetName1)(*reg1);
- size_t reg1_index;
- while ((reg1_index = after_reg1.find(Base::REG1_TOKEN)) != std::string::npos) {
- after_reg1.replace(reg1_index, ConstexprStrLen(Base::REG1_TOKEN), reg1_string);
- }
-
- for (auto reg2 : reg2_registers) {
- std::string after_reg2 = after_reg1;
-
- std::string reg2_string = (this->*GetName2)(*reg2);
- size_t reg2_index;
- while ((reg2_index = after_reg2.find(Base::REG2_TOKEN)) != std::string::npos) {
- after_reg2.replace(reg2_index, ConstexprStrLen(Base::REG2_TOKEN), reg2_string);
- }
-
- for (auto reg3 : reg3_registers) {
- std::string after_reg3 = after_reg2;
-
- std::string reg3_string = (this->*GetName3)(*reg3);
- size_t reg3_index;
- while ((reg3_index = after_reg3.find(REG3_TOKEN)) != std::string::npos) {
- after_reg3.replace(reg3_index, ConstexprStrLen(REG3_TOKEN), reg3_string);
- }
-
- if (first) {
- first = false;
- } else {
- oss << "\n";
- }
- oss << after_reg3;
-
- (Base::GetAssembler()->*f)(*reg1, *reg2, *reg3, c);
- }
- }
- }
- }
- // Add a newline at the end.
- oss << "\n";
-
- return oss.str();
- }
-
- template <typename RegT>
- std::string RepeatTemplatedRSC(void (Ass::*f)(RegT, SOp, Cond),
- const std::vector<RegT*>& registers,
- const std::vector<SOp>& shifts,
- const std::vector<Cond>& cond,
- std::string (AssemblerArmTest::*GetName)(const RegT&),
- std::string fmt) {
- WarnOnCombinations(cond.size() * registers.size() * shifts.size());
-
- std::ostringstream oss;
- bool first = true;
- for (const Cond& c : cond) {
- std::string after_cond = fmt;
-
- size_t cond_index = after_cond.find(COND_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
- }
-
- for (const SOp& shift : shifts) {
- std::string after_shift = after_cond;
-
- std::string shift_string = GetShiftString(shift);
- size_t shift_index;
- while ((shift_index = after_shift.find(Base::SHIFT_TOKEN)) != std::string::npos) {
- after_shift.replace(shift_index, ConstexprStrLen(Base::SHIFT_TOKEN), shift_string);
- }
-
- for (auto reg : registers) {
- std::string after_reg = after_shift;
-
- std::string reg_string = (this->*GetName)(*reg);
- size_t reg_index;
- while ((reg_index = after_reg.find(Base::REG_TOKEN)) != std::string::npos) {
- after_reg.replace(reg_index, ConstexprStrLen(Base::REG_TOKEN), reg_string);
- }
-
- if (first) {
- first = false;
- } else {
- oss << "\n";
- }
- oss << after_reg;
-
- (Base::GetAssembler()->*f)(*reg, shift, c);
- }
- }
- }
- // Add a newline at the end.
- oss << "\n";
-
- return oss.str();
- }
-
- template <typename Reg1, typename Reg2>
- std::string RepeatTemplatedRRSC(void (Ass::*f)(Reg1, Reg2, const SOp&, Cond),
- const std::vector<Reg1*>& reg1_registers,
- const std::vector<Reg2*>& reg2_registers,
- const std::vector<SOp>& shifts,
- const std::vector<Cond>& cond,
- std::string (AssemblerArmTest::*GetName1)(const Reg1&),
- std::string (AssemblerArmTest::*GetName2)(const Reg2&),
- std::string fmt) {
- WarnOnCombinations(cond.size() * reg1_registers.size() * reg2_registers.size() * shifts.size());
-
- std::ostringstream oss;
- bool first = true;
- for (const Cond& c : cond) {
- std::string after_cond = fmt;
-
- size_t cond_index = after_cond.find(COND_TOKEN);
- if (cond_index != std::string::npos) {
- after_cond.replace(cond_index, ConstexprStrLen(COND_TOKEN), GetConditionString(c));
- }
-
- for (const SOp& shift : shifts) {
- std::string after_shift = after_cond;
-
- std::string shift_string = GetShiftString(shift);
- size_t shift_index;
- while ((shift_index = after_shift.find(SHIFT_TOKEN)) != std::string::npos) {
- after_shift.replace(shift_index, ConstexprStrLen(SHIFT_TOKEN), shift_string);
- }
-
- for (auto reg1 : reg1_registers) {
- std::string after_reg1 = after_shift;
-
- std::string reg1_string = (this->*GetName1)(*reg1);
- size_t reg1_index;
- while ((reg1_index = after_reg1.find(Base::REG1_TOKEN)) != std::string::npos) {
- after_reg1.replace(reg1_index, ConstexprStrLen(Base::REG1_TOKEN), reg1_string);
- }
-
- for (auto reg2 : reg2_registers) {
- std::string after_reg2 = after_reg1;
-
- std::string reg2_string = (this->*GetName2)(*reg2);
- size_t reg2_index;
- while ((reg2_index = after_reg2.find(Base::REG2_TOKEN)) != std::string::npos) {
- after_reg2.replace(reg2_index, ConstexprStrLen(Base::REG2_TOKEN), reg2_string);
- }
-
- if (first) {
- first = false;
- } else {
- oss << "\n";
- }
- oss << after_reg2;
-
- (Base::GetAssembler()->*f)(*reg1, *reg2, shift, c);
- }
- }
- }
- }
- // Add a newline at the end.
- oss << "\n";
-
- return oss.str();
- }
-
- protected:
- AssemblerArmTest() {}
-
- virtual std::vector<Cond>& GetConditions() = 0;
- virtual std::string GetConditionString(Cond c) = 0;
-
- virtual std::vector<SetCc>& GetSetCcs() = 0;
- virtual std::string GetSetCcString(SetCc s) = 0;
-
- virtual std::vector<SOp>& GetShiftOperands() = 0;
- virtual std::string GetShiftString(SOp sop) = 0;
-
- virtual Reg GetPCRegister() = 0;
- virtual std::vector<Reg*> GetRegistersWithoutPC() {
- std::vector<Reg*> without_pc = GetRegisters();
- Reg pc_reg = GetPCRegister();
-
- for (auto it = without_pc.begin(); it != without_pc.end(); ++it) {
- if (**it == pc_reg) {
- without_pc.erase(it);
- break;
- }
- }
-
- return without_pc;
- }
-
- static constexpr const char* IMM1_TOKEN = "{imm1}";
- static constexpr const char* IMM2_TOKEN = "{imm2}";
- static constexpr const char* REG3_TOKEN = "{reg3}";
- static constexpr const char* REG4_TOKEN = "{reg4}";
- static constexpr const char* COND_TOKEN = "{cond}";
- static constexpr const char* SET_CC_TOKEN = "{s}";
- static constexpr const char* SHIFT_TOKEN = "{shift}";
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AssemblerArmTest);
-};
-
-} // namespace art
-
-#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_TEST_H_
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index d6b24da407..05250a4157 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -112,12 +112,14 @@ bool ArmVIXLAssembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
return vixl_masm_.IsModifiedImmediate(immediate);
}
-bool ArmVIXLAssembler::ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc) {
+bool ArmVIXLAssembler::ShifterOperandCanHold(Opcode opcode,
+ uint32_t immediate,
+ vixl::aarch32::FlagsUpdate update_flags) {
switch (opcode) {
case ADD:
case SUB:
// Less than (or equal to) 12 bits can be done if we don't need to set condition codes.
- if (IsUint<12>(immediate) && set_cc != kCcSet) {
+ if (IsUint<12>(immediate) && update_flags != vixl::aarch32::SetFlags) {
return true;
}
return ShifterOperandCanAlwaysHold(immediate);
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index 1377e64073..b0310f2fb6 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -218,7 +218,9 @@ class ArmVIXLAssembler FINAL : public Assembler {
void StoreRegisterList(RegList regs, size_t stack_offset);
bool ShifterOperandCanAlwaysHold(uint32_t immediate);
- bool ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc = kCcDontCare);
+ bool ShifterOperandCanHold(Opcode opcode,
+ uint32_t immediate,
+ vixl::aarch32::FlagsUpdate update_flags = vixl::aarch32::DontCare);
bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
int32_t offset,
/*out*/ int32_t* add_to_base,
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 2b3e979606..065c3de23c 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -492,7 +492,7 @@ void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
temps.Exclude(in_reg.AsVIXLRegister());
___ Cmp(in_reg.AsVIXLRegister(), 0);
- if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
+ if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value())) {
if (!out_reg.Equals(in_reg)) {
ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
3 * vixl32::kMaxInstructionSizeInBytes,
@@ -531,7 +531,7 @@ void ArmVIXLJNIMacroAssembler::CreateHandleScopeEntry(FrameOffset out_off,
// e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
___ Cmp(scratch.AsVIXLRegister(), 0);
- if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value(), kCcDontCare)) {
+ if (asm_.ShifterOperandCanHold(ADD, handle_scope_offset.Int32Value())) {
ExactAssemblyScope guard(asm_.GetVIXLAssembler(),
2 * vixl32::kMaxInstructionSizeInBytes,
CodeBufferCheckScope::kMaximumSize);
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 0a094352e4..674dc9a78b 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -153,7 +153,7 @@ const char* const VixlJniHelpersResults[] = {
" 21c: f8d9 8034 ldr.w r8, [r9, #52] ; 0x34\n",
" 220: 4770 bx lr\n",
" 222: 4660 mov r0, ip\n",
- " 224: f8d9 c2c0 ldr.w ip, [r9, #704] ; 0x2c0\n",
+ " 224: f8d9 c2c4 ldr.w ip, [r9, #708] ; 0x2c4\n",
" 228: 47e0 blx ip\n",
nullptr
};
diff --git a/compiler/utils/test_dex_file_builder.h b/compiler/utils/test_dex_file_builder.h
index 04fba51dc1..58f1ec7b08 100644
--- a/compiler/utils/test_dex_file_builder.h
+++ b/compiler/utils/test_dex_file_builder.h
@@ -27,6 +27,7 @@
#include <android-base/logging.h>
#include "base/bit_utils.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "dex/standard_dex_file.h"
@@ -233,7 +234,8 @@ class TestDexFileBuilder {
static constexpr bool kVerify = false;
static constexpr bool kVerifyChecksum = false;
std::string error_msg;
- std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(
&dex_file_data_[0],
dex_file_data_.size(),
dex_location,
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 6bebf7d2da..3a6c86d768 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -129,6 +129,17 @@ art_cc_binary {
static_libs: [
"libart-dex2oat",
],
+
+ pgo: {
+ instrumentation: true,
+ profile_file: "art/dex2oat.profdata",
+ benchmarks: ["dex2oat"],
+ cflags: [
+ // Ignore frame-size increase resulting from instrumentation.
+ "-Wno-frame-larger-than=",
+ "-DART_PGO_INSTRUMENTATION",
+ ],
+ }
}
art_cc_binary {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index dabe07f9ce..7796b3acc6 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1807,9 +1807,7 @@ class Dex2Oat FINAL {
// We do not decompile a RETURN_VOID_NO_BARRIER into a RETURN_VOID, as the quickening
// optimization does not depend on the boot image (the optimization relies on not
// having final fields in a class, which does not change for an app).
- VdexFile::Unquicken(dex_files_,
- input_vdex_file_->GetQuickeningInfo(),
- /* decompile_return_instruction */ false);
+ input_vdex_file_->Unquicken(dex_files_, /* decompile_return_instruction */ false);
} else {
// Create the main VerifierDeps, here instead of in the compiler since we want to aggregate
// the results for all the dex files, not just the results for the current dex file.
@@ -2015,8 +2013,8 @@ class Dex2Oat FINAL {
text_size,
oat_writer->GetBssSize(),
oat_writer->GetBssMethodsOffset(),
- oat_writer->GetBssRootsOffset());
-
+ oat_writer->GetBssRootsOffset(),
+ oat_writer->GetVdexSize());
if (IsImage()) {
// Update oat layout.
DCHECK(image_writer_ != nullptr);
@@ -2043,7 +2041,8 @@ class Dex2Oat FINAL {
// We need to mirror the layout of the ELF file in the compressed debug-info.
// Therefore PrepareDebugInfo() relies on the SetLoadedSectionSizes() call further above.
- elf_writer->PrepareDebugInfo(oat_writer->GetMethodDebugInfo());
+ debug::DebugInfo debug_info = oat_writer->GetDebugInfo(); // Keep the variable alive.
+ elf_writer->PrepareDebugInfo(debug_info); // Processes the data on background thread.
linker::OutputStream*& rodata = rodata_[i];
DCHECK(rodata != nullptr);
@@ -2077,7 +2076,7 @@ class Dex2Oat FINAL {
}
elf_writer->WriteDynamicSection();
- elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
+ elf_writer->WriteDebugInfo(oat_writer->GetDebugInfo());
if (!elf_writer->End()) {
LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath();
@@ -3063,9 +3062,9 @@ static dex2oat::ReturnCode Dex2oat(int argc, char** argv) {
int main(int argc, char** argv) {
int result = static_cast<int>(art::Dex2oat(argc, argv));
// Everything was done, do an explicit exit here to avoid running Runtime destructors that take
- // time (bug 10645725) unless we're a debug build or running on valgrind. Note: The Dex2Oat class
- // should not destruct the runtime in this case.
- if (!art::kIsDebugBuild && (RUNNING_ON_MEMORY_TOOL == 0)) {
+ // time (bug 10645725) unless we're a debug or instrumented build or running on valgrind. Note:
+ // The Dex2Oat class should not destruct the runtime in this case.
+ if (!art::kIsDebugBuild && !art::kIsPGOInstrumentation && (RUNNING_ON_MEMORY_TOOL == 0)) {
_exit(result);
}
return result;
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 980363b1bb..05592f1806 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -29,6 +29,7 @@
#include "base/file_utils.h"
#include "base/macros.h"
#include "base/unix_file/fd_file.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "jit/profile_compilation_info.h"
@@ -65,12 +66,13 @@ class Dex2oatImageTest : public CommonRuntimeTest {
for (const std::string& dex : GetLibCoreDexFileNames()) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
- CHECK(DexFileLoader::Open(dex.c_str(),
- dex,
- /*verify*/ true,
- /*verify_checksum*/ false,
- &error_msg,
- &dex_files))
+ const ArtDexFileLoader dex_file_loader;
+ CHECK(dex_file_loader.Open(dex.c_str(),
+ dex,
+ /*verify*/ true,
+ /*verify_checksum*/ false,
+ &error_msg,
+ &dex_files))
<< error_msg;
for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
index d9b4ea7835..a2e2b48956 100644
--- a/dex2oat/dex2oat_options.cc
+++ b/dex2oat/dex2oat_options.cc
@@ -249,8 +249,6 @@ static Parser CreateArgumentParser() {
return parser_builder->Build();
}
-#pragma GCC diagnostic pop
-
std::unique_ptr<Dex2oatArgumentMap> Dex2oatArgumentMap::Parse(int argc,
const char** argv,
std::string* error_msg) {
@@ -264,4 +262,5 @@ std::unique_ptr<Dex2oatArgumentMap> Dex2oatArgumentMap::Parse(int argc,
return std::unique_ptr<Dex2oatArgumentMap>(new Dex2oatArgumentMap(parser.ReleaseArgumentsMap()));
}
+#pragma GCC diagnostic pop
} // namespace art
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index c91240e19d..8799540fd3 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -30,6 +30,7 @@
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "bytecode_utils.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
@@ -44,6 +45,7 @@ namespace art {
static constexpr size_t kMaxMethodIds = 65535;
static constexpr bool kDebugArgs = false;
+static const char* kDisableCompactDex = "--compact-dex-level=none";
using android::base::StringPrintf;
@@ -107,6 +109,8 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
[](const OatFile&) {});
}
+ bool test_accepts_odex_file_on_failure = false;
+
template <typename T>
void GenerateOdexForTest(
const std::string& dex_location,
@@ -123,7 +127,7 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
&error_msg,
extra_args,
use_fd);
- bool success = (status == 0);
+ bool success = (WIFEXITED(status) && WEXITSTATUS(status) == 0);
if (expect_success) {
ASSERT_TRUE(success) << error_msg << std::endl << output_;
@@ -145,16 +149,18 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
error_msg_ = error_msg;
- // Verify there's no loadable odex file.
- std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
- odex_location.c_str(),
- nullptr,
- nullptr,
- false,
- /*low_4gb*/false,
- dex_location.c_str(),
- &error_msg));
- ASSERT_TRUE(odex_file.get() == nullptr);
+ if (!test_accepts_odex_file_on_failure) {
+ // Verify there's no loadable odex file.
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(odex_location.c_str(),
+ odex_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file.get() == nullptr);
+ }
}
}
@@ -679,7 +685,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
const char* location = dex_location.c_str();
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_TRUE(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ ASSERT_TRUE(dex_file_loader.Open(
location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& dex_file = dex_files[0];
@@ -776,7 +783,7 @@ class Dex2oatLayoutTest : public Dex2oatTest {
app_image_file_name,
/* use_fd */ true,
/* num_profile_classes */ 1,
- { input_vdex, output_vdex });
+ { input_vdex, output_vdex, kDisableCompactDex });
EXPECT_GT(vdex_file1->GetLength(), 0u);
}
{
@@ -788,7 +795,7 @@ class Dex2oatLayoutTest : public Dex2oatTest {
app_image_file_name,
/* use_fd */ true,
/* num_profile_classes */ 1,
- { input_vdex, output_vdex },
+ { input_vdex, output_vdex, kDisableCompactDex },
/* expect_success */ true);
EXPECT_GT(vdex_file2.GetFile()->GetLength(), 0u);
}
@@ -814,7 +821,8 @@ class Dex2oatLayoutTest : public Dex2oatTest {
const char* location = dex_location.c_str();
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_TRUE(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ ASSERT_TRUE(dex_file_loader.Open(
location, location, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files));
EXPECT_EQ(dex_files.size(), 1U);
std::unique_ptr<const DexFile>& old_dex_file = dex_files[0];
@@ -876,8 +884,6 @@ TEST_F(Dex2oatLayoutTest, TestLayoutAppImage) {
}
TEST_F(Dex2oatLayoutTest, TestVdexLayout) {
- // Disabled until figure out running compact dex + DexLayout causes quickening errors.
- TEST_DISABLED_FOR_COMPACT_DEX();
RunTestVDex();
}
@@ -898,7 +904,7 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
GenerateOdexForTest(dex_location,
odex_location,
CompilerFilter::kQuicken,
- { input_vdex, output_vdex },
+ { input_vdex, output_vdex, kDisableCompactDex },
/* expect_success */ true,
/* use_fd */ true);
EXPECT_GT(vdex_file1->GetLength(), 0u);
@@ -910,7 +916,7 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
GenerateOdexForTest(dex_location,
odex_location,
CompilerFilter::kVerify,
- { input_vdex, output_vdex },
+ { input_vdex, output_vdex, kDisableCompactDex },
/* expect_success */ true,
/* use_fd */ true);
}
@@ -944,8 +950,8 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
class_it.Next()) {
if (class_it.IsAtMethod() && class_it.GetMethodCodeItem() != nullptr) {
for (const DexInstructionPcPair& inst :
- CodeItemInstructionAccessor(dex_file.get(), class_it.GetMethodCodeItem())) {
- ASSERT_FALSE(inst->IsQuickened());
+ CodeItemInstructionAccessor(*dex_file, class_it.GetMethodCodeItem())) {
+ ASSERT_FALSE(inst->IsQuickened()) << output_;
}
}
}
@@ -956,8 +962,6 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
};
TEST_F(Dex2oatUnquickenTest, UnquickenMultiDex) {
- // Disabled until figure out running compact dex + DexLayout causes quickening errors.
- TEST_DISABLED_FOR_COMPACT_DEX();
RunUnquickenMultiDex();
}
@@ -996,7 +1000,12 @@ TEST_F(Dex2oatWatchdogTest, TestWatchdogOK) {
TEST_F(Dex2oatWatchdogTest, TestWatchdogTrigger) {
TEST_DISABLED_FOR_MEMORY_TOOL_VALGRIND(); // b/63052624
- TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS(); // b/63052624
+
+ // The watchdog is independent of dex2oat and will not delete intermediates. It is possible
+ // that the compilation succeeds and the file is completely written by the time the watchdog
+ // kills dex2oat (but the dex2oat threads must have been scheduled pretty badly).
+ test_accepts_odex_file_on_failure = true;
+
// Check with ten milliseconds.
RunTest(false, { "--watchdog-timeout=10" });
}
diff --git a/dex2oat/linker/elf_writer.h b/dex2oat/linker/elf_writer.h
index 0eb36eda0f..7c4774038e 100644
--- a/dex2oat/linker/elf_writer.h
+++ b/dex2oat/linker/elf_writer.h
@@ -25,6 +25,7 @@
#include "base/array_ref.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "debug/debug_info.h"
#include "os.h"
namespace art {
@@ -55,18 +56,24 @@ class ElfWriter {
virtual ~ElfWriter() {}
virtual void Start() = 0;
+ // Prepares memory layout of the whole ELF file, and creates dynamic symbols
+ // which point to specific areas of interest (usually section begin and end).
+ // This is needed as multi-image needs to know the memory layout of all ELF
+ // files, before starting to write them.
+ // This method must be called before calling GetLoadedSize().
virtual void PrepareDynamicSection(size_t rodata_size,
size_t text_size,
size_t bss_size,
size_t bss_methods_offset,
- size_t bss_roots_offset) = 0;
- virtual void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
+ size_t bss_roots_offset,
+ size_t dex_section_size) = 0;
+ virtual void PrepareDebugInfo(const debug::DebugInfo& debug_info) = 0;
virtual OutputStream* StartRoData() = 0;
virtual void EndRoData(OutputStream* rodata) = 0;
virtual OutputStream* StartText() = 0;
virtual void EndText(OutputStream* text) = 0;
virtual void WriteDynamicSection() = 0;
- virtual void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) = 0;
+ virtual void WriteDebugInfo(const debug::DebugInfo& debug_info) = 0;
virtual bool End() = 0;
// Get the ELF writer's stream. This stream can be used for writing data directly
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index aa64b7d59e..707e877cfb 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -54,22 +54,28 @@ class DebugInfoTask : public Task {
public:
DebugInfoTask(InstructionSet isa,
const InstructionSetFeatures* features,
- size_t rodata_section_size,
+ uint64_t text_section_address,
size_t text_section_size,
- const ArrayRef<const debug::MethodDebugInfo>& method_infos)
+ uint64_t dex_section_address,
+ size_t dex_section_size,
+ const debug::DebugInfo& debug_info)
: isa_(isa),
instruction_set_features_(features),
- rodata_section_size_(rodata_section_size),
+ text_section_address_(text_section_address),
text_section_size_(text_section_size),
- method_infos_(method_infos) {
+ dex_section_address_(dex_section_address),
+ dex_section_size_(dex_section_size),
+ debug_info_(debug_info) {
}
void Run(Thread*) {
result_ = debug::MakeMiniDebugInfo(isa_,
instruction_set_features_,
- kPageSize + rodata_section_size_, // .text address.
+ text_section_address_,
text_section_size_,
- method_infos_);
+ dex_section_address_,
+ dex_section_size_,
+ debug_info_);
}
std::vector<uint8_t>* GetResult() {
@@ -79,9 +85,11 @@ class DebugInfoTask : public Task {
private:
InstructionSet isa_;
const InstructionSetFeatures* instruction_set_features_;
- size_t rodata_section_size_;
+ uint64_t text_section_address_;
size_t text_section_size_;
- const ArrayRef<const debug::MethodDebugInfo> method_infos_;
+ uint64_t dex_section_address_;
+ size_t dex_section_size_;
+ const debug::DebugInfo& debug_info_;
std::vector<uint8_t> result_;
};
@@ -99,14 +107,15 @@ class ElfWriterQuick FINAL : public ElfWriter {
size_t text_size,
size_t bss_size,
size_t bss_methods_offset,
- size_t bss_roots_offset) OVERRIDE;
- void PrepareDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
+ size_t bss_roots_offset,
+ size_t dex_section_size) OVERRIDE;
+ void PrepareDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
OutputStream* StartRoData() OVERRIDE;
void EndRoData(OutputStream* rodata) OVERRIDE;
OutputStream* StartText() OVERRIDE;
void EndText(OutputStream* text) OVERRIDE;
void WriteDynamicSection() OVERRIDE;
- void WriteDebugInfo(const ArrayRef<const debug::MethodDebugInfo>& method_infos) OVERRIDE;
+ void WriteDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
bool End() OVERRIDE;
virtual OutputStream* GetStream() OVERRIDE;
@@ -123,6 +132,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
size_t rodata_size_;
size_t text_size_;
size_t bss_size_;
+ size_t dex_section_size_;
std::unique_ptr<BufferedOutputStream> output_stream_;
std::unique_ptr<ElfBuilder<ElfTypes>> builder_;
std::unique_ptr<DebugInfoTask> debug_info_task_;
@@ -162,6 +172,7 @@ ElfWriterQuick<ElfTypes>::ElfWriterQuick(InstructionSet instruction_set,
rodata_size_(0u),
text_size_(0u),
bss_size_(0u),
+ dex_section_size_(0u),
output_stream_(
std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(elf_file))),
builder_(new ElfBuilder<ElfTypes>(instruction_set, features, output_stream_.get())) {}
@@ -183,19 +194,23 @@ void ElfWriterQuick<ElfTypes>::PrepareDynamicSection(size_t rodata_size,
size_t text_size,
size_t bss_size,
size_t bss_methods_offset,
- size_t bss_roots_offset) {
+ size_t bss_roots_offset,
+ size_t dex_section_size) {
DCHECK_EQ(rodata_size_, 0u);
rodata_size_ = rodata_size;
DCHECK_EQ(text_size_, 0u);
text_size_ = text_size;
DCHECK_EQ(bss_size_, 0u);
bss_size_ = bss_size;
+ DCHECK_EQ(dex_section_size_, 0u);
+ dex_section_size_ = dex_section_size;
builder_->PrepareDynamicSection(elf_file_->GetPath(),
rodata_size_,
text_size_,
bss_size_,
bss_methods_offset,
- bss_roots_offset);
+ bss_roots_offset,
+ dex_section_size);
}
template <typename ElfTypes>
@@ -234,17 +249,18 @@ void ElfWriterQuick<ElfTypes>::WriteDynamicSection() {
}
template <typename ElfTypes>
-void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(
- const ArrayRef<const debug::MethodDebugInfo>& method_infos) {
- if (!method_infos.empty() && compiler_options_->GetGenerateMiniDebugInfo()) {
+void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(const debug::DebugInfo& debug_info) {
+ if (!debug_info.Empty() && compiler_options_->GetGenerateMiniDebugInfo()) {
// Prepare the mini-debug-info in background while we do other I/O.
Thread* self = Thread::Current();
debug_info_task_ = std::unique_ptr<DebugInfoTask>(
new DebugInfoTask(builder_->GetIsa(),
instruction_set_features_,
- rodata_size_,
+ builder_->GetText()->GetAddress(),
text_size_,
- method_infos));
+ builder_->GetDex()->Exists() ? builder_->GetDex()->GetAddress() : 0,
+ dex_section_size_,
+ debug_info));
debug_info_thread_pool_ = std::unique_ptr<ThreadPool>(
new ThreadPool("Mini-debug-info writer", 1));
debug_info_thread_pool_->AddTask(self, debug_info_task_.get());
@@ -253,12 +269,11 @@ void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(
}
template <typename ElfTypes>
-void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
- const ArrayRef<const debug::MethodDebugInfo>& method_infos) {
- if (!method_infos.empty()) {
+void ElfWriterQuick<ElfTypes>::WriteDebugInfo(const debug::DebugInfo& debug_info) {
+ if (!debug_info.Empty()) {
if (compiler_options_->GetGenerateDebugInfo()) {
// Generate all the debug information we can.
- debug::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat, true /* write_oat_patches */);
+ debug::WriteDebugInfo(builder_.get(), debug_info, kCFIFormat, true /* write_oat_patches */);
}
if (compiler_options_->GetGenerateMiniDebugInfo()) {
// Wait for the mini-debug-info generation to finish and write it to disk.
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index 85145d3d64..637578e622 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -316,7 +316,8 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
text_size,
oat_writer->GetBssSize(),
oat_writer->GetBssMethodsOffset(),
- oat_writer->GetBssRootsOffset());
+ oat_writer->GetBssRootsOffset(),
+ oat_writer->GetVdexSize());
writer->UpdateOatFileLayout(i,
elf_writer->GetLoadedSize(),
@@ -338,7 +339,7 @@ inline void CompilationHelper::Compile(CompilerDriver* driver,
writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
elf_writer->WriteDynamicSection();
- elf_writer->WriteDebugInfo(oat_writer->GetMethodDebugInfo());
+ elf_writer->WriteDebugInfo(oat_writer->GetDebugInfo());
bool success = elf_writer->End();
ASSERT_TRUE(success);
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index cb1b80d590..66041bbfad 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -33,6 +33,7 @@
#include "class_table-inl.h"
#include "compiled_method-inl.h"
#include "debug/method_debug_info.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_types.h"
@@ -57,6 +58,7 @@
#include "mirror/object-inl.h"
#include "oat_quick_method_header.h"
#include "os.h"
+#include "quicken_info.h"
#include "safe_map.h"
#include "scoped_thread_state_change-inl.h"
#include "type_lookup_table.h"
@@ -1334,7 +1336,7 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
bool has_code_info = method_header->IsOptimized();
// Record debug information for this function if we are doing that.
debug::MethodDebugInfo& info = writer_->method_info_[debug_info_idx];
- DCHECK(info.trampoline_name.empty());
+ DCHECK(info.custom_name.empty());
info.dex_file = method_ref.dex_file;
info.class_def_index = class_def_index;
info.dex_method_index = method_ref.index;
@@ -2418,7 +2420,7 @@ size_t OatWriter::InitOatCode(size_t offset) {
(field) = compiler_driver_->Create ## fn_name(); \
if (generate_debug_info) { \
debug::MethodDebugInfo info = {}; \
- info.trampoline_name = #fn_name; \
+ info.custom_name = #fn_name; \
info.isa = instruction_set; \
info.is_code_address_text_relative = true; \
/* Use the code offset rather than the `adjusted_offset`. */ \
@@ -2617,42 +2619,54 @@ bool OatWriter::WriteRodata(OutputStream* out) {
return true;
}
-class OatWriter::WriteQuickeningInfoMethodVisitor : public DexMethodVisitor {
+class OatWriter::WriteQuickeningInfoMethodVisitor {
public:
- WriteQuickeningInfoMethodVisitor(OatWriter* writer,
- OutputStream* out,
- uint32_t offset,
- SafeMap<const uint8_t*, uint32_t>* offset_map)
- : DexMethodVisitor(writer, offset),
- out_(out),
- written_bytes_(0u),
- offset_map_(offset_map) {}
+ WriteQuickeningInfoMethodVisitor(OatWriter* writer, OutputStream* out)
+ : writer_(writer),
+ out_(out) {}
- bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED, const ClassDataItemIterator& it)
- OVERRIDE {
- uint32_t method_idx = it.GetMemberIndex();
- CompiledMethod* compiled_method =
- writer_->compiler_driver_->GetCompiledMethod(MethodReference(dex_file_, method_idx));
+ bool VisitDexMethods(const std::vector<const DexFile*>& dex_files) {
+ std::vector<uint8_t> empty_quicken_info;
+ {
+ // Since we need to be able to access by dex method index, put a one byte empty quicken info
+ // for any method that isn't quickened.
+ QuickenInfoTable::Builder empty_info(&empty_quicken_info, /*num_elements*/ 0u);
+ CHECK(!empty_quicken_info.empty());
+ }
+ for (const DexFile* dex_file : dex_files) {
+ std::vector<uint32_t>* const offsets =
+ &quicken_info_offset_indices_.Put(dex_file, std::vector<uint32_t>())->second;
+
+ // Every method needs an index in the table.
+ for (uint32_t method_idx = 0; method_idx < dex_file->NumMethodIds(); ++method_idx) {
+ ArrayRef<const uint8_t> map(empty_quicken_info);
+
+ // Use the existing quicken info if it exists.
+ MethodReference method_ref(dex_file, method_idx);
+ CompiledMethod* compiled_method = writer_->compiler_driver_->GetCompiledMethod(method_ref);
+ if (compiled_method != nullptr && HasQuickeningInfo(compiled_method)) {
+ map = compiled_method->GetVmapTable();
+ }
- if (HasQuickeningInfo(compiled_method)) {
- ArrayRef<const uint8_t> map = compiled_method->GetVmapTable();
- // Deduplication is already done on a pointer basis by the compiler driver,
- // so we can simply compare the pointers to find out if things are duplicated.
- if (offset_map_->find(map.data()) == offset_map_->end()) {
- uint32_t length = map.size() * sizeof(map.front());
- offset_map_->Put(map.data(), written_bytes_);
- if (!out_->WriteFully(&length, sizeof(length)) ||
- !out_->WriteFully(map.data(), length)) {
- PLOG(ERROR) << "Failed to write quickening info for "
- << dex_file_->PrettyMethod(it.GetMemberIndex()) << " to "
- << out_->GetLocation();
+ // The current approach prevents deduplication of quicken infos since each method index
+ // has one unique quicken info. Deduplication does not provide much savings for dex indices
+ // since they are rarely duplicated.
+ const uint32_t length = map.size() * sizeof(map.front());
+
+ // Record each index if required. written_bytes_ is the offset from the start of the
+ // quicken info data.
+ if (QuickenInfoOffsetTableAccessor::IsCoveredIndex(method_idx)) {
+ offsets->push_back(written_bytes_);
+ }
+
+ if (!out_->WriteFully(map.data(), length)) {
+ PLOG(ERROR) << "Failed to write quickening info for " << method_ref.PrettyMethod()
+ << " to " << out_->GetLocation();
return false;
}
- written_bytes_ += sizeof(length) + length;
- offset_ += sizeof(length) + length;
+ written_bytes_ += length;
}
}
-
return true;
}
@@ -2660,71 +2674,59 @@ class OatWriter::WriteQuickeningInfoMethodVisitor : public DexMethodVisitor {
return written_bytes_;
}
+ SafeMap<const DexFile*, std::vector<uint32_t>>& GetQuickenInfoOffsetIndicies() {
+ return quicken_info_offset_indices_;
+ }
+
+
private:
+ OatWriter* const writer_;
OutputStream* const out_;
- size_t written_bytes_;
- // Maps quickening map to its offset in the file.
- SafeMap<const uint8_t*, uint32_t>* offset_map_;
+ size_t written_bytes_ = 0u;
+ // Map of offsets for quicken info related to method indices.
+ SafeMap<const DexFile*, std::vector<uint32_t>> quicken_info_offset_indices_;
};
-class OatWriter::WriteQuickeningIndicesMethodVisitor {
+class OatWriter::WriteQuickeningInfoOffsetsMethodVisitor {
public:
- WriteQuickeningIndicesMethodVisitor(OutputStream* out,
- uint32_t quickening_info_bytes,
- const SafeMap<const uint8_t*, uint32_t>& offset_map)
+ WriteQuickeningInfoOffsetsMethodVisitor(
+ OutputStream* out,
+ uint32_t start_offset,
+ SafeMap<const DexFile*, std::vector<uint32_t>>* quicken_info_offset_indices,
+ std::vector<uint32_t>* out_table_offsets)
: out_(out),
- quickening_info_bytes_(quickening_info_bytes),
- written_bytes_(0u),
- offset_map_(offset_map) {}
+ start_offset_(start_offset),
+ quicken_info_offset_indices_(quicken_info_offset_indices),
+ out_table_offsets_(out_table_offsets) {}
- bool VisitDexMethods(const std::vector<const DexFile*>& dex_files, const CompilerDriver& driver) {
+ bool VisitDexMethods(const std::vector<const DexFile*>& dex_files) {
for (const DexFile* dex_file : dex_files) {
- const size_t class_def_count = dex_file->NumClassDefs();
- for (size_t class_def_index = 0; class_def_index != class_def_count; ++class_def_index) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
- const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data == nullptr) {
- continue;
- }
- for (ClassDataItemIterator class_it(*dex_file, class_data);
- class_it.HasNext();
- class_it.Next()) {
- if (!class_it.IsAtMethod() || class_it.GetMethodCodeItem() == nullptr) {
- continue;
- }
- uint32_t method_idx = class_it.GetMemberIndex();
- CompiledMethod* compiled_method =
- driver.GetCompiledMethod(MethodReference(dex_file, method_idx));
- const DexFile::CodeItem* code_item = class_it.GetMethodCodeItem();
- CodeItemDebugInfoAccessor accessor(dex_file, code_item);
- const uint32_t existing_debug_info_offset = accessor.DebugInfoOffset();
- // If the existing offset is already out of bounds (and not magic marker 0xFFFFFFFF)
- // we will pretend the method has been quickened.
- bool existing_offset_out_of_bounds =
- (existing_debug_info_offset >= dex_file->Size() &&
- existing_debug_info_offset != 0xFFFFFFFF);
- bool has_quickening_info = HasQuickeningInfo(compiled_method);
- if (has_quickening_info || existing_offset_out_of_bounds) {
- uint32_t new_debug_info_offset =
- dex_file->Size() + quickening_info_bytes_ + written_bytes_;
- // Abort if overflow.
- CHECK_GE(new_debug_info_offset, dex_file->Size());
- const_cast<DexFile::CodeItem*>(code_item)->SetDebugInfoOffset(new_debug_info_offset);
- uint32_t quickening_offset = has_quickening_info
- ? offset_map_.Get(compiled_method->GetVmapTable().data())
- : VdexFile::kNoQuickeningInfoOffset;
- if (!out_->WriteFully(&existing_debug_info_offset,
- sizeof(existing_debug_info_offset)) ||
- !out_->WriteFully(&quickening_offset, sizeof(quickening_offset))) {
- PLOG(ERROR) << "Failed to write quickening info for "
- << dex_file->PrettyMethod(method_idx) << " to "
- << out_->GetLocation();
- return false;
- }
- written_bytes_ += sizeof(existing_debug_info_offset) + sizeof(quickening_offset);
- }
- }
+ auto it = quicken_info_offset_indices_->find(dex_file);
+ DCHECK(it != quicken_info_offset_indices_->end()) << "Failed to find dex file "
+ << dex_file->GetLocation();
+ const std::vector<uint32_t>* const offsets = &it->second;
+
+ const uint32_t current_offset = start_offset_ + written_bytes_;
+ CHECK_ALIGNED_PARAM(current_offset, QuickenInfoOffsetTableAccessor::Alignment());
+
+ // Generate and write the data.
+ std::vector<uint8_t> table_data;
+ QuickenInfoOffsetTableAccessor::Builder builder(&table_data);
+ for (uint32_t offset : *offsets) {
+ builder.AddOffset(offset);
}
+
+ // Record this table's offset; the table offsets are written out after each dex file.
+ // Table offsets are relative
+ out_table_offsets_->push_back(current_offset);
+
+ const uint32_t length = table_data.size() * sizeof(table_data.front());
+ if (!out_->WriteFully(table_data.data(), length)) {
+ PLOG(ERROR) << "Failed to write quickening offset table for " << dex_file->GetLocation()
+ << " to " << out_->GetLocation();
+ return false;
+ }
+ written_bytes_ += length;
}
return true;
}
@@ -2735,14 +2737,16 @@ class OatWriter::WriteQuickeningIndicesMethodVisitor {
private:
OutputStream* const out_;
- const uint32_t quickening_info_bytes_;
- size_t written_bytes_;
- // Maps quickening map to its offset in the file.
- const SafeMap<const uint8_t*, uint32_t>& offset_map_;
+ const uint32_t start_offset_;
+ size_t written_bytes_ = 0u;
+ // Maps containing the offsets for the tables.
+ SafeMap<const DexFile*, std::vector<uint32_t>>* const quicken_info_offset_indices_;
+ std::vector<uint32_t>* const out_table_offsets_;
};
bool OatWriter::WriteQuickeningInfo(OutputStream* vdex_out) {
size_t initial_offset = vdex_size_;
+ // Make sure the table is properly aligned.
size_t start_offset = RoundUp(initial_offset, 4u);
off_t actual_offset = vdex_out->Seek(start_offset, kSeekSet);
@@ -2753,36 +2757,71 @@ bool OatWriter::WriteQuickeningInfo(OutputStream* vdex_out) {
return false;
}
- if (compiler_driver_->GetCompilerOptions().IsAnyCompilationEnabled()) {
+ size_t current_offset = start_offset;
+ if (compiler_driver_->GetCompilerOptions().IsQuickeningCompilationEnabled()) {
std::vector<uint32_t> dex_files_indices;
- SafeMap<const uint8_t*, uint32_t> offset_map;
- WriteQuickeningInfoMethodVisitor visitor1(this, vdex_out, start_offset, &offset_map);
- if (!VisitDexMethods(&visitor1)) {
+ WriteQuickeningInfoMethodVisitor write_quicken_info_visitor(this, vdex_out);
+ if (!write_quicken_info_visitor.VisitDexMethods(*dex_files_)) {
PLOG(ERROR) << "Failed to write the vdex quickening info. File: " << vdex_out->GetLocation();
return false;
}
- if (visitor1.GetNumberOfWrittenBytes() > 0) {
- WriteQuickeningIndicesMethodVisitor visitor2(vdex_out,
- visitor1.GetNumberOfWrittenBytes(),
- offset_map);
- if (!visitor2.VisitDexMethods(*dex_files_, *compiler_driver_)) {
- PLOG(ERROR) << "Failed to write the vdex quickening info. File: "
- << vdex_out->GetLocation();
+ uint32_t quicken_info_offset = write_quicken_info_visitor.GetNumberOfWrittenBytes();
+ current_offset = current_offset + quicken_info_offset;
+ uint32_t before_offset = current_offset;
+ current_offset = RoundUp(current_offset, QuickenInfoOffsetTableAccessor::Alignment());
+ const size_t extra_bytes = current_offset - before_offset;
+ quicken_info_offset += extra_bytes;
+ actual_offset = vdex_out->Seek(current_offset, kSeekSet);
+ if (actual_offset != static_cast<off_t>(current_offset)) {
+ PLOG(ERROR) << "Failed to seek to quickening offset table section. Actual: " << actual_offset
+ << " Expected: " << current_offset
+ << " Output: " << vdex_out->GetLocation();
+ return false;
+ }
+
+ std::vector<uint32_t> table_offsets;
+ WriteQuickeningInfoOffsetsMethodVisitor table_visitor(
+ vdex_out,
+ quicken_info_offset,
+ &write_quicken_info_visitor.GetQuickenInfoOffsetIndicies(),
+ /*out*/ &table_offsets);
+ if (!table_visitor.VisitDexMethods(*dex_files_)) {
+ PLOG(ERROR) << "Failed to write the vdex quickening info. File: "
+ << vdex_out->GetLocation();
+ return false;
+ }
+
+ CHECK_EQ(table_offsets.size(), dex_files_->size());
+
+ current_offset += table_visitor.GetNumberOfWrittenBytes();
+
+ // Store the offset table offset as a preheader for each dex.
+ size_t index = 0;
+ for (const OatDexFile& oat_dex_file : oat_dex_files_) {
+ const off_t desired_offset = oat_dex_file.dex_file_offset_ -
+ sizeof(VdexFile::QuickeningTableOffsetType);
+ actual_offset = vdex_out->Seek(desired_offset, kSeekSet);
+ if (actual_offset != desired_offset) {
+ PLOG(ERROR) << "Failed to seek to before dex file for writing offset table offset: "
+ << actual_offset << " Expected: " << desired_offset
+ << " Output: " << vdex_out->GetLocation();
return false;
}
-
- if (!vdex_out->Flush()) {
- PLOG(ERROR) << "Failed to flush stream after writing quickening info."
+ uint32_t offset = table_offsets[index];
+ if (!vdex_out->WriteFully(reinterpret_cast<const uint8_t*>(&offset), sizeof(offset))) {
+ PLOG(ERROR) << "Failed to write verifier deps."
<< " File: " << vdex_out->GetLocation();
return false;
}
- size_quickening_info_ = visitor1.GetNumberOfWrittenBytes() +
- visitor2.GetNumberOfWrittenBytes();
- } else {
- // We know we did not quicken.
- size_quickening_info_ = 0;
+ ++index;
}
+ if (!vdex_out->Flush()) {
+ PLOG(ERROR) << "Failed to flush stream after writing quickening info."
+ << " File: " << vdex_out->GetLocation();
+ return false;
+ }
+ size_quickening_info_ = current_offset - start_offset;
} else {
// We know we did not quicken.
size_quickening_info_ = 0;
@@ -3323,9 +3362,10 @@ bool OatWriter::WriteDexFile(OutputStream* out,
return false;
}
// update_input_vdex disables compact dex and layout.
- if (!update_input_vdex && (profile_compilation_info_ != nullptr ||
- compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone)) {
- CHECK(!update_input_vdex) << "We should never update the input vdex when doing dexlayout";
+ if (profile_compilation_info_ != nullptr ||
+ compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone) {
+ CHECK(!update_input_vdex)
+ << "We should never update the input vdex when doing dexlayout or compact dex";
if (!LayoutAndWriteDexFile(out, oat_dex_file)) {
return false;
}
@@ -3357,9 +3397,15 @@ bool OatWriter::SeekToDexFile(OutputStream* out, File* file, OatDexFile* oat_dex
// Dex files are required to be 4 byte aligned.
size_t initial_offset = vdex_size_;
size_t start_offset = RoundUp(initial_offset, 4);
- size_t file_offset = start_offset;
size_dex_file_alignment_ += start_offset - initial_offset;
+ // Leave extra room for the quicken offset table offset.
+ start_offset += sizeof(VdexFile::QuickeningTableOffsetType);
+ // TODO: Do not count the offset as part of alignment.
+ size_dex_file_alignment_ += sizeof(VdexFile::QuickeningTableOffsetType);
+
+ size_t file_offset = start_offset;
+
// Seek to the start of the dex file and flush any pending operations in the stream.
// Verify that, after flushing the stream, the file is at the same offset as the stream.
off_t actual_offset = out->Seek(file_offset, kSeekSet);
@@ -3388,10 +3434,16 @@ bool OatWriter::SeekToDexFile(OutputStream* out, File* file, OatDexFile* oat_dex
}
bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_file) {
+ // Open dex files and write them into `out`.
+ // Note that we only verify dex files which do not belong to the boot class path.
+ // This is because those have been processed by `hiddenapi` and would not pass
+ // some of the checks. No guarantees are lost, however, as `hiddenapi` verifies
+ // the dex files prior to processing.
TimingLogger::ScopedTiming split("Dex Layout", timings_);
std::string error_msg;
std::string location(oat_dex_file->GetLocation());
std::unique_ptr<const DexFile> dex_file;
+ const ArtDexFileLoader dex_file_loader;
if (oat_dex_file->source_.IsZipEntry()) {
ZipEntry* zip_entry = oat_dex_file->source_.GetZipEntry();
std::unique_ptr<MemMap> mem_map(
@@ -3400,12 +3452,12 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
LOG(ERROR) << "Failed to extract dex file to mem map for layout: " << error_msg;
return false;
}
- dex_file = DexFileLoader::Open(location,
- zip_entry->GetCrc32(),
- std::move(mem_map),
- /* verify */ true,
- /* verify_checksum */ true,
- &error_msg);
+ dex_file = dex_file_loader.Open(location,
+ zip_entry->GetCrc32(),
+ std::move(mem_map),
+ /* verify */ !compiling_boot_image_,
+ /* verify_checksum */ true,
+ &error_msg);
} else if (oat_dex_file->source_.IsRawFile()) {
File* raw_file = oat_dex_file->source_.GetRawFile();
int dup_fd = dup(raw_file->Fd());
@@ -3413,8 +3465,11 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
PLOG(ERROR) << "Failed to dup dex file descriptor (" << raw_file->Fd() << ") at " << location;
return false;
}
- dex_file = DexFileLoader::OpenDex(
- dup_fd, location, /* verify */ true, /* verify_checksum */ true, &error_msg);
+ dex_file = dex_file_loader.OpenDex(dup_fd, location,
+ /* verify */ !compiling_boot_image_,
+ /* verify_checksum */ true,
+ /* mmap_shared */ false,
+ &error_msg);
} else {
// The source data is a vdex file.
CHECK(oat_dex_file->source_.IsRawData())
@@ -3426,14 +3481,14 @@ bool OatWriter::LayoutAndWriteDexFile(OutputStream* out, OatDexFile* oat_dex_fil
DCHECK(ValidateDexFileHeader(raw_dex_file, oat_dex_file->GetLocation()));
const UnalignedDexFileHeader* header = AsUnalignedDexFileHeader(raw_dex_file);
// Since the source may have had its layout changed, or may be quickened, don't verify it.
- dex_file = DexFileLoader::Open(raw_dex_file,
- header->file_size_,
- location,
- oat_dex_file->dex_file_location_checksum_,
- nullptr,
- /* verify */ false,
- /* verify_checksum */ false,
- &error_msg);
+ dex_file = dex_file_loader.Open(raw_dex_file,
+ header->file_size_,
+ location,
+ oat_dex_file->dex_file_location_checksum_,
+ nullptr,
+ /* verify */ false,
+ /* verify_checksum */ false,
+ &error_msg);
}
if (dex_file == nullptr) {
LOG(ERROR) << "Failed to open dex file for layout: " << error_msg;
@@ -3653,6 +3708,7 @@ bool OatWriter::OpenDexFiles(
<< " error: " << error_msg;
return false;
}
+ const ArtDexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files;
for (OatDexFile& oat_dex_file : oat_dex_files_) {
const uint8_t* raw_dex_file =
@@ -3674,14 +3730,14 @@ bool OatWriter::OpenDexFiles(
}
// Now, open the dex file.
- dex_files.emplace_back(DexFileLoader::Open(raw_dex_file,
- oat_dex_file.dex_file_size_,
- oat_dex_file.GetLocation(),
- oat_dex_file.dex_file_location_checksum_,
- /* oat_dex_file */ nullptr,
- verify,
- verify,
- &error_msg));
+ dex_files.emplace_back(dex_file_loader.Open(raw_dex_file,
+ oat_dex_file.dex_file_size_,
+ oat_dex_file.GetLocation(),
+ oat_dex_file.dex_file_location_checksum_,
+ /* oat_dex_file */ nullptr,
+ verify,
+ verify,
+ &error_msg));
if (dex_files.back() == nullptr) {
LOG(ERROR) << "Failed to open dex file from oat file. File: " << oat_dex_file.GetLocation()
<< " Error: " << error_msg;
@@ -3689,7 +3745,7 @@ bool OatWriter::OpenDexFiles(
}
// Set the class_offsets size now that we have easy access to the DexFile and
- // it has been verified in DexFileLoader::Open.
+ // it has been verified in dex_file_loader.Open.
oat_dex_file.class_offsets_.resize(dex_files.back()->GetHeader().class_defs_size_);
}
@@ -4160,5 +4216,19 @@ const uint8_t* OatWriter::LookupBootImageClassTableSlot(const DexFile& dex_file,
UNREACHABLE();
}
+debug::DebugInfo OatWriter::GetDebugInfo() const {
+ debug::DebugInfo debug_info{};
+ debug_info.compiled_methods = ArrayRef<const debug::MethodDebugInfo>(method_info_);
+ if (dex_files_ != nullptr) {
+ DCHECK_EQ(dex_files_->size(), oat_dex_files_.size());
+ for (size_t i = 0, size = dex_files_->size(); i != size; ++i) {
+ const DexFile* dex_file = (*dex_files_)[i];
+ const OatDexFile& oat_dex_file = oat_dex_files_[i];
+ debug_info.dex_files.emplace(oat_dex_file.dex_file_offset_, dex_file);
+ }
+ }
+ return debug_info;
+}
+
} // namespace linker
} // namespace art
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index ba29e3b3a2..ac96bb8d56 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -25,6 +25,7 @@
#include "base/array_ref.h"
#include "base/dchecked_vector.h"
#include "dex/compact_dex_level.h"
+#include "debug/debug_info.h"
#include "linker/relative_patcher.h" // For RelativePatcherTargetProvider.
#include "mem_map.h"
#include "method_reference.h"
@@ -230,15 +231,17 @@ class OatWriter {
return bss_roots_offset_;
}
+ size_t GetVdexSize() const {
+ return vdex_size_;
+ }
+
size_t GetOatDataOffset() const {
return oat_data_offset_;
}
~OatWriter();
- ArrayRef<const debug::MethodDebugInfo> GetMethodDebugInfo() const {
- return ArrayRef<const debug::MethodDebugInfo>(method_info_);
- }
+ debug::DebugInfo GetDebugInfo() const;
const CompilerDriver* GetCompilerDriver() const {
return compiler_driver_;
@@ -271,7 +274,7 @@ class OatWriter {
class WriteMapMethodVisitor;
class WriteMethodInfoVisitor;
class WriteQuickeningInfoMethodVisitor;
- class WriteQuickeningIndicesMethodVisitor;
+ class WriteQuickeningInfoOffsetsMethodVisitor;
// Visit all the methods in all the compiled dex files in their definition order
// with a given DexMethodVisitor.
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 488806092b..d5a87837f6 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -219,7 +219,8 @@ class OatTest : public CommonCompilerTest {
text_size,
oat_writer.GetBssSize(),
oat_writer.GetBssMethodsOffset(),
- oat_writer.GetBssRootsOffset());
+ oat_writer.GetBssRootsOffset(),
+ oat_writer.GetVdexSize());
std::unique_ptr<BufferedOutputStream> vdex_out =
std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(vdex_file));
@@ -249,7 +250,7 @@ class OatTest : public CommonCompilerTest {
}
elf_writer->WriteDynamicSection();
- elf_writer->WriteDebugInfo(oat_writer.GetMethodDebugInfo());
+ elf_writer->WriteDebugInfo(oat_writer.GetDebugInfo());
if (!elf_writer->End()) {
return false;
@@ -484,7 +485,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
EXPECT_EQ(76U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
- EXPECT_EQ(161 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+ EXPECT_EQ(162 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
sizeof(QuickEntryPoints));
}
@@ -658,7 +659,11 @@ void OatTest::TestDexFileInput(bool verify, bool low_4gb, bool use_profile) {
ASSERT_EQ(dex_file2_data->GetLocation(), opened_dex_file2->GetLocation());
const VdexFile::Header &vdex_header = opened_oat_file->GetVdexFile()->GetHeader();
- ASSERT_EQ(vdex_header.GetQuickeningInfoSize(), 0u);
+ if (!compiler_driver_->GetCompilerOptions().IsQuickeningCompilationEnabled()) {
+ // Only expect an empty quickening table when quickening is disabled; when it is enabled we
+ // always write the table since there is no special logic detecting that no method was quickened
+ ASSERT_EQ(vdex_header.GetQuickeningInfoSize(), 0u);
+ }
int64_t actual_vdex_size = vdex_file.GetFile()->GetLength();
ASSERT_GE(actual_vdex_size, 0);
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 8a06f44628..1518e1d205 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -44,6 +44,7 @@
#include "android-base/stringprintf.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/code_item_accessors-no_art-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_exception_helpers.h"
@@ -736,7 +737,7 @@ static void dumpInterface(const DexFile* pDexFile, const DexFile::TypeItem& pTyp
* Dumps the catches table associated with the code.
*/
static void dumpCatches(const DexFile* pDexFile, const DexFile::CodeItem* pCode) {
- CodeItemDataAccessor accessor(pDexFile, pCode);
+ CodeItemDataAccessor accessor(*pDexFile, pCode);
const u4 triesSize = accessor.TriesSize();
// No catch table.
@@ -951,7 +952,7 @@ static void dumpInstruction(const DexFile* pDexFile,
fprintf(gOutFile, "%06x:", codeOffset + 0x10 + insnIdx * 2);
// Dump (part of) raw bytes.
- CodeItemInstructionAccessor accessor(pDexFile, pCode);
+ CodeItemInstructionAccessor accessor(*pDexFile, pCode);
for (u4 i = 0; i < 8; i++) {
if (i < insnWidth) {
if (i == 7) {
@@ -1169,7 +1170,7 @@ static void dumpBytecodes(const DexFile* pDexFile, u4 idx,
codeOffset, codeOffset, dot.get(), name, signature.ToString().c_str());
// Iterate over all instructions.
- CodeItemDataAccessor accessor(pDexFile, pCode);
+ CodeItemDataAccessor accessor(*pDexFile, pCode);
for (const DexInstructionPcPair& pair : accessor) {
const Instruction* instruction = &pair.Inst();
const u4 insnWidth = instruction->SizeInCodeUnits();
@@ -1186,7 +1187,7 @@ static void dumpBytecodes(const DexFile* pDexFile, u4 idx,
*/
static void dumpCode(const DexFile* pDexFile, u4 idx, u4 flags,
const DexFile::CodeItem* pCode, u4 codeOffset) {
- CodeItemDebugInfoAccessor accessor(pDexFile, pCode, pDexFile->GetDebugInfoOffset(pCode));
+ CodeItemDebugInfoAccessor accessor(*pDexFile, pCode, idx);
fprintf(gOutFile, " registers : %d\n", accessor.RegistersSize());
fprintf(gOutFile, " ins : %d\n", accessor.InsSize());
@@ -1879,8 +1880,10 @@ int processFile(const char* fileName) {
// all of which are Zip archives with "classes.dex" inside.
const bool kVerifyChecksum = !gOptions.ignoreBadChecksum;
std::string error_msg;
+ // TODO: Use DexFileLoader when that is implemented.
+ const ArtDexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(
+ if (!dex_file_loader.Open(
fileName, fileName, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
diff --git a/dexdump/dexdump_cfg.cc b/dexdump/dexdump_cfg.cc
index f08ea746d3..0e313572bc 100644
--- a/dexdump/dexdump_cfg.cc
+++ b/dexdump/dexdump_cfg.cc
@@ -39,7 +39,7 @@ static void dumpMethodCFGImpl(const DexFile* dex_file,
os << "digraph {\n";
os << " # /* " << dex_file->PrettyMethod(dex_method_idx, true) << " */\n";
- CodeItemDataAccessor accessor(dex_file, code_item);
+ CodeItemDataAccessor accessor(*dex_file, code_item);
std::set<uint32_t> dex_pc_is_branch_target;
{
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index a02f75ad00..23ad5fd17e 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -34,6 +34,12 @@ art_cc_library {
name: "libart-dexlayout",
defaults: ["libart-dexlayout-defaults"],
shared_libs: ["libart"],
+
+ pgo: {
+ instrumentation: true,
+ profile_file: "art/dex2oat.profdata",
+ benchmarks: ["dex2oat"],
+ }
}
art_cc_library {
diff --git a/dexlayout/compact_dex_writer.cc b/dexlayout/compact_dex_writer.cc
index 1c5b16d84b..dd1eee7c59 100644
--- a/dexlayout/compact_dex_writer.cc
+++ b/dexlayout/compact_dex_writer.cc
@@ -16,10 +16,213 @@
#include "compact_dex_writer.h"
+#include "base/logging.h"
+#include "base/time_utils.h"
+#include "dex/compact_dex_debug_info.h"
#include "dex/compact_dex_file.h"
+#include "dexlayout.h"
namespace art {
+CompactDexWriter::CompactDexWriter(dex_ir::Header* header,
+ MemMap* mem_map,
+ DexLayout* dex_layout,
+ CompactDexLevel compact_dex_level)
+ : DexWriter(header, mem_map, dex_layout, /*compute_offsets*/ true),
+ compact_dex_level_(compact_dex_level),
+ data_dedupe_(/*bucket_count*/ 32,
+ HashedMemoryRange::HashEqual(mem_map->Begin()),
+ HashedMemoryRange::HashEqual(mem_map->Begin())) {
+ CHECK(compact_dex_level_ != CompactDexLevel::kCompactDexLevelNone);
+}
+
+uint32_t CompactDexWriter::WriteDebugInfoOffsetTable(uint32_t offset) {
+ const uint32_t start_offset = offset;
+ const dex_ir::Collections& collections = header_->GetCollections();
+ // Debug offsets for method indexes. 0 means no debug info.
+ std::vector<uint32_t> debug_info_offsets(collections.MethodIdsSize(), 0u);
+
+ static constexpr InvokeType invoke_types[] = {
+ kDirect,
+ kVirtual
+ };
+
+ for (InvokeType invoke_type : invoke_types) {
+ for (const std::unique_ptr<dex_ir::ClassDef>& class_def : collections.ClassDefs()) {
+ // Skip classes that are not defined in this dex file.
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ if (class_data == nullptr) {
+ continue;
+ }
+ for (auto& method : *(invoke_type == InvokeType::kDirect
+ ? class_data->DirectMethods()
+ : class_data->VirtualMethods())) {
+ const dex_ir::MethodId* method_id = method->GetMethodId();
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr && code_item->DebugInfo() != nullptr) {
+ const uint32_t debug_info_offset = code_item->DebugInfo()->GetOffset();
+ const uint32_t method_idx = method_id->GetIndex();
+ if (debug_info_offsets[method_idx] != 0u) {
+ CHECK_EQ(debug_info_offset, debug_info_offsets[method_idx]);
+ }
+ debug_info_offsets[method_idx] = debug_info_offset;
+ }
+ }
+ }
+ }
+
+ std::vector<uint8_t> data;
+ debug_info_base_ = 0u;
+ debug_info_offsets_table_offset_ = 0u;
+ CompactDexDebugInfoOffsetTable::Build(debug_info_offsets,
+ &data,
+ &debug_info_base_,
+ &debug_info_offsets_table_offset_);
+ // Align the table and write it out.
+ offset = RoundUp(offset, CompactDexDebugInfoOffsetTable::kAlignment);
+ debug_info_offsets_pos_ = offset;
+ offset += Write(data.data(), data.size(), offset);
+
+ // Verify that the whole table decodes as expected and measure average performance.
+ const bool kMeasureAndTestOutput = dex_layout_->GetOptions().verify_output_;
+ if (kMeasureAndTestOutput && !debug_info_offsets.empty()) {
+ uint64_t start_time = NanoTime();
+ CompactDexDebugInfoOffsetTable::Accessor accessor(mem_map_->Begin() + debug_info_offsets_pos_,
+ debug_info_base_,
+ debug_info_offsets_table_offset_);
+
+ for (size_t i = 0; i < debug_info_offsets.size(); ++i) {
+ CHECK_EQ(accessor.GetDebugInfoOffset(i), debug_info_offsets[i]);
+ }
+ uint64_t end_time = NanoTime();
+ VLOG(dex) << "Average lookup time (ns) for debug info offsets: "
+ << (end_time - start_time) / debug_info_offsets.size();
+ }
+
+ return offset - start_offset;
+}
+
+uint32_t CompactDexWriter::WriteCodeItem(dex_ir::CodeItem* code_item,
+ uint32_t offset,
+ bool reserve_only) {
+ DCHECK(code_item != nullptr);
+ DCHECK(!reserve_only) << "Not supported because of deduping.";
+ const uint32_t start_offset = offset;
+
+ // Align to minimum requirements, additional alignment requirements are handled below after we
+ // know the preheader size.
+ offset = RoundUp(offset, CompactDexFile::CodeItem::kAlignment);
+
+ CompactDexFile::CodeItem disk_code_item;
+
+ uint16_t preheader_storage[CompactDexFile::CodeItem::kMaxPreHeaderSize] = {};
+ uint16_t* preheader_end = preheader_storage + CompactDexFile::CodeItem::kMaxPreHeaderSize;
+ const uint16_t* preheader = disk_code_item.Create(
+ code_item->RegistersSize(),
+ code_item->InsSize(),
+ code_item->OutsSize(),
+ code_item->TriesSize(),
+ code_item->InsnsSize(),
+ preheader_end);
+ const size_t preheader_bytes = (preheader_end - preheader) * sizeof(preheader[0]);
+
+ static constexpr size_t kPayloadInstructionRequiredAlignment = 4;
+ const uint32_t current_code_item_start = offset + preheader_bytes;
+ if (!IsAlignedParam(current_code_item_start, kPayloadInstructionRequiredAlignment)) {
+ // If the preheader would leave the code item misaligned, add padding before it when the code
+ // contains payload instructions whose data requires 4-byte alignment.
+ for (const DexInstructionPcPair& instruction : code_item->Instructions()) {
+ const Instruction::Code opcode = instruction->Opcode();
+ // Payload instructions possibly require special alignment for their data.
+ if (opcode == Instruction::FILL_ARRAY_DATA ||
+ opcode == Instruction::PACKED_SWITCH ||
+ opcode == Instruction::SPARSE_SWITCH) {
+ offset += RoundUp(current_code_item_start, kPayloadInstructionRequiredAlignment) -
+ current_code_item_start;
+ break;
+ }
+ }
+ }
+
+ const uint32_t data_start = offset;
+
+ // Write preheader first.
+ offset += Write(reinterpret_cast<const uint8_t*>(preheader), preheader_bytes, offset);
+ // Registered offset is after the preheader.
+ ProcessOffset(&offset, code_item);
+ // Avoid using sizeof so that we don't write the fake instruction array at the end of the code
+ // item.
+ offset += Write(&disk_code_item,
+ OFFSETOF_MEMBER(CompactDexFile::CodeItem, insns_),
+ offset);
+ // Write the instructions.
+ offset += Write(code_item->Insns(), code_item->InsnsSize() * sizeof(uint16_t), offset);
+ // Write the post instruction data.
+ offset += WriteCodeItemPostInstructionData(code_item, offset, reserve_only);
+
+ if (dex_layout_->GetOptions().dedupe_code_items_ && compute_offsets_) {
+ // After having written, try to dedupe the whole code item (excluding padding).
+ uint32_t deduped_offset = DedupeData(data_start, offset, code_item->GetOffset());
+ if (deduped_offset != kDidNotDedupe) {
+ code_item->SetOffset(deduped_offset);
+ // Undo the offset for all that we wrote since we deduped.
+ offset = start_offset;
+ }
+ }
+
+ return offset - start_offset;
+}
+
+uint32_t CompactDexWriter::DedupeData(uint32_t data_start,
+ uint32_t data_end,
+ uint32_t item_offset) {
+ HashedMemoryRange range {data_start, data_end - data_start};
+ auto existing = data_dedupe_.emplace(range, item_offset);
+ if (!existing.second) {
+ // Failed to insert, item already existed in the map.
+ return existing.first->second;
+ }
+ return kDidNotDedupe;
+}
+
+void CompactDexWriter::SortDebugInfosByMethodIndex() {
+ dex_ir::Collections& collections = header_->GetCollections();
+ static constexpr InvokeType invoke_types[] = {
+ kDirect,
+ kVirtual
+ };
+ std::map<const dex_ir::DebugInfoItem*, uint32_t> method_idx_map;
+ for (InvokeType invoke_type : invoke_types) {
+ for (std::unique_ptr<dex_ir::ClassDef>& class_def : collections.ClassDefs()) {
+ // Skip classes that are not defined in this dex file.
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ if (class_data == nullptr) {
+ continue;
+ }
+ for (auto& method : *(invoke_type == InvokeType::kDirect
+ ? class_data->DirectMethods()
+ : class_data->VirtualMethods())) {
+ const dex_ir::MethodId* method_id = method->GetMethodId();
+ dex_ir::CodeItem* code_item = method->GetCodeItem();
+ if (code_item != nullptr && code_item->DebugInfo() != nullptr) {
+ const dex_ir::DebugInfoItem* debug_item = code_item->DebugInfo();
+ method_idx_map.insert(std::make_pair(debug_item, method_id->GetIndex()));
+ }
+ }
+ }
+ }
+ std::sort(collections.DebugInfoItems().begin(),
+ collections.DebugInfoItems().end(),
+ [&](const std::unique_ptr<dex_ir::DebugInfoItem>& a,
+ const std::unique_ptr<dex_ir::DebugInfoItem>& b) {
+ auto it_a = method_idx_map.find(a.get());
+ auto it_b = method_idx_map.find(b.get());
+ uint32_t idx_a = it_a != method_idx_map.end() ? it_a->second : 0u;
+ uint32_t idx_b = it_b != method_idx_map.end() ? it_b->second : 0u;
+ return idx_a < idx_b;
+ });
+}
+
void CompactDexWriter::WriteHeader() {
CompactDexFile::Header header;
CompactDexFile::WriteMagic(&header.magic_[0]);
@@ -49,6 +252,11 @@ void CompactDexWriter::WriteHeader() {
header.class_defs_off_ = collections.ClassDefsOffset();
header.data_size_ = header_->DataSize();
header.data_off_ = header_->DataOffset();
+
+ // Compact dex specific flags.
+ header.debug_info_offsets_pos_ = debug_info_offsets_pos_;
+ header.debug_info_offsets_table_offset_ = debug_info_offsets_table_offset_;
+ header.debug_info_base_ = debug_info_base_;
header.feature_flags_ = 0u;
// In cases where apps are converted to cdex during install, maintain feature flags so that
// the verifier correctly verifies apps that aren't targetting default methods.
@@ -62,4 +270,103 @@ size_t CompactDexWriter::GetHeaderSize() const {
return sizeof(CompactDexFile::Header);
}
+void CompactDexWriter::WriteMemMap() {
+ // Starting offset is right after the header.
+ uint32_t offset = GetHeaderSize();
+
+ dex_ir::Collections& collection = header_->GetCollections();
+
+ // Based on: https://source.android.com/devices/tech/dalvik/dex-format
+ // Since the offsets may not be calculated already, the writing must be done in the correct order.
+ const uint32_t string_ids_offset = offset;
+ offset += WriteStringIds(offset, /*reserve_only*/ true);
+ offset += WriteTypeIds(offset);
+ const uint32_t proto_ids_offset = offset;
+ offset += WriteProtoIds(offset, /*reserve_only*/ true);
+ offset += WriteFieldIds(offset);
+ offset += WriteMethodIds(offset);
+ const uint32_t class_defs_offset = offset;
+ offset += WriteClassDefs(offset, /*reserve_only*/ true);
+ const uint32_t call_site_ids_offset = offset;
+ offset += WriteCallSiteIds(offset, /*reserve_only*/ true);
+ offset += WriteMethodHandles(offset);
+
+ uint32_t data_offset_ = 0u;
+ if (compute_offsets_) {
+ // Data section.
+ offset = RoundUp(offset, kDataSectionAlignment);
+ data_offset_ = offset;
+ }
+
+ // Write code item first to minimize the space required for encoded methods.
+ // For cdex, the code items don't depend on the debug info.
+ offset += WriteCodeItems(offset, /*reserve_only*/ false);
+
+ // Sort the debug infos by method index order, this reduces size by ~0.1% by reducing the size of
+ // the debug info offset table.
+ SortDebugInfosByMethodIndex();
+ offset += WriteDebugInfoItems(offset);
+
+ offset += WriteEncodedArrays(offset);
+ offset += WriteAnnotations(offset);
+ offset += WriteAnnotationSets(offset);
+ offset += WriteAnnotationSetRefs(offset);
+ offset += WriteAnnotationsDirectories(offset);
+ offset += WriteTypeLists(offset);
+ offset += WriteClassDatas(offset);
+ offset += WriteStringDatas(offset);
+
+ // Write delayed id sections that depend on data sections.
+ WriteStringIds(string_ids_offset, /*reserve_only*/ false);
+ WriteProtoIds(proto_ids_offset, /*reserve_only*/ false);
+ WriteClassDefs(class_defs_offset, /*reserve_only*/ false);
+ WriteCallSiteIds(call_site_ids_offset, /*reserve_only*/ false);
+
+ // Write the map list.
+ if (compute_offsets_) {
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeMapList));
+ collection.SetMapListOffset(offset);
+ } else {
+ offset = collection.MapListOffset();
+ }
+ offset += GenerateAndWriteMapItems(offset);
+ offset = RoundUp(offset, kDataSectionAlignment);
+
+ // Map items are included in the data section.
+ if (compute_offsets_) {
+ header_->SetDataSize(offset - data_offset_);
+ if (header_->DataSize() != 0) {
+ // Offset must be zero when the size is zero.
+ header_->SetDataOffset(data_offset_);
+ } else {
+ header_->SetDataOffset(0u);
+ }
+ }
+
+ // Write link data if it exists.
+ const std::vector<uint8_t>& link_data = collection.LinkData();
+ if (link_data.size() > 0) {
+ CHECK_EQ(header_->LinkSize(), static_cast<uint32_t>(link_data.size()));
+ if (compute_offsets_) {
+ header_->SetLinkOffset(offset);
+ }
+ offset += Write(&link_data[0], link_data.size(), header_->LinkOffset());
+ }
+
+ // Write debug info offset table last to make dex file verifier happy.
+ offset += WriteDebugInfoOffsetTable(offset);
+
+ // Write header last.
+ if (compute_offsets_) {
+ header_->SetFileSize(offset);
+ }
+ WriteHeader();
+
+ if (dex_layout_->GetOptions().update_checksum_) {
+ header_->SetChecksum(DexFile::CalculateChecksum(mem_map_->Begin(), offset));
+ // Rewrite the header with the calculated checksum.
+ WriteHeader();
+ }
+}
+
} // namespace art
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index d13333bb18..cb53caebc6 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -19,27 +19,84 @@
#ifndef ART_DEXLAYOUT_COMPACT_DEX_WRITER_H_
#define ART_DEXLAYOUT_COMPACT_DEX_WRITER_H_
+#include <unordered_map>
+
#include "dex_writer.h"
+#include "utils.h"
namespace art {
+class HashedMemoryRange {
+ public:
+ uint32_t offset_;
+ uint32_t length_;
+
+ class HashEqual {
+ public:
+ explicit HashEqual(const uint8_t* data) : data_(data) {}
+
+ // Equal function.
+ bool operator()(const HashedMemoryRange& a, const HashedMemoryRange& b) const {
+ return a.length_ == b.length_ && std::equal(data_ + a.offset_,
+ data_ + a.offset_ + a.length_,
+ data_ + b.offset_);
+ }
+
+ // Hash function.
+ size_t operator()(const HashedMemoryRange& range) const {
+ return HashBytes(data_ + range.offset_, range.length_);
+ }
+
+ private:
+ const uint8_t* data_;
+ };
+};
+
class CompactDexWriter : public DexWriter {
public:
CompactDexWriter(dex_ir::Header* header,
MemMap* mem_map,
DexLayout* dex_layout,
- CompactDexLevel compact_dex_level)
- : DexWriter(header, mem_map, dex_layout, /*compute_offsets*/ true),
- compact_dex_level_(compact_dex_level) {}
+ CompactDexLevel compact_dex_level);
protected:
+ void WriteMemMap() OVERRIDE;
+
void WriteHeader() OVERRIDE;
size_t GetHeaderSize() const OVERRIDE;
- const CompactDexLevel compact_dex_level_;
+ uint32_t WriteDebugInfoOffsetTable(uint32_t offset);
+
+ uint32_t WriteCodeItem(dex_ir::CodeItem* code_item, uint32_t offset, bool reserve_only) OVERRIDE;
+
+ void SortDebugInfosByMethodIndex();
+
+ // Deduplicate a blob of data that has been written to mem_map. The backing storage is the actual
+ // mem_map contents to reduce RAM usage.
+ // Returns the offset of the deduplicated data, or kDidNotDedupe (0) if no deduplication occurred.
+ uint32_t DedupeData(uint32_t data_start, uint32_t data_end, uint32_t item_offset);
private:
+ const CompactDexLevel compact_dex_level_;
+
+ static const uint32_t kDidNotDedupe = 0;
+
+ // Position in the compact dex file for the debug info table data starts.
+ uint32_t debug_info_offsets_pos_ = 0u;
+
+ // Offset into the debug info table data where the lookup table is.
+ uint32_t debug_info_offsets_table_offset_ = 0u;
+
+ // Base offset of where debug info starts in the dex file.
+ uint32_t debug_info_base_ = 0u;
+
+ // Dedupe map.
+ std::unordered_map<HashedMemoryRange,
+ uint32_t,
+ HashedMemoryRange::HashEqual,
+ HashedMemoryRange::HashEqual> data_dedupe_;
+
DISALLOW_COPY_AND_ASSIGN(CompactDexWriter);
};
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
index 8ed3a79542..fb7dff63b7 100644
--- a/dexlayout/dex_ir.cc
+++ b/dexlayout/dex_ir.cc
@@ -565,16 +565,23 @@ ParameterAnnotation* Collections::GenerateParameterAnnotation(
return new ParameterAnnotation(method_id, set_ref_list);
}
-CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
- const DexFile::CodeItem& disk_code_item, uint32_t offset) {
- CodeItemDebugInfoAccessor accessor(&dex_file, &disk_code_item);
- const uint16_t registers_size = accessor.RegistersSize();
- const uint16_t ins_size = accessor.InsSize();
- const uint16_t outs_size = accessor.OutsSize();
- const uint32_t tries_size = accessor.TriesSize();
-
- // TODO: Calculate the size of the debug info.
+CodeItem* Collections::DedupeOrCreateCodeItem(const DexFile& dex_file,
+ const DexFile::CodeItem* disk_code_item,
+ uint32_t offset,
+ uint32_t dex_method_index) {
+ if (disk_code_item == nullptr) {
+ return nullptr;
+ }
+ CodeItemDebugInfoAccessor accessor(dex_file, disk_code_item, dex_method_index);
const uint32_t debug_info_offset = accessor.DebugInfoOffset();
+
+ // Create the offsets pair and dedupe based on it.
+ std::pair<uint32_t, uint32_t> offsets_pair(offset, debug_info_offset);
+ auto existing = code_items_map_.find(offsets_pair);
+ if (existing != code_items_map_.end()) {
+ return existing->second;
+ }
+
const uint8_t* debug_info_stream = dex_file.GetDebugInfoStream(debug_info_offset);
DebugInfoItem* debug_info = nullptr;
if (debug_info_stream != nullptr) {
@@ -594,7 +601,7 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
TryItemVector* tries = nullptr;
CatchHandlerVector* handler_list = nullptr;
- if (tries_size > 0) {
+ if (accessor.TriesSize() > 0) {
tries = new TryItemVector();
handler_list = new CatchHandlerVector();
for (const DexFile::TryItem& disk_try_item : accessor.TryItems()) {
@@ -669,11 +676,25 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file,
}
}
- uint32_t size = dex_file.GetCodeItemSize(disk_code_item);
- CodeItem* code_item = new CodeItem(
- registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries, handler_list);
+ uint32_t size = dex_file.GetCodeItemSize(*disk_code_item);
+ CodeItem* code_item = new CodeItem(accessor.RegistersSize(),
+ accessor.InsSize(),
+ accessor.OutsSize(),
+ debug_info,
+ insns_size,
+ insns,
+ tries,
+ handler_list);
code_item->SetSize(size);
- AddItem(code_items_map_, code_items_, code_item, offset);
+
+ // Add the code item to the map.
+ DCHECK(!code_item->OffsetAssigned());
+ if (eagerly_assign_offsets_) {
+ code_item->SetOffset(offset);
+ }
+ code_items_map_.emplace(offsets_pair, code_item);
+ code_items_.AddItem(code_item);
+
// Add "fixup" references to types, strings, methods, and fields.
// This is temporary, as we will probably want more detailed parsing of the
// instructions here.
@@ -701,14 +722,12 @@ MethodItem* Collections::GenerateMethodItem(const DexFile& dex_file, ClassDataIt
MethodId* method_id = GetMethodId(cdii.GetMemberIndex());
uint32_t access_flags = cdii.GetRawMemberAccessFlags();
const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
- CodeItem* code_item = code_items_map_.GetExistingObject(cdii.GetMethodCodeItemOffset());
- DebugInfoItem* debug_info = nullptr;
- if (disk_code_item != nullptr) {
- if (code_item == nullptr) {
- code_item = CreateCodeItem(dex_file, *disk_code_item, cdii.GetMethodCodeItemOffset());
- }
- debug_info = code_item->DebugInfo();
- }
+ // Temporary hack to prevent incorrectly deduping code items if they have the same offset since
+ // they may have different debug info streams.
+ CodeItem* code_item = DedupeOrCreateCodeItem(dex_file,
+ disk_code_item,
+ cdii.GetMethodCodeItemOffset(),
+ cdii.GetMemberIndex());
return new MethodItem(access_flags, method_id, code_item);
}
@@ -814,16 +833,16 @@ void Collections::CreateMethodHandleItem(const DexFile& dex_file, uint32_t i) {
}
void Collections::SortVectorsByMapOrder() {
- string_datas_map_.SortVectorByMapOrder(string_datas_);
- type_lists_map_.SortVectorByMapOrder(type_lists_);
- encoded_array_items_map_.SortVectorByMapOrder(encoded_array_items_);
- annotation_items_map_.SortVectorByMapOrder(annotation_items_);
- annotation_set_items_map_.SortVectorByMapOrder(annotation_set_items_);
- annotation_set_ref_lists_map_.SortVectorByMapOrder(annotation_set_ref_lists_);
- annotations_directory_items_map_.SortVectorByMapOrder(annotations_directory_items_);
- debug_info_items_map_.SortVectorByMapOrder(debug_info_items_);
- code_items_map_.SortVectorByMapOrder(code_items_);
- class_datas_map_.SortVectorByMapOrder(class_datas_);
+ string_datas_.SortByMapOrder(string_datas_map_.Collection());
+ type_lists_.SortByMapOrder(type_lists_map_.Collection());
+ encoded_array_items_.SortByMapOrder(encoded_array_items_map_.Collection());
+ annotation_items_.SortByMapOrder(annotation_items_map_.Collection());
+ annotation_set_items_.SortByMapOrder(annotation_set_items_map_.Collection());
+ annotation_set_ref_lists_.SortByMapOrder(annotation_set_ref_lists_map_.Collection());
+ annotations_directory_items_.SortByMapOrder(annotations_directory_items_map_.Collection());
+ debug_info_items_.SortByMapOrder(debug_info_items_map_.Collection());
+ code_items_.SortByMapOrder(code_items_map_);
+ class_datas_.SortByMapOrder(class_datas_map_.Collection());
}
static uint32_t HeaderOffset(const dex_ir::Collections& collections ATTRIBUTE_UNUSED) {
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index 6797fa5dd6..3627717abe 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -133,6 +133,21 @@ template<class T> class CollectionVector : public CollectionBase<T> {
uint32_t Size() const { return collection_.size(); }
Vector& Collection() { return collection_; }
+ const Vector& Collection() const { return collection_; }
+
+ // Sort the vector by copying pointers over.
+ template <typename MapType>
+ void SortByMapOrder(const MapType& map) {
+ auto it = map.begin();
+ CHECK_EQ(map.size(), Size());
+ for (size_t i = 0; i < Size(); ++i) {
+ // There are times when the array will temporarily contain the same pointer twice, doing the
+ // release here makes sure there are no double free errors.
+ Collection()[i].release();
+ Collection()[i].reset(it->second);
+ ++it;
+ }
+ }
protected:
Vector collection_;
@@ -171,22 +186,10 @@ template<class T> class CollectionMap : public CollectionBase<T> {
return it != collection_.end() ? it->second : nullptr;
}
- uint32_t Size() const { return collection_.size(); }
+ // Lower case for template interop with std::map.
+ uint32_t size() const { return collection_.size(); }
std::map<uint32_t, T*>& Collection() { return collection_; }
- // Sort the vector by copying pointers over.
- void SortVectorByMapOrder(CollectionVector<T>& vector) {
- auto it = collection_.begin();
- CHECK_EQ(vector.Size(), Size());
- for (size_t i = 0; i < Size(); ++i) {
- // There are times when the array will temporarily contain the same pointer twice, doing the
- // release here sure there is no double free errors.
- vector.Collection()[i].release();
- vector.Collection()[i].reset(it->second);
- ++it;
- }
- }
-
private:
std::map<uint32_t, T*> collection_;
@@ -230,6 +233,8 @@ class Collections {
CollectionVector<CodeItem>::Vector& CodeItems() { return code_items_.Collection(); }
CollectionVector<ClassData>::Vector& ClassDatas() { return class_datas_.Collection(); }
+ const CollectionVector<ClassDef>::Vector& ClassDefs() const { return class_defs_.Collection(); }
+
void CreateStringId(const DexFile& dex_file, uint32_t i);
void CreateTypeId(const DexFile& dex_file, uint32_t i);
void CreateProtoId(const DexFile& dex_file, uint32_t i);
@@ -251,8 +256,10 @@ class Collections {
const DexFile::AnnotationSetItem* disk_annotations_item, uint32_t offset);
AnnotationsDirectoryItem* CreateAnnotationsDirectoryItem(const DexFile& dex_file,
const DexFile::AnnotationsDirectoryItem* disk_annotations_item, uint32_t offset);
- CodeItem* CreateCodeItem(
- const DexFile& dex_file, const DexFile::CodeItem& disk_code_item, uint32_t offset);
+ CodeItem* DedupeOrCreateCodeItem(const DexFile& dex_file,
+ const DexFile::CodeItem* disk_code_item,
+ uint32_t offset,
+ uint32_t dex_method_index);
ClassData* CreateClassData(const DexFile& dex_file, const uint8_t* encoded_data, uint32_t offset);
void AddAnnotationsFromMapListSection(const DexFile& dex_file,
uint32_t start_offset,
@@ -455,7 +462,10 @@ class Collections {
CollectionMap<AnnotationSetRefList> annotation_set_ref_lists_map_;
CollectionMap<AnnotationsDirectoryItem> annotations_directory_items_map_;
CollectionMap<DebugInfoItem> debug_info_items_map_;
- CollectionMap<CodeItem> code_items_map_;
+ // Code item maps need to check both the code item offset and the debug info offset, do not use
+ // CollectionMap.
+ // First offset is the code item offset, second is the debug info offset.
+ std::map<std::pair<uint32_t, uint32_t>, CodeItem*> code_items_map_;
CollectionMap<ClassData> class_datas_map_;
uint32_t map_list_offset_ = 0;
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index a18a2cfd8a..d26c9481b4 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -30,25 +30,6 @@
namespace art {
-static constexpr uint32_t kDataSectionAlignment = sizeof(uint32_t) * 2;
-static constexpr uint32_t kDexSectionWordAlignment = 4;
-
-static constexpr uint32_t SectionAlignment(DexFile::MapItemType type) {
- switch (type) {
- case DexFile::kDexTypeClassDataItem:
- case DexFile::kDexTypeStringDataItem:
- case DexFile::kDexTypeDebugInfoItem:
- case DexFile::kDexTypeAnnotationItem:
- case DexFile::kDexTypeEncodedArrayItem:
- return alignof(uint8_t);
-
- default:
- // All other sections are kDexAlignedSection.
- return kDexSectionWordAlignment;
- }
-}
-
-
size_t EncodeIntValue(int32_t value, uint8_t* buffer) {
size_t length = 0;
if (value >= 0) {
@@ -526,69 +507,96 @@ uint32_t DexWriter::WriteDebugInfoItems(uint32_t offset) {
return offset - start;
}
+uint32_t DexWriter::WriteCodeItemPostInstructionData(dex_ir::CodeItem* code_item,
+ uint32_t offset,
+ bool reserve_only) {
+ const uint32_t start_offset = offset;
+ if (code_item->TriesSize() != 0) {
+ // Align for the try items.
+ offset = RoundUp(offset, DexFile::TryItem::kAlignment);
+ // Write try items.
+ for (std::unique_ptr<const dex_ir::TryItem>& try_item : *code_item->Tries()) {
+ DexFile::TryItem disk_try_item;
+ if (!reserve_only) {
+ disk_try_item.start_addr_ = try_item->StartAddr();
+ disk_try_item.insn_count_ = try_item->InsnCount();
+ disk_try_item.handler_off_ = try_item->GetHandlers()->GetListOffset();
+ }
+ offset += Write(&disk_try_item, sizeof(disk_try_item), offset);
+ }
+ size_t max_offset = offset;
+ // Leave offset pointing to the end of the try items.
+ UNUSED(WriteUleb128(code_item->Handlers()->size(), offset));
+ for (std::unique_ptr<const dex_ir::CatchHandler>& handlers : *code_item->Handlers()) {
+ size_t list_offset = offset + handlers->GetListOffset();
+ uint32_t size = handlers->HasCatchAll() ? (handlers->GetHandlers()->size() - 1) * -1 :
+ handlers->GetHandlers()->size();
+ list_offset += WriteSleb128(size, list_offset);
+ for (std::unique_ptr<const dex_ir::TypeAddrPair>& handler : *handlers->GetHandlers()) {
+ if (handler->GetTypeId() != nullptr) {
+ list_offset += WriteUleb128(handler->GetTypeId()->GetIndex(), list_offset);
+ }
+ list_offset += WriteUleb128(handler->GetAddress(), list_offset);
+ }
+ // TODO: Clean this up to write the handlers in address order.
+ max_offset = std::max(max_offset, list_offset);
+ }
+ offset = max_offset;
+ }
+
+ return offset - start_offset;
+}
+
+uint32_t DexWriter::WriteCodeItem(dex_ir::CodeItem* code_item,
+ uint32_t offset,
+ bool reserve_only) {
+ DCHECK(code_item != nullptr);
+ const uint32_t start_offset = offset;
+ offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeCodeItem));
+ ProcessOffset(&offset, code_item);
+
+ StandardDexFile::CodeItem disk_code_item;
+ if (!reserve_only) {
+ disk_code_item.registers_size_ = code_item->RegistersSize();
+ disk_code_item.ins_size_ = code_item->InsSize();
+ disk_code_item.outs_size_ = code_item->OutsSize();
+ disk_code_item.tries_size_ = code_item->TriesSize();
+ disk_code_item.debug_info_off_ = code_item->DebugInfo() == nullptr
+ ? 0
+ : code_item->DebugInfo()->GetOffset();
+ disk_code_item.insns_size_in_code_units_ = code_item->InsnsSize();
+ }
+ // Avoid using sizeof so that we don't write the fake instruction array at the end of the code
+ // item.
+ offset += Write(&disk_code_item,
+ OFFSETOF_MEMBER(StandardDexFile::CodeItem, insns_),
+ offset);
+ // Write the instructions.
+ offset += Write(code_item->Insns(), code_item->InsnsSize() * sizeof(uint16_t), offset);
+ // Write the post instruction data.
+ offset += WriteCodeItemPostInstructionData(code_item, offset, reserve_only);
+ return offset - start_offset;
+}
+
uint32_t DexWriter::WriteCodeItems(uint32_t offset, bool reserve_only) {
DexLayoutSection* code_section = nullptr;
if (!reserve_only && dex_layout_ != nullptr) {
code_section = &dex_layout_->GetSections().sections_[static_cast<size_t>(
DexLayoutSections::SectionType::kSectionTypeCode)];
}
- uint16_t uint16_buffer[4] = {};
- uint32_t uint32_buffer[2] = {};
uint32_t start = offset;
for (auto& code_item : header_->GetCollections().CodeItems()) {
- offset = RoundUp(offset, SectionAlignment(DexFile::kDexTypeCodeItem));
- ProcessOffset(&offset, code_item.get());
- if (!reserve_only) {
- uint16_buffer[0] = code_item->RegistersSize();
- uint16_buffer[1] = code_item->InsSize();
- uint16_buffer[2] = code_item->OutsSize();
- uint16_buffer[3] = code_item->TriesSize();
- uint32_buffer[0] = code_item->DebugInfo() == nullptr ? 0 :
- code_item->DebugInfo()->GetOffset();
- uint32_buffer[1] = code_item->InsnsSize();
- // Only add the section hotness info once.
- if (code_section != nullptr) {
- auto it = dex_layout_->LayoutHotnessInfo().code_item_layout_.find(code_item.get());
- if (it != dex_layout_->LayoutHotnessInfo().code_item_layout_.end()) {
- code_section->parts_[static_cast<size_t>(it->second)].CombineSection(
- code_item->GetOffset(), code_item->GetOffset() + code_item->GetSize());
- }
- }
- }
- offset += Write(uint16_buffer, 4 * sizeof(uint16_t), offset);
- offset += Write(uint32_buffer, 2 * sizeof(uint32_t), offset);
- offset += Write(code_item->Insns(), code_item->InsnsSize() * sizeof(uint16_t), offset);
- if (code_item->TriesSize() != 0) {
- if (code_item->InsnsSize() % 2 != 0) {
- uint16_t padding[1] = { 0 };
- offset += Write(padding, sizeof(uint16_t), offset);
- }
- uint32_t start_addr[1];
- uint16_t insn_count_and_handler_off[2];
- for (std::unique_ptr<const dex_ir::TryItem>& try_item : *code_item->Tries()) {
- start_addr[0] = try_item->StartAddr();
- insn_count_and_handler_off[0] = try_item->InsnCount();
- insn_count_and_handler_off[1] = try_item->GetHandlers()->GetListOffset();
- offset += Write(start_addr, sizeof(uint32_t), offset);
- offset += Write(insn_count_and_handler_off, 2 * sizeof(uint16_t), offset);
- }
- // Leave offset pointing to the end of the try items.
- UNUSED(WriteUleb128(code_item->Handlers()->size(), offset));
- for (std::unique_ptr<const dex_ir::CatchHandler>& handlers : *code_item->Handlers()) {
- size_t list_offset = offset + handlers->GetListOffset();
- uint32_t size = handlers->HasCatchAll() ? (handlers->GetHandlers()->size() - 1) * -1 :
- handlers->GetHandlers()->size();
- list_offset += WriteSleb128(size, list_offset);
- for (std::unique_ptr<const dex_ir::TypeAddrPair>& handler : *handlers->GetHandlers()) {
- if (handler->GetTypeId() != nullptr) {
- list_offset += WriteUleb128(handler->GetTypeId()->GetIndex(), list_offset);
- }
- list_offset += WriteUleb128(handler->GetAddress(), list_offset);
- }
+ const size_t code_item_size = WriteCodeItem(code_item.get(), offset, reserve_only);
+ // Only add the section hotness info once.
+ if (!reserve_only && code_section != nullptr) {
+ auto it = dex_layout_->LayoutHotnessInfo().code_item_layout_.find(code_item.get());
+ if (it != dex_layout_->LayoutHotnessInfo().code_item_layout_.end()) {
+ code_section->parts_[static_cast<size_t>(it->second)].CombineSection(
+ offset,
+ offset + code_item_size);
}
}
- // TODO: Clean this up to properly calculate the size instead of assuming it doesn't change.
- offset = code_item->GetOffset() + code_item->GetSize();
+ offset += code_item_size;
}
if (compute_offsets_ && start != offset) {
@@ -774,9 +782,16 @@ uint32_t DexWriter::GenerateAndWriteMapItems(uint32_t offset) {
void DexWriter::WriteHeader() {
StandardDexFile::Header header;
- static constexpr size_t kMagicAndVersionLen =
- StandardDexFile::kDexMagicSize + StandardDexFile::kDexVersionLen;
- std::copy_n(header_->Magic(), kMagicAndVersionLen, header.magic_);
+ if (CompactDexFile::IsMagicValid(header_->Magic())) {
+ StandardDexFile::WriteMagic(header.magic_);
+ // TODO: Should we write older versions based on the feature flags?
+ StandardDexFile::WriteCurrentVersion(header.magic_);
+ } else {
+ // Standard dex -> standard dex, just reuse the same header.
+ static constexpr size_t kMagicAndVersionLen =
+ StandardDexFile::kDexMagicSize + StandardDexFile::kDexVersionLen;
+ std::copy_n(header_->Magic(), kMagicAndVersionLen, header.magic_);
+ }
header.checksum_ = header_->Checksum();
std::copy_n(header_->Signature(), DexFile::kSha1DigestSize, header.signature_);
header.file_size_ = header_->FileSize();
diff --git a/dexlayout/dex_writer.h b/dexlayout/dex_writer.h
index 92a002edc7..892ea7414b 100644
--- a/dexlayout/dex_writer.h
+++ b/dexlayout/dex_writer.h
@@ -23,6 +23,7 @@
#include "base/unix_file/fd_file.h"
#include "dex/compact_dex_level.h"
+#include "dex/dex_file.h"
#include "dex_ir.h"
#include "mem_map.h"
#include "os.h"
@@ -59,6 +60,24 @@ class MapItemQueue : public
class DexWriter {
public:
+ static constexpr uint32_t kDataSectionAlignment = sizeof(uint32_t) * 2;
+ static constexpr uint32_t kDexSectionWordAlignment = 4;
+
+ static inline constexpr uint32_t SectionAlignment(DexFile::MapItemType type) {
+ switch (type) {
+ case DexFile::kDexTypeClassDataItem:
+ case DexFile::kDexTypeStringDataItem:
+ case DexFile::kDexTypeDebugInfoItem:
+ case DexFile::kDexTypeAnnotationItem:
+ case DexFile::kDexTypeEncodedArrayItem:
+ return alignof(uint8_t);
+
+ default:
+ // All other sections are kDexAlignedSection.
+ return DexWriter::kDexSectionWordAlignment;
+ }
+ }
+
DexWriter(dex_ir::Header* header,
MemMap* mem_map,
DexLayout* dex_layout,
@@ -77,7 +96,7 @@ class DexWriter {
virtual ~DexWriter() {}
protected:
- void WriteMemMap();
+ virtual void WriteMemMap();
size_t Write(const void* buffer, size_t length, size_t offset) WARN_UNUSED;
size_t WriteSleb128(uint32_t value, size_t offset) WARN_UNUSED;
@@ -118,6 +137,11 @@ class DexWriter {
uint32_t WriteMapItems(uint32_t offset, MapItemQueue* queue);
uint32_t GenerateAndWriteMapItems(uint32_t offset);
+ virtual uint32_t WriteCodeItemPostInstructionData(dex_ir::CodeItem* item,
+ uint32_t offset,
+ bool reserve_only);
+ virtual uint32_t WriteCodeItem(dex_ir::CodeItem* item, uint32_t offset, bool reserve_only);
+
// Process an offset, if compute_offset is set, write into the dex ir item, otherwise read the
// existing offset and use that for writing.
void ProcessOffset(uint32_t* const offset, dex_ir::Item* item) {
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 47a3e943a5..3d3b121190 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -34,6 +34,7 @@
#include "android-base/stringprintf.h"
#include "base/logging.h" // For VLOG_IS_ON.
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_layout.h"
#include "dex/dex_file_loader.h"
@@ -1819,8 +1820,13 @@ void DexLayout::OutputDexFile(const DexFile* dex_file, bool compute_offsets) {
// Since we allow dex growth, we need to size the map larger than the original input to be safe.
// Reserve an extra 10% to add some buffer room. Note that this is probably more than
// necessary.
- constexpr size_t kReserveFraction = 10;
- const size_t max_size = header_->FileSize() + header_->FileSize() / kReserveFraction;
+ static constexpr size_t kReserveFraction = 10;
+ // Add an extra constant amount since the compact dex header and extra tables may cause more
+ // expansion than fits in the reserve fraction for small dex files.
+ // TODO: Move to using a resizable buffer like a vector.
+ static constexpr size_t kExtraReserve = 128 * KB;
+ const size_t max_size = header_->FileSize() + kExtraReserve +
+ header_->FileSize() / kReserveFraction;
if (!options_.output_to_memmap_) {
std::string output_location(options_.output_dex_directory_);
size_t last_slash = dex_file_location.rfind('/');
@@ -1912,7 +1918,8 @@ void DexLayout::ProcessDexFile(const char* file_name,
if (do_layout) {
LayoutOutputFile(dex_file);
}
- OutputDexFile(dex_file, do_layout);
+ // If we didn't set the offsets eagerly, we definitely need to compute them here.
+ OutputDexFile(dex_file, do_layout || !eagerly_assign_offsets);
// Clear header before verifying to reduce peak RAM usage.
const size_t file_size = header_->FileSize();
@@ -1922,14 +1929,18 @@ void DexLayout::ProcessDexFile(const char* file_name,
if (options_.verify_output_) {
std::string error_msg;
std::string location = "memory mapped file for " + std::string(file_name);
- std::unique_ptr<const DexFile> output_dex_file(DexFileLoader::Open(mem_map_->Begin(),
- file_size,
- location,
- /* checksum */ 0,
- /*oat_dex_file*/ nullptr,
- /*verify*/ true,
- /*verify_checksum*/ false,
- &error_msg));
+ // Dex file verifier cannot handle compact dex.
+ bool verify = options_.compact_dex_level_ == CompactDexLevel::kCompactDexLevelNone;
+ const ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const DexFile> output_dex_file(
+ dex_file_loader.Open(mem_map_->Begin(),
+ file_size,
+ location,
+ /* checksum */ 0,
+ /*oat_dex_file*/ nullptr,
+ verify,
+ /*verify_checksum*/ false,
+ &error_msg));
CHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << error_msg;
// Do IR-level comparison between input and output. This check ignores potential differences
@@ -1960,8 +1971,9 @@ int DexLayout::ProcessFile(const char* file_name) {
// all of which are Zip archives with "classes.dex" inside.
const bool verify_checksum = !options_.ignore_bad_checksum_;
std::string error_msg;
+ const ArtDexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(
+ if (!dex_file_loader.Open(
file_name, file_name, /* verify */ true, verify_checksum, &error_msg, &dex_files)) {
// Display returned error message to user. Note that this error behavior
// differs from the error messages shown by the original Dalvik dexdump.
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index 25afb773bd..cb0eabc7db 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -65,6 +65,8 @@ class Options {
bool visualize_pattern_ = false;
bool update_checksum_ = false;
CompactDexLevel compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
+ // Disabled until dex2oat properly handles quickening of deduped code items.
+ bool dedupe_code_items_ = false;
OutputFormat output_format_ = kOutputPlain;
const char* output_dex_directory_ = nullptr;
const char* output_file_name_ = nullptr;
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 5bb7196531..83fb99a734 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -62,6 +62,7 @@ static void Usage(void) {
fprintf(stderr, " -t : display file section sizes\n");
fprintf(stderr, " -v : verify output file is canonical to input (IR level comparison)\n");
fprintf(stderr, " -w : output dex directory \n");
+ fprintf(stderr, " -x : compact dex generation level, either 'none' or 'fast'\n");
}
/*
@@ -79,7 +80,7 @@ int DexlayoutDriver(int argc, char** argv) {
// Parse all arguments.
while (1) {
- const int ic = getopt(argc, argv, "abcdefghil:mo:p:stvw:");
+ const int ic = getopt(argc, argv, "abcdefghil:mo:p:stvw:x:");
if (ic < 0) {
break; // done
}
@@ -141,6 +142,15 @@ int DexlayoutDriver(int argc, char** argv) {
case 'w': // output dex files directory
options.output_dex_directory_ = optarg;
break;
+ case 'x': // compact dex level
+ if (strcmp(optarg, "none") == 0) {
+ options.compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
+ } else if (strcmp(optarg, "fast") == 0) {
+ options.compact_dex_level_ = CompactDexLevel::kCompactDexLevelFast;
+ } else {
+ want_usage = true;
+ }
+ break;
default:
want_usage = true;
break;
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index b8cff6dc59..3a7f9eeda6 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -23,6 +23,7 @@
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
@@ -318,12 +319,13 @@ class DexLayoutTest : public CommonRuntimeTest {
bool MutateDexFile(File* output_dex, const std::string& input_jar, const Mutator& mutator) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
- CHECK(DexFileLoader::Open(input_jar.c_str(),
- input_jar.c_str(),
- /*verify*/ true,
- /*verify_checksum*/ true,
- &error_msg,
- &dex_files)) << error_msg;
+ const ArtDexFileLoader dex_file_loader;
+ CHECK(dex_file_loader.Open(input_jar.c_str(),
+ input_jar.c_str(),
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error_msg,
+ &dex_files)) << error_msg;
EXPECT_EQ(dex_files.size(), 1u) << "Only one input dex is supported";
for (const std::unique_ptr<const DexFile>& dex : dex_files) {
CHECK(dex->EnableWrite()) << "Failed to enable write";
@@ -344,12 +346,13 @@ class DexLayoutTest : public CommonRuntimeTest {
const std::string& dex_location) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
- bool result = DexFileLoader::Open(input_dex.c_str(),
- input_dex,
- /*verify*/ true,
- /*verify_checksum*/ false,
- &error_msg,
- &dex_files);
+ const ArtDexFileLoader dex_file_loader;
+ bool result = dex_file_loader.Open(input_dex.c_str(),
+ input_dex,
+ /*verify*/ true,
+ /*verify_checksum*/ false,
+ &error_msg,
+ &dex_files);
ASSERT_TRUE(result) << error_msg;
ASSERT_GE(dex_files.size(), 1u);
@@ -699,7 +702,7 @@ TEST_F(DexLayoutTest, CodeItemOverrun) {
while (it.HasNextMethod()) {
DexFile::CodeItem* item = const_cast<DexFile::CodeItem*>(it.GetMethodCodeItem());
if (item != nullptr) {
- CodeItemInstructionAccessor instructions(dex, item);
+ CodeItemInstructionAccessor instructions(*dex, item);
if (instructions.begin() != instructions.end()) {
DexInstructionIterator last_instruction = instructions.begin();
for (auto dex_it = instructions.begin(); dex_it != instructions.end(); ++dex_it) {
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index 348f501ef5..1ced8ca771 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -27,6 +27,7 @@
#include <stdlib.h>
#include "base/logging.h" // For InitLogging.
+#include "dex/art_dex_file_loader.h"
#include "dex/code_item_accessors-no_art-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
@@ -100,7 +101,7 @@ static void dumpMethod(const DexFile* pDexFile,
if (pCode == nullptr || codeOffset == 0) {
return;
}
- CodeItemDebugInfoAccessor accessor(pDexFile, pCode, pDexFile->GetDebugInfoOffset(pCode));
+ CodeItemDebugInfoAccessor accessor(*pDexFile, pCode, idx);
// Method information.
const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(idx);
@@ -174,7 +175,8 @@ static int processFile(const char* fileName) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(
fileName, fileName, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
fputs(error_msg.c_str(), stderr);
fputc('\n', stderr);
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 6a99c5ab2f..fcbf2f1c0a 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -38,6 +38,7 @@
#include "class_linker-inl.h"
#include "class_linker.h"
#include "compiled_method.h"
+#include "debug/debug_info.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/code_item_accessors-inl.h"
@@ -171,14 +172,15 @@ class OatSymbolizer FINAL {
text_size,
oat_file_->BssSize(),
oat_file_->BssMethodsOffset(),
- oat_file_->BssRootsOffset());
+ oat_file_->BssRootsOffset(),
+ oat_file_->VdexSize());
builder_->WriteDynamicSection();
const OatHeader& oat_header = oat_file_->GetOatHeader();
#define DO_TRAMPOLINE(fn_name) \
if (oat_header.Get ## fn_name ## Offset() != 0) { \
debug::MethodDebugInfo info = {}; \
- info.trampoline_name = #fn_name; \
+ info.custom_name = #fn_name; \
info.isa = oat_header.GetInstructionSet(); \
info.is_code_address_text_relative = true; \
size_t code_offset = oat_header.Get ## fn_name ## Offset(); \
@@ -201,8 +203,13 @@ class OatSymbolizer FINAL {
// TODO: Try to symbolize link-time thunks?
// This would require disassembling all methods to find branches outside the method code.
+ // TODO: Add symbols for dex bytecode in the .dex section.
+
+ debug::DebugInfo debug_info{};
+ debug_info.compiled_methods = ArrayRef<const debug::MethodDebugInfo>(method_debug_infos_);
+
debug::WriteDebugInfo(builder_.get(),
- ArrayRef<const debug::MethodDebugInfo>(method_debug_infos_),
+ debug_info,
dwarf::DW_DEBUG_FRAME_FORMAT,
true /* write_oat_patches */);
@@ -301,7 +308,7 @@ class OatSymbolizer FINAL {
const void* code_address = EntryPointToCodePointer(reinterpret_cast<void*>(entry_point));
debug::MethodDebugInfo info = {};
- DCHECK(info.trampoline_name.empty());
+ DCHECK(info.custom_name.empty());
info.dex_file = &dex_file;
info.class_def_index = class_def_index;
info.dex_method_index = dex_method_index;
@@ -717,7 +724,6 @@ class OatDumper {
}
vdex_file->Unquicken(MakeNonOwningPointerVector(tmp_dex_files),
- vdex_file->GetQuickeningInfo(),
/* decompile_return_instruction */ true);
*dex_files = std::move(tmp_dex_files);
@@ -993,7 +999,7 @@ class OatDumper {
if (code_item == nullptr) {
return;
}
- CodeItemInstructionAccessor instructions(&dex_file, code_item);
+ CodeItemInstructionAccessor instructions(dex_file, code_item);
// If we inserted a new dex code item pointer, add to total code bytes.
const uint16_t* code_ptr = instructions.Insns();
@@ -1261,7 +1267,7 @@ class OatDumper {
bool* addr_found) {
bool success = true;
- CodeItemDataAccessor code_item_accessor(&dex_file, code_item);
+ CodeItemDataAccessor code_item_accessor(dex_file, code_item);
// TODO: Support regex
std::string method_name = dex_file.GetMethodName(dex_file.GetMethodId(dex_method_idx));
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index 9ba7068176..0283999d54 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -68,6 +68,7 @@ art_cc_library {
shared_libs: [
"libart",
"libart-compiler",
+ "libart-dexlayout",
],
}
@@ -80,5 +81,6 @@ art_cc_library {
shared_libs: [
"libartd",
"libartd-compiler",
+ "libartd-dexlayout",
],
}
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index aae805569f..027635bbb5 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -83,6 +83,12 @@ DeoptManager gDeoptManager;
} \
} while (false)
+// Returns whether we are able to use all jvmti features.
+static bool IsFullJvmtiAvailable() {
+ art::Runtime* runtime = art::Runtime::Current();
+ return runtime->GetInstrumentation()->IsForcedInterpretOnly() || runtime->IsJavaDebuggable();
+}
+
class JvmtiFunctions {
private:
static jvmtiError getEnvironmentError(jvmtiEnv* env) {
@@ -1092,10 +1098,64 @@ class JvmtiFunctions {
&gEventHandler);
}
+#define FOR_ALL_CAPABILITIES(FUN) \
+ FUN(can_tag_objects) \
+ FUN(can_generate_field_modification_events) \
+ FUN(can_generate_field_access_events) \
+ FUN(can_get_bytecodes) \
+ FUN(can_get_synthetic_attribute) \
+ FUN(can_get_owned_monitor_info) \
+ FUN(can_get_current_contended_monitor) \
+ FUN(can_get_monitor_info) \
+ FUN(can_pop_frame) \
+ FUN(can_redefine_classes) \
+ FUN(can_signal_thread) \
+ FUN(can_get_source_file_name) \
+ FUN(can_get_line_numbers) \
+ FUN(can_get_source_debug_extension) \
+ FUN(can_access_local_variables) \
+ FUN(can_maintain_original_method_order) \
+ FUN(can_generate_single_step_events) \
+ FUN(can_generate_exception_events) \
+ FUN(can_generate_frame_pop_events) \
+ FUN(can_generate_breakpoint_events) \
+ FUN(can_suspend) \
+ FUN(can_redefine_any_class) \
+ FUN(can_get_current_thread_cpu_time) \
+ FUN(can_get_thread_cpu_time) \
+ FUN(can_generate_method_entry_events) \
+ FUN(can_generate_method_exit_events) \
+ FUN(can_generate_all_class_hook_events) \
+ FUN(can_generate_compiled_method_load_events) \
+ FUN(can_generate_monitor_events) \
+ FUN(can_generate_vm_object_alloc_events) \
+ FUN(can_generate_native_method_bind_events) \
+ FUN(can_generate_garbage_collection_events) \
+ FUN(can_generate_object_free_events) \
+ FUN(can_force_early_return) \
+ FUN(can_get_owned_monitor_stack_depth_info) \
+ FUN(can_get_constant_pool) \
+ FUN(can_set_native_method_prefix) \
+ FUN(can_retransform_classes) \
+ FUN(can_retransform_any_class) \
+ FUN(can_generate_resource_exhaustion_heap_events) \
+ FUN(can_generate_resource_exhaustion_threads_events)
+
static jvmtiError GetPotentialCapabilities(jvmtiEnv* env, jvmtiCapabilities* capabilities_ptr) {
ENSURE_VALID_ENV(env);
ENSURE_NON_NULL(capabilities_ptr);
*capabilities_ptr = kPotentialCapabilities;
+ if (UNLIKELY(!IsFullJvmtiAvailable())) {
+#define REMOVE_NONDEBUGGABLE_UNSUPPORTED(e) \
+ do { \
+ if (kNonDebuggableUnsupportedCapabilities.e == 1) { \
+ capabilities_ptr->e = 0; \
+ } \
+ } while (false);
+
+ FOR_ALL_CAPABILITIES(REMOVE_NONDEBUGGABLE_UNSUPPORTED);
+#undef REMOVE_NONDEBUGGABLE_UNSUPPORTED
+ }
return OK;
}
@@ -1122,49 +1182,9 @@ class JvmtiFunctions {
ret = ERR(NOT_AVAILABLE); \
} \
} \
- } while (false)
-
- ADD_CAPABILITY(can_tag_objects);
- ADD_CAPABILITY(can_generate_field_modification_events);
- ADD_CAPABILITY(can_generate_field_access_events);
- ADD_CAPABILITY(can_get_bytecodes);
- ADD_CAPABILITY(can_get_synthetic_attribute);
- ADD_CAPABILITY(can_get_owned_monitor_info);
- ADD_CAPABILITY(can_get_current_contended_monitor);
- ADD_CAPABILITY(can_get_monitor_info);
- ADD_CAPABILITY(can_pop_frame);
- ADD_CAPABILITY(can_redefine_classes);
- ADD_CAPABILITY(can_signal_thread);
- ADD_CAPABILITY(can_get_source_file_name);
- ADD_CAPABILITY(can_get_line_numbers);
- ADD_CAPABILITY(can_get_source_debug_extension);
- ADD_CAPABILITY(can_access_local_variables);
- ADD_CAPABILITY(can_maintain_original_method_order);
- ADD_CAPABILITY(can_generate_single_step_events);
- ADD_CAPABILITY(can_generate_exception_events);
- ADD_CAPABILITY(can_generate_frame_pop_events);
- ADD_CAPABILITY(can_generate_breakpoint_events);
- ADD_CAPABILITY(can_suspend);
- ADD_CAPABILITY(can_redefine_any_class);
- ADD_CAPABILITY(can_get_current_thread_cpu_time);
- ADD_CAPABILITY(can_get_thread_cpu_time);
- ADD_CAPABILITY(can_generate_method_entry_events);
- ADD_CAPABILITY(can_generate_method_exit_events);
- ADD_CAPABILITY(can_generate_all_class_hook_events);
- ADD_CAPABILITY(can_generate_compiled_method_load_events);
- ADD_CAPABILITY(can_generate_monitor_events);
- ADD_CAPABILITY(can_generate_vm_object_alloc_events);
- ADD_CAPABILITY(can_generate_native_method_bind_events);
- ADD_CAPABILITY(can_generate_garbage_collection_events);
- ADD_CAPABILITY(can_generate_object_free_events);
- ADD_CAPABILITY(can_force_early_return);
- ADD_CAPABILITY(can_get_owned_monitor_stack_depth_info);
- ADD_CAPABILITY(can_get_constant_pool);
- ADD_CAPABILITY(can_set_native_method_prefix);
- ADD_CAPABILITY(can_retransform_classes);
- ADD_CAPABILITY(can_retransform_any_class);
- ADD_CAPABILITY(can_generate_resource_exhaustion_heap_events);
- ADD_CAPABILITY(can_generate_resource_exhaustion_threads_events);
+ } while (false);
+
+ FOR_ALL_CAPABILITIES(ADD_CAPABILITY);
#undef ADD_CAPABILITY
gEventHandler.HandleChangedCapabilities(ArtJvmTiEnv::AsArtJvmTiEnv(env),
changed,
@@ -1186,49 +1206,9 @@ class JvmtiFunctions {
changed.e = 1; \
} \
} \
- } while (false)
-
- DEL_CAPABILITY(can_tag_objects);
- DEL_CAPABILITY(can_generate_field_modification_events);
- DEL_CAPABILITY(can_generate_field_access_events);
- DEL_CAPABILITY(can_get_bytecodes);
- DEL_CAPABILITY(can_get_synthetic_attribute);
- DEL_CAPABILITY(can_get_owned_monitor_info);
- DEL_CAPABILITY(can_get_current_contended_monitor);
- DEL_CAPABILITY(can_get_monitor_info);
- DEL_CAPABILITY(can_pop_frame);
- DEL_CAPABILITY(can_redefine_classes);
- DEL_CAPABILITY(can_signal_thread);
- DEL_CAPABILITY(can_get_source_file_name);
- DEL_CAPABILITY(can_get_line_numbers);
- DEL_CAPABILITY(can_get_source_debug_extension);
- DEL_CAPABILITY(can_access_local_variables);
- DEL_CAPABILITY(can_maintain_original_method_order);
- DEL_CAPABILITY(can_generate_single_step_events);
- DEL_CAPABILITY(can_generate_exception_events);
- DEL_CAPABILITY(can_generate_frame_pop_events);
- DEL_CAPABILITY(can_generate_breakpoint_events);
- DEL_CAPABILITY(can_suspend);
- DEL_CAPABILITY(can_redefine_any_class);
- DEL_CAPABILITY(can_get_current_thread_cpu_time);
- DEL_CAPABILITY(can_get_thread_cpu_time);
- DEL_CAPABILITY(can_generate_method_entry_events);
- DEL_CAPABILITY(can_generate_method_exit_events);
- DEL_CAPABILITY(can_generate_all_class_hook_events);
- DEL_CAPABILITY(can_generate_compiled_method_load_events);
- DEL_CAPABILITY(can_generate_monitor_events);
- DEL_CAPABILITY(can_generate_vm_object_alloc_events);
- DEL_CAPABILITY(can_generate_native_method_bind_events);
- DEL_CAPABILITY(can_generate_garbage_collection_events);
- DEL_CAPABILITY(can_generate_object_free_events);
- DEL_CAPABILITY(can_force_early_return);
- DEL_CAPABILITY(can_get_owned_monitor_stack_depth_info);
- DEL_CAPABILITY(can_get_constant_pool);
- DEL_CAPABILITY(can_set_native_method_prefix);
- DEL_CAPABILITY(can_retransform_classes);
- DEL_CAPABILITY(can_retransform_any_class);
- DEL_CAPABILITY(can_generate_resource_exhaustion_heap_events);
- DEL_CAPABILITY(can_generate_resource_exhaustion_threads_events);
+ } while (false);
+
+ FOR_ALL_CAPABILITIES(DEL_CAPABILITY);
#undef DEL_CAPABILITY
gEventHandler.HandleChangedCapabilities(ArtJvmTiEnv::AsArtJvmTiEnv(env),
changed,
@@ -1236,6 +1216,8 @@ class JvmtiFunctions {
return OK;
}
+#undef FOR_ALL_CAPABILITIES
+
static jvmtiError GetCapabilities(jvmtiEnv* env, jvmtiCapabilities* capabilities_ptr) {
ENSURE_VALID_ENV(env);
ENSURE_NON_NULL(capabilities_ptr);
@@ -1341,7 +1323,7 @@ class JvmtiFunctions {
static jvmtiError GetVersionNumber(jvmtiEnv* env, jint* version_ptr) {
ENSURE_VALID_ENV(env);
- *version_ptr = JVMTI_VERSION;
+ *version_ptr = ArtJvmTiEnv::AsArtJvmTiEnv(env)->ti_version;
return OK;
}
@@ -1495,9 +1477,10 @@ static bool IsJvmtiVersion(jint version) {
extern const jvmtiInterface_1 gJvmtiInterface;
-ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler)
+ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler, jint version)
: art_vm(runtime),
local_data(nullptr),
+ ti_version(version),
capabilities(),
event_info_mutex_("jvmtiEnv_EventInfoMutex") {
object_tag_table = std::unique_ptr<ObjectTagTable>(new ObjectTagTable(event_handler, this));
@@ -1506,8 +1489,8 @@ ArtJvmTiEnv::ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler)
// Creates a jvmtiEnv and returns it with the art::ti::Env that is associated with it. new_art_ti
// is a pointer to the uninitialized memory for an art::ti::Env.
-static void CreateArtJvmTiEnv(art::JavaVMExt* vm, /*out*/void** new_jvmtiEnv) {
- struct ArtJvmTiEnv* env = new ArtJvmTiEnv(vm, &gEventHandler);
+static void CreateArtJvmTiEnv(art::JavaVMExt* vm, jint version, /*out*/void** new_jvmtiEnv) {
+ struct ArtJvmTiEnv* env = new ArtJvmTiEnv(vm, &gEventHandler, version);
*new_jvmtiEnv = env;
gEventHandler.RegisterArtJvmTiEnv(env);
@@ -1520,8 +1503,14 @@ static void CreateArtJvmTiEnv(art::JavaVMExt* vm, /*out*/void** new_jvmtiEnv) {
// places the return value in 'env' if this library can handle the GetEnv request. Otherwise
// returns false and does not modify the 'env' pointer.
static jint GetEnvHandler(art::JavaVMExt* vm, /*out*/void** env, jint version) {
- if (IsJvmtiVersion(version)) {
- CreateArtJvmTiEnv(vm, env);
+ // JavaDebuggable will either be set by the runtime as it is starting up or the plugin if it's
+ // loaded early enough. If this is false we cannot guarantee conformance to all JVMTI behaviors
+ // due to optimizations. We will only allow agents to get ArtTiEnvs using the kArtTiVersion.
+ if (IsFullJvmtiAvailable() && IsJvmtiVersion(version)) {
+ CreateArtJvmTiEnv(vm, JVMTI_VERSION, env);
+ return JNI_OK;
+ } else if (version == kArtTiVersion) {
+ CreateArtJvmTiEnv(vm, kArtTiVersion, env);
return JNI_OK;
} else {
printf("version 0x%x is not valid!", version);
@@ -1547,6 +1536,12 @@ extern "C" bool ArtPlugin_Initialize() {
SearchUtil::Register();
HeapUtil::Register();
+ {
+ // Make sure we can deopt anything we need to.
+ art::ScopedObjectAccess soa(art::Thread::Current());
+ gDeoptManager.FinishSetup();
+ }
+
runtime->GetJavaVM()->AddEnvironmentHook(GetEnvHandler);
return true;
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 2a8c2e91df..73cc601e3e 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -62,10 +62,22 @@ namespace openjdkjvmti {
class ObjectTagTable;
+// A special version that we use to identify special tooling interface versions which mostly matches
+// the jvmti spec but everything is best effort. This is used to implement the userdebug
+// 'debug-anything' behavior.
+//
+// This is the value 0x70010200.
+static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
// A structure that is a jvmtiEnv with additional information for the runtime.
struct ArtJvmTiEnv : public jvmtiEnv {
art::JavaVMExt* art_vm;
void* local_data;
+
+ // The ti_version we are compatible with. This is only for giving the correct value for GetVersion
+ // when running on a userdebug/eng device.
+ jint ti_version;
+
jvmtiCapabilities capabilities;
EventMasks event_masks;
@@ -90,7 +102,7 @@ struct ArtJvmTiEnv : public jvmtiEnv {
// RW lock to protect access to all of the event data.
art::ReaderWriterMutex event_info_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler);
+ ArtJvmTiEnv(art::JavaVMExt* runtime, EventHandler* event_handler, jint ti_version);
static ArtJvmTiEnv* AsArtJvmTiEnv(jvmtiEnv* env) {
return art::down_cast<ArtJvmTiEnv*>(env);
@@ -272,6 +284,60 @@ const jvmtiCapabilities kPotentialCapabilities = {
.can_generate_resource_exhaustion_threads_events = 0,
};
+// These are capabilities that are disabled if we were loaded without being debuggable.
+//
+// This includes the following capabilities:
+// can_retransform_any_class:
+// can_retransform_classes:
+// can_redefine_any_class:
+// can_redefine_classes:
+// We need to ensure that inlined code is either not present or can always be deoptimized. This
+// is not guaranteed for non-debuggable processes since we might have inlined bootclasspath code
+// on a threads stack.
+const jvmtiCapabilities kNonDebuggableUnsupportedCapabilities = {
+ .can_tag_objects = 0,
+ .can_generate_field_modification_events = 0,
+ .can_generate_field_access_events = 0,
+ .can_get_bytecodes = 0,
+ .can_get_synthetic_attribute = 0,
+ .can_get_owned_monitor_info = 0,
+ .can_get_current_contended_monitor = 0,
+ .can_get_monitor_info = 0,
+ .can_pop_frame = 0,
+ .can_redefine_classes = 1,
+ .can_signal_thread = 0,
+ .can_get_source_file_name = 0,
+ .can_get_line_numbers = 0,
+ .can_get_source_debug_extension = 0,
+ .can_access_local_variables = 0,
+ .can_maintain_original_method_order = 0,
+ .can_generate_single_step_events = 0,
+ .can_generate_exception_events = 0,
+ .can_generate_frame_pop_events = 0,
+ .can_generate_breakpoint_events = 0,
+ .can_suspend = 0,
+ .can_redefine_any_class = 1,
+ .can_get_current_thread_cpu_time = 0,
+ .can_get_thread_cpu_time = 0,
+ .can_generate_method_entry_events = 0,
+ .can_generate_method_exit_events = 0,
+ .can_generate_all_class_hook_events = 0,
+ .can_generate_compiled_method_load_events = 0,
+ .can_generate_monitor_events = 0,
+ .can_generate_vm_object_alloc_events = 0,
+ .can_generate_native_method_bind_events = 0,
+ .can_generate_garbage_collection_events = 0,
+ .can_generate_object_free_events = 0,
+ .can_force_early_return = 0,
+ .can_get_owned_monitor_stack_depth_info = 0,
+ .can_get_constant_pool = 0,
+ .can_set_native_method_prefix = 0,
+ .can_retransform_classes = 1,
+ .can_retransform_any_class = 1,
+ .can_generate_resource_exhaustion_heap_events = 0,
+ .can_generate_resource_exhaustion_threads_events = 0,
+};
+
} // namespace openjdkjvmti
#endif // ART_OPENJDKJVMTI_ART_JVMTI_H_
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index aced769cb5..53d84836fc 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -68,7 +68,9 @@ bool JvmtiMethodInspectionCallback::IsMethodSafeToJit(art::ArtMethod* method) {
}
DeoptManager::DeoptManager()
- : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock"),
+ : deoptimization_status_lock_("JVMTI_DeoptimizationStatusLock",
+ static_cast<art::LockLevel>(
+ art::LockLevel::kClassLinkerClassesLock + 1)),
deoptimization_condition_("JVMTI_DeoptimizationCondition", deoptimization_status_lock_),
performing_deoptimization_(false),
global_deopt_count_(0),
@@ -91,6 +93,33 @@ void DeoptManager::Shutdown() {
callbacks->RemoveMethodInspectionCallback(&inspection_callback_);
}
+void DeoptManager::FinishSetup() {
+ art::Thread* self = art::Thread::Current();
+ art::MutexLock mu(self, deoptimization_status_lock_);
+
+ art::Runtime* runtime = art::Runtime::Current();
+ // See if we need to do anything.
+ if (!runtime->IsJavaDebuggable()) {
+ // See if we can enable all JVMTI functions. If this is false, only kArtTiVersion agents can be
+ // retrieved and they will all be best-effort.
+ if (PhaseUtil::GetPhaseUnchecked() == JVMTI_PHASE_ONLOAD) {
+ // We are still early enough to change the compiler options and get full JVMTI support.
+ LOG(INFO) << "Openjdkjvmti plugin loaded on a non-debuggable runtime. Changing runtime to "
+ << "debuggable state. Please pass '--debuggable' to dex2oat and "
+ << "'-Xcompiler-option --debuggable' to dalvikvm in the future.";
+ DCHECK(runtime->GetJit() == nullptr) << "Jit should not be running yet!";
+ runtime->AddCompilerOption("--debuggable");
+ runtime->SetJavaDebuggable(true);
+ } else {
+ LOG(WARNING) << "Openjdkjvmti plugin was loaded on a non-debuggable Runtime. Plugin was "
+ << "loaded too late to change runtime state to DEBUGGABLE. Only kArtTiVersion "
+ << "(0x" << std::hex << kArtTiVersion << ") environments are available. Some "
+ << "functionality might not work properly.";
+ }
+ runtime->DeoptimizeBootImage();
+ }
+}
+
bool DeoptManager::MethodHasBreakpoints(art::ArtMethod* method) {
art::MutexLock lk(art::Thread::Current(), deoptimization_status_lock_);
return MethodHasBreakpointsLocked(method);
diff --git a/openjdkjvmti/deopt_manager.h b/openjdkjvmti/deopt_manager.h
index b265fa8ec2..a495b6835c 100644
--- a/openjdkjvmti/deopt_manager.h
+++ b/openjdkjvmti/deopt_manager.h
@@ -101,6 +101,10 @@ class DeoptManager {
void DeoptimizeThread(art::Thread* target) REQUIRES_SHARED(art::Locks::mutator_lock_);
void DeoptimizeAllThreads() REQUIRES_SHARED(art::Locks::mutator_lock_);
+ void FinishSetup()
+ REQUIRES(!deoptimization_status_lock_, !art::Roles::uninterruptible_)
+ REQUIRES_SHARED(art::Locks::mutator_lock_);
+
static DeoptManager* Get();
private:
@@ -141,9 +145,8 @@ class DeoptManager {
REQUIRES(!art::Roles::uninterruptible_, !art::Locks::mutator_lock_);
static constexpr const char* kDeoptManagerInstrumentationKey = "JVMTI_DeoptManager";
- // static constexpr const char* kDeoptManagerThreadName = "JVMTI_DeoptManagerWorkerThread";
- art::Mutex deoptimization_status_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ art::Mutex deoptimization_status_lock_ ACQUIRED_BEFORE(art::Locks::classlinker_classes_lock_);
art::ConditionVariable deoptimization_condition_ GUARDED_BY(deoptimization_status_lock_);
bool performing_deoptimization_ GUARDED_BY(deoptimization_status_lock_);
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index ad928d9b37..dcc834abe9 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -30,11 +30,14 @@
*/
#include "fixed_up_dex_file.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
// Runtime includes.
+#include "dex/compact_dex_level.h"
#include "dex_to_dex_decompiler.h"
+#include "dexlayout.h"
#include "oat_file.h"
#include "vdex_file.h"
@@ -60,8 +63,7 @@ static void DoDexUnquicken(const art::DexFile& new_dex_file, const art::DexFile&
if (vdex == nullptr) {
return;
}
- art::VdexFile::UnquickenDexFile(
- new_dex_file, vdex->GetQuickeningInfo(), /* decompile_return_instruction */true);
+ vdex->UnquickenDexFile(new_dex_file, original_dex_file, /* decompile_return_instruction */true);
}
std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& original) {
@@ -70,7 +72,8 @@ std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& origi
data.resize(original.Size());
memcpy(data.data(), original.Begin(), original.Size());
std::string error;
- std::unique_ptr<const art::DexFile> new_dex_file(art::DexFileLoader::Open(
+ const art::ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const art::DexFile> new_dex_file(dex_file_loader.Open(
data.data(),
data.size(),
/*location*/"Unquickening_dexfile.dex",
@@ -85,6 +88,33 @@ std::unique_ptr<FixedUpDexFile> FixedUpDexFile::Create(const art::DexFile& origi
}
DoDexUnquicken(*new_dex_file, original);
+
+ if (original.IsCompactDexFile()) {
+ // Since we are supposed to return a standard dex, convert back using dexlayout.
+ art::Options options;
+ options.output_to_memmap_ = true;
+ options.compact_dex_level_ = art::CompactDexLevel::kCompactDexLevelNone;
+ options.update_checksum_ = true;
+ art::DexLayout dex_layout(options, nullptr, nullptr);
+ dex_layout.ProcessDexFile(new_dex_file->GetLocation().c_str(), new_dex_file.get(), 0);
+ std::unique_ptr<art::MemMap> mem_map(dex_layout.GetAndReleaseMemMap());
+
+ const uint32_t dex_file_size =
+ reinterpret_cast<const art::DexFile::Header*>(mem_map->Begin())->file_size_;
+ // Overwrite the dex file stored in data with the new result.
+ data.clear();
+ data.insert(data.end(), mem_map->Begin(), mem_map->Begin() + dex_file_size);
+ new_dex_file = dex_file_loader.Open(
+ data.data(),
+ data.size(),
+ /*location*/"Unquickening_dexfile.dex",
+ /*location_checksum*/0,
+ /*oat_dex_file*/nullptr,
+ /*verify*/false,
+ /*verify_checksum*/false,
+ &error);
+ }
+
RecomputeDexChecksum(const_cast<art::DexFile*>(new_dex_file.get()));
std::unique_ptr<FixedUpDexFile> ret(new FixedUpDexFile(std::move(new_dex_file), std::move(data)));
return ret;
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index f9eb008af2..b3f5c1886e 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -42,6 +42,7 @@
#include "class_linker.h"
#include "class_table-inl.h"
#include "common_throws.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_loader.h"
#include "events-inl.h"
@@ -107,12 +108,13 @@ static std::unique_ptr<const art::DexFile> MakeSingleDexFile(art::Thread* self,
}
uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
std::string map_name = map->GetName();
- std::unique_ptr<const art::DexFile> dex_file(art::DexFileLoader::Open(map_name,
- checksum,
- std::move(map),
- /*verify*/true,
- /*verify_checksum*/true,
- &error_msg));
+ const art::ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map_name,
+ checksum,
+ std::move(map),
+ /*verify*/true,
+ /*verify_checksum*/true,
+ &error_msg));
if (dex_file.get() == nullptr) {
LOG(WARNING) << "Unable to load modified dex file for " << descriptor << ": " << error_msg;
art::ThrowClassFormatError(nullptr,
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 6194d1e42c..717b2ba669 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -43,6 +43,7 @@
#include "base/stringpiece.h"
#include "class_linker-inl.h"
#include "debugger.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_types.h"
@@ -426,12 +427,13 @@ jvmtiError Redefiner::AddRedefinition(ArtJvmTiEnv* env, const ArtClassDefinition
return ERR(INVALID_CLASS_FORMAT);
}
uint32_t checksum = reinterpret_cast<const art::DexFile::Header*>(map->Begin())->checksum_;
- std::unique_ptr<const art::DexFile> dex_file(art::DexFileLoader::Open(map->GetName(),
- checksum,
- std::move(map),
- /*verify*/true,
- /*verify_checksum*/true,
- error_msg_));
+ const art::ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const art::DexFile> dex_file(dex_file_loader.Open(map->GetName(),
+ checksum,
+ std::move(map),
+ /*verify*/true,
+ /*verify_checksum*/true,
+ error_msg_));
if (dex_file.get() == nullptr) {
os << "Unable to load modified dex file for " << def.GetName() << ": " << *error_msg_;
*error_msg_ = os.str();
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index 9d5f4ea3f9..cbb7b53bff 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -38,6 +38,7 @@
#include "base/enums.h"
#include "base/macros.h"
#include "class_linker.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "jni_internal.h"
@@ -227,7 +228,8 @@ jvmtiError SearchUtil::AddToBootstrapClassLoaderSearch(jvmtiEnv* env ATTRIBUTE_U
std::string error_msg;
std::vector<std::unique_ptr<const art::DexFile>> dex_files;
- if (!art::DexFileLoader::Open(
+ const art::ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(
segment, segment, /* verify */ true, /* verify_checksum */ true, &error_msg, &dex_files)) {
LOG(WARNING) << "Could not open " << segment << " for boot classpath extension: " << error_msg;
return ERR(ILLEGAL_ARGUMENT);
diff --git a/patchoat/Android.bp b/patchoat/Android.bp
index 0902823644..0e8e517cd4 100644
--- a/patchoat/Android.bp
+++ b/patchoat/Android.bp
@@ -26,6 +26,7 @@ cc_defaults {
},
shared_libs: [
"libbase",
+ "libcrypto", // For computing the digest of image file
],
}
@@ -58,5 +59,6 @@ art_cc_test {
],
shared_libs: [
"libartd",
+ "libcrypto", // For computing the digest of image file
],
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index eb648cba18..6c9cf864b3 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -15,6 +15,7 @@
*/
#include "patchoat.h"
+#include <openssl/sha.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/file.h>
@@ -42,6 +43,7 @@
#include "gc/space/image_space.h"
#include "image-inl.h"
#include "intern_table.h"
+#include "leb128.h"
#include "mirror/dex_cache.h"
#include "mirror/executable.h"
#include "mirror/method.h"
@@ -58,6 +60,8 @@
namespace art {
+using android::base::StringPrintf;
+
static const OatHeader* GetOatHeader(const ElfFile* elf_file) {
uint64_t off = 0;
if (!elf_file->GetSectionOffsetAndSize(".rodata", &off, nullptr)) {
@@ -120,11 +124,134 @@ static bool SymlinkFile(const std::string& input_filename, const std::string& ou
return true;
}
+bool PatchOat::GeneratePatch(
+ const MemMap& original,
+ const MemMap& relocated,
+ std::vector<uint8_t>* output,
+ std::string* error_msg) {
+ // FORMAT of the patch (aka image relocation) file:
+ // * SHA-256 digest (32 bytes) of original/unrelocated file (e.g., the one from /system)
+ // * List of monotonically increasing offsets (max value defined by uint32_t) at which relocations
+ // occur.
+ // Each element is represented as the delta from the previous offset in the list (first element
+ // is a delta from 0). Each delta is encoded using unsigned LEB128: little-endian
+ // variable-length 7 bits per byte encoding, where all bytes have the highest bit (0x80) set
+ // except for the final byte which does not have that bit set. For example, 0x3f is offset 0x3f,
+ // whereas 0xbf 0x05 is offset (0x3f & 0x7f) | (0x5 << 7) which is 0x2bf. Most deltas end up
+ // being encoding using just one byte, achieving ~4x decrease in relocation file size compared
+ // to the encoding where offsets are stored verbatim, as uint32_t.
+
+ size_t original_size = original.Size();
+ size_t relocated_size = relocated.Size();
+ if (original_size != relocated_size) {
+ *error_msg =
+ StringPrintf(
+ "Original and relocated image sizes differ: %zu vs %zu", original_size, relocated_size);
+ return false;
+ }
+ if ((original_size % 4) != 0) {
+ *error_msg = StringPrintf("Image size not multiple of 4: %zu", original_size);
+ return false;
+ }
+ if (original_size > UINT32_MAX) {
+ *error_msg = StringPrintf("Image too large: %zu" , original_size);
+ return false;
+ }
+
+ const ImageHeader& relocated_header =
+ *reinterpret_cast<const ImageHeader*>(relocated.Begin());
+ // Offsets are supposed to differ between original and relocated by this value
+ off_t expected_diff = relocated_header.GetPatchDelta();
+ if (expected_diff == 0) {
+ // Can't identify offsets which are supposed to differ due to relocation
+ *error_msg = "Relocation delta is 0";
+ return false;
+ }
+
+ // Output the SHA-256 digest of the original
+ output->resize(SHA256_DIGEST_LENGTH);
+ const uint8_t* original_bytes = original.Begin();
+ SHA256(original_bytes, original_size, output->data());
+
+ // Output the list of offsets at which the original and patched images differ
+ size_t last_diff_offset = 0;
+ size_t diff_offset_count = 0;
+ const uint8_t* relocated_bytes = relocated.Begin();
+ for (size_t offset = 0; offset < original_size; offset += 4) {
+ uint32_t original_value = *reinterpret_cast<const uint32_t*>(original_bytes + offset);
+ uint32_t relocated_value = *reinterpret_cast<const uint32_t*>(relocated_bytes + offset);
+ off_t diff = relocated_value - original_value;
+ if (diff == 0) {
+ continue;
+ } else if (diff != expected_diff) {
+ *error_msg =
+ StringPrintf(
+ "Unexpected diff at offset %zu. Expected: %jd, but was: %jd",
+ offset,
+ (intmax_t) expected_diff,
+ (intmax_t) diff);
+ return false;
+ }
+
+ uint32_t offset_diff = offset - last_diff_offset;
+ last_diff_offset = offset;
+ diff_offset_count++;
+
+ EncodeUnsignedLeb128(output, offset_diff);
+ }
+
+ if (diff_offset_count == 0) {
+ *error_msg = "Original and patched images are identical";
+ return false;
+ }
+
+ return true;
+}
+
+static bool WriteRelFile(
+ const MemMap& original,
+ const MemMap& relocated,
+ const std::string& rel_filename,
+ std::string* error_msg) {
+ std::vector<uint8_t> output;
+ if (!PatchOat::GeneratePatch(original, relocated, &output, error_msg)) {
+ return false;
+ }
+
+ std::unique_ptr<File> rel_file(OS::CreateEmptyFileWriteOnly(rel_filename.c_str()));
+ if (rel_file.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to create/open output file %s", rel_filename.c_str());
+ return false;
+ }
+ if (!rel_file->WriteFully(output.data(), output.size())) {
+ *error_msg = StringPrintf("Failed to write to %s", rel_filename.c_str());
+ return false;
+ }
+ if (rel_file->FlushCloseOrErase() != 0) {
+ *error_msg = StringPrintf("Failed to flush and close %s", rel_filename.c_str());
+ return false;
+ }
+
+ return true;
+}
+
bool PatchOat::Patch(const std::string& image_location,
off_t delta,
- const std::string& output_directory,
+ const std::string& output_image_directory,
+ const std::string& output_image_relocation_directory,
InstructionSet isa,
TimingLogger* timings) {
+ bool output_image = !output_image_directory.empty();
+ bool output_image_relocation = !output_image_relocation_directory.empty();
+ if ((!output_image) && (!output_image_relocation)) {
+ // Nothing to do
+ return true;
+ }
+ if ((output_image_relocation) && (delta == 0)) {
+ LOG(ERROR) << "Cannot output image relocation information when requested relocation delta is 0";
+ return false;
+ }
+
CHECK(Runtime::Current() == nullptr);
CHECK(!image_location.empty()) << "image file must have a filename.";
@@ -221,32 +348,35 @@ bool PatchOat::Patch(const std::string& image_location,
return false;
}
- MaybePic is_oat_pic = IsOatPic(elf.get());
- if (is_oat_pic >= ERROR_FIRST) {
- // Error logged by IsOatPic
- return false;
- } else if (is_oat_pic == NOT_PIC) {
- LOG(ERROR) << "patchoat cannot be used on non-PIC oat file: " << input_oat_file->GetPath();
- return false;
- } else {
- CHECK(is_oat_pic == PIC);
-
- // Create a symlink.
- std::string converted_image_filename = space->GetImageLocation();
- std::replace(converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
- std::string output_image_filename = output_directory +
- (android::base::StartsWith(converted_image_filename, "/") ? "" : "/") +
- converted_image_filename;
- std::string output_vdex_filename =
- ImageHeader::GetVdexLocationFromImageLocation(output_image_filename);
- std::string output_oat_filename =
- ImageHeader::GetOatLocationFromImageLocation(output_image_filename);
-
- if (!ReplaceOatFileWithSymlink(input_oat_file->GetPath(),
- output_oat_filename) ||
- !SymlinkFile(input_vdex_filename, output_vdex_filename)) {
- // Errors already logged by above call.
+ if (output_image) {
+ MaybePic is_oat_pic = IsOatPic(elf.get());
+ if (is_oat_pic >= ERROR_FIRST) {
+ // Error logged by IsOatPic
return false;
+ } else if (is_oat_pic == NOT_PIC) {
+ LOG(ERROR) << "patchoat cannot be used on non-PIC oat file: " << input_oat_file->GetPath();
+ return false;
+ } else {
+ CHECK(is_oat_pic == PIC);
+
+ // Create a symlink.
+ std::string converted_image_filename = space->GetImageLocation();
+ std::replace(
+ converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
+ std::string output_image_filename = output_image_directory +
+ (android::base::StartsWith(converted_image_filename, "/") ? "" : "/") +
+ converted_image_filename;
+ std::string output_vdex_filename =
+ ImageHeader::GetVdexLocationFromImageLocation(output_image_filename);
+ std::string output_oat_filename =
+ ImageHeader::GetOatLocationFromImageLocation(output_image_filename);
+
+ if (!ReplaceOatFileWithSymlink(input_oat_file->GetPath(),
+ output_oat_filename) ||
+ !SymlinkFile(input_vdex_filename, output_vdex_filename)) {
+ // Errors already logged by above call.
+ return false;
+ }
}
}
@@ -267,28 +397,72 @@ bool PatchOat::Patch(const std::string& image_location,
}
}
- // Write the patched image spaces.
- for (size_t i = 0; i < spaces.size(); ++i) {
- gc::space::ImageSpace* space = spaces[i];
+ if (output_image) {
+ // Write the patched image spaces.
+ for (size_t i = 0; i < spaces.size(); ++i) {
+ gc::space::ImageSpace* space = spaces[i];
- t.NewTiming("Writing image");
- std::string converted_image_filename = space->GetImageLocation();
- std::replace(converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
- std::string output_image_filename = output_directory +
- (android::base::StartsWith(converted_image_filename, "/") ? "" : "/") +
- converted_image_filename;
- std::unique_ptr<File> output_image_file(CreateOrOpen(output_image_filename.c_str()));
- if (output_image_file.get() == nullptr) {
- LOG(ERROR) << "Failed to open output image file at " << output_image_filename;
- return false;
+ t.NewTiming("Writing image");
+ std::string converted_image_filename = space->GetImageLocation();
+ std::replace(converted_image_filename.begin() + 1, converted_image_filename.end(), '/', '@');
+ std::string output_image_filename = output_image_directory +
+ (android::base::StartsWith(converted_image_filename, "/") ? "" : "/") +
+ converted_image_filename;
+ std::unique_ptr<File> output_image_file(CreateOrOpen(output_image_filename.c_str()));
+ if (output_image_file.get() == nullptr) {
+ LOG(ERROR) << "Failed to open output image file at " << output_image_filename;
+ return false;
+ }
+
+ PatchOat& p = space_to_patchoat_map.find(space)->second;
+
+ bool success = p.WriteImage(output_image_file.get());
+ success = FinishFile(output_image_file.get(), success);
+ if (!success) {
+ return false;
+ }
}
+ }
- PatchOat& p = space_to_patchoat_map.find(space)->second;
+ if (output_image_relocation) {
+ // Write the image relocation information for each space.
+ for (size_t i = 0; i < spaces.size(); ++i) {
+ gc::space::ImageSpace* space = spaces[i];
+
+ t.NewTiming("Writing image relocation");
+ std::string original_image_filename(space->GetImageLocation() + ".rel");
+ std::string image_relocation_filename =
+ output_image_relocation_directory
+ + (android::base::StartsWith(original_image_filename, "/") ? "" : "/")
+ + original_image_filename.substr(original_image_filename.find_last_of("/"));
+ File& input_image = *space_to_file_map.find(space)->second;
+ int64_t input_image_size = input_image.GetLength();
+ if (input_image_size < 0) {
+ LOG(ERROR) << "Error while getting input image size";
+ return false;
+ }
+ std::string error_msg;
+ std::unique_ptr<MemMap> original(MemMap::MapFile(input_image_size,
+ PROT_READ,
+ MAP_PRIVATE,
+ input_image.Fd(),
+ 0,
+ /*low_4gb*/false,
+ input_image.GetPath().c_str(),
+ &error_msg));
+ if (original.get() == nullptr) {
+ LOG(ERROR) << "Unable to map image file " << input_image.GetPath() << " : " << error_msg;
+ return false;
+ }
- bool success = p.WriteImage(output_image_file.get());
- success = FinishFile(output_image_file.get(), success);
- if (!success) {
- return false;
+ PatchOat& p = space_to_patchoat_map.find(space)->second;
+ const MemMap* relocated = p.image_;
+
+ if (!WriteRelFile(*original, *relocated, image_relocation_filename, &error_msg)) {
+ LOG(ERROR) << "Failed to create image relocation file " << image_relocation_filename
+ << ": " << error_msg;
+ return false;
+ }
}
}
@@ -739,6 +913,9 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError(" --output-image-file=<file.art>: Specifies the exact file to write the patched");
UsageError(" image file to.");
UsageError("");
+ UsageError(" --output-image-relocation-file=<file.art.rel>: Specifies the exact file to write");
+ UsageError(" the image relocation information to.");
+ UsageError("");
UsageError(" --base-offset-delta=<delta>: Specify the amount to change the old base-offset by.");
UsageError(" This value may be negative.");
UsageError("");
@@ -754,12 +931,13 @@ static int patchoat_image(TimingLogger& timings,
InstructionSet isa,
const std::string& input_image_location,
const std::string& output_image_filename,
+ const std::string& output_image_relocation_filename,
off_t base_delta,
bool base_delta_set,
bool debug) {
CHECK(!input_image_location.empty());
- if (output_image_filename.empty()) {
- Usage("Image patching requires --output-image-file");
+ if ((output_image_filename.empty()) && (output_image_relocation_filename.empty())) {
+ Usage("Image patching requires --output-image-file or --output-image-relocation-file");
}
if (!base_delta_set) {
@@ -778,9 +956,19 @@ static int patchoat_image(TimingLogger& timings,
TimingLogger::ScopedTiming pt("patch image and oat", &timings);
- std::string output_directory =
+ std::string output_image_directory =
output_image_filename.substr(0, output_image_filename.find_last_of('/'));
- bool ret = PatchOat::Patch(input_image_location, base_delta, output_directory, isa, &timings);
+ std::string output_image_relocation_directory =
+ output_image_relocation_filename.substr(
+ 0, output_image_relocation_filename.find_last_of('/'));
+ bool ret =
+ PatchOat::Patch(
+ input_image_location,
+ base_delta,
+ output_image_directory,
+ output_image_relocation_directory,
+ isa,
+ &timings);
if (kIsDebugBuild) {
LOG(INFO) << "Exiting with return ... " << ret;
@@ -811,6 +999,7 @@ static int patchoat(int argc, char **argv) {
InstructionSet isa = InstructionSet::kNone;
std::string input_image_location;
std::string output_image_filename;
+ std::string output_image_relocation_filename;
off_t base_delta = 0;
bool base_delta_set = false;
bool dump_timings = kIsDebugBuild;
@@ -832,6 +1021,9 @@ static int patchoat(int argc, char **argv) {
input_image_location = option.substr(strlen("--input-image-location=")).data();
} else if (option.starts_with("--output-image-file=")) {
output_image_filename = option.substr(strlen("--output-image-file=")).data();
+ } else if (option.starts_with("--output-image-relocation-file=")) {
+ output_image_relocation_filename =
+ option.substr(strlen("--output-image-relocation-file=")).data();
} else if (option.starts_with("--base-offset-delta=")) {
const char* base_delta_str = option.substr(strlen("--base-offset-delta=")).data();
base_delta_set = true;
@@ -856,6 +1048,7 @@ static int patchoat(int argc, char **argv) {
isa,
input_image_location,
output_image_filename,
+ output_image_relocation_filename,
base_delta,
base_delta_set,
debug);
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 83516845d8..1033a2e5e1 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -44,12 +44,25 @@ class Class;
class PatchOat {
public:
+ // Relocates the provided image by the specified offset. If output_image_directory is non-empty,
+ // outputs the relocated image into that directory. If output_image_relocation_directory is
+ // non-empty, outputs image relocation files (see GeneratePatch) into that directory.
static bool Patch(const std::string& image_location,
off_t delta,
- const std::string& output_directory,
+ const std::string& output_image_directory,
+ const std::string& output_image_relocation_directory,
InstructionSet isa,
TimingLogger* timings);
+ // Generates a patch which can be used to efficiently relocate the original file or to check that
+ // a relocated file matches the original. The patch is generated from the difference of the
+ // |original| and the already |relocated| image, and written to |output| in the form of unsigned
+ // LEB128 for each relocation position.
+ static bool GeneratePatch(const MemMap& original,
+ const MemMap& relocated,
+ std::vector<uint8_t>* output,
+ std::string* error_msg);
+
~PatchOat() {}
PatchOat(PatchOat&&) = default;
diff --git a/patchoat/patchoat_test.cc b/patchoat/patchoat_test.cc
index 86e851c72b..90cb4f8310 100644
--- a/patchoat/patchoat_test.cc
+++ b/patchoat/patchoat_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <openssl/sha.h>
#include <dirent.h>
#include <sys/types.h>
@@ -24,6 +25,7 @@
#include "android-base/strings.h"
#include "dexopt_test.h"
+#include "leb128.h"
#include "runtime.h"
#include <gtest/gtest.h>
@@ -137,6 +139,21 @@ class PatchoatTest : public DexoptTest {
return RunDex2OatOrPatchoat(argv, error_msg);
}
+ bool GenerateBootImageRelFile(const std::string& input_image_location,
+ const std::string& output_rel_filename,
+ off_t base_offset_delta,
+ std::string* error_msg) {
+ Runtime* const runtime = Runtime::Current();
+ std::vector<std::string> argv;
+ argv.push_back(runtime->GetPatchoatExecutable());
+ argv.push_back("--input-image-location=" + input_image_location);
+ argv.push_back("--output-image-relocation-file=" + output_rel_filename);
+ argv.push_back(StringPrintf("--base-offset-delta=0x%jx", (intmax_t) base_offset_delta));
+ argv.push_back(StringPrintf("--instruction-set=%s", GetInstructionSetString(kRuntimeISA)));
+
+ return RunDex2OatOrPatchoat(argv, error_msg);
+ }
+
bool RunDex2OatOrPatchoat(const std::vector<std::string>& args, std::string* error_msg) {
int link[2];
@@ -263,6 +280,34 @@ class PatchoatTest : public DexoptTest {
}
bool BinaryDiff(
+ const std::string& filename1,
+ const std::vector<uint8_t>& data1,
+ const std::string& filename2,
+ const std::vector<uint8_t>& data2,
+ std::string* error_msg) {
+    if (data1.size() != data2.size()) {
+ *error_msg =
+ StringPrintf(
+ "%s and %s are of different size: %zu vs %zu",
+ filename1.c_str(),
+ filename2.c_str(),
+ data1.size(),
+ data2.size());
+ return true;
+ }
+ size_t size = data1.size();
+ for (size_t i = 0; i < size; i++) {
+ if (data1[i] != data2[i]) {
+ *error_msg =
+ StringPrintf("%s and %s differ at offset %zu", filename1.c_str(), filename2.c_str(), i);
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool BinaryDiff(
const std::string& filename1, const std::string& filename2, std::string* error_msg) {
std::string read_error_msg;
std::vector<uint8_t> image1;
@@ -275,26 +320,97 @@ class PatchoatTest : public DexoptTest {
*error_msg = StringPrintf("Failed to read %s: %s", filename2.c_str(), read_error_msg.c_str());
return true;
}
- if (image1.size() != image2.size()) {
+ return BinaryDiff(filename1, image1, filename2, image2, error_msg);
+ }
+
+ bool IsImageIdenticalToOriginalExceptForRelocation(
+ const std::string& relocated_filename,
+ const std::string& original_filename,
+ const std::string& rel_filename,
+ std::string* error_msg) {
+ *error_msg = "";
+ std::string read_error_msg;
+ std::vector<uint8_t> rel;
+ if (!ReadFully(rel_filename, &rel, &read_error_msg)) {
+ *error_msg =
+ StringPrintf("Failed to read %s: %s", rel_filename.c_str(), read_error_msg.c_str());
+ return false;
+ }
+ std::vector<uint8_t> relocated;
+ if (!ReadFully(relocated_filename, &relocated, &read_error_msg)) {
+ *error_msg =
+ StringPrintf("Failed to read %s: %s", relocated_filename.c_str(), read_error_msg.c_str());
+ return false;
+ }
+
+ size_t image_size = relocated.size();
+ if ((image_size % 4) != 0) {
*error_msg =
StringPrintf(
- "%s and %s are of different size: %zu vs %zu",
- filename1.c_str(),
- filename2.c_str(),
- image1.size(),
- image2.size());
- return true;
+ "Relocated image file %s size not multiple of 4: %zu",
+ relocated_filename.c_str(), image_size);
+ return false;
}
- size_t size = image1.size();
- for (size_t i = 0; i < size; i++) {
- if (image1[i] != image2[i]) {
+ if (image_size > UINT32_MAX) {
+ *error_msg =
+ StringPrintf(
+ "Relocated image file %s too large: %zu" , relocated_filename.c_str(), image_size);
+ return false;
+ }
+
+ const ImageHeader& relocated_header = *reinterpret_cast<const ImageHeader*>(relocated.data());
+ off_t expected_diff = relocated_header.GetPatchDelta();
+
+ if (expected_diff != 0) {
+ // Relocated image is expected to differ from the original due to relocation.
+ // Unrelocate the image in memory to compensate.
+ uint8_t* image_start = relocated.data();
+ const uint8_t* rel_end = &rel[rel.size()];
+ if (rel.size() < SHA256_DIGEST_LENGTH) {
*error_msg =
- StringPrintf("%s and %s differ at offset %zu", filename1.c_str(), filename2.c_str(), i);
- return true;
+ StringPrintf("Malformed image relocation file %s: too short", rel_filename.c_str());
+ return false;
+ }
+ const uint8_t* rel_ptr = &rel[SHA256_DIGEST_LENGTH];
+ // The remaining .rel file consists of offsets at which relocation should've occurred.
+ // For each offset, we "unrelocate" the image by subtracting the expected relocation
+ // diff value (as specified in the image header).
+ //
+ // Each offset is encoded as a delta/diff relative to the previous offset. With the
+ // very first offset being encoded relative to offset 0.
+ // Deltas are encoded using little-endian 7 bits per byte encoding, with all bytes except
+ // the last one having the highest bit set.
+ uint32_t offset = 0;
+ while (rel_ptr != rel_end) {
+ uint32_t offset_delta = 0;
+ if (DecodeUnsignedLeb128Checked(&rel_ptr, rel_end, &offset_delta)) {
+ offset += offset_delta;
+ uint32_t *image_value = reinterpret_cast<uint32_t*>(image_start + offset);
+ *image_value -= expected_diff;
+ } else {
+ *error_msg =
+ StringPrintf(
+ "Malformed image relocation file %s: "
+                "last byte has its most significant bit set",
+ rel_filename.c_str());
+ return false;
+ }
}
}
- return false;
+ // Image in memory is now supposed to be identical to the original. Compare it to the original.
+ std::vector<uint8_t> original;
+ if (!ReadFully(original_filename, &original, &read_error_msg)) {
+ *error_msg =
+ StringPrintf("Failed to read %s: %s", original_filename.c_str(), read_error_msg.c_str());
+ return false;
+ }
+ if (BinaryDiff(relocated_filename, relocated, original_filename, original, error_msg)) {
+ return false;
+ }
+
+ // Relocated image is identical to the original, once relocations are taken into account
+ return true;
}
};
@@ -408,4 +524,140 @@ TEST_F(PatchoatTest, PatchoatRelocationSameAsDex2oatRelocation) {
#endif
}
+TEST_F(PatchoatTest, RelFileSufficientToUnpatch) {
+ // This test checks that a boot image relocated using patchoat can be unrelocated using the .rel
+ // file created by patchoat.
+
+ // This test doesn't work when heap poisoning is enabled because some of the
+ // references are negated. b/72117833 is tracking the effort to have patchoat
+ // and its tests support heap poisoning.
+ TEST_DISABLED_FOR_HEAP_POISONING();
+
+ // Compile boot image into a random directory using dex2oat
+ ScratchFile dex2oat_orig_scratch;
+ dex2oat_orig_scratch.Unlink();
+ std::string dex2oat_orig_dir = dex2oat_orig_scratch.GetFilename();
+ ASSERT_EQ(0, mkdir(dex2oat_orig_dir.c_str(), 0700));
+ const uint32_t orig_base_addr = 0x60000000;
+ std::vector<std::string> dex2oat_extra_args;
+ std::string error_msg;
+ if (!CompileBootImageToDir(dex2oat_orig_dir, dex2oat_extra_args, orig_base_addr, &error_msg)) {
+ FAIL() << "CompileBootImage1 failed: " << error_msg;
+ }
+
+ // Generate image relocation file for the original boot image
+ ScratchFile rel_scratch;
+ rel_scratch.Unlink();
+ std::string rel_dir = rel_scratch.GetFilename();
+ ASSERT_EQ(0, mkdir(rel_dir.c_str(), 0700));
+ std::string dex2oat_orig_with_arch_dir =
+ dex2oat_orig_dir + "/" + GetInstructionSetString(kRuntimeISA);
+ // The arch-including symlink is needed by patchoat
+ ASSERT_EQ(0, symlink(dex2oat_orig_dir.c_str(), dex2oat_orig_with_arch_dir.c_str()));
+ off_t base_addr_delta = 0x100000;
+ if (!GenerateBootImageRelFile(
+ dex2oat_orig_dir + "/boot.art",
+ rel_dir + "/boot.art.rel",
+ base_addr_delta,
+ &error_msg)) {
+ FAIL() << "RelocateBootImage failed: " << error_msg;
+ }
+
+ // Relocate the original boot image using patchoat
+ ScratchFile relocated_scratch;
+ relocated_scratch.Unlink();
+ std::string relocated_dir = relocated_scratch.GetFilename();
+ ASSERT_EQ(0, mkdir(relocated_dir.c_str(), 0700));
+ // Use a different relocation delta from the one used when generating .rel files above. This is
+ // to make sure .rel files are not specific to a particular relocation delta.
+ base_addr_delta -= 0x10000;
+ if (!RelocateBootImage(
+ dex2oat_orig_dir + "/boot.art",
+ relocated_dir + "/boot.art",
+ base_addr_delta,
+ &error_msg)) {
+ FAIL() << "RelocateBootImage failed: " << error_msg;
+ }
+
+ // Assert that patchoat created the same set of .art and .art.rel files
+ std::vector<std::string> rel_basenames;
+ std::vector<std::string> relocated_image_basenames;
+ if (!ListDirFilesEndingWith(rel_dir, "", &rel_basenames, &error_msg)) {
+ FAIL() << "Failed to list *.art.rel files in " << rel_dir << ": " << error_msg;
+ }
+ if (!ListDirFilesEndingWith(relocated_dir, ".art", &relocated_image_basenames, &error_msg)) {
+ FAIL() << "Failed to list *.art files in " << relocated_dir << ": " << error_msg;
+ }
+ std::sort(rel_basenames.begin(), rel_basenames.end());
+ std::sort(relocated_image_basenames.begin(), relocated_image_basenames.end());
+
+ // .art and .art.rel file names output by patchoat look like
+ // tmp@art-data-<random>-<random>@boot*.art, encoding the name of the directory in their name.
+ // To compare these with each other, we retain only the part of the file name after the last @,
+ // and we also drop the extension.
+ std::vector<std::string> rel_shortened_basenames(rel_basenames.size());
+ std::vector<std::string> relocated_image_shortened_basenames(relocated_image_basenames.size());
+ for (size_t i = 0; i < rel_basenames.size(); i++) {
+ rel_shortened_basenames[i] = rel_basenames[i].substr(rel_basenames[i].find_last_of("@") + 1);
+ rel_shortened_basenames[i] =
+ rel_shortened_basenames[i].substr(0, rel_shortened_basenames[i].find("."));
+ }
+ for (size_t i = 0; i < relocated_image_basenames.size(); i++) {
+ relocated_image_shortened_basenames[i] =
+ relocated_image_basenames[i].substr(relocated_image_basenames[i].find_last_of("@") + 1);
+ relocated_image_shortened_basenames[i] =
+ relocated_image_shortened_basenames[i].substr(
+ 0, relocated_image_shortened_basenames[i].find("."));
+ }
+ ASSERT_EQ(rel_shortened_basenames, relocated_image_shortened_basenames);
+
+ // For each image file, assert that unrelocating the image produces its original version
+ for (size_t i = 0; i < relocated_image_basenames.size(); i++) {
+ const std::string& original_image_filename =
+ dex2oat_orig_dir + "/" + relocated_image_shortened_basenames[i] + ".art";
+ const std::string& relocated_image_filename =
+ relocated_dir + "/" + relocated_image_basenames[i];
+ const std::string& rel_filename = rel_dir + "/" + rel_basenames[i];
+
+ // Assert that relocated image differs from the original
+ if (!BinaryDiff(original_image_filename, relocated_image_filename, &error_msg)) {
+ FAIL() << "Relocated image " << relocated_image_filename
+ << " identical to the original image " << original_image_filename;
+ }
+
+ // Assert that relocated image is identical to the original except for relocations described in
+ // the .rel file
+ if (!IsImageIdenticalToOriginalExceptForRelocation(
+ relocated_image_filename, original_image_filename, rel_filename, &error_msg)) {
+ FAIL() << "Unrelocating " << relocated_image_filename << " using " << rel_filename
+ << " did not produce the same output as " << original_image_filename << ": " << error_msg;
+ }
+
+ // Assert that the digest of original image in .rel file is as expected
+ std::vector<uint8_t> original;
+ if (!ReadFully(original_image_filename, &original, &error_msg)) {
+ FAIL() << "Failed to read original image " << original_image_filename;
+ }
+ std::vector<uint8_t> rel;
+ if (!ReadFully(rel_filename, &rel, &error_msg)) {
+ FAIL() << "Failed to read image relocation file " << rel_filename;
+ }
+ uint8_t original_image_digest[SHA256_DIGEST_LENGTH];
+ SHA256(original.data(), original.size(), original_image_digest);
+ const uint8_t* original_image_digest_in_rel_file = rel.data();
+ if (memcmp(original_image_digest_in_rel_file, original_image_digest, SHA256_DIGEST_LENGTH)) {
+ FAIL() << "Digest of original image in " << rel_filename << " does not match the original"
+ " image " << original_image_filename;
+ }
+ }
+
+ ClearDirectory(dex2oat_orig_dir.c_str(), /*recursive*/ true);
+ ClearDirectory(rel_dir.c_str(), /*recursive*/ true);
+ ClearDirectory(relocated_dir.c_str(), /*recursive*/ true);
+
+ rmdir(dex2oat_orig_dir.c_str());
+ rmdir(rel_dir.c_str());
+ rmdir(relocated_dir.c_str());
+}
+
} // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index 71f7f9d669..9f3e3b6ac5 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -39,6 +39,7 @@
#include "base/unix_file/fd_file.h"
#include "boot_image_profile.h"
#include "bytecode_utils.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
@@ -329,25 +330,26 @@ class ProfMan FINAL {
static constexpr bool kVerifyChecksum = true;
for (size_t i = 0; i < dex_locations_.size(); ++i) {
std::string error_msg;
+ const ArtDexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files_for_location;
if (use_apk_fd_list) {
- if (DexFileLoader::OpenZip(apks_fd_[i],
- dex_locations_[i],
- /* verify */ true,
- kVerifyChecksum,
- &error_msg,
- &dex_files_for_location)) {
+ if (dex_file_loader.OpenZip(apks_fd_[i],
+ dex_locations_[i],
+ /* verify */ true,
+ kVerifyChecksum,
+ &error_msg,
+ &dex_files_for_location)) {
} else {
LOG(WARNING) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
continue;
}
} else {
- if (DexFileLoader::Open(apk_files_[i].c_str(),
- dex_locations_[i],
- /* verify */ true,
- kVerifyChecksum,
- &error_msg,
- &dex_files_for_location)) {
+ if (dex_file_loader.Open(apk_files_[i].c_str(),
+ dex_locations_[i],
+ /* verify */ true,
+ kVerifyChecksum,
+ &error_msg,
+ &dex_files_for_location)) {
} else {
LOG(WARNING) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
continue;
@@ -727,7 +729,7 @@ class ProfMan FINAL {
const DexFile::CodeItem* code_item = dex_file->GetCodeItem(offset);
bool found_invoke = false;
- for (const DexInstructionPcPair& inst : CodeItemInstructionAccessor(dex_file, code_item)) {
+ for (const DexInstructionPcPair& inst : CodeItemInstructionAccessor(*dex_file, code_item)) {
if (inst->Opcode() == Instruction::INVOKE_VIRTUAL) {
if (found_invoke) {
LOG(ERROR) << "Multiple invoke INVOKE_VIRTUAL found: "
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 2657f4fa86..07764b8151 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -21,6 +21,76 @@
JIT_DEBUG_REGISTER_CODE_LDFLAGS = ["-Wl,--keep-unique,__jit_debug_register_code"]
cc_defaults {
+ name: "libdexfile_defaults",
+ defaults: ["art_defaults"],
+ host_supported: true,
+ srcs: [
+ "dex/compact_dex_debug_info.cc",
+ "dex/compact_dex_file.cc",
+ "dex/dex_file.cc",
+ "dex/dex_file_exception_helpers.cc",
+ "dex/dex_file_loader.cc",
+ "dex/dex_file_tracking_registrar.cc",
+ "dex/dex_file_verifier.cc",
+ "dex/dex_instruction.cc",
+ "dex/standard_dex_file.cc",
+ "utf.cc",
+ "utils.cc",
+ ],
+
+ target: {
+ android: {
+ static_libs: [
+ "libz",
+ "libbase",
+ ],
+ },
+ host: {
+ shared_libs: [
+ "libz",
+ ],
+ },
+ },
+ header_libs: [
+ "jni_headers",
+ ],
+ generated_sources: ["art_operator_srcs"],
+ // asm_support_gen.h (used by asm_support.h) is generated with cpp-define-generator
+ generated_headers: ["cpp-define-generator-asm-support"],
+ // export our headers so the libart-gtest targets can use it as well.
+ export_generated_headers: ["cpp-define-generator-asm-support"],
+ include_dirs: [
+ "external/icu/icu4c/source/common",
+ "external/zlib",
+ ],
+ shared_libs: [
+ "liblog",
+ // For common macros.
+ "libbase",
+ "libz",
+ ],
+
+ // Exporting "." would shadow the system elf.h with our elf.h,
+ // which in turn breaks any tools that reference this library.
+ // export_include_dirs: ["."],
+
+ // ART's macros.h depends on libbase's macros.h.
+ // Note: runtime_options.h depends on cmdline. But we don't really want to export this
+ // generically. dex2oat takes care of it itself.
+ export_shared_lib_headers: ["libbase"],
+}
+
+art_cc_library {
+ name: "libdexfile",
+ defaults: ["libdexfile_defaults"],
+ // Leave the symbols in the shared library so that stack unwinders can
+ // produce meaningful name resolution.
+ strip: {
+ keep_symbols: true,
+ },
+}
+
+cc_defaults {
name: "libart_defaults",
defaults: ["art_defaults"],
host_supported: true,
@@ -56,12 +126,14 @@ cc_defaults {
"common_throws.cc",
"compiler_filter.cc",
"debugger.cc",
+ "dex/compact_dex_debug_info.cc",
"dex/compact_dex_file.cc",
"dex/dex_file.cc",
"dex/dex_file_annotations.cc",
"dex/dex_file_exception_helpers.cc",
"dex/dex_file_layout.cc",
"dex/dex_file_loader.cc",
+ "dex/art_dex_file_loader.cc",
"dex/dex_file_tracking_registrar.cc",
"dex/dex_file_verifier.cc",
"dex/dex_instruction.cc",
@@ -572,6 +644,7 @@ art_cc_test {
"class_table_test.cc",
"compiler_filter_test.cc",
"dex/code_item_accessors_test.cc",
+ "dex/compact_dex_debug_info_test.cc",
"dex/compact_dex_file_test.cc",
"dex/dex_file_test.cc",
"dex/dex_file_verifier_test.cc",
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 78b9e46d77..80080e9832 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -144,6 +144,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pAsin = asin;
qpoints->pAtan = atan;
qpoints->pAtan2 = atan2;
+ qpoints->pPow = pow;
qpoints->pCbrt = cbrt;
qpoints->pCosh = cosh;
qpoints->pExp = exp;
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index c09baea72a..737d2a86a1 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -794,27 +794,24 @@ END art_quick_unlock_object_no_inline
.extern artInstanceOfFromCode
.extern artThrowClassCastExceptionForObject
ENTRY art_quick_check_instance_of
- push {r0-r1, lr} @ save arguments, link register and pad
- .cfi_adjust_cfa_offset 12
+ push {r0-r2, lr} @ save arguments, padding (r2) and link register
+ .cfi_adjust_cfa_offset 16
.cfi_rel_offset r0, 0
.cfi_rel_offset r1, 4
- .cfi_rel_offset lr, 8
- sub sp, #4
- .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset lr, 12
bl artInstanceOfFromCode
cbz r0, .Lthrow_class_cast_exception
- add sp, #4
- .cfi_adjust_cfa_offset -4
- pop {r0-r1, pc}
- .cfi_adjust_cfa_offset 4 @ Reset unwind info so following code unwinds.
+ pop {r0-r2, pc}
+
.Lthrow_class_cast_exception:
- add sp, #4
- .cfi_adjust_cfa_offset -4
- pop {r0-r1, lr}
- .cfi_adjust_cfa_offset -12
+ pop {r0-r2, lr}
+ .cfi_adjust_cfa_offset -16
.cfi_restore r0
.cfi_restore r1
+ .cfi_restore r2
.cfi_restore lr
+
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r2 @ save all registers as basis for long jump context
mov r2, r9 @ pass Thread::Current
bl artThrowClassCastExceptionForObject @ (Object*, Class*, Thread*)
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 80bf3abc6f..4c43b7ed3d 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -168,6 +168,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pAsin = asin;
qpoints->pAtan = atan;
qpoints->pAtan2 = atan2;
+ qpoints->pPow = pow;
qpoints->pCbrt = cbrt;
qpoints->pCosh = cosh;
qpoints->pExp = exp;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 96a1cadab9..b0e7b0a964 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1341,12 +1341,14 @@ ENTRY art_quick_check_instance_of
// Call runtime code
bl artInstanceOfFromCode
+ // Restore LR.
+ RESTORE_REG xLR, 24
+
// Check for exception
cbz x0, .Lthrow_class_cast_exception
// Restore and return
.cfi_remember_state
- RESTORE_REG xLR, 24
RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
ret
.cfi_restore_state // Reset unwind info so following code unwinds.
@@ -1354,7 +1356,6 @@ ENTRY art_quick_check_instance_of
.Lthrow_class_cast_exception:
// Restore
- RESTORE_REG xLR, 24
RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 209f36705a..badee59568 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -348,6 +348,8 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
static_assert(IsDirectEntrypoint(kQuickAtan), "Direct C stub marked non-direct.");
qpoints->pAtan2 = atan2;
static_assert(IsDirectEntrypoint(kQuickAtan2), "Direct C stub marked non-direct.");
+ qpoints->pPow = pow;
+ static_assert(IsDirectEntrypoint(kQuickPow), "Direct C stub marked non-direct.");
qpoints->pCbrt = cbrt;
static_assert(IsDirectEntrypoint(kQuickCbrt), "Direct C stub marked non-direct.");
qpoints->pCosh = cosh;
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 35cbd1dcc0..bdfb9421df 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -165,6 +165,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pAsin = asin;
qpoints->pAtan = atan;
qpoints->pAtan2 = atan2;
+ qpoints->pPow = pow;
qpoints->pCbrt = cbrt;
qpoints->pCosh = cosh;
qpoints->pExp = exp;
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index 24bf9cc07c..ffb0c94cc7 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -68,6 +68,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pAsin = asin;
qpoints->pAtan = atan;
qpoints->pAtan2 = atan2;
+ qpoints->pPow = pow;
qpoints->pCbrt = cbrt;
qpoints->pCosh = cosh;
qpoints->pExp = exp;
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 93cb6656dc..5a28120b30 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1436,17 +1436,18 @@ DEFINE_FUNCTION art_quick_check_instance_of
PUSH eax // pass arg1 - obj
call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass)
testl %eax, %eax
- jz 1f // jump forward if not assignable
+ jz .Lthrow_class_cast_exception // jump forward if not assignable
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
-
CFI_ADJUST_CFA_OFFSET(12) // Reset unwind info so following code unwinds.
-1:
+
+.Lthrow_class_cast_exception:
POP eax // pop arguments
POP ecx
addl LITERAL(4), %esp
CFI_ADJUST_CFA_OFFSET(-4)
+
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME ebx, ebx // save all registers as basis for long jump context
// Outgoing argument set up
PUSH eax // alignment padding
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 3656f83b58..6bae69c495 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -91,6 +91,7 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
qpoints->pAsin = asin;
qpoints->pAtan = atan;
qpoints->pAtan2 = atan2;
+ qpoints->pPow = pow;
qpoints->pCbrt = cbrt;
qpoints->pCosh = cosh;
qpoints->pExp = exp;
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 85f972309b..781ade99ce 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1410,21 +1410,21 @@ DEFINE_FUNCTION art_quick_check_instance_of
SETUP_FP_CALLEE_SAVE_FRAME
call SYMBOL(artInstanceOfFromCode) // (Object* obj, Class* ref_klass)
testq %rax, %rax
- jz 1f // jump forward if not assignable
+ jz .Lthrow_class_cast_exception // jump forward if not assignable
+ CFI_REMEMBER_STATE
RESTORE_FP_CALLEE_SAVE_FRAME
addq LITERAL(24), %rsp // pop arguments
CFI_ADJUST_CFA_OFFSET(-24)
-
-.Lreturn:
ret
+ CFI_RESTORE_STATE // Reset unwind info so following code unwinds.
- CFI_ADJUST_CFA_OFFSET(24 + 4 * 8) // Reset unwind info so following code unwinds.
-1:
+.Lthrow_class_cast_exception:
RESTORE_FP_CALLEE_SAVE_FRAME
addq LITERAL(8), %rsp // pop padding
CFI_ADJUST_CFA_OFFSET(-8)
POP rsi // Pop arguments
POP rdi
+
SETUP_SAVE_ALL_CALLEE_SAVES_FRAME // save all registers as basis for long jump context
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
call SYMBOL(artThrowClassCastExceptionForObject) // (Object* src, Class* dest, Thread*)
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index bdebe2d9e9..c9a77331a7 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -398,6 +398,7 @@ inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
bool is_default_conflict = IsDefaultConflicting();
bool is_compilable = IsCompilable();
bool must_count_locks = MustCountLocks();
+ HiddenApiAccessFlags::ApiList hidden_api_list = GetHiddenApiAccessFlags();
SetAccessFlags(new_value);
DCHECK_EQ(java_flags, (GetAccessFlags() & kAccJavaFlagsMask));
DCHECK_EQ(is_constructor, IsConstructor());
@@ -411,6 +412,7 @@ inline void ArtMethod::SetIntrinsic(uint32_t intrinsic) {
DCHECK_EQ(is_default_conflict, IsDefaultConflicting());
DCHECK_EQ(is_compilable, IsCompilable());
DCHECK_EQ(must_count_locks, MustCountLocks());
+ DCHECK_EQ(hidden_api_list, GetHiddenApiAccessFlags());
} else {
SetAccessFlags(new_value);
}
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 44a5dde485..96468bba60 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -562,14 +562,14 @@ bool ArtMethod::EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> param
return true;
}
-const uint8_t* ArtMethod::GetQuickenedInfo() {
+ArrayRef<const uint8_t> ArtMethod::GetQuickenedInfo() {
const DexFile& dex_file = GetDeclaringClass()->GetDexFile();
const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
if (oat_dex_file == nullptr || (oat_dex_file->GetOatFile() == nullptr)) {
- return nullptr;
+ return ArrayRef<const uint8_t>();
}
- return oat_dex_file->GetOatFile()->GetVdexFile()->GetQuickenedInfoOf(
- dex_file, GetCodeItemOffset());
+ return oat_dex_file->GetOatFile()->GetVdexFile()->GetQuickenedInfoOf(dex_file,
+ GetDexMethodIndex());
}
const OatQuickMethodHeader* ArtMethod::GetOatQuickMethodHeader(uintptr_t pc) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index c4a586ed92..4501450e05 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -21,6 +21,7 @@
#include <android-base/logging.h>
+#include "base/array_ref.h"
#include "base/bit_utils.h"
#include "base/casts.h"
#include "base/enums.h"
@@ -335,6 +336,10 @@ class ArtMethod FINAL {
AddAccessFlags(kAccMustCountLocks);
}
+ HiddenApiAccessFlags::ApiList GetHiddenApiAccessFlags() {
+ return HiddenApiAccessFlags::DecodeFromRuntime(GetAccessFlags());
+ }
+
// Returns true if this method could be overridden by a default method.
bool IsOverridableByDefaultMethod() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -662,7 +667,7 @@ class ArtMethod FINAL {
return hotness_count_;
}
- const uint8_t* GetQuickenedInfo() REQUIRES_SHARED(Locks::mutator_lock_);
+ ArrayRef<const uint8_t> GetQuickenedInfo() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the method header for the compiled code containing 'pc'. Note that runtime
// methods will return null for this method, as they are not oat based.
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 3cf2b93690..2f7d6ab98f 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -73,7 +73,7 @@ ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
// Offset of field Thread::tlsPtr_.mterp_current_ibase.
#define THREAD_CURRENT_IBASE_OFFSET \
- (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 161) * __SIZEOF_POINTER__)
+ (THREAD_LOCAL_OBJECTS_OFFSET + __SIZEOF_SIZE_T__ + (1 + 162) * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_CURRENT_IBASE_OFFSET,
art::Thread::MterpCurrentIBaseOffset<POINTER_SIZE>().Int32Value())
// Offset of field Thread::tlsPtr_.mterp_default_ibase.
diff --git a/runtime/base/bit_string.h b/runtime/base/bit_string.h
index bfbe8eaf71..7d9fb70de7 100644
--- a/runtime/base/bit_string.h
+++ b/runtime/base/bit_string.h
@@ -114,13 +114,13 @@ inline std::ostream& operator<<(std::ostream& os, const BitStringChar& bc) {
/**
* BitString
*
- * lsb (least significant bit) msb
- * +------------+------------+------------+-----+------------+
- * | | | | | |
- * | Char0 | Char1 | Char2 | ... | CharN |
- * | | | | | |
- * +------------+------------+------------+-----+------------+
- * <- len[0] -> <- len[1] -> <- len[2] -> ... <- len[N] ->
+ * MSB (most significant bit) LSB
+ * +------------+-----+------------+------------+------------+
+ * | | | | | |
+ * | CharN | ... | Char2 | Char1 | Char0 |
+ * | | | | | |
+ * +------------+-----+------------+------------+------------+
+ * <- len[N] -> ... <- len[2] -> <- len[1] -> <- len[0] ->
*
* Stores up to "N+1" characters in a subset of a machine word. Each character has a different
* bitlength, as defined by len[pos]. This BitString can be nested inside of a BitStruct
@@ -145,7 +145,7 @@ struct BitString {
// As this is meant to be used only with "SubtypeCheckInfo",
// the bitlengths and the maximum string length is tuned by maximizing the coverage of "Assigned"
// bitstrings for instance-of and check-cast targets during Optimizing compilation.
- static constexpr size_t kBitSizeAtPosition[] = {12, 3, 8}; // len[] from header docs.
+ static constexpr size_t kBitSizeAtPosition[] = {12, 4, 11}; // len[] from header docs.
static constexpr size_t kCapacity = arraysize(kBitSizeAtPosition); // MaxBitstringLen above.
// How many bits are needed to represent BitString[0..position)?
@@ -165,8 +165,7 @@ struct BitString {
// (e.g. to use with BitField{Insert,Extract,Clear}.)
static constexpr size_t GetLsbForPosition(size_t position) {
DCHECK_GE(kCapacity, position);
- constexpr size_t kMaximumBitLength = GetBitLengthTotalAtPosition(kCapacity);
- return kMaximumBitLength - GetBitLengthTotalAtPosition(position + 1u);
+ return GetBitLengthTotalAtPosition(position);
}
// How many bits are needed for a BitStringChar at the position?
@@ -183,9 +182,7 @@ struct BitString {
BitStringChar operator[](size_t idx) const {
DCHECK_LT(idx, kCapacity);
- StorageType data =
- BitFieldExtract(storage_,
- GetLsbForPosition(idx), kBitSizeAtPosition[idx]);
+ StorageType data = BitFieldExtract(storage_, GetLsbForPosition(idx), kBitSizeAtPosition[idx]);
return BitStringChar(data, kBitSizeAtPosition[idx]);
}
@@ -259,17 +256,10 @@ struct BitString {
DCHECK_GE(kCapacity, end);
BitString copy = *this;
- size_t bit_size = 0;
- for (size_t idx = end; idx < kCapacity; ++idx) {
- bit_size += kBitSizeAtPosition[idx];
- }
- // TODO: precompute above table.
-
- if (bit_size > 0) {
- StorageType data =
- BitFieldClear(copy.storage_,
- GetLsbForPosition(kCapacity),
- bit_size);
+ if (end < kCapacity) {
+ size_t lsb = GetLsbForPosition(end);
+ size_t bit_size = GetLsbForPosition(kCapacity) - lsb;
+ StorageType data = BitFieldClear(copy.storage_, lsb, bit_size);
copy.storage_ = data;
}
diff --git a/runtime/base/bit_string_test.cc b/runtime/base/bit_string_test.cc
index 96aa154ef3..23274e3f2f 100644
--- a/runtime/base/bit_string_test.cc
+++ b/runtime/base/bit_string_test.cc
@@ -65,7 +65,7 @@ size_t AsUint(const T& value) {
return uint_value;
}
-// Make max bitstring, e.g. BitString[4095,7,255] for {12,3,8}
+// Make max bitstring, e.g. BitString[4095,15,2047] for {12,4,11}
template <size_t kCount = BitString::kCapacity>
BitString MakeBitStringMax() {
BitString bs{};
@@ -87,15 +87,14 @@ BitString SetBitStringCharAt(BitString bit_string, size_t i, size_t val) {
#define EXPECT_BITSTRING_STR(expected_str, actual_value) \
EXPECT_STREQ((expected_str), Stringify((actual_value)).c_str())
+// TODO: Consider removing this test, it's kind of replicating the logic in GetLsbForPosition().
TEST(InstanceOfBitString, GetLsbForPosition) {
ASSERT_LE(3u, BitString::kCapacity);
// Test will fail if kCapacity is not at least 3. Update it.
- EXPECT_EQ(0u, BitString::GetLsbForPosition(BitString::kCapacity - 1u));
- EXPECT_EQ(BitString::kBitSizeAtPosition[BitString::kCapacity - 1u],
- BitString::GetLsbForPosition(BitString::kCapacity - 2u));
- EXPECT_EQ(BitString::kBitSizeAtPosition[BitString::kCapacity - 1u] +
- BitString::kBitSizeAtPosition[BitString::kCapacity - 2u],
- BitString::GetLsbForPosition(BitString::kCapacity - 3u));
+ EXPECT_EQ(0u, BitString::GetLsbForPosition(0u));
+ EXPECT_EQ(BitString::kBitSizeAtPosition[0u], BitString::GetLsbForPosition(1u));
+ EXPECT_EQ(BitString::kBitSizeAtPosition[0u] + BitString::kBitSizeAtPosition[1u],
+ BitString::GetLsbForPosition(2u));
}
TEST(InstanceOfBitString, ToString) {
@@ -126,8 +125,8 @@ TEST(InstanceOfBitString, ReadWrite) {
// Each maximal value should be tested here for each position.
uint32_t max_bitstring_ints[] = {
MaxInt<uint32_t>(12),
- MaxInt<uint32_t>(3),
- MaxInt<uint32_t>(8),
+ MaxInt<uint32_t>(4),
+ MaxInt<uint32_t>(11),
};
// Update tests if changing the tuning values above.
@@ -151,14 +150,13 @@ constexpr auto MaxForPos() {
}
TEST(InstanceOfBitString, MemoryRepresentation) {
- // Verify that the lower positions are stored in more significant bits.
+ // Verify that the lower positions are stored in less significant bits.
BitString bs = MakeBitString({MaxForPos<0>(), MaxForPos<1>()});
BitString::StorageType as_int = static_cast<BitString::StorageType>(bs);
- // Below tests assumes the capacity is 3. Update if it this changes.
- ASSERT_EQ(3u, BitString::kCapacity);
- EXPECT_EQ(MaxForPos<0>() << (BitString::kBitSizeAtPosition[2] + BitString::kBitSizeAtPosition[1]) |
- (MaxForPos<1>() << BitString::kBitSizeAtPosition[2]),
+ // Below tests assumes the capacity is at least 3.
+ ASSERT_LE(3u, BitString::kCapacity);
+ EXPECT_EQ((MaxForPos<0>() << 0) | (MaxForPos<1>() << BitString::kBitSizeAtPosition[0]),
as_int);
}
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index 34cddbff6a..d2a99f1a39 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -46,10 +46,14 @@ template<typename T>
constexpr int CLZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
- static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
- "T too large, must be smaller than long long");
+ static_assert(std::numeric_limits<T>::radix == 2, "Unexpected radix!");
+ static_assert(sizeof(T) == sizeof(uint64_t) || sizeof(T) <= sizeof(uint32_t),
+ "Unsupported sizeof(T)");
DCHECK_NE(x, 0u);
- return (sizeof(T) == sizeof(uint32_t)) ? __builtin_clz(x) : __builtin_clzll(x);
+ constexpr bool is_64_bit = (sizeof(T) == sizeof(uint64_t));
+ constexpr size_t adjustment =
+ is_64_bit ? 0u : std::numeric_limits<uint32_t>::digits - std::numeric_limits<T>::digits;
+ return is_64_bit ? __builtin_clzll(x) : __builtin_clz(x) - adjustment;
}
// Similar to CLZ except that on zero input it returns bitwidth and supports signed integers.
@@ -65,10 +69,10 @@ constexpr int CTZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
// It is not unreasonable to ask for trailing zeros in a negative number. As such, do not check
// that T is an unsigned type.
- static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
- "T too large, must be smaller than long long");
+ static_assert(sizeof(T) == sizeof(uint64_t) || sizeof(T) <= sizeof(uint32_t),
+ "Unsupported sizeof(T)");
DCHECK_NE(x, static_cast<T>(0));
- return (sizeof(T) == sizeof(uint32_t)) ? __builtin_ctz(x) : __builtin_ctzll(x);
+ return (sizeof(T) == sizeof(uint64_t)) ? __builtin_ctzll(x) : __builtin_ctz(x);
}
// Similar to CTZ except that on zero input it returns bitwidth and supports signed integers.
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 7077298ca9..d541b79a98 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -101,6 +101,7 @@ enum LockLevel {
kAllocatedThreadIdsLock,
kMonitorPoolLock,
kClassLinkerClassesLock, // TODO rename.
+ kDexToDexCompilerLock,
kJitCodeCacheLock,
kCHALock,
kSubtypeCheckLock,
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 877654247c..b61fb4afe9 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3286,7 +3286,15 @@ void ClassLinker::LoadField(const ClassDataItemIterator& it,
const uint32_t field_idx = it.GetMemberIndex();
dst->SetDexFieldIndex(field_idx);
dst->SetDeclaringClass(klass.Get());
- dst->SetAccessFlags(it.GetFieldAccessFlags());
+
+ // Get access flags from the DexFile. If this is a boot class path class,
+ // also set its runtime hidden API access flags.
+ uint32_t access_flags = it.GetFieldAccessFlags();
+ if (klass->IsBootStrapClassLoaded()) {
+ access_flags =
+ HiddenApiAccessFlags::EncodeForRuntime(access_flags, it.DecodeHiddenAccessFlags());
+ }
+ dst->SetAccessFlags(access_flags);
}
void ClassLinker::LoadMethod(const DexFile& dex_file,
@@ -3302,8 +3310,15 @@ void ClassLinker::LoadMethod(const DexFile& dex_file,
dst->SetDeclaringClass(klass.Get());
dst->SetCodeItemOffset(it.GetMethodCodeItemOffset());
+ // Get access flags from the DexFile. If this is a boot class path class,
+ // also set its runtime hidden API access flags.
uint32_t access_flags = it.GetMethodAccessFlags();
+ if (klass->IsBootStrapClassLoaded()) {
+ access_flags =
+ HiddenApiAccessFlags::EncodeForRuntime(access_flags, it.DecodeHiddenAccessFlags());
+ }
+
if (UNLIKELY(strcmp("finalize", method_name) == 0)) {
// Set finalizable flag on declaring class.
if (strcmp("V", dex_file.GetShorty(method_id.proto_idx_)) == 0) {
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 3ec5335a80..e646520f3d 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -21,6 +21,7 @@
#include "base/stl_util.h"
#include "class_linker.h"
#include "class_loader_utils.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "handle_scope-inl.h"
@@ -203,6 +204,7 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
// We may get resource-only apks which we cannot load.
// TODO(calin): Refine the dex opening interface to be able to tell if an archive contains
// no dex files. So that we can distinguish the real failures...
+ const ArtDexFileLoader dex_file_loader;
for (ClassLoaderInfo& info : class_loader_chain_) {
size_t opened_dex_files_index = info.opened_dex_files.size();
for (const std::string& cp_elem : info.classpath) {
@@ -215,12 +217,12 @@ bool ClassLoaderContext::OpenDexFiles(InstructionSet isa, const std::string& cla
std::string error_msg;
// When opening the dex files from the context we expect their checksum to match their
// contents. So pass true to verify_checksum.
- if (!DexFileLoader::Open(location.c_str(),
- location.c_str(),
- Runtime::Current()->IsVerificationEnabled(),
- /*verify_checksum*/ true,
- &error_msg,
- &info.opened_dex_files)) {
+ if (!dex_file_loader.Open(location.c_str(),
+ location.c_str(),
+ Runtime::Current()->IsVerificationEnabled(),
+ /*verify_checksum*/ true,
+ &error_msg,
+ &info.opened_dex_files)) {
// If we fail to open the dex file because it's been stripped, try to open the dex file
// from its corresponding oat file.
// This could happen when we need to recompile a pre-build whose dex code has been stripped.
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index bc726354a8..4689ae4c3f 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -278,14 +278,17 @@ TEST_F(ClassLoaderContextTest, OpenValidDexFiles) {
VerifyOpenDexFiles(context.get(), 1, &all_dex_files1);
}
-static std::string CreateRelativeString(const std::string& in, const char* cwd) {
+// Creates a relative path from cwd to 'in'. Returns false if it cannot be done.
+// TODO We should somehow support this in all situations. b/72042237.
+static bool CreateRelativeString(const std::string& in, const char* cwd, std::string* out) {
int cwd_len = strlen(cwd);
if (!android::base::StartsWith(in, cwd) || (cwd_len < 1)) {
- LOG(FATAL) << in << " " << cwd;
+ return false;
}
bool contains_trailing_slash = (cwd[cwd_len - 1] == '/');
int start_position = cwd_len + (contains_trailing_slash ? 0 : 1);
- return in.substr(start_position);
+ *out = in.substr(start_position);
+ return true;
}
TEST_F(ClassLoaderContextTest, OpenValidDexFilesRelative) {
@@ -293,9 +296,17 @@ TEST_F(ClassLoaderContextTest, OpenValidDexFilesRelative) {
if (getcwd(cwd_buf, arraysize(cwd_buf)) == nullptr) {
PLOG(FATAL) << "Could not get working directory";
}
- std::string multidex_name = CreateRelativeString(GetTestDexFileName("MultiDex"), cwd_buf);
- std::string myclass_dex_name = CreateRelativeString(GetTestDexFileName("MyClass"), cwd_buf);
- std::string dex_name = CreateRelativeString(GetTestDexFileName("Main"), cwd_buf);
+ std::string multidex_name;
+ std::string myclass_dex_name;
+ std::string dex_name;
+ if (!CreateRelativeString(GetTestDexFileName("MultiDex"), cwd_buf, &multidex_name) ||
+ !CreateRelativeString(GetTestDexFileName("MyClass"), cwd_buf, &myclass_dex_name) ||
+ !CreateRelativeString(GetTestDexFileName("Main"), cwd_buf, &dex_name)) {
+ LOG(ERROR) << "Test OpenValidDexFilesRelative cannot be run because target dex files have no "
+ << "relative path.";
+ SUCCEED();
+ return;
+ }
std::unique_ptr<ClassLoaderContext> context =
@@ -321,10 +332,17 @@ TEST_F(ClassLoaderContextTest, OpenValidDexFilesClasspathDir) {
if (getcwd(cwd_buf, arraysize(cwd_buf)) == nullptr) {
PLOG(FATAL) << "Could not get working directory";
}
- std::string multidex_name = CreateRelativeString(GetTestDexFileName("MultiDex"), cwd_buf);
- std::string myclass_dex_name = CreateRelativeString(GetTestDexFileName("MyClass"), cwd_buf);
- std::string dex_name = CreateRelativeString(GetTestDexFileName("Main"), cwd_buf);
-
+ std::string multidex_name;
+ std::string myclass_dex_name;
+ std::string dex_name;
+ if (!CreateRelativeString(GetTestDexFileName("MultiDex"), cwd_buf, &multidex_name) ||
+ !CreateRelativeString(GetTestDexFileName("MyClass"), cwd_buf, &myclass_dex_name) ||
+ !CreateRelativeString(GetTestDexFileName("Main"), cwd_buf, &dex_name)) {
+ LOG(ERROR) << "Test OpenValidDexFilesClasspathDir cannot be run because target dex files have "
+ << "no relative path.";
+ SUCCEED();
+ return;
+ }
std::unique_ptr<ClassLoaderContext> context =
ClassLoaderContext::Create(
"PCL[" + multidex_name + ":" + myclass_dex_name + "];" +
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index 96d660fd64..39dbebfdf2 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -35,6 +35,7 @@
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
#include "compiler_callbacks.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "gc/heap.h"
@@ -375,7 +376,8 @@ std::unique_ptr<const DexFile> CommonRuntimeTestImpl::LoadExpectSingleDexFile(
std::string error_msg;
MemMap::Init();
static constexpr bool kVerifyChecksum = true;
- if (!DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(
location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files)) {
LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
UNREACHABLE();
@@ -574,12 +576,13 @@ std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTestImpl::OpenTestDexFi
std::string filename = GetTestDexFileName(name);
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
+ const ArtDexFileLoader dex_file_loader;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- bool success = DexFileLoader::Open(filename.c_str(),
- filename.c_str(),
- /* verify */ true,
- kVerifyChecksum,
- &error_msg, &dex_files);
+ bool success = dex_file_loader.Open(filename.c_str(),
+ filename.c_str(),
+ /* verify */ true,
+ kVerifyChecksum,
+ &error_msg, &dex_files);
CHECK(success) << "Failed to open '" << filename << "': " << error_msg;
for (auto& dex_file : dex_files) {
CHECK_EQ(PROT_READ, dex_file->GetPermissions());
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 1c73240eea..0aed70a330 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -311,6 +311,12 @@ class CheckJniAbortCatcher {
printf("WARNING: TEST DISABLED FOR COMPACT DEX\n"); \
return; \
}
+
+#define TEST_DISABLED_FOR_HEAP_POISONING() \
+ if (kPoisonHeapReferences) { \
+ printf("WARNING: TEST DISABLED FOR HEAP POISONING\n"); \
+ return; \
+ }
} // namespace art
#endif // ART_RUNTIME_COMMON_RUNTIME_TEST_H_
diff --git a/runtime/dex/art_dex_file_loader.cc b/runtime/dex/art_dex_file_loader.cc
new file mode 100644
index 0000000000..dee736ecff
--- /dev/null
+++ b/runtime/dex/art_dex_file_loader.cc
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_dex_file_loader.h"
+
+#include <sys/mman.h> // For the PROT_* and MAP_* constants.
+#include <sys/stat.h>
+
+#include "android-base/stringprintf.h"
+
+#include "base/file_magic.h"
+#include "base/stl_util.h"
+#include "base/systrace.h"
+#include "base/unix_file/fd_file.h"
+#include "compact_dex_file.h"
+#include "dex_file.h"
+#include "dex_file_verifier.h"
+#include "standard_dex_file.h"
+#include "zip_archive.h"
+
+namespace art {
+
+namespace {
+
+class MemMapContainer : public DexFileContainer {
+ public:
+ explicit MemMapContainer(std::unique_ptr<MemMap>&& mem_map) : mem_map_(std::move(mem_map)) { }
+ virtual ~MemMapContainer() OVERRIDE { }
+
+ int GetPermissions() OVERRIDE {
+ if (mem_map_.get() == nullptr) {
+ return 0;
+ } else {
+ return mem_map_->GetProtect();
+ }
+ }
+
+ bool IsReadOnly() OVERRIDE {
+ return GetPermissions() == PROT_READ;
+ }
+
+ bool EnableWrite() OVERRIDE {
+ CHECK(IsReadOnly());
+ if (mem_map_.get() == nullptr) {
+ return false;
+ } else {
+ return mem_map_->Protect(PROT_READ | PROT_WRITE);
+ }
+ }
+
+ bool DisableWrite() OVERRIDE {
+ CHECK(!IsReadOnly());
+ if (mem_map_.get() == nullptr) {
+ return false;
+ } else {
+ return mem_map_->Protect(PROT_READ);
+ }
+ }
+
+ private:
+ std::unique_ptr<MemMap> mem_map_;
+ DISALLOW_COPY_AND_ASSIGN(MemMapContainer);
+};
+
+} // namespace
+
+using android::base::StringPrintf;
+
+static constexpr OatDexFile* kNoOatDexFile = nullptr;
+
+
+bool ArtDexFileLoader::GetMultiDexChecksums(const char* filename,
+ std::vector<uint32_t>* checksums,
+ std::string* error_msg,
+ int zip_fd) const {
+ CHECK(checksums != nullptr);
+ uint32_t magic;
+
+ File fd;
+ if (zip_fd != -1) {
+ if (ReadMagicAndReset(zip_fd, &magic, error_msg)) {
+ fd = File(zip_fd, false /* check_usage */);
+ }
+ } else {
+ fd = OpenAndReadMagic(filename, &magic, error_msg);
+ }
+ if (fd.Fd() == -1) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ if (IsZipMagic(magic)) {
+ std::unique_ptr<ZipArchive> zip_archive(
+ ZipArchive::OpenFromFd(fd.Release(), filename, error_msg));
+ if (zip_archive.get() == nullptr) {
+ *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", filename,
+ error_msg->c_str());
+ return false;
+ }
+
+ uint32_t i = 0;
+ std::string zip_entry_name = GetMultiDexClassesDexName(i++);
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name.c_str(), error_msg));
+ if (zip_entry.get() == nullptr) {
+ *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", filename,
+ zip_entry_name.c_str(), error_msg->c_str());
+ return false;
+ }
+
+ do {
+ checksums->push_back(zip_entry->GetCrc32());
+ zip_entry_name = GetMultiDexClassesDexName(i++);
+ zip_entry.reset(zip_archive->Find(zip_entry_name.c_str(), error_msg));
+ } while (zip_entry.get() != nullptr);
+ return true;
+ }
+ if (IsMagicValid(magic)) {
+ std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
+ filename,
+ /* verify */ false,
+ /* verify_checksum */ false,
+ /* mmap_shared */ false,
+ error_msg));
+ if (dex_file == nullptr) {
+ return false;
+ }
+ checksums->push_back(dex_file->GetHeader().checksum_);
+ return true;
+ }
+ *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
+ return false;
+}
+
+std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const {
+ ScopedTrace trace(std::string("Open dex file from RAM ") + location);
+ return OpenCommon(base,
+ size,
+ location,
+ location_checksum,
+ oat_dex_file,
+ verify,
+ verify_checksum,
+ error_msg,
+ /*container*/ nullptr,
+ /*verify_result*/ nullptr);
+}
+
+std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const {
+ ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
+ CHECK(map.get() != nullptr);
+
+ if (map->Size() < sizeof(DexFile::Header)) {
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
+ return nullptr;
+ }
+
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ location,
+ location_checksum,
+ kNoOatDexFile,
+ verify,
+ verify_checksum,
+ error_msg,
+ new MemMapContainer(std::move(map)),
+ /*verify_result*/ nullptr);
+ return dex_file;
+}
+
+bool ArtDexFileLoader::Open(const char* filename,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
+ ScopedTrace trace(std::string("Open dex file ") + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
+ uint32_t magic;
+ File fd = OpenAndReadMagic(filename, &magic, error_msg);
+ if (fd.Fd() == -1) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ if (IsZipMagic(magic)) {
+ return OpenZip(fd.Release(), location, verify, verify_checksum, error_msg, dex_files);
+ }
+ if (IsMagicValid(magic)) {
+ std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
+ location,
+ verify,
+ verify_checksum,
+ /* mmap_shared */ false,
+ error_msg));
+ if (dex_file.get() != nullptr) {
+ dex_files->push_back(std::move(dex_file));
+ return true;
+ } else {
+ return false;
+ }
+ }
+ *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
+ return false;
+}
+
+std::unique_ptr<const DexFile> ArtDexFileLoader::OpenDex(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ bool mmap_shared,
+ std::string* error_msg) const {
+ ScopedTrace trace("Open dex file " + std::string(location));
+ return OpenFile(fd, location, verify, verify_checksum, mmap_shared, error_msg);
+}
+
+bool ArtDexFileLoader::OpenZip(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
+ ScopedTrace trace("Dex file open Zip " + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
+ std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
+ if (zip_archive.get() == nullptr) {
+ DCHECK(!error_msg->empty());
+ return false;
+ }
+ return OpenAllDexFilesFromZip(
+ *zip_archive, location, verify, verify_checksum, error_msg, dex_files);
+}
+
+std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ bool mmap_shared,
+ std::string* error_msg) const {
+ ScopedTrace trace(std::string("Open dex file ") + std::string(location));
+ CHECK(!location.empty());
+ std::unique_ptr<MemMap> map;
+ {
+ File delayed_close(fd, /* check_usage */ false);
+ struct stat sbuf;
+ memset(&sbuf, 0, sizeof(sbuf));
+ if (fstat(fd, &sbuf) == -1) {
+ *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location.c_str(),
+ strerror(errno));
+ return nullptr;
+ }
+ if (S_ISDIR(sbuf.st_mode)) {
+ *error_msg = StringPrintf("Attempt to mmap directory '%s'", location.c_str());
+ return nullptr;
+ }
+ size_t length = sbuf.st_size;
+ map.reset(MemMap::MapFile(length,
+ PROT_READ,
+ mmap_shared ? MAP_SHARED : MAP_PRIVATE,
+ fd,
+ 0,
+ /*low_4gb*/false,
+ location.c_str(),
+ error_msg));
+ if (map == nullptr) {
+ DCHECK(!error_msg->empty());
+ return nullptr;
+ }
+ }
+
+ if (map->Size() < sizeof(DexFile::Header)) {
+ *error_msg = StringPrintf(
+ "DexFile: failed to open dex file '%s' that is too short to have a header",
+ location.c_str());
+ return nullptr;
+ }
+
+ const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(map->Begin());
+
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ location,
+ dex_header->checksum_,
+ kNoOatDexFile,
+ verify,
+ verify_checksum,
+ error_msg,
+ new MemMapContainer(std::move(map)),
+ /*verify_result*/ nullptr);
+
+ return dex_file;
+}
+
+std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
+ const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ ZipOpenErrorCode* error_code) const {
+ ScopedTrace trace("Dex file open from Zip Archive " + std::string(location));
+ CHECK(!location.empty());
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
+ if (zip_entry == nullptr) {
+ *error_code = ZipOpenErrorCode::kEntryNotFound;
+ return nullptr;
+ }
+ if (zip_entry->GetUncompressedLength() == 0) {
+ *error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str());
+ *error_code = ZipOpenErrorCode::kDexFileError;
+ return nullptr;
+ }
+
+ std::unique_ptr<MemMap> map;
+ if (zip_entry->IsUncompressed()) {
+ if (!zip_entry->IsAlignedTo(alignof(DexFile::Header))) {
+ // Do not mmap unaligned ZIP entries because
+ // doing so would fail dex verification which requires 4 byte alignment.
+ LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
+ << "please zipalign to " << alignof(DexFile::Header) << " bytes. "
+ << "Falling back to extracting file.";
+ } else {
+ // Map uncompressed files within zip as file-backed to avoid a dirty copy.
+ map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg));
+ if (map == nullptr) {
+ LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
+ << "is your ZIP file corrupted? Falling back to extraction.";
+ // Try again with Extraction which still has a chance of recovery.
+ }
+ }
+ }
+
+ if (map == nullptr) {
+ // Default path for compressed ZIP entries,
+ // and fallback for stored ZIP entries.
+ map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
+ }
+
+ if (map == nullptr) {
+ *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
+ error_msg->c_str());
+ *error_code = ZipOpenErrorCode::kExtractToMemoryError;
+ return nullptr;
+ }
+ VerifyResult verify_result;
+ std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
+ map->Size(),
+ location,
+ zip_entry->GetCrc32(),
+ kNoOatDexFile,
+ verify,
+ verify_checksum,
+ error_msg,
+ new MemMapContainer(std::move(map)),
+ &verify_result);
+ if (dex_file == nullptr) {
+ if (verify_result == VerifyResult::kVerifyNotAttempted) {
+ *error_code = ZipOpenErrorCode::kDexFileError;
+ } else {
+ *error_code = ZipOpenErrorCode::kVerifyError;
+ }
+ return nullptr;
+ }
+ if (!dex_file->DisableWrite()) {
+ *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
+ *error_code = ZipOpenErrorCode::kMakeReadOnlyError;
+ return nullptr;
+ }
+ CHECK(dex_file->IsReadOnly()) << location;
+ if (verify_result != VerifyResult::kVerifySucceeded) {
+ *error_code = ZipOpenErrorCode::kVerifyError;
+ return nullptr;
+ }
+ *error_code = ZipOpenErrorCode::kNoError;
+ return dex_file;
+}
+
+// Technically we do not have a limitation with respect to the number of dex files that can be in a
+// multidex APK. However, it's bad practice, as each dex file requires its own tables for symbols
+// (types, classes, methods, ...) and dex caches. So warn the user that we open a zip with what
+// seems an excessive number.
+static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
+
+bool ArtDexFileLoader::OpenAllDexFilesFromZip(
+ const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
+ ScopedTrace trace("Dex file open from Zip " + std::string(location));
+ DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
+ ZipOpenErrorCode error_code;
+ std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
+ kClassesDex,
+ location,
+ verify,
+ verify_checksum,
+ error_msg,
+ &error_code));
+ if (dex_file.get() == nullptr) {
+ return false;
+ } else {
+ // Had at least classes.dex.
+ dex_files->push_back(std::move(dex_file));
+
+ // Now try some more.
+
+ // We could try to avoid std::string allocations by working on a char array directly. As we
+ // do not expect a lot of iterations, this seems too involved and brittle.
+
+ for (size_t i = 1; ; ++i) {
+ std::string name = GetMultiDexClassesDexName(i);
+ std::string fake_location = GetMultiDexLocation(i, location.c_str());
+ std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive,
+ name.c_str(),
+ fake_location,
+ verify,
+ verify_checksum,
+ error_msg,
+ &error_code));
+ if (next_dex_file.get() == nullptr) {
+ if (error_code != ZipOpenErrorCode::kEntryNotFound) {
+ LOG(WARNING) << "Zip open failed: " << *error_msg;
+ }
+ break;
+ } else {
+ dex_files->push_back(std::move(next_dex_file));
+ }
+
+ if (i == kWarnOnManyDexFilesThreshold) {
+ LOG(WARNING) << location << " has in excess of " << kWarnOnManyDexFilesThreshold
+ << " dex files. Please consider coalescing and shrinking the number to "
+ " avoid runtime overhead.";
+ }
+
+ if (i == std::numeric_limits<size_t>::max()) {
+ LOG(ERROR) << "Overflow in number of dex files!";
+ break;
+ }
+ }
+
+ return true;
+ }
+}
+
+} // namespace art
diff --git a/runtime/dex/art_dex_file_loader.h b/runtime/dex/art_dex_file_loader.h
new file mode 100644
index 0000000000..8c12bf3137
--- /dev/null
+++ b/runtime/dex/art_dex_file_loader.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
+#define ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
+
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "dex_file_loader.h"
+#include "base/macros.h"
+
+namespace art {
+
+class DexFile;
+class DexFileContainer;
+class MemMap;
+class OatDexFile;
+class ZipArchive;
+
+// Class that is used to open dex files and deal with corresponding multidex and location logic.
+class ArtDexFileLoader : public DexFileLoader {
+ public:
+ virtual ~ArtDexFileLoader() { }
+
+ // Returns the checksums of a file for comparison with GetLocationChecksum().
+ // For .dex files, this is the single header checksum.
+ // For zip files, this is the zip entry CRC32 checksum for classes.dex and
+ // each additional multidex entry classes2.dex, classes3.dex, etc.
+ // If a valid zip_fd is provided the file content will be read directly from
+ // the descriptor and `filename` will be used as alias for error logging. If
+ // zip_fd is -1, the method will try to open the `filename` and read the
+ // content from it.
+ // Return true if the checksums could be found, false otherwise.
+ bool GetMultiDexChecksums(const char* filename,
+ std::vector<uint32_t>* checksums,
+ std::string* error_msg,
+ int zip_fd = -1) const OVERRIDE;
+
+ // Opens .dex file, backed by existing memory
+ std::unique_ptr<const DexFile> Open(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const OVERRIDE;
+
+ // Opens .dex file that has been memory-mapped by the caller.
+ std::unique_ptr<const DexFile> Open(const std::string& location,
+ uint32_t location_checkum,
+ std::unique_ptr<MemMap> mem_map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const OVERRIDE;
+
+ // Opens all .dex files found in the file, guessing the container format based on file extension.
+ bool Open(const char* filename,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const OVERRIDE;
+
+ // Open a single dex file from an fd. This function closes the fd.
+ std::unique_ptr<const DexFile> OpenDex(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ bool mmap_shared,
+ std::string* error_msg) const OVERRIDE;
+
+ // Opens dex files from within a .jar, .zip, or .apk file
+ bool OpenZip(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const OVERRIDE;
+
+ private:
+ std::unique_ptr<const DexFile> OpenFile(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ bool mmap_shared,
+ std::string* error_msg) const OVERRIDE;
+
+ // Open all classesXXX.dex files from a zip archive.
+ bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files)
+ const OVERRIDE;
+
+  // Opens .dex file from the entry_name in a zip archive. error_code is undefined when a
+  // non-null dex file is returned (i.e. it is only meaningful on failure).
+ std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ ZipOpenErrorCode* error_code) const OVERRIDE;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_ART_DEX_FILE_LOADER_H_
diff --git a/runtime/dex/code_item_accessors-inl.h b/runtime/dex/code_item_accessors-inl.h
index 2fdf262b7d..63fd120991 100644
--- a/runtime/dex/code_item_accessors-inl.h
+++ b/runtime/dex/code_item_accessors-inl.h
@@ -28,21 +28,15 @@
namespace art {
inline CodeItemInstructionAccessor::CodeItemInstructionAccessor(ArtMethod* method)
- : CodeItemInstructionAccessor(method->GetDexFile(), method->GetCodeItem()) {}
+ : CodeItemInstructionAccessor(*method->GetDexFile(), method->GetCodeItem()) {}
inline CodeItemDataAccessor::CodeItemDataAccessor(ArtMethod* method)
- : CodeItemDataAccessor(method->GetDexFile(), method->GetCodeItem()) {}
+ : CodeItemDataAccessor(*method->GetDexFile(), method->GetCodeItem()) {}
inline CodeItemDebugInfoAccessor::CodeItemDebugInfoAccessor(ArtMethod* method)
- : CodeItemDebugInfoAccessor(method->GetDexFile(), method->GetCodeItem()) {}
-
-inline CodeItemDebugInfoAccessor::CodeItemDebugInfoAccessor(const DexFile* dex_file,
- const DexFile::CodeItem* code_item) {
- if (code_item == nullptr) {
- return;
- }
- Init(dex_file, code_item, OatFile::GetDebugInfoOffset(*dex_file, code_item->debug_info_off_));
-}
+ : CodeItemDebugInfoAccessor(*method->GetDexFile(),
+ method->GetCodeItem(),
+ method->GetDexMethodIndex()) {}
} // namespace art
diff --git a/runtime/dex/code_item_accessors-no_art-inl.h b/runtime/dex/code_item_accessors-no_art-inl.h
index 016923d035..6a99009b00 100644
--- a/runtime/dex/code_item_accessors-no_art-inl.h
+++ b/runtime/dex/code_item_accessors-no_art-inl.h
@@ -26,32 +26,42 @@
// The no ART version is used by binaries that don't include the whole runtime.
namespace art {
+inline void CodeItemInstructionAccessor::Init(uint32_t insns_size_in_code_units,
+ const uint16_t* insns) {
+ insns_size_in_code_units_ = insns_size_in_code_units;
+ insns_ = insns;
+}
+
inline void CodeItemInstructionAccessor::Init(const CompactDexFile::CodeItem& code_item) {
- insns_size_in_code_units_ = code_item.insns_size_in_code_units_;
- insns_ = code_item.insns_;
+ uint32_t insns_size_in_code_units;
+ code_item.DecodeFields</*kDecodeOnlyInstructionCount*/ true>(
+ &insns_size_in_code_units,
+ /*registers_size*/ nullptr,
+ /*ins_size*/ nullptr,
+ /*outs_size*/ nullptr,
+ /*tries_size*/ nullptr);
+ Init(insns_size_in_code_units, code_item.insns_);
}
inline void CodeItemInstructionAccessor::Init(const StandardDexFile::CodeItem& code_item) {
- insns_size_in_code_units_ = code_item.insns_size_in_code_units_;
- insns_ = code_item.insns_;
+ Init(code_item.insns_size_in_code_units_, code_item.insns_);
}
-inline void CodeItemInstructionAccessor::Init(const DexFile* dex_file,
+inline void CodeItemInstructionAccessor::Init(const DexFile& dex_file,
const DexFile::CodeItem* code_item) {
if (code_item != nullptr) {
- DCHECK(dex_file->HasAddress(code_item));
- DCHECK(dex_file != nullptr);
- if (dex_file->IsCompactDexFile()) {
+ DCHECK(dex_file.HasAddress(code_item));
+ if (dex_file.IsCompactDexFile()) {
Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
} else {
- DCHECK(dex_file->IsStandardDexFile());
+ DCHECK(dex_file.IsStandardDexFile());
Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
}
}
}
inline CodeItemInstructionAccessor::CodeItemInstructionAccessor(
- const DexFile* dex_file,
+ const DexFile& dex_file,
const DexFile::CodeItem* code_item) {
Init(dex_file, code_item);
}
@@ -73,11 +83,13 @@ inline IterationRange<DexInstructionIterator> CodeItemInstructionAccessor::Instr
}
inline void CodeItemDataAccessor::Init(const CompactDexFile::CodeItem& code_item) {
- CodeItemInstructionAccessor::Init(code_item);
- registers_size_ = code_item.registers_size_;
- ins_size_ = code_item.ins_size_;
- outs_size_ = code_item.outs_size_;
- tries_size_ = code_item.tries_size_;
+ uint32_t insns_size_in_code_units;
+ code_item.DecodeFields</*kDecodeOnlyInstructionCount*/ false>(&insns_size_in_code_units,
+ &registers_size_,
+ &ins_size_,
+ &outs_size_,
+ &tries_size_);
+ CodeItemInstructionAccessor::Init(insns_size_in_code_units, code_item.insns_);
}
inline void CodeItemDataAccessor::Init(const StandardDexFile::CodeItem& code_item) {
@@ -88,20 +100,19 @@ inline void CodeItemDataAccessor::Init(const StandardDexFile::CodeItem& code_ite
tries_size_ = code_item.tries_size_;
}
-inline void CodeItemDataAccessor::Init(const DexFile* dex_file,
+inline void CodeItemDataAccessor::Init(const DexFile& dex_file,
const DexFile::CodeItem* code_item) {
if (code_item != nullptr) {
- DCHECK(dex_file != nullptr);
- if (dex_file->IsCompactDexFile()) {
+ if (dex_file.IsCompactDexFile()) {
CodeItemDataAccessor::Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
} else {
- DCHECK(dex_file->IsStandardDexFile());
+ DCHECK(dex_file.IsStandardDexFile());
CodeItemDataAccessor::Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
}
}
}
-inline CodeItemDataAccessor::CodeItemDataAccessor(const DexFile* dex_file,
+inline CodeItemDataAccessor::CodeItemDataAccessor(const DexFile& dex_file,
const DexFile::CodeItem* code_item) {
Init(dex_file, code_item);
}
@@ -125,24 +136,51 @@ inline const DexFile::TryItem* CodeItemDataAccessor::FindTryItem(uint32_t try_de
return index != -1 ? &try_items.begin()[index] : nullptr;
}
-inline void CodeItemDebugInfoAccessor::Init(const DexFile* dex_file,
+inline const void* CodeItemDataAccessor::CodeItemDataEnd() const {
+ const uint8_t* handler_data = GetCatchHandlerData();
+
+ if (TriesSize() == 0 || handler_data == nullptr) {
+ return &end().Inst();
+ }
+ // Get the start of the handler data.
+ const uint32_t handlers_size = DecodeUnsignedLeb128(&handler_data);
+ // Manually read each handler.
+ for (uint32_t i = 0; i < handlers_size; ++i) {
+ int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
+ if (uleb128_count <= 0) {
+ uleb128_count = -uleb128_count + 1;
+ }
+ for (int32_t j = 0; j < uleb128_count; ++j) {
+ DecodeUnsignedLeb128(&handler_data);
+ }
+ }
+ return reinterpret_cast<const void*>(handler_data);
+}
+
+inline void CodeItemDebugInfoAccessor::Init(const DexFile& dex_file,
const DexFile::CodeItem* code_item,
- uint32_t debug_info_offset) {
- dex_file_ = dex_file;
- debug_info_offset_ = debug_info_offset;
- if (dex_file->IsCompactDexFile()) {
- Init(down_cast<const CompactDexFile::CodeItem&>(*code_item));
+ uint32_t dex_method_index) {
+ if (code_item == nullptr) {
+ return;
+ }
+ dex_file_ = &dex_file;
+ if (dex_file.IsCompactDexFile()) {
+ Init(down_cast<const CompactDexFile::CodeItem&>(*code_item), dex_method_index);
} else {
- DCHECK(dex_file->IsStandardDexFile());
+ DCHECK(dex_file.IsStandardDexFile());
Init(down_cast<const StandardDexFile::CodeItem&>(*code_item));
}
}
-inline void CodeItemDebugInfoAccessor::Init(const CompactDexFile::CodeItem& code_item) {
+inline void CodeItemDebugInfoAccessor::Init(const CompactDexFile::CodeItem& code_item,
+ uint32_t dex_method_index) {
+ debug_info_offset_ = down_cast<const CompactDexFile*>(dex_file_)->GetDebugInfoOffset(
+ dex_method_index);
CodeItemDataAccessor::Init(code_item);
}
inline void CodeItemDebugInfoAccessor::Init(const StandardDexFile::CodeItem& code_item) {
+ debug_info_offset_ = code_item.debug_info_off_;
CodeItemDataAccessor::Init(code_item);
}
diff --git a/runtime/dex/code_item_accessors.h b/runtime/dex/code_item_accessors.h
index 65cc0bf996..08f823cae8 100644
--- a/runtime/dex/code_item_accessors.h
+++ b/runtime/dex/code_item_accessors.h
@@ -33,7 +33,7 @@ class ArtMethod;
// StandardDexFile.
class CodeItemInstructionAccessor {
public:
- ALWAYS_INLINE CodeItemInstructionAccessor(const DexFile* dex_file,
+ ALWAYS_INLINE CodeItemInstructionAccessor(const DexFile& dex_file,
const DexFile::CodeItem* code_item);
ALWAYS_INLINE explicit CodeItemInstructionAccessor(ArtMethod* method);
@@ -66,9 +66,10 @@ class CodeItemInstructionAccessor {
protected:
CodeItemInstructionAccessor() = default;
+ ALWAYS_INLINE void Init(uint32_t insns_size_in_code_units, const uint16_t* insns);
ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
- ALWAYS_INLINE void Init(const DexFile* dex_file, const DexFile::CodeItem* code_item);
+ ALWAYS_INLINE void Init(const DexFile& dex_file, const DexFile::CodeItem* code_item);
private:
// size of the insns array, in 2 byte code units. 0 if there is no code item.
@@ -82,7 +83,7 @@ class CodeItemInstructionAccessor {
// StandardDexFile.
class CodeItemDataAccessor : public CodeItemInstructionAccessor {
public:
- ALWAYS_INLINE CodeItemDataAccessor(const DexFile* dex_file, const DexFile::CodeItem* code_item);
+ ALWAYS_INLINE CodeItemDataAccessor(const DexFile& dex_file, const DexFile::CodeItem* code_item);
ALWAYS_INLINE explicit CodeItemDataAccessor(ArtMethod* method);
@@ -108,12 +109,14 @@ class CodeItemDataAccessor : public CodeItemInstructionAccessor {
const DexFile::TryItem* FindTryItem(uint32_t try_dex_pc) const;
+ inline const void* CodeItemDataEnd() const;
+
protected:
CodeItemDataAccessor() = default;
ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
- ALWAYS_INLINE void Init(const DexFile* dex_file, const DexFile::CodeItem* code_item);
+ ALWAYS_INLINE void Init(const DexFile& dex_file, const DexFile::CodeItem* code_item);
private:
// Fields mirrored from the dex/cdex code item.
@@ -129,20 +132,16 @@ class CodeItemDebugInfoAccessor : public CodeItemDataAccessor {
public:
CodeItemDebugInfoAccessor() = default;
- // Handles null code items, but not null dex files.
- ALWAYS_INLINE CodeItemDebugInfoAccessor(const DexFile* dex_file,
- const DexFile::CodeItem* code_item);
-
// Initialize with an existing offset.
- ALWAYS_INLINE CodeItemDebugInfoAccessor(const DexFile* dex_file,
+ ALWAYS_INLINE CodeItemDebugInfoAccessor(const DexFile& dex_file,
const DexFile::CodeItem* code_item,
- uint32_t debug_info_offset) {
- Init(dex_file, code_item, debug_info_offset);
+ uint32_t dex_method_index) {
+ Init(dex_file, code_item, dex_method_index);
}
- ALWAYS_INLINE void Init(const DexFile* dex_file,
+ ALWAYS_INLINE void Init(const DexFile& dex_file,
const DexFile::CodeItem* code_item,
- uint32_t debug_info_offset);
+ uint32_t dex_method_index);
ALWAYS_INLINE explicit CodeItemDebugInfoAccessor(ArtMethod* method);
@@ -157,7 +156,7 @@ class CodeItemDebugInfoAccessor : public CodeItemDataAccessor {
void* context) const;
protected:
- ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item);
+ ALWAYS_INLINE void Init(const CompactDexFile::CodeItem& code_item, uint32_t dex_method_index);
ALWAYS_INLINE void Init(const StandardDexFile::CodeItem& code_item);
private:
diff --git a/runtime/dex/code_item_accessors_test.cc b/runtime/dex/code_item_accessors_test.cc
index 57a5573d8d..3380be8acf 100644
--- a/runtime/dex/code_item_accessors_test.cc
+++ b/runtime/dex/code_item_accessors_test.cc
@@ -19,6 +19,7 @@
#include <memory>
#include "common_runtime_test.h"
+#include "art_dex_file_loader.h"
#include "dex_file_loader.h"
#include "mem_map.h"
@@ -44,13 +45,13 @@ std::unique_ptr<const DexFile> CreateFakeDex(bool compact_dex) {
StandardDexFile::WriteMagic(map->Begin());
StandardDexFile::WriteCurrentVersion(map->Begin());
}
- std::unique_ptr<const DexFile> dex(
- DexFileLoader::Open("location",
- /*location_checksum*/ 123,
- std::move(map),
- /*verify*/false,
- /*verify_checksum*/false,
- &error_msg));
+ const ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const DexFile> dex(dex_file_loader.Open("location",
+ /*location_checksum*/ 123,
+ std::move(map),
+ /*verify*/false,
+ /*verify_checksum*/false,
+ &error_msg));
CHECK(dex != nullptr) << error_msg;
return dex;
}
@@ -61,8 +62,8 @@ TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
ASSERT_TRUE(standard_dex != nullptr);
std::unique_ptr<const DexFile> compact_dex(CreateFakeDex(/*compact_dex*/true));
ASSERT_TRUE(compact_dex != nullptr);
- static constexpr uint16_t kRegisterSize = 1;
- static constexpr uint16_t kInsSize = 2;
+ static constexpr uint16_t kRegisterSize = 2;
+ static constexpr uint16_t kInsSize = 1;
static constexpr uint16_t kOutsSize = 3;
static constexpr uint16_t kTriesSize = 4;
// debug_info_off_ is not accessible from the helpers yet.
@@ -71,12 +72,12 @@ TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
auto verify_code_item = [&](const DexFile* dex,
const DexFile::CodeItem* item,
const uint16_t* insns) {
- CodeItemInstructionAccessor insns_accessor(dex, item);
+ CodeItemInstructionAccessor insns_accessor(*dex, item);
EXPECT_TRUE(insns_accessor.HasCodeItem());
ASSERT_EQ(insns_accessor.InsnsSizeInCodeUnits(), kInsnsSizeInCodeUnits);
EXPECT_EQ(insns_accessor.Insns(), insns);
- CodeItemDataAccessor data_accessor(dex, item);
+ CodeItemDataAccessor data_accessor(*dex, item);
EXPECT_TRUE(data_accessor.HasCodeItem());
EXPECT_EQ(data_accessor.InsnsSizeInCodeUnits(), kInsnsSizeInCodeUnits);
EXPECT_EQ(data_accessor.Insns(), insns);
@@ -96,12 +97,16 @@ TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor) {
verify_code_item(standard_dex.get(), dex_code_item, dex_code_item->insns_);
CompactDexFile::CodeItem* cdex_code_item =
- reinterpret_cast<CompactDexFile::CodeItem*>(const_cast<uint8_t*>(compact_dex->Begin()));
- cdex_code_item->registers_size_ = kRegisterSize;
- cdex_code_item->ins_size_ = kInsSize;
- cdex_code_item->outs_size_ = kOutsSize;
- cdex_code_item->tries_size_ = kTriesSize;
- cdex_code_item->insns_size_in_code_units_ = kInsnsSizeInCodeUnits;
+ reinterpret_cast<CompactDexFile::CodeItem*>(const_cast<uint8_t*>(compact_dex->Begin() +
+ CompactDexFile::CodeItem::kMaxPreHeaderSize * sizeof(uint16_t)));
+ std::vector<uint16_t> preheader;
+ cdex_code_item->Create(kRegisterSize,
+ kInsSize,
+ kOutsSize,
+ kTriesSize,
+ kInsnsSizeInCodeUnits,
+ cdex_code_item->GetPreHeader());
+
verify_code_item(compact_dex.get(), cdex_code_item, cdex_code_item->insns_);
}
diff --git a/runtime/dex/compact_dex_debug_info.cc b/runtime/dex/compact_dex_debug_info.cc
new file mode 100644
index 0000000000..19495ca92c
--- /dev/null
+++ b/runtime/dex/compact_dex_debug_info.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compact_dex_debug_info.h"
+
+#include "compact_dex_utils.h"
+#include "leb128.h"
+
+namespace art {
+
+constexpr size_t CompactDexDebugInfoOffsetTable::kElementsPerIndex;
+
+CompactDexDebugInfoOffsetTable::Accessor::Accessor(const uint8_t* data_begin,
+ uint32_t debug_info_base,
+ uint32_t debug_info_table_offset)
+ : table_(reinterpret_cast<const uint32_t*>(data_begin + debug_info_table_offset)),
+ debug_info_base_(debug_info_base),
+ data_begin_(data_begin) {}
+
+uint32_t CompactDexDebugInfoOffsetTable::Accessor::GetDebugInfoOffset(uint32_t method_idx) const {
+ const uint32_t offset = table_[method_idx / kElementsPerIndex];
+ const size_t bit_index = method_idx % kElementsPerIndex;
+
+ const uint8_t* block = data_begin_ + offset;
+ uint16_t bit_mask = *block;
+ ++block;
+ bit_mask = (bit_mask << kBitsPerByte) | *block;
+ ++block;
+ if ((bit_mask & (1 << bit_index)) == 0) {
+    // An unset bit means the debug info offset for this method is 0 (no debug info).
+ return 0u;
+ }
+ // Trim off the bits above the index we want and count how many bits are set. This is how many
+ // lebs we need to decode.
+ size_t count = POPCOUNT(static_cast<uintptr_t>(bit_mask) << (kBitsPerIntPtrT - 1 - bit_index));
+ DCHECK_GT(count, 0u);
+ uint32_t current_offset = debug_info_base_;
+ do {
+ current_offset += DecodeUnsignedLeb128(&block);
+ --count;
+ } while (count > 0);
+ return current_offset;
+}
+
+void CompactDexDebugInfoOffsetTable::Build(const std::vector<uint32_t>& debug_info_offsets,
+ std::vector<uint8_t>* out_data,
+ uint32_t* out_min_offset,
+ uint32_t* out_table_offset) {
+ DCHECK(out_data != nullptr);
+ DCHECK(out_data->empty());
+ // Calculate the base offset and return it.
+ *out_min_offset = std::numeric_limits<uint32_t>::max();
+ for (const uint32_t offset : debug_info_offsets) {
+ if (offset != 0u) {
+ *out_min_offset = std::min(*out_min_offset, offset);
+ }
+ }
+ // Write the leb blocks and store the important offsets (each kElementsPerIndex elements).
+ size_t block_start = 0;
+
+ std::vector<uint32_t> offset_table;
+
+ // Write data first then the table.
+ while (block_start < debug_info_offsets.size()) {
+ // Write the offset of the block for each block.
+ offset_table.push_back(out_data->size());
+
+ // Block size of up to kElementsPerIndex
+ const size_t block_size = std::min(debug_info_offsets.size() - block_start, kElementsPerIndex);
+
+ // Calculate bit mask since need to write that first.
+ uint16_t bit_mask = 0u;
+ for (size_t i = 0; i < block_size; ++i) {
+ if (debug_info_offsets[block_start + i] != 0u) {
+ bit_mask |= 1 << i;
+ }
+ }
+ // Write bit mask.
+ out_data->push_back(static_cast<uint8_t>(bit_mask >> kBitsPerByte));
+ out_data->push_back(static_cast<uint8_t>(bit_mask));
+
+ // Write debug info offsets relative to the current offset.
+ uint32_t current_offset = *out_min_offset;
+ for (size_t i = 0; i < block_size; ++i) {
+ const uint32_t debug_info_offset = debug_info_offsets[block_start + i];
+ if (debug_info_offset != 0u) {
+ uint32_t delta = debug_info_offset - current_offset;
+ EncodeUnsignedLeb128(out_data, delta);
+ current_offset = debug_info_offset;
+ }
+ }
+
+ block_start += block_size;
+ }
+
+ // Write the offset table.
+ AlignmentPadVector(out_data, alignof(uint32_t));
+ *out_table_offset = out_data->size();
+ out_data->insert(out_data->end(),
+ reinterpret_cast<const uint8_t*>(&offset_table[0]),
+ reinterpret_cast<const uint8_t*>(&offset_table[0] + offset_table.size()));
+}
+
+} // namespace art
diff --git a/runtime/dex/compact_dex_debug_info.h b/runtime/dex/compact_dex_debug_info.h
new file mode 100644
index 0000000000..1aff75879e
--- /dev/null
+++ b/runtime/dex/compact_dex_debug_info.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_COMPACT_DEX_DEBUG_INFO_H_
+#define ART_RUNTIME_DEX_COMPACT_DEX_DEBUG_INFO_H_
+
+#include <cstdint>
+#include <vector>
+
+namespace art {
+
+// Debug offset table for compact dex, aims to minimize size while still providing reasonable
+// speed (10-20ns average time per lookup on host).
+class CompactDexDebugInfoOffsetTable {
+ public:
+ // This value is coupled with the leb chunk bitmask. That logic must also be adjusted when the
+ // integer is modified.
+ static constexpr size_t kElementsPerIndex = 16;
+
+ // Leb block format:
+ // [uint16_t] 16 bit mask for what method ids actually have a debug info offset for the chunk.
+  // [lebs] Up to 16 lebs encoded using leb128, one leb per set bit in the mask. Each leb
+  //        specifies how the offset changes compared to the previous index.
+
+ class Accessor {
+ public:
+ Accessor(const uint8_t* data_begin,
+ uint32_t debug_info_base,
+ uint32_t debug_info_table_offset);
+
+ // Return the debug info for a method index (or 0 if it doesn't have one).
+ uint32_t GetDebugInfoOffset(uint32_t method_idx) const;
+
+ private:
+ const uint32_t* const table_;
+ const uint32_t debug_info_base_;
+ const uint8_t* const data_begin_;
+ };
+
+ // Returned offsets are all relative to debug_info_offsets.
+ static void Build(const std::vector<uint32_t>& debug_info_offsets,
+ std::vector<uint8_t>* out_data,
+ uint32_t* out_min_offset,
+ uint32_t* out_table_offset);
+
+ // 32 bit aligned for the offset table.
+ static constexpr size_t kAlignment = sizeof(uint32_t);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_COMPACT_DEX_DEBUG_INFO_H_
diff --git a/runtime/dex/compact_dex_debug_info_test.cc b/runtime/dex/compact_dex_debug_info_test.cc
new file mode 100644
index 0000000000..02b95e68d7
--- /dev/null
+++ b/runtime/dex/compact_dex_debug_info_test.cc
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+#include <sys/mman.h>
+
+#include "base/logging.h"
+#include "dex/compact_dex_debug_info.h"
+#include "gtest/gtest.h"
+#include "mem_map.h"
+
+namespace art {
+
+TEST(CompactDexDebugInfoTest, TestBuildAndAccess) {
+ MemMap::Init();
+
+ const size_t kDebugInfoMinOffset = 1234567;
+ std::vector<uint32_t> offsets = {
+ 0, 17, 2, 3, 11, 0, 0, 0, 0, 1, 0, 1552, 100, 122, 44, 1234567, 0, 0,
+ std::numeric_limits<uint32_t>::max() - kDebugInfoMinOffset, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12,
+ };
+ // Add some large offset since the debug info section will never be that close to the beginning
+ // of the file.
+ for (uint32_t& offset : offsets) {
+ if (offset != 0u) {
+ offset += kDebugInfoMinOffset;
+ }
+ }
+
+ std::vector<uint8_t> data;
+ uint32_t base_offset = 0;
+ uint32_t table_offset = 0;
+ CompactDexDebugInfoOffsetTable::Build(offsets,
+ /*out*/ &data,
+ /*out*/ &base_offset,
+ /*out*/ &table_offset);
+ EXPECT_GE(base_offset, kDebugInfoMinOffset);
+ EXPECT_LT(table_offset, data.size());
+ ASSERT_GT(data.size(), 0u);
+ const size_t before_size = offsets.size() * sizeof(offsets.front());
+ EXPECT_LT(data.size(), before_size);
+
+ // Note that the accessor requires the data to be aligned. Use memmap to accomplish this.
+ std::string error_msg;
+ // Leave some extra room since we don't copy the table at the start (for testing).
+ constexpr size_t kExtraOffset = 4 * 128;
+ std::unique_ptr<MemMap> fake_dex(MemMap::MapAnonymous("fake dex",
+ nullptr,
+ data.size() + kExtraOffset,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error_msg));
+ ASSERT_TRUE(fake_dex != nullptr) << error_msg;
+ std::copy(data.begin(), data.end(), fake_dex->Begin() + kExtraOffset);
+
+ CompactDexDebugInfoOffsetTable::Accessor accessor(fake_dex->Begin() + kExtraOffset,
+ base_offset,
+ table_offset);
+ for (size_t i = 0; i < offsets.size(); ++i) {
+ EXPECT_EQ(offsets[i], accessor.GetDebugInfoOffset(i));
+ }
+
+  // Sort to try to produce a smaller table. This helps because the leb deltas are smaller
+  // for sorted increasing order.
+ std::sort(offsets.begin(), offsets.end());
+ std::vector<uint8_t> sorted_data;
+ CompactDexDebugInfoOffsetTable::Build(offsets,
+ /*out*/ &sorted_data,
+ /*out*/ &base_offset,
+ /*out*/ &table_offset);
+ EXPECT_LT(sorted_data.size(), data.size());
+ {
+ ScopedLogSeverity sls(LogSeverity::INFO);
+ LOG(INFO) << "raw size " << before_size
+ << " table size " << data.size()
+ << " sorted table size " << sorted_data.size();
+ }
+}
+
+} // namespace art
diff --git a/runtime/dex/compact_dex_file.cc b/runtime/dex/compact_dex_file.cc
index 8f90e098bb..ff193ffb07 100644
--- a/runtime/dex/compact_dex_file.cc
+++ b/runtime/dex/compact_dex_file.cc
@@ -16,6 +16,7 @@
#include "compact_dex_file.h"
+#include "code_item_accessors-no_art-inl.h"
#include "dex_file-inl.h"
#include "leb128.h"
@@ -58,33 +59,25 @@ uint32_t CompactDexFile::GetCodeItemSize(const DexFile::CodeItem& item) const {
// TODO: Clean up this temporary code duplication with StandardDexFile. Eventually the
// implementations will differ.
DCHECK(HasAddress(&item));
- const CodeItem& code_item = down_cast<const CodeItem&>(item);
- uintptr_t code_item_start = reinterpret_cast<uintptr_t>(&code_item);
- uint32_t insns_size = code_item.insns_size_in_code_units_;
- uint32_t tries_size = code_item.tries_size_;
- const uint8_t* handler_data = GetCatchHandlerData(
- DexInstructionIterator(code_item.insns_, code_item.insns_size_in_code_units_),
- code_item.tries_size_,
- 0);
-
- if (tries_size == 0 || handler_data == nullptr) {
- uintptr_t insns_end = reinterpret_cast<uintptr_t>(&code_item.insns_[insns_size]);
- return insns_end - code_item_start;
- } else {
- // Get the start of the handler data.
- uint32_t handlers_size = DecodeUnsignedLeb128(&handler_data);
- // Manually read each handler.
- for (uint32_t i = 0; i < handlers_size; ++i) {
- int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
- if (uleb128_count <= 0) {
- uleb128_count = -uleb128_count + 1;
- }
- for (int32_t j = 0; j < uleb128_count; ++j) {
- DecodeUnsignedLeb128(&handler_data);
- }
- }
- return reinterpret_cast<uintptr_t>(handler_data) - code_item_start;
- }
+ return reinterpret_cast<uintptr_t>(CodeItemDataAccessor(*this, &item).CodeItemDataEnd()) -
+ reinterpret_cast<uintptr_t>(&item);
}
+CompactDexFile::CompactDexFile(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ DexFileContainer* container)
+ : DexFile(base,
+ size,
+ location,
+ location_checksum,
+ oat_dex_file,
+ container,
+ /*is_compact_dex*/ true),
+ debug_info_offsets_(Begin() + GetHeader().debug_info_offsets_pos_,
+ GetHeader().debug_info_base_,
+ GetHeader().debug_info_offsets_table_offset_) {}
+
} // namespace art
diff --git a/runtime/dex/compact_dex_file.h b/runtime/dex/compact_dex_file.h
index 280c6f70cc..8dad84d5cd 100644
--- a/runtime/dex/compact_dex_file.h
+++ b/runtime/dex/compact_dex_file.h
@@ -19,6 +19,7 @@
#include "base/casts.h"
#include "dex_file.h"
+#include "dex/compact_dex_debug_info.h"
namespace art {
@@ -41,13 +42,180 @@ class CompactDexFile : public DexFile {
private:
uint32_t feature_flags_ = 0u;
+ // Position in the compact dex file for the debug info table data starts.
+ uint32_t debug_info_offsets_pos_ = 0u;
+
+ // Offset into the debug info table data where the lookup table is.
+ uint32_t debug_info_offsets_table_offset_ = 0u;
+
+ // Base offset of where debug info starts in the dex file.
+ uint32_t debug_info_base_ = 0u;
+
+ friend class CompactDexFile;
friend class CompactDexWriter;
};
+ // Like the standard code item except without a debug info offset. Each code item may have a
+ // preheader to encode large methods. In 99% of cases, the preheader is not used. This enables
+ // smaller size with a good fast path case in the accessors.
struct CodeItem : public DexFile::CodeItem {
+ static constexpr size_t kAlignment = sizeof(uint16_t);
+ // Max preheader size in uint16_ts.
+ static constexpr size_t kMaxPreHeaderSize = 6;
+
private:
- // TODO: Insert compact dex specific fields here.
+ CodeItem() = default;
+
+ static constexpr size_t kRegistersSizeShift = 12;
+ static constexpr size_t kInsSizeShift = 8;
+ static constexpr size_t kOutsSizeShift = 4;
+ static constexpr size_t kTriesSizeSizeShift = 0;
+ static constexpr uint16_t kFlagPreHeaderRegisterSize = 0x1 << 0;
+ static constexpr uint16_t kFlagPreHeaderInsSize = 0x1 << 1;
+ static constexpr uint16_t kFlagPreHeaderOutsSize = 0x1 << 2;
+ static constexpr uint16_t kFlagPreHeaderTriesSize = 0x1 << 3;
+ static constexpr uint16_t kFlagPreHeaderInsnsSize = 0x1 << 4;
+ static constexpr size_t kInsnsSizeShift = 5;
+ static constexpr size_t kInsnsSizeBits = sizeof(uint16_t) * kBitsPerByte - kInsnsSizeShift;
+
+ // Combined preheader flags for fast testing if we need to go slow path.
+ static constexpr uint16_t kFlagPreHeaderCombined =
+ kFlagPreHeaderRegisterSize |
+ kFlagPreHeaderInsSize |
+ kFlagPreHeaderOutsSize |
+ kFlagPreHeaderTriesSize |
+ kFlagPreHeaderInsnsSize;
+
+ // Create a code item and associated preheader if required based on field values.
+  // Returns the start of the preheader. The preheader buffer must be at least as large as
+  // kMaxPreHeaderSize.
+ uint16_t* Create(uint16_t registers_size,
+ uint16_t ins_size,
+ uint16_t outs_size,
+ uint16_t tries_size,
+ uint32_t insns_size_in_code_units,
+ uint16_t* out_preheader) {
+    // Dex verification ensures that registers_size >= ins_size, so we can subtract the ins
+    // size from the registers size to reduce how often we need to use the preheader.
+ DCHECK_GE(registers_size, ins_size);
+ registers_size -= ins_size;
+ fields_ = (registers_size & 0xF) << kRegistersSizeShift;
+ fields_ |= (ins_size & 0xF) << kInsSizeShift;
+ fields_ |= (outs_size & 0xF) << kOutsSizeShift;
+ fields_ |= (tries_size & 0xF) << kTriesSizeSizeShift;
+ registers_size &= ~0xF;
+ ins_size &= ~0xF;
+ outs_size &= ~0xF;
+ tries_size &= ~0xF;
+ insns_count_and_flags_ = 0;
+ const size_t masked_count = insns_size_in_code_units & ((1 << kInsnsSizeBits) - 1);
+ insns_count_and_flags_ |= masked_count << kInsnsSizeShift;
+ insns_size_in_code_units -= masked_count;
+
+ // Since the preheader case is rare (1% of code items), use a suboptimally large but fast
+ // decoding format.
+ if (insns_size_in_code_units != 0) {
+ insns_count_and_flags_ |= kFlagPreHeaderInsnsSize;
+ --out_preheader;
+ *out_preheader = static_cast<uint16_t>(insns_size_in_code_units);
+ --out_preheader;
+ *out_preheader = static_cast<uint16_t>(insns_size_in_code_units >> 16);
+ }
+ auto preheader_encode = [&](uint16_t size, uint16_t flag) {
+ if (size != 0) {
+ insns_count_and_flags_ |= flag;
+ --out_preheader;
+ *out_preheader = size;
+ }
+ };
+ preheader_encode(registers_size, kFlagPreHeaderRegisterSize);
+ preheader_encode(ins_size, kFlagPreHeaderInsSize);
+ preheader_encode(outs_size, kFlagPreHeaderOutsSize);
+ preheader_encode(tries_size, kFlagPreHeaderTriesSize);
+ return out_preheader;
+ }
+
+ ALWAYS_INLINE bool HasPreHeader(uint16_t flag) const {
+ return (insns_count_and_flags_ & flag) != 0;
+ }
+
+ // Return true if the code item has any preheaders.
+ ALWAYS_INLINE static bool HasAnyPreHeader(uint16_t insns_count_and_flags) {
+ return (insns_count_and_flags & kFlagPreHeaderCombined) != 0;
+ }
+
+ ALWAYS_INLINE uint16_t* GetPreHeader() {
+ return reinterpret_cast<uint16_t*>(this);
+ }
+
+ ALWAYS_INLINE const uint16_t* GetPreHeader() const {
+ return reinterpret_cast<const uint16_t*>(this);
+ }
+
+ // Decode fields and read the preheader if necessary. If kDecodeOnlyInstructionCount is
+ // specified then only the instruction count is decoded.
+ template <bool kDecodeOnlyInstructionCount>
+ ALWAYS_INLINE void DecodeFields(uint32_t* insns_count,
+ uint16_t* registers_size,
+ uint16_t* ins_size,
+ uint16_t* outs_size,
+ uint16_t* tries_size) const {
+ *insns_count = insns_count_and_flags_ >> kInsnsSizeShift;
+ if (!kDecodeOnlyInstructionCount) {
+ const uint16_t fields = fields_;
+ *registers_size = (fields >> kRegistersSizeShift) & 0xF;
+ *ins_size = (fields >> kInsSizeShift) & 0xF;
+ *outs_size = (fields >> kOutsSizeShift) & 0xF;
+ *tries_size = (fields >> kTriesSizeSizeShift) & 0xF;
+ }
+ if (UNLIKELY(HasAnyPreHeader(insns_count_and_flags_))) {
+ const uint16_t* preheader = GetPreHeader();
+ if (HasPreHeader(kFlagPreHeaderInsnsSize)) {
+ --preheader;
+ *insns_count += static_cast<uint32_t>(*preheader);
+ --preheader;
+ *insns_count += static_cast<uint32_t>(*preheader) << 16;
+ }
+ if (!kDecodeOnlyInstructionCount) {
+ if (HasPreHeader(kFlagPreHeaderRegisterSize)) {
+ --preheader;
+ *registers_size += preheader[0];
+ }
+ if (HasPreHeader(kFlagPreHeaderInsSize)) {
+ --preheader;
+ *ins_size += preheader[0];
+ }
+ if (HasPreHeader(kFlagPreHeaderOutsSize)) {
+ --preheader;
+ *outs_size += preheader[0];
+ }
+ if (HasPreHeader(kFlagPreHeaderTriesSize)) {
+ --preheader;
+ *tries_size += preheader[0];
+ }
+ }
+ }
+ if (!kDecodeOnlyInstructionCount) {
+ *registers_size += *ins_size;
+ }
+ }
+
+ // Packed code item data, 4 bits each: [registers_size, ins_size, outs_size, tries_size]
+ uint16_t fields_;
+
+  // 5 bits indicating whether any of the fields required a preheader extension, 11 bits for
+  // the number of instruction code units.
+ uint16_t insns_count_and_flags_;
+
+ uint16_t insns_[1]; // actual array of bytecode.
+
+ ART_FRIEND_TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor);
+ ART_FRIEND_TEST(CompactDexFileTest, CodeItemFields);
+ friend class CodeItemDataAccessor;
+ friend class CodeItemDebugInfoAccessor;
+ friend class CodeItemInstructionAccessor;
friend class CompactDexFile;
+ friend class CompactDexWriter;
DISALLOW_COPY_AND_ASSIGN(CodeItem);
};
@@ -73,25 +241,22 @@ class CompactDexFile : public DexFile {
uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ uint32_t GetDebugInfoOffset(uint32_t dex_method_index) const {
+ return debug_info_offsets_.GetDebugInfoOffset(dex_method_index);
+ }
+
private:
- // Not supported yet.
CompactDexFile(const uint8_t* base,
size_t size,
const std::string& location,
uint32_t location_checksum,
const OatDexFile* oat_dex_file,
- DexFileContainer* container)
- : DexFile(base,
- size,
- location,
- location_checksum,
- oat_dex_file,
- container,
- /*is_compact_dex*/ true) {}
+ DexFileContainer* container);
+
+ CompactDexDebugInfoOffsetTable::Accessor debug_info_offsets_;
friend class DexFile;
friend class DexFileLoader;
-
DISALLOW_COPY_AND_ASSIGN(CompactDexFile);
};
diff --git a/runtime/dex/compact_dex_file_test.cc b/runtime/dex/compact_dex_file_test.cc
index d665dc994b..517c5873ed 100644
--- a/runtime/dex/compact_dex_file_test.cc
+++ b/runtime/dex/compact_dex_file_test.cc
@@ -14,15 +14,14 @@
* limitations under the License.
*/
-#include "common_runtime_test.h"
+
#include "compact_dex_file.h"
#include "dex_file_loader.h"
+#include "gtest/gtest.h"
namespace art {
-class CompactDexFileTest : public CommonRuntimeTest {};
-
-TEST_F(CompactDexFileTest, MagicAndVersion) {
+TEST(CompactDexFileTest, MagicAndVersion) {
// Test permutations of valid/invalid headers.
for (size_t i = 0; i < 2; ++i) {
for (size_t j = 0; j < 2; ++j) {
@@ -45,4 +44,58 @@ TEST_F(CompactDexFileTest, MagicAndVersion) {
}
}
+TEST(CompactDexFileTest, CodeItemFields) {
+ auto test_and_write = [&] (uint16_t registers_size,
+ uint16_t ins_size,
+ uint16_t outs_size,
+ uint16_t tries_size,
+ uint32_t insns_size_in_code_units) {
+ ASSERT_GE(registers_size, ins_size);
+ uint16_t buffer[sizeof(CompactDexFile::CodeItem) +
+ CompactDexFile::CodeItem::kMaxPreHeaderSize] = {};
+ CompactDexFile::CodeItem* code_item = reinterpret_cast<CompactDexFile::CodeItem*>(
+ &buffer[CompactDexFile::CodeItem::kMaxPreHeaderSize]);
+ const uint16_t* preheader_ptr = code_item->Create(registers_size,
+ ins_size,
+ outs_size,
+ tries_size,
+ insns_size_in_code_units,
+ code_item->GetPreHeader());
+ ASSERT_GT(preheader_ptr, buffer);
+
+ uint16_t out_registers_size;
+ uint16_t out_ins_size;
+ uint16_t out_outs_size;
+ uint16_t out_tries_size;
+ uint32_t out_insns_size_in_code_units;
+ code_item->DecodeFields</*kDecodeOnlyInstructionCount*/false>(&out_insns_size_in_code_units,
+ &out_registers_size,
+ &out_ins_size,
+ &out_outs_size,
+ &out_tries_size);
+ ASSERT_EQ(registers_size, out_registers_size);
+ ASSERT_EQ(ins_size, out_ins_size);
+ ASSERT_EQ(outs_size, out_outs_size);
+ ASSERT_EQ(tries_size, out_tries_size);
+ ASSERT_EQ(insns_size_in_code_units, out_insns_size_in_code_units);
+
+ ++out_insns_size_in_code_units; // Force value to change.
+ code_item->DecodeFields</*kDecodeOnlyInstructionCount*/true>(&out_insns_size_in_code_units,
+ /*registers_size*/ nullptr,
+ /*ins_size*/ nullptr,
+ /*outs_size*/ nullptr,
+ /*tries_size*/ nullptr);
+ ASSERT_EQ(insns_size_in_code_units, out_insns_size_in_code_units);
+ };
+ static constexpr uint32_t kMax32 = std::numeric_limits<uint32_t>::max();
+ static constexpr uint16_t kMax16 = std::numeric_limits<uint16_t>::max();
+ test_and_write(0, 0, 0, 0, 0);
+ test_and_write(kMax16, kMax16, kMax16, kMax16, kMax32);
+ test_and_write(kMax16 - 1, kMax16 - 2, kMax16 - 3, kMax16 - 4, kMax32 - 5);
+ test_and_write(kMax16 - 4, kMax16 - 5, kMax16 - 3, kMax16 - 2, kMax32 - 1);
+ test_and_write(5, 4, 3, 2, 1);
+ test_and_write(5, 0, 3, 2, 1);
+ test_and_write(kMax16, 0, kMax16 / 2, 1234, kMax32 / 4);
+}
+
} // namespace art
diff --git a/runtime/dex/compact_dex_utils.h b/runtime/dex/compact_dex_utils.h
new file mode 100644
index 0000000000..1c7e9514fd
--- /dev/null
+++ b/runtime/dex/compact_dex_utils.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_DEX_COMPACT_DEX_UTILS_H_
+#define ART_RUNTIME_DEX_COMPACT_DEX_UTILS_H_
+
+#include <vector>
+
+#include "base/bit_utils.h"
+
+namespace art {
+
+// Add padding to the end of the array until the size is aligned.
+template <typename T, template<typename> class Allocator>
+static inline void AlignmentPadVector(std::vector<T, Allocator<T>>* dest,
+ size_t alignment) {
+ while (!IsAlignedParam(dest->size(), alignment)) {
+ dest->push_back(T());
+ }
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_DEX_COMPACT_DEX_UTILS_H_
diff --git a/runtime/dex/dex_file-inl.h b/runtime/dex/dex_file-inl.h
index 9b56328a71..9b14514cf4 100644
--- a/runtime/dex/dex_file-inl.h
+++ b/runtime/dex/dex_file-inl.h
@@ -136,7 +136,7 @@ inline const char* DexFile::GetShorty(uint32_t proto_idx) const {
inline const DexFile::TryItem* DexFile::GetTryItems(const DexInstructionIterator& code_item_end,
uint32_t offset) {
return reinterpret_cast<const TryItem*>
- (RoundUp(reinterpret_cast<uintptr_t>(&code_item_end.Inst()), 4)) + offset;
+ (RoundUp(reinterpret_cast<uintptr_t>(&code_item_end.Inst()), TryItem::kAlignment)) + offset;
}
static inline bool DexFileStringEquals(const DexFile* df1, dex::StringIndex sidx1,
diff --git a/runtime/dex/dex_file.h b/runtime/dex/dex_file.h
index c2a36ce01a..1ee48f71bc 100644
--- a/runtime/dex/dex_file.h
+++ b/runtime/dex/dex_file.h
@@ -29,6 +29,7 @@
#include "dex_file_types.h"
#include "dex_instruction_iterator.h"
#include "globals.h"
+#include "hidden_api_access_flags.h"
#include "jni.h"
#include "modifiers.h"
@@ -301,53 +302,26 @@ class DexFile {
DISALLOW_COPY_AND_ASSIGN(CallSiteIdItem);
};
- // Raw code_item.
+ // Base code_item; compact dex and standard dex have different code item layouts.
struct CodeItem {
- // Used when quickening / unquickening.
- void SetDebugInfoOffset(uint32_t new_offset) {
- debug_info_off_ = new_offset;
- }
-
- uint32_t GetDebugInfoOffset() const {
- return debug_info_off_;
- }
-
protected:
- uint16_t registers_size_; // the number of registers used by this code
- // (locals + parameters)
- uint16_t ins_size_; // the number of words of incoming arguments to the method
- // that this code is for
- uint16_t outs_size_; // the number of words of outgoing argument space required
- // by this code for method invocation
- uint16_t tries_size_; // the number of try_items for this instance. If non-zero,
- // then these appear as the tries array just after the
- // insns in this instance.
- // Normally holds file offset to debug info stream. In case the method has been quickened
- // holds an offset in the Vdex file containing both the actual debug_info_off and the
- // quickening info offset.
- // Don't use this field directly, use OatFile::GetDebugInfoOffset in general ART code,
- // or DexFile::GetDebugInfoOffset in code that are not using a Runtime.
- uint32_t debug_info_off_;
-
- uint32_t insns_size_in_code_units_; // size of the insns array, in 2 byte code units
- uint16_t insns_[1]; // actual array of bytecode.
+ CodeItem() = default;
private:
- ART_FRIEND_TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor);
- friend class CodeItemDataAccessor;
- friend class CodeItemDebugInfoAccessor;
- friend class CodeItemInstructionAccessor;
- friend class VdexFile; // TODO: Remove this one when it's cleaned up.
DISALLOW_COPY_AND_ASSIGN(CodeItem);
};
// Raw try_item.
struct TryItem {
+ static constexpr size_t kAlignment = sizeof(uint32_t);
+
uint32_t start_addr_;
uint16_t insn_count_;
uint16_t handler_off_;
private:
+ TryItem() = default;
+ friend class DexWriter;
DISALLOW_COPY_AND_ASSIGN(TryItem);
};
@@ -712,15 +686,6 @@ class DexFile {
return reinterpret_cast<const CodeItem*>(addr);
}
- uint32_t GetDebugInfoOffset(const CodeItem* code_item) const {
- if (code_item == nullptr) {
- return 0;
- }
- CHECK(oat_dex_file_ == nullptr)
- << "Should only use GetDebugInfoOffset in a non runtime setup";
- return code_item->GetDebugInfoOffset();
- }
-
const char* GetReturnTypeDescriptor(const ProtoId& proto_id) const;
// Returns the number of prototype identifiers in the .dex file.
@@ -1290,10 +1255,16 @@ class ClassDataItemIterator {
}
}
uint32_t GetFieldAccessFlags() const {
- return GetRawMemberAccessFlags() & kAccValidFieldFlags;
+ return GetMemberAccessFlags() & kAccValidFieldFlags;
}
uint32_t GetMethodAccessFlags() const {
- return GetRawMemberAccessFlags() & kAccValidMethodFlags;
+ return GetMemberAccessFlags() & kAccValidMethodFlags;
+ }
+ uint32_t GetMemberAccessFlags() const {
+ return HiddenApiAccessFlags::RemoveFromDex(GetRawMemberAccessFlags());
+ }
+ HiddenApiAccessFlags::ApiList DecodeHiddenAccessFlags() const {
+ return HiddenApiAccessFlags::DecodeFromDex(GetRawMemberAccessFlags());
}
bool MemberIsNative() const {
return GetRawMemberAccessFlags() & kAccNative;
diff --git a/runtime/dex/dex_file_loader.cc b/runtime/dex/dex_file_loader.cc
index fafd69889d..c80ea199bc 100644
--- a/runtime/dex/dex_file_loader.cc
+++ b/runtime/dex/dex_file_loader.cc
@@ -16,72 +16,25 @@
#include "dex_file_loader.h"
-#include <sys/mman.h> // For the PROT_* and MAP_* constants.
-#include <sys/stat.h>
+// #include <sys/mman.h> // For the PROT_* and MAP_* constants.
+// #include <sys/stat.h>
#include "android-base/stringprintf.h"
#include "base/file_magic.h"
#include "base/stl_util.h"
-#include "base/systrace.h"
-#include "base/unix_file/fd_file.h"
+// #include "base/systrace.h"
+// #include "base/unix_file/fd_file.h"
#include "compact_dex_file.h"
#include "dex_file.h"
#include "dex_file_verifier.h"
#include "standard_dex_file.h"
-#include "zip_archive.h"
+// #include "zip_archive.h"
namespace art {
-namespace {
-
-class MemMapContainer : public DexFileContainer {
- public:
- explicit MemMapContainer(std::unique_ptr<MemMap>&& mem_map) : mem_map_(std::move(mem_map)) { }
- virtual ~MemMapContainer() OVERRIDE { }
-
- int GetPermissions() OVERRIDE {
- if (mem_map_.get() == nullptr) {
- return 0;
- } else {
- return mem_map_->GetProtect();
- }
- }
-
- bool IsReadOnly() OVERRIDE {
- return GetPermissions() == PROT_READ;
- }
-
- bool EnableWrite() OVERRIDE {
- CHECK(IsReadOnly());
- if (mem_map_.get() == nullptr) {
- return false;
- } else {
- return mem_map_->Protect(PROT_READ | PROT_WRITE);
- }
- }
-
- bool DisableWrite() OVERRIDE {
- CHECK(!IsReadOnly());
- if (mem_map_.get() == nullptr) {
- return false;
- } else {
- return mem_map_->Protect(PROT_READ);
- }
- }
-
- private:
- std::unique_ptr<MemMap> mem_map_;
- DISALLOW_COPY_AND_ASSIGN(MemMapContainer);
-};
-
-} // namespace
-
using android::base::StringPrintf;
-static constexpr OatDexFile* kNoOatDexFile = nullptr;
-
-
bool DexFileLoader::IsMagicValid(uint32_t magic) {
return IsMagicValid(reinterpret_cast<uint8_t*>(&magic));
}
@@ -101,63 +54,6 @@ bool DexFileLoader::IsVersionAndMagicValid(const uint8_t* magic) {
return false;
}
-bool DexFileLoader::GetMultiDexChecksums(const char* filename,
- std::vector<uint32_t>* checksums,
- std::string* error_msg,
- int zip_fd) {
- CHECK(checksums != nullptr);
- uint32_t magic;
-
- File fd;
- if (zip_fd != -1) {
- if (ReadMagicAndReset(zip_fd, &magic, error_msg)) {
- fd = File(zip_fd, false /* check_usage */);
- }
- } else {
- fd = OpenAndReadMagic(filename, &magic, error_msg);
- }
- if (fd.Fd() == -1) {
- DCHECK(!error_msg->empty());
- return false;
- }
- if (IsZipMagic(magic)) {
- std::unique_ptr<ZipArchive> zip_archive(
- ZipArchive::OpenFromFd(fd.Release(), filename, error_msg));
- if (zip_archive.get() == nullptr) {
- *error_msg = StringPrintf("Failed to open zip archive '%s' (error msg: %s)", filename,
- error_msg->c_str());
- return false;
- }
-
- uint32_t i = 0;
- std::string zip_entry_name = GetMultiDexClassesDexName(i++);
- std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find(zip_entry_name.c_str(), error_msg));
- if (zip_entry.get() == nullptr) {
- *error_msg = StringPrintf("Zip archive '%s' doesn't contain %s (error msg: %s)", filename,
- zip_entry_name.c_str(), error_msg->c_str());
- return false;
- }
-
- do {
- checksums->push_back(zip_entry->GetCrc32());
- zip_entry_name = GetMultiDexClassesDexName(i++);
- zip_entry.reset(zip_archive->Find(zip_entry_name.c_str(), error_msg));
- } while (zip_entry.get() != nullptr);
- return true;
- }
- if (IsMagicValid(magic)) {
- std::unique_ptr<const DexFile> dex_file(
- OpenFile(fd.Release(), filename, false, false, error_msg));
- if (dex_file == nullptr) {
- return false;
- }
- checksums->push_back(dex_file->GetHeader().checksum_);
- return true;
- }
- *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
- return false;
-}
-
bool DexFileLoader::IsMultiDexLocation(const char* location) {
return strrchr(location, kMultiDexSeparator) != nullptr;
}
@@ -187,6 +83,17 @@ std::string DexFileLoader::GetDexCanonicalLocation(const char* dex_location) {
}
}
+// All of the implementations here should be independent of the runtime.
+// TODO: implement all the virtual methods.
+
+bool DexFileLoader::GetMultiDexChecksums(const char* filename ATTRIBUTE_UNUSED,
+ std::vector<uint32_t>* checksums ATTRIBUTE_UNUSED,
+ std::string* error_msg,
+ int zip_fd ATTRIBUTE_UNUSED) const {
+ *error_msg = "UNIMPLEMENTED";
+ return false;
+}
+
std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
size_t size,
const std::string& location,
@@ -194,8 +101,7 @@ std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
const OatDexFile* oat_dex_file,
bool verify,
bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace(std::string("Open dex file from RAM ") + location);
+ std::string* error_msg) const {
return OpenCommon(base,
size,
location,
@@ -208,305 +114,81 @@ std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
/*verify_result*/ nullptr);
}
-std::unique_ptr<const DexFile> DexFileLoader::Open(const std::string& location,
- uint32_t location_checksum,
- std::unique_ptr<MemMap> map,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
- CHECK(map.get() != nullptr);
-
- if (map->Size() < sizeof(DexFile::Header)) {
- *error_msg = StringPrintf(
- "DexFile: failed to open dex file '%s' that is too short to have a header",
- location.c_str());
- return nullptr;
- }
-
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- location,
- location_checksum,
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg,
- new MemMapContainer(std::move(map)),
- /*verify_result*/ nullptr);
- return dex_file;
+std::unique_ptr<const DexFile> DexFileLoader::Open(const std::string& location ATTRIBUTE_UNUSED,
+ uint32_t location_checksum ATTRIBUTE_UNUSED,
+ std::unique_ptr<MemMap> map ATTRIBUTE_UNUSED,
+ bool verify ATTRIBUTE_UNUSED,
+ bool verify_checksum ATTRIBUTE_UNUSED,
+ std::string* error_msg) const {
+ *error_msg = "UNIMPLEMENTED";
+ return nullptr;
}
-bool DexFileLoader::Open(const char* filename,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace(std::string("Open dex file ") + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
- uint32_t magic;
- File fd = OpenAndReadMagic(filename, &magic, error_msg);
- if (fd.Fd() == -1) {
- DCHECK(!error_msg->empty());
- return false;
- }
- if (IsZipMagic(magic)) {
- return OpenZip(fd.Release(), location, verify, verify_checksum, error_msg, dex_files);
- }
- if (IsMagicValid(magic)) {
- std::unique_ptr<const DexFile> dex_file(OpenFile(fd.Release(),
- location,
- verify,
- verify_checksum,
- error_msg));
- if (dex_file.get() != nullptr) {
- dex_files->push_back(std::move(dex_file));
- return true;
- } else {
- return false;
- }
- }
- *error_msg = StringPrintf("Expected valid zip or dex file: '%s'", filename);
+bool DexFileLoader::Open(
+ const char* filename ATTRIBUTE_UNUSED,
+ const std::string& location ATTRIBUTE_UNUSED,
+ bool verify ATTRIBUTE_UNUSED,
+ bool verify_checksum ATTRIBUTE_UNUSED,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files ATTRIBUTE_UNUSED) const {
+ *error_msg = "UNIMPLEMENTED";
return false;
}
-std::unique_ptr<const DexFile> DexFileLoader::OpenDex(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace("Open dex file " + std::string(location));
- return OpenFile(fd, location, verify, verify_checksum, error_msg);
+std::unique_ptr<const DexFile> DexFileLoader::OpenDex(
+ int fd ATTRIBUTE_UNUSED,
+ const std::string& location ATTRIBUTE_UNUSED,
+ bool verify ATTRIBUTE_UNUSED,
+ bool verify_checksum ATTRIBUTE_UNUSED,
+ bool mmap_shared ATTRIBUTE_UNUSED,
+ std::string* error_msg) const {
+ *error_msg = "UNIMPLEMENTED";
+ return nullptr;
}
-bool DexFileLoader::OpenZip(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace("Dex file open Zip " + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::OpenZip: out-param is nullptr";
- std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(fd, location.c_str(), error_msg));
- if (zip_archive.get() == nullptr) {
- DCHECK(!error_msg->empty());
- return false;
- }
- return OpenAllDexFilesFromZip(
- *zip_archive, location, verify, verify_checksum, error_msg, dex_files);
+bool DexFileLoader::OpenZip(
+ int fd ATTRIBUTE_UNUSED,
+ const std::string& location ATTRIBUTE_UNUSED,
+ bool verify ATTRIBUTE_UNUSED,
+ bool verify_checksum ATTRIBUTE_UNUSED,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files ATTRIBUTE_UNUSED) const {
+ *error_msg = "UNIMPLEMENTED";
+ return false;
}
-std::unique_ptr<const DexFile> DexFileLoader::OpenFile(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) {
- ScopedTrace trace(std::string("Open dex file ") + std::string(location));
- CHECK(!location.empty());
- std::unique_ptr<MemMap> map;
- {
- File delayed_close(fd, /* check_usage */ false);
- struct stat sbuf;
- memset(&sbuf, 0, sizeof(sbuf));
- if (fstat(fd, &sbuf) == -1) {
- *error_msg = StringPrintf("DexFile: fstat '%s' failed: %s", location.c_str(),
- strerror(errno));
- return nullptr;
- }
- if (S_ISDIR(sbuf.st_mode)) {
- *error_msg = StringPrintf("Attempt to mmap directory '%s'", location.c_str());
- return nullptr;
- }
- size_t length = sbuf.st_size;
- map.reset(MemMap::MapFile(length,
- PROT_READ,
- MAP_PRIVATE,
- fd,
- 0,
- /*low_4gb*/false,
- location.c_str(),
- error_msg));
- if (map == nullptr) {
- DCHECK(!error_msg->empty());
- return nullptr;
- }
- }
-
- if (map->Size() < sizeof(DexFile::Header)) {
- *error_msg = StringPrintf(
- "DexFile: failed to open dex file '%s' that is too short to have a header",
- location.c_str());
- return nullptr;
- }
-
- const DexFile::Header* dex_header = reinterpret_cast<const DexFile::Header*>(map->Begin());
-
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- location,
- dex_header->checksum_,
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg,
- new MemMapContainer(std::move(map)),
- /*verify_result*/ nullptr);
-
- return dex_file;
+std::unique_ptr<const DexFile> DexFileLoader::OpenFile(
+ int fd ATTRIBUTE_UNUSED,
+ const std::string& location ATTRIBUTE_UNUSED,
+ bool verify ATTRIBUTE_UNUSED,
+ bool verify_checksum ATTRIBUTE_UNUSED,
+ bool mmap_shared ATTRIBUTE_UNUSED,
+ std::string* error_msg) const {
+ *error_msg = "UNIMPLEMENTED";
+ return nullptr;
}
std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
- const ZipArchive& zip_archive,
- const char* entry_name,
- const std::string& location,
- bool verify,
- bool verify_checksum,
+ const ZipArchive& zip_archive ATTRIBUTE_UNUSED,
+ const char* entry_name ATTRIBUTE_UNUSED,
+ const std::string& location ATTRIBUTE_UNUSED,
+ bool verify ATTRIBUTE_UNUSED,
+ bool verify_checksum ATTRIBUTE_UNUSED,
std::string* error_msg,
- ZipOpenErrorCode* error_code) {
- ScopedTrace trace("Dex file open from Zip Archive " + std::string(location));
- CHECK(!location.empty());
- std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
- if (zip_entry == nullptr) {
- *error_code = ZipOpenErrorCode::kEntryNotFound;
- return nullptr;
- }
- if (zip_entry->GetUncompressedLength() == 0) {
- *error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str());
- *error_code = ZipOpenErrorCode::kDexFileError;
- return nullptr;
- }
-
- std::unique_ptr<MemMap> map;
- if (zip_entry->IsUncompressed()) {
- if (!zip_entry->IsAlignedTo(alignof(DexFile::Header))) {
- // Do not mmap unaligned ZIP entries because
- // doing so would fail dex verification which requires 4 byte alignment.
- LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
- << "please zipalign to " << alignof(DexFile::Header) << " bytes. "
- << "Falling back to extracting file.";
- } else {
- // Map uncompressed files within zip as file-backed to avoid a dirty copy.
- map.reset(zip_entry->MapDirectlyFromFile(location.c_str(), /*out*/error_msg));
- if (map == nullptr) {
- LOG(WARNING) << "Can't mmap dex file " << location << "!" << entry_name << " directly; "
- << "is your ZIP file corrupted? Falling back to extraction.";
- // Try again with Extraction which still has a chance of recovery.
- }
- }
- }
-
- if (map == nullptr) {
- // Default path for compressed ZIP entries,
- // and fallback for stored ZIP entries.
- map.reset(zip_entry->ExtractToMemMap(location.c_str(), entry_name, error_msg));
- }
-
- if (map == nullptr) {
- *error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
- error_msg->c_str());
- *error_code = ZipOpenErrorCode::kExtractToMemoryError;
- return nullptr;
- }
- VerifyResult verify_result;
- std::unique_ptr<DexFile> dex_file = OpenCommon(map->Begin(),
- map->Size(),
- location,
- zip_entry->GetCrc32(),
- kNoOatDexFile,
- verify,
- verify_checksum,
- error_msg,
- new MemMapContainer(std::move(map)),
- &verify_result);
- if (dex_file == nullptr) {
- if (verify_result == VerifyResult::kVerifyNotAttempted) {
- *error_code = ZipOpenErrorCode::kDexFileError;
- } else {
- *error_code = ZipOpenErrorCode::kVerifyError;
- }
- return nullptr;
- }
- if (!dex_file->DisableWrite()) {
- *error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
- *error_code = ZipOpenErrorCode::kMakeReadOnlyError;
- return nullptr;
- }
- CHECK(dex_file->IsReadOnly()) << location;
- if (verify_result != VerifyResult::kVerifySucceeded) {
- *error_code = ZipOpenErrorCode::kVerifyError;
- return nullptr;
- }
- *error_code = ZipOpenErrorCode::kNoError;
- return dex_file;
+ ZipOpenErrorCode* error_code ATTRIBUTE_UNUSED) const {
+ *error_msg = "UNIMPLEMENTED";
+ return nullptr;
}
-// Technically we do not have a limitation with respect to the number of dex files that can be in a
-// multidex APK. However, it's bad practice, as each dex file requires its own tables for symbols
-// (types, classes, methods, ...) and dex caches. So warn the user that we open a zip with what
-// seems an excessive number.
-static constexpr size_t kWarnOnManyDexFilesThreshold = 100;
-
-bool DexFileLoader::OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- ScopedTrace trace("Dex file open from Zip " + std::string(location));
- DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
- ZipOpenErrorCode error_code;
- std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
- kClassesDex,
- location,
- verify,
- verify_checksum,
- error_msg,
- &error_code));
- if (dex_file.get() == nullptr) {
- return false;
- } else {
- // Had at least classes.dex.
- dex_files->push_back(std::move(dex_file));
-
- // Now try some more.
-
- // We could try to avoid std::string allocations by working on a char array directly. As we
- // do not expect a lot of iterations, this seems too involved and brittle.
-
- for (size_t i = 1; ; ++i) {
- std::string name = GetMultiDexClassesDexName(i);
- std::string fake_location = GetMultiDexLocation(i, location.c_str());
- std::unique_ptr<const DexFile> next_dex_file(OpenOneDexFileFromZip(zip_archive,
- name.c_str(),
- fake_location,
- verify,
- verify_checksum,
- error_msg,
- &error_code));
- if (next_dex_file.get() == nullptr) {
- if (error_code != ZipOpenErrorCode::kEntryNotFound) {
- LOG(WARNING) << "Zip open failed: " << *error_msg;
- }
- break;
- } else {
- dex_files->push_back(std::move(next_dex_file));
- }
-
- if (i == kWarnOnManyDexFilesThreshold) {
- LOG(WARNING) << location << " has in excess of " << kWarnOnManyDexFilesThreshold
- << " dex files. Please consider coalescing and shrinking the number to "
- " avoid runtime overhead.";
- }
-
- if (i == std::numeric_limits<size_t>::max()) {
- LOG(ERROR) << "Overflow in number of dex files!";
- break;
- }
- }
-
- return true;
- }
+bool DexFileLoader::OpenAllDexFilesFromZip(
+ const ZipArchive& zip_archive ATTRIBUTE_UNUSED,
+ const std::string& location ATTRIBUTE_UNUSED,
+ bool verify ATTRIBUTE_UNUSED,
+ bool verify_checksum ATTRIBUTE_UNUSED,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files ATTRIBUTE_UNUSED) const {
+ *error_msg = "UNIMPLEMENTED";
+ return false;
}
std::unique_ptr<DexFile> DexFileLoader::OpenCommon(const uint8_t* base,
diff --git a/runtime/dex/dex_file_loader.h b/runtime/dex/dex_file_loader.h
index 7db8d8e08e..4e45fb03b8 100644
--- a/runtime/dex/dex_file_loader.h
+++ b/runtime/dex/dex_file_loader.h
@@ -46,6 +46,8 @@ class DexFileLoader {
// Return true if the corresponding version and magic is valid.
static bool IsVersionAndMagicValid(const uint8_t* magic);
+ virtual ~DexFileLoader() { }
+
// Returns the checksums of a file for comparison with GetLocationChecksum().
// For .dex files, this is the single header checksum.
// For zip files, this is the zip entry CRC32 checksum for classes.dex and
@@ -55,55 +57,56 @@ class DexFileLoader {
// zip_fd is -1, the method will try to open the `filename` and read the
// content from it.
// Return true if the checksums could be found, false otherwise.
- static bool GetMultiDexChecksums(const char* filename,
- std::vector<uint32_t>* checksums,
- std::string* error_msg,
- int zip_fd = -1);
+ virtual bool GetMultiDexChecksums(const char* filename,
+ std::vector<uint32_t>* checksums,
+ std::string* error_msg,
+ int zip_fd = -1) const;
// Check whether a location denotes a multidex dex file. This is a very simple check: returns
// whether the string contains the separator character.
static bool IsMultiDexLocation(const char* location);
// Opens .dex file, backed by existing memory
- static std::unique_ptr<const DexFile> Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
+ virtual std::unique_ptr<const DexFile> Open(const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const;
// Opens .dex file that has been memory-mapped by the caller.
- static std::unique_ptr<const DexFile> Open(const std::string& location,
- uint32_t location_checkum,
- std::unique_ptr<MemMap> mem_map,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
+ virtual std::unique_ptr<const DexFile> Open(const std::string& location,
+ uint32_t location_checkum,
+ std::unique_ptr<MemMap> mem_map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) const;
// Opens all .dex files found in the file, guessing the container format based on file extension.
- static bool Open(const char* filename,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
+ virtual bool Open(const char* filename,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
// Open a single dex file from an fd. This function closes the fd.
- static std::unique_ptr<const DexFile> OpenDex(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
+ virtual std::unique_ptr<const DexFile> OpenDex(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ bool mmap_shared,
+ std::string* error_msg) const;
// Opens dex files from within a .jar, .zip, or .apk file
- static bool OpenZip(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
+ virtual bool OpenZip(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
// Return the name of the index-th classes.dex in a multidex zip file. This is classes.dex for
// index == 0, and classes{index + 1}.dex else.
@@ -148,13 +151,7 @@ class DexFileLoader {
return (pos == std::string::npos) ? std::string() : location.substr(pos);
}
- private:
- static std::unique_ptr<const DexFile> OpenFile(int fd,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg);
-
+ protected:
enum class ZipOpenErrorCode {
kNoError,
kEntryNotFound,
@@ -164,24 +161,6 @@ class DexFileLoader {
kVerifyError
};
- // Open all classesXXX.dex files from a zip archive.
- static bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- std::vector<std::unique_ptr<const DexFile>>* dex_files);
-
- // Opens .dex file from the entry_name in a zip archive. error_code is undefined when non-null
- // return.
- static std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
- const char* entry_name,
- const std::string& location,
- bool verify,
- bool verify_checksum,
- std::string* error_msg,
- ZipOpenErrorCode* error_code);
-
enum class VerifyResult { // private
kVerifyNotAttempted,
kVerifySucceeded,
@@ -198,6 +177,32 @@ class DexFileLoader {
std::string* error_msg,
DexFileContainer* container,
VerifyResult* verify_result);
+
+ private:
+ virtual std::unique_ptr<const DexFile> OpenFile(int fd,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ bool mmap_shared,
+ std::string* error_msg) const;
+
+ // Open all classesXXX.dex files from a zip archive.
+ virtual bool OpenAllDexFilesFromZip(const ZipArchive& zip_archive,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
+
+ // Opens a .dex file from the entry_name in a zip archive. error_code is undefined when a
+ // non-null value is returned.
+ virtual std::unique_ptr<const DexFile> OpenOneDexFileFromZip(const ZipArchive& zip_archive,
+ const char* entry_name,
+ const std::string& location,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ ZipOpenErrorCode* error_code) const;
};
} // namespace art
diff --git a/runtime/dex/dex_file_test.cc b/runtime/dex/dex_file_test.cc
index 87eec571f1..cb721af754 100644
--- a/runtime/dex/dex_file_test.cc
+++ b/runtime/dex/dex_file_test.cc
@@ -20,6 +20,7 @@
#include <memory>
+#include "art_dex_file_loader.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "code_item_accessors-inl.h"
@@ -237,7 +238,8 @@ static bool OpenDexFilesBase64(const char* base64,
ScopedObjectAccess soa(Thread::Current());
static constexpr bool kVerifyChecksum = true;
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ bool success = dex_file_loader.Open(
location, location, /* verify */ true, kVerifyChecksum, error_msg, &tmp);
if (success) {
for (std::unique_ptr<const DexFile>& dex_file : tmp) {
@@ -277,12 +279,13 @@ static std::unique_ptr<const DexFile> OpenDexFileInMemoryBase64(const char* base
/* reuse */ false,
&error_message));
memcpy(region->Begin(), dex_bytes.data(), dex_bytes.size());
- std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(location,
- location_checksum,
- std::move(region),
- /* verify */ true,
- /* verify_checksum */ true,
- &error_message));
+ const ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
+ location_checksum,
+ std::move(region),
+ /* verify */ true,
+ /* verify_checksum */ true,
+ &error_message));
if (expect_success) {
CHECK(dex_file != nullptr) << error_message;
} else {
@@ -368,7 +371,8 @@ TEST_F(DexFileTest, Version40Rejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ ASSERT_FALSE(dex_file_loader.Open(
location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
@@ -381,7 +385,8 @@ TEST_F(DexFileTest, Version41Rejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ ASSERT_FALSE(dex_file_loader.Open(
location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
@@ -394,7 +399,8 @@ TEST_F(DexFileTest, ZeroLengthDexRejected) {
static constexpr bool kVerifyChecksum = true;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- ASSERT_FALSE(DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ ASSERT_FALSE(dex_file_loader.Open(
location, location, /* verify */ true, kVerifyChecksum, &error_msg, &dex_files));
}
@@ -408,9 +414,10 @@ TEST_F(DexFileTest, GetChecksum) {
std::vector<uint32_t> checksums;
ScopedObjectAccess soa(Thread::Current());
std::string error_msg;
- EXPECT_TRUE(DexFileLoader::GetMultiDexChecksums(GetLibCoreDexFileNames()[0].c_str(),
- &checksums,
- &error_msg))
+ const ArtDexFileLoader dex_file_loader;
+ EXPECT_TRUE(dex_file_loader.GetMultiDexChecksums(GetLibCoreDexFileNames()[0].c_str(),
+ &checksums,
+ &error_msg))
<< error_msg;
ASSERT_EQ(1U, checksums.size());
EXPECT_EQ(java_lang_dex_file_->GetLocationChecksum(), checksums[0]);
@@ -420,9 +427,10 @@ TEST_F(DexFileTest, GetMultiDexChecksums) {
std::string error_msg;
std::vector<uint32_t> checksums;
std::string multidex_file = GetTestDexFileName("MultiDex");
- EXPECT_TRUE(DexFileLoader::GetMultiDexChecksums(multidex_file.c_str(),
- &checksums,
- &error_msg)) << error_msg;
+ const ArtDexFileLoader dex_file_loader;
+ EXPECT_TRUE(dex_file_loader.GetMultiDexChecksums(multidex_file.c_str(),
+ &checksums,
+ &error_msg)) << error_msg;
std::vector<std::unique_ptr<const DexFile>> dexes = OpenTestDexFiles("MultiDex");
ASSERT_EQ(2U, dexes.size());
@@ -730,8 +738,10 @@ TEST_F(DexFileTest, OpenDexDebugInfoLocalNullType) {
std::unique_ptr<const DexFile> raw = OpenDexFileInMemoryBase64(
kRawDexDebugInfoLocalNullType, tmp.GetFilename().c_str(), 0xf25f2b38U, true);
const DexFile::ClassDef& class_def = raw->GetClassDef(0);
- const DexFile::CodeItem* code_item = raw->GetCodeItem(raw->FindCodeItemOffset(class_def, 1));
- CodeItemDebugInfoAccessor accessor(raw.get(), code_item);
+ constexpr uint32_t kMethodIdx = 1;
+ const DexFile::CodeItem* code_item = raw->GetCodeItem(raw->FindCodeItemOffset(class_def,
+ kMethodIdx));
+ CodeItemDebugInfoAccessor accessor(*raw, code_item, kMethodIdx);
ASSERT_TRUE(accessor.DecodeDebugLocalInfo(true, 1, Callback, nullptr));
}
diff --git a/runtime/dex/dex_file_tracking_registrar.cc b/runtime/dex/dex_file_tracking_registrar.cc
index bffca5599a..78ea9c16cb 100644
--- a/runtime/dex/dex_file_tracking_registrar.cc
+++ b/runtime/dex/dex_file_tracking_registrar.cc
@@ -185,7 +185,7 @@ void DexFileTrackingRegistrar::SetAllCodeItemStartRegistration(bool should_poiso
if (code_item != nullptr) {
const void* code_item_begin = reinterpret_cast<const void*>(code_item);
size_t code_item_start = reinterpret_cast<size_t>(code_item);
- CodeItemInstructionAccessor accessor(dex_file_, code_item);
+ CodeItemInstructionAccessor accessor(*dex_file_, code_item);
size_t code_item_start_end = reinterpret_cast<size_t>(accessor.Insns());
size_t code_item_start_size = code_item_start_end - code_item_start;
range_values_.push_back(std::make_tuple(code_item_begin,
@@ -208,7 +208,7 @@ void DexFileTrackingRegistrar::SetAllInsnsRegistration(bool should_poison) {
while (cdit.HasNextMethod()) {
const DexFile::CodeItem* code_item = cdit.GetMethodCodeItem();
if (code_item != nullptr) {
- CodeItemInstructionAccessor accessor(dex_file_, code_item);
+ CodeItemInstructionAccessor accessor(*dex_file_, code_item);
const void* insns_begin = reinterpret_cast<const void*>(accessor.Insns());
// Member insns_size_in_code_units_ is in 2-byte units
size_t insns_size = accessor.InsnsSizeInCodeUnits() * 2;
diff --git a/runtime/dex/dex_file_verifier.cc b/runtime/dex/dex_file_verifier.cc
index d6f685a595..7265aad1ba 100644
--- a/runtime/dex/dex_file_verifier.cc
+++ b/runtime/dex/dex_file_verifier.cc
@@ -386,15 +386,14 @@ bool DexFileVerifier::CheckHeader() {
return false;
}
- bool size_matches = false;
- if (dex_file_->IsCompactDexFile()) {
- size_matches = header_->header_size_ == sizeof(CompactDexFile::Header);
- } else {
- size_matches = header_->header_size_ == sizeof(StandardDexFile::Header);
- }
+ const uint32_t expected_header_size = dex_file_->IsCompactDexFile()
+ ? sizeof(CompactDexFile::Header)
+ : sizeof(StandardDexFile::Header);
- if (!size_matches) {
- ErrorStringPrintf("Bad header size: %ud", header_->header_size_);
+ if (header_->header_size_ != expected_header_size) {
+ ErrorStringPrintf("Bad header size: %ud expected %ud",
+ header_->header_size_,
+ expected_header_size);
return false;
}
@@ -580,7 +579,7 @@ uint32_t DexFileVerifier::ReadUnsignedLittleEndian(uint32_t size) {
bool DexFileVerifier::CheckAndGetHandlerOffsets(const DexFile::CodeItem* code_item,
uint32_t* handler_offsets, uint32_t handlers_size) {
- CodeItemDataAccessor accessor(dex_file_, code_item);
+ CodeItemDataAccessor accessor(*dex_file_, code_item);
const uint8_t* handlers_base = accessor.GetCatchHandlerData();
for (uint32_t i = 0; i < handlers_size; i++) {
@@ -1233,7 +1232,7 @@ bool DexFileVerifier::CheckIntraCodeItem() {
return false;
}
- CodeItemDataAccessor accessor(dex_file_, code_item);
+ CodeItemDataAccessor accessor(*dex_file_, code_item);
if (UNLIKELY(accessor.InsSize() > accessor.RegistersSize())) {
ErrorStringPrintf("ins_size (%ud) > registers_size (%ud)",
accessor.InsSize(), accessor.RegistersSize());
diff --git a/runtime/dex/dex_file_verifier_test.cc b/runtime/dex/dex_file_verifier_test.cc
index d4d912cbfb..9759685961 100644
--- a/runtime/dex/dex_file_verifier_test.cc
+++ b/runtime/dex/dex_file_verifier_test.cc
@@ -22,6 +22,7 @@
#include <functional>
#include <memory>
+#include "art_dex_file_loader.h"
#include "base/bit_utils.h"
#include "base/macros.h"
#include "base/unix_file/fd_file.h"
@@ -114,7 +115,8 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
// read dex file
ScopedObjectAccess soa(Thread::Current());
std::vector<std::unique_ptr<const DexFile>> tmp;
- bool success = DexFileLoader::Open(
+ const ArtDexFileLoader dex_file_loader;
+ bool success = dex_file_loader.Open(
location, location, /* verify */ true, /* verify_checksum */ true, error_msg, &tmp);
CHECK(success) << *error_msg;
EXPECT_EQ(1U, tmp.size());
diff --git a/runtime/dex/standard_dex_file.cc b/runtime/dex/standard_dex_file.cc
index 843508d831..52fdff303b 100644
--- a/runtime/dex/standard_dex_file.cc
+++ b/runtime/dex/standard_dex_file.cc
@@ -17,6 +17,7 @@
#include "standard_dex_file.h"
#include "base/casts.h"
+#include "code_item_accessors-no_art-inl.h"
#include "dex_file-inl.h"
#include "leb128.h"
@@ -73,33 +74,11 @@ bool StandardDexFile::SupportsDefaultMethods() const {
uint32_t StandardDexFile::GetCodeItemSize(const DexFile::CodeItem& item) const {
DCHECK(HasAddress(&item));
- const CodeItem& code_item = down_cast<const CodeItem&>(item);
- uintptr_t code_item_start = reinterpret_cast<uintptr_t>(&code_item);
- uint32_t insns_size = code_item.insns_size_in_code_units_;
- uint32_t tries_size = code_item.tries_size_;
- const uint8_t* handler_data = GetCatchHandlerData(
- DexInstructionIterator(code_item.insns_, code_item.insns_size_in_code_units_),
- code_item.tries_size_,
- 0);
-
- if (tries_size == 0 || handler_data == nullptr) {
- uintptr_t insns_end = reinterpret_cast<uintptr_t>(&code_item.insns_[insns_size]);
- return insns_end - code_item_start;
- } else {
- // Get the start of the handler data.
- uint32_t handlers_size = DecodeUnsignedLeb128(&handler_data);
- // Manually read each handler.
- for (uint32_t i = 0; i < handlers_size; ++i) {
- int32_t uleb128_count = DecodeSignedLeb128(&handler_data) * 2;
- if (uleb128_count <= 0) {
- uleb128_count = -uleb128_count + 1;
- }
- for (int32_t j = 0; j < uleb128_count; ++j) {
- DecodeUnsignedLeb128(&handler_data);
- }
- }
- return reinterpret_cast<uintptr_t>(handler_data) - code_item_start;
- }
+ // TODO: Clean up this temporary code duplication with StandardDexFile. Eventually the
+ // implementations will differ.
+ DCHECK(HasAddress(&item));
+ return reinterpret_cast<uintptr_t>(CodeItemDataAccessor(*this, &item).CodeItemDataEnd()) -
+ reinterpret_cast<uintptr_t>(&item);
}
} // namespace art
diff --git a/runtime/dex/standard_dex_file.h b/runtime/dex/standard_dex_file.h
index fb2f720920..6437def4f5 100644
--- a/runtime/dex/standard_dex_file.h
+++ b/runtime/dex/standard_dex_file.h
@@ -33,8 +33,30 @@ class StandardDexFile : public DexFile {
};
struct CodeItem : public DexFile::CodeItem {
+ static constexpr size_t kAlignment = 4;
+
private:
- // TODO: Insert standard dex specific fields here.
+ CodeItem() = default;
+
+ uint16_t registers_size_; // the number of registers used by this code
+ // (locals + parameters)
+ uint16_t ins_size_; // the number of words of incoming arguments to the method
+ // that this code is for
+ uint16_t outs_size_; // the number of words of outgoing argument space required
+ // by this code for method invocation
+ uint16_t tries_size_; // the number of try_items for this instance. If non-zero,
+ // then these appear as the tries array just after the
+ // insns in this instance.
+ uint32_t debug_info_off_; // Holds file offset to debug info stream.
+
+ uint32_t insns_size_in_code_units_; // size of the insns array, in 2 byte code units
+ uint16_t insns_[1]; // actual array of bytecode.
+
+ ART_FRIEND_TEST(CodeItemAccessorsTest, TestDexInstructionsAccessor);
+ friend class CodeItemDataAccessor;
+ friend class CodeItemDebugInfoAccessor;
+ friend class CodeItemInstructionAccessor;
+ friend class DexWriter;
friend class StandardDexFile;
DISALLOW_COPY_AND_ASSIGN(CodeItem);
};
@@ -80,6 +102,7 @@ class StandardDexFile : public DexFile {
friend class DexFileVerifierTest;
ART_FRIEND_TEST(ClassLinkerTest, RegisterDexFileName); // for constructor
+ friend class OptimizingUnitTestHelper; // for constructor
DISALLOW_COPY_AND_ASSIGN(StandardDexFile);
};
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index e459f09e95..20cde530c2 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -27,6 +27,7 @@
#include "base/stl_util.h"
#include "common_runtime_test.h"
#include "compiler_callbacks.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
#include "gc/heap.h"
@@ -43,6 +44,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
public:
virtual void SetUp() OVERRIDE {
CommonRuntimeTest::SetUp();
+ const ArtDexFileLoader dex_file_loader;
// Create a scratch directory to work from.
@@ -74,7 +76,7 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
ASSERT_TRUE(OS::FileExists(GetStrippedDexSrc1().c_str()))
<< "Expected stripped dex file to be at: " << GetStrippedDexSrc1();
ASSERT_FALSE(
- DexFileLoader::GetMultiDexChecksums(GetStrippedDexSrc1().c_str(), &checksums, &error_msg))
+ dex_file_loader.GetMultiDexChecksums(GetStrippedDexSrc1().c_str(), &checksums, &error_msg))
<< "Expected stripped dex file to be stripped: " << GetStrippedDexSrc1();
ASSERT_TRUE(OS::FileExists(GetDexSrc2().c_str()))
<< "Expected dex file to be at: " << GetDexSrc2();
@@ -83,21 +85,21 @@ class Dex2oatEnvironmentTest : public CommonRuntimeTest {
// GetMultiDexSrc1, but a different secondary dex checksum.
static constexpr bool kVerifyChecksum = true;
std::vector<std::unique_ptr<const DexFile>> multi1;
- ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc1().c_str(),
- GetMultiDexSrc1().c_str(),
- /* verify */ true,
- kVerifyChecksum,
- &error_msg,
- &multi1)) << error_msg;
+ ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc1().c_str(),
+ GetMultiDexSrc1().c_str(),
+ /* verify */ true,
+ kVerifyChecksum,
+ &error_msg,
+ &multi1)) << error_msg;
ASSERT_GT(multi1.size(), 1u);
std::vector<std::unique_ptr<const DexFile>> multi2;
- ASSERT_TRUE(DexFileLoader::Open(GetMultiDexSrc2().c_str(),
- GetMultiDexSrc2().c_str(),
- /* verify */ true,
- kVerifyChecksum,
- &error_msg,
- &multi2)) << error_msg;
+ ASSERT_TRUE(dex_file_loader.Open(GetMultiDexSrc2().c_str(),
+ GetMultiDexSrc2().c_str(),
+ /* verify */ true,
+ kVerifyChecksum,
+ &error_msg,
+ &multi2)) << error_msg;
ASSERT_GT(multi2.size(), 1u);
ASSERT_EQ(multi1[0]->GetLocationChecksum(), multi2[0]->GetLocationChecksum());
diff --git a/runtime/dex_to_dex_decompiler.cc b/runtime/dex_to_dex_decompiler.cc
index f3f2d52cb4..7887191713 100644
--- a/runtime/dex_to_dex_decompiler.cc
+++ b/runtime/dex_to_dex_decompiler.cc
@@ -35,9 +35,8 @@ class DexDecompiler {
const DexFile::CodeItem& code_item,
const ArrayRef<const uint8_t>& quickened_info,
bool decompile_return_instruction)
- : code_item_accessor_(&dex_file, &code_item),
- quicken_info_(quickened_info.data()),
- quicken_info_number_of_indices_(QuickenInfoTable::NumberOfIndices(quickened_info.size())),
+ : code_item_accessor_(dex_file, &code_item),
+ quicken_info_(quickened_info),
decompile_return_instruction_(decompile_return_instruction) {}
bool Decompile();
@@ -72,7 +71,7 @@ class DexDecompiler {
}
uint16_t NextIndex() {
- DCHECK_LT(quicken_index_, quicken_info_number_of_indices_);
+ DCHECK_LT(quicken_index_, quicken_info_.NumIndices());
const uint16_t ret = quicken_info_.GetData(quicken_index_);
quicken_index_++;
return ret;
@@ -80,7 +79,6 @@ class DexDecompiler {
const CodeItemInstructionAccessor code_item_accessor_;
const QuickenInfoTable quicken_info_;
- const size_t quicken_info_number_of_indices_;
const bool decompile_return_instruction_;
size_t quicken_index_ = 0u;
@@ -104,7 +102,7 @@ bool DexDecompiler::Decompile() {
break;
case Instruction::NOP:
- if (quicken_info_number_of_indices_ > 0) {
+ if (quicken_info_.NumIndices() > 0) {
// Only try to decompile NOP if there are more than 0 indices. Not having
// any index happens when we unquicken a code item that only has
// RETURN_VOID_NO_BARRIER as quickened instruction.
@@ -181,14 +179,14 @@ bool DexDecompiler::Decompile() {
}
}
- if (quicken_index_ != quicken_info_number_of_indices_) {
+ if (quicken_index_ != quicken_info_.NumIndices()) {
if (quicken_index_ == 0) {
LOG(WARNING) << "Failed to use any value in quickening info,"
<< " potentially due to duplicate methods.";
} else {
LOG(FATAL) << "Failed to use all values in quickening info."
<< " Actual: " << std::hex << quicken_index_
- << " Expected: " << quicken_info_number_of_indices_;
+ << " Expected: " << quicken_info_.NumIndices();
return false;
}
}
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index d93d76793f..037d1fb49c 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -213,8 +213,8 @@ void DexoptTest::ReserveImageSpace() {
// Ensure a chunk of memory is reserved for the image space.
// The reservation_end includes room for the main space that has to come
// right after the image in case of the GSS collector.
- uintptr_t reservation_start = ART_BASE_ADDRESS;
- uintptr_t reservation_end = ART_BASE_ADDRESS + 384 * MB;
+ uint64_t reservation_start = ART_BASE_ADDRESS;
+ uint64_t reservation_end = ART_BASE_ADDRESS + 384 * MB;
std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
ASSERT_TRUE(map.get() != nullptr) << "Failed to build process map";
diff --git a/runtime/elf.h b/runtime/elf.h
index 63b18c5d34..521d4a232f 100644
--- a/runtime/elf.h
+++ b/runtime/elf.h
@@ -64,6 +64,9 @@ constexpr char ELFMAG0 = ElfMagic[EI_MAG0];
constexpr char ELFMAG1 = ElfMagic[EI_MAG1];
constexpr char ELFMAG2 = ElfMagic[EI_MAG2];
constexpr char ELFMAG3 = ElfMagic[EI_MAG3];
+constexpr char ELFMAG[] = "\177ELF";
+constexpr int SELFMAG = 4;
+constexpr int NT_PRSTATUS = 1;
// END android-added for <elf.h> compat
struct Elf32_Ehdr {
@@ -1411,7 +1414,9 @@ struct Elf32_Sym {
};
// BEGIN android-added for <elf.h> compat
+static inline unsigned char ELF32_ST_BIND(unsigned char st_info) { return st_info >> 4; }
static inline unsigned char ELF32_ST_TYPE(unsigned char st_info) { return st_info & 0x0f; }
+static inline unsigned char ELF64_ST_BIND(unsigned char st_info) { return st_info >> 4; }
static inline unsigned char ELF64_ST_TYPE(unsigned char st_info) { return st_info & 0x0f; }
// END android-added for <elf.h> compat
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 74e7c180b8..48a56f2fbf 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -91,6 +91,7 @@
V(Asin, double, double) \
V(Atan, double, double) \
V(Atan2, double, double, double) \
+ V(Pow, double, double, double) \
V(Cbrt, double, double) \
V(Cosh, double, double) \
V(Exp, double, double) \
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 7c912d0a4a..1fdf439d3f 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -238,7 +238,8 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAcos, pAsin, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAsin, pAtan, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAtan, pAtan2, sizeof(void*));
- EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAtan2, pCbrt, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pAtan2, pPow, sizeof(void*));
+ EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pPow, pCbrt, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCbrt, pCosh, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCosh, pExp, sizeof(void*));
EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pExp, pExpm1, sizeof(void*));
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 49c2a15e86..9d6e5de803 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -201,13 +201,13 @@ bool FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
return true;
}
}
+ }
- // We hit a signal we didn't handle. This might be something for which
- // we can give more information about so call all registered handlers to
- // see if it is.
- if (HandleFaultByOtherHandlers(sig, info, context)) {
- return true;
- }
+ // We hit a signal we didn't handle. This might be something for which
+ // we can give more information about so call all registered handlers to
+ // see if it is.
+ if (HandleFaultByOtherHandlers(sig, info, context)) {
+ return true;
}
// Set a breakpoint in this function to catch unhandled signals.
@@ -232,7 +232,7 @@ void FaultManager::RemoveHandler(FaultHandler* handler) {
}
auto it2 = std::find(other_handlers_.begin(), other_handlers_.end(), handler);
if (it2 != other_handlers_.end()) {
- other_handlers_.erase(it);
+ other_handlers_.erase(it2);
return;
}
LOG(FATAL) << "Attempted to remove non existent handler " << handler;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index cf837161e0..1e0c0b16e4 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -88,7 +88,6 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
from_space_num_bytes_at_first_pause_(0),
mark_stack_mode_(kMarkStackModeOff),
weak_ref_access_enabled_(true),
- max_peak_num_non_free_regions_(0),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
mark_from_read_barrier_measurements_(false),
@@ -301,7 +300,6 @@ void ConcurrentCopying::InitializePhase() {
objects_moved_.StoreRelaxed(0);
GcCause gc_cause = GetCurrentIteration()->GetGcCause();
if (gc_cause == kGcCauseExplicit ||
- gc_cause == kGcCauseForNativeAllocBlocking ||
gc_cause == kGcCauseCollectorTransition ||
GetCurrentIteration()->GetClearSoftReferences()) {
force_evacuate_all_ = true;
@@ -1755,8 +1753,6 @@ void ConcurrentCopying::ReclaimPhase() {
cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes);
uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent();
cumulative_objects_moved_.FetchAndAddRelaxed(to_objects);
- max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
- region_space_->GetNumNonFreeRegions());
if (kEnableFromSpaceAccountingCheck) {
CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects);
CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes);
@@ -2269,7 +2265,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
size_t non_moving_space_bytes_allocated = 0U;
size_t bytes_allocated = 0U;
size_t dummy;
- mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
+ mirror::Object* to_ref = region_space_->AllocNonvirtual</*kForEvac*/ true>(
region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
bytes_allocated = region_space_bytes_allocated;
if (to_ref != nullptr) {
@@ -2341,7 +2337,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref,
DCHECK(region_space_->IsInToSpace(to_ref));
if (bytes_allocated > space::RegionSpace::kRegionSize) {
// Free the large alloc.
- region_space_->FreeLarge(to_ref, bytes_allocated);
+ region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated);
} else {
// Record the lost copy for later reuse.
heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated);
@@ -2696,10 +2692,10 @@ void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) {
os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n";
os << "Peak regions allocated "
- << max_peak_num_non_free_regions_ << " ("
- << PrettySize(max_peak_num_non_free_regions_ * space::RegionSpace::kRegionSize)
- << ") / " << region_space_->GetNumRegions() << " ("
- << PrettySize(region_space_->GetNumRegions() * space::RegionSpace::kRegionSize)
+ << region_space_->GetMaxPeakNumNonFreeRegions() << " ("
+ << PrettySize(region_space_->GetMaxPeakNumNonFreeRegions() * space::RegionSpace::kRegionSize)
+ << ") / " << region_space_->GetNumRegions() / 2 << " ("
+ << PrettySize(region_space_->GetNumRegions() * space::RegionSpace::kRegionSize / 2)
<< ")\n";
}
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 939e7fc8a4..8b4b58e7b1 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -308,11 +308,6 @@ class ConcurrentCopying : public GarbageCollector {
Atomic<uint64_t> cumulative_bytes_moved_;
Atomic<uint64_t> cumulative_objects_moved_;
- // Maintain the maximum of number of non-free regions collected just before
- // reclaim in each GC cycle. At this moment in cycle, highest number of
- // regions are in non-free.
- size_t max_peak_num_non_free_regions_;
-
// The skipped blocks are memory blocks/chucks that were copies of
// objects that were unused due to lost races (cas failures) at
// object copy/forward pointer install. They are reused.
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 3150781a5a..1e136bca2e 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -193,7 +193,6 @@ void SemiSpace::MarkingPhase() {
if (generational_) {
if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
- GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAllocBlocking ||
GetCurrentIteration()->GetClearSoftReferences()) {
// If an explicit, native allocation-triggered, or last attempt
// collection, collect the whole heap.
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index d88fcdcc95..508d76535e 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -33,7 +33,6 @@ const char* PrettyCause(GcCause cause) {
case kGcCauseBackground: return "Background";
case kGcCauseExplicit: return "Explicit";
case kGcCauseForNativeAlloc: return "NativeAlloc";
- case kGcCauseForNativeAllocBlocking: return "NativeAllocBlocking";
case kGcCauseCollectorTransition: return "CollectorTransition";
case kGcCauseDisableMovingGc: return "DisableMovingGc";
case kGcCauseHomogeneousSpaceCompact: return "HomogeneousSpaceCompact";
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 78496f3ead..81781ceeb7 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -36,9 +36,6 @@ enum GcCause {
// GC triggered for a native allocation when NativeAllocationGcWatermark is exceeded.
// (This may be a blocking GC depending on whether we run a non-concurrent collector).
kGcCauseForNativeAlloc,
- // GC triggered for a native allocation when NativeAllocationBlockingGcWatermark is exceeded.
- // (This is always a blocking GC).
- kGcCauseForNativeAllocBlocking,
// GC triggered for a collector transition.
kGcCauseCollectorTransition,
// Not a real GC cause, used when we disable moving GC (currently for GetPrimitiveArrayCritical).
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9edba96ddd..b1932d1a29 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -128,9 +128,6 @@ static constexpr size_t kVerifyObjectAllocationStackSize = 16 * KB /
sizeof(mirror::HeapReference<mirror::Object>);
static constexpr size_t kDefaultAllocationStackSize = 8 * MB /
sizeof(mirror::HeapReference<mirror::Object>);
-// System.runFinalization can deadlock with native allocations, to deal with this, we have a
-// timeout on how long we wait for finalizers to run. b/21544853
-static constexpr uint64_t kNativeAllocationFinalizeTimeout = MsToNs(250u);
// For deterministic compilation, we need the heap to be at a well-known address.
static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
@@ -561,12 +558,6 @@ Heap::Heap(size_t initial_size,
gc_complete_lock_ = new Mutex("GC complete lock");
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
- native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
- native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
- *native_blocking_gc_lock_));
- native_blocking_gc_is_assigned_ = false;
- native_blocking_gc_in_progress_ = false;
- native_blocking_gcs_finished_ = 0;
thread_flip_lock_ = new Mutex("GC thread flip lock");
thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
@@ -1143,7 +1134,6 @@ Heap::~Heap() {
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
- delete native_blocking_gc_lock_;
delete thread_flip_lock_;
delete pending_task_lock_;
delete backtrace_lock_;
@@ -2556,10 +2546,6 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
// old_native_bytes_allocated_ now that GC has been triggered, resetting
// new_native_bytes_allocated_ to zero in the process.
old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
- if (gc_cause == kGcCauseForNativeAllocBlocking) {
- MutexLock mu(self, *native_blocking_gc_lock_);
- native_blocking_gc_in_progress_ = true;
- }
}
DCHECK_LT(gc_type, collector::kGcTypeMax);
@@ -3386,7 +3372,6 @@ collector::GcType Heap::WaitForGcToCompleteLocked(GcCause cause, Thread* self) {
// it results in log spam. kGcCauseExplicit is already logged in LogGC, so avoid it here too.
if (cause == kGcCauseForAlloc ||
cause == kGcCauseForNativeAlloc ||
- cause == kGcCauseForNativeAllocBlocking ||
cause == kGcCauseDisableMovingGc) {
VLOG(gc) << "Starting a blocking GC " << cause;
}
@@ -3499,10 +3484,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
bytes_allocated_before_gc;
// Calculate when to perform the next ConcurrentGC.
- // Calculate the estimated GC duration.
- const double gc_duration_seconds = NsToMs(current_gc_iteration_.GetDurationNs()) / 1000.0;
// Estimate how many remaining bytes we will have when we need to start the next GC.
- size_t remaining_bytes = bytes_allocated_during_gc * gc_duration_seconds;
+ size_t remaining_bytes = bytes_allocated_during_gc;
remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
@@ -3772,59 +3755,9 @@ void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
}
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
- // See the REDESIGN section of go/understanding-register-native-allocation
- // for an explanation of how RegisterNativeAllocation works.
- size_t new_value = bytes + new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
- if (new_value > NativeAllocationBlockingGcWatermark()) {
- // Wait for a new GC to finish and finalizers to run, because the
- // allocation rate is too high.
- Thread* self = ThreadForEnv(env);
-
- bool run_gc = false;
- {
- MutexLock mu(self, *native_blocking_gc_lock_);
- uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
- if (native_blocking_gc_in_progress_) {
- // A native blocking GC is in progress from the last time the native
- // allocation blocking GC watermark was exceeded. Wait for that GC to
- // finish before addressing the fact that we exceeded the blocking
- // watermark again.
- do {
- ScopedTrace trace("RegisterNativeAllocation: Wait For Prior Blocking GC Completion");
- native_blocking_gc_cond_->Wait(self);
- } while (native_blocking_gcs_finished_ == initial_gcs_finished);
- initial_gcs_finished++;
- }
-
- // It's possible multiple threads have seen that we exceeded the
- // blocking watermark. Ensure that only one of those threads is assigned
- // to run the blocking GC. The rest of the threads should instead wait
- // for the blocking GC to complete.
- if (native_blocking_gcs_finished_ == initial_gcs_finished) {
- if (native_blocking_gc_is_assigned_) {
- do {
- ScopedTrace trace("RegisterNativeAllocation: Wait For Blocking GC Completion");
- native_blocking_gc_cond_->Wait(self);
- } while (native_blocking_gcs_finished_ == initial_gcs_finished);
- } else {
- native_blocking_gc_is_assigned_ = true;
- run_gc = true;
- }
- }
- }
+ size_t old_value = new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
- if (run_gc) {
- CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAllocBlocking, false);
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- CHECK(!env->ExceptionCheck());
-
- MutexLock mu(self, *native_blocking_gc_lock_);
- native_blocking_gc_is_assigned_ = false;
- native_blocking_gc_in_progress_ = false;
- native_blocking_gcs_finished_++;
- native_blocking_gc_cond_->Broadcast(self);
- }
- } else if (new_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
+ if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
!IsGCRequestPending()) {
// Trigger another GC because there have been enough native bytes
// allocated since the last GC.
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7dcf82f415..57d3d506f0 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -268,7 +268,7 @@ class Heap {
REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*native_blocking_gc_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void RegisterNativeFree(JNIEnv* env, size_t bytes);
// Change the allocator, updates entrypoints.
@@ -1087,16 +1087,6 @@ class Heap {
return max_free_;
}
- // How large new_native_bytes_allocated_ can grow while GC is in progress
- // before we block the allocating thread to allow GC to catch up.
- ALWAYS_INLINE size_t NativeAllocationBlockingGcWatermark() const {
- // Historically the native allocations were bounded by growth_limit_. This
- // uses that same value, dividing growth_limit_ by 2 to account for
- // the fact that now the bound is relative to the number of retained
- // registered native allocations rather than absolute.
- return growth_limit_ / 2;
- }
-
void TraceHeapSize(size_t heap_size);
// Remove a vlog code from heap-inl.h which is transitively included in half the world.
@@ -1252,23 +1242,6 @@ class Heap {
// old_native_bytes_allocated_ and new_native_bytes_allocated_.
Atomic<size_t> old_native_bytes_allocated_;
- // Used for synchronization when multiple threads call into
- // RegisterNativeAllocation and require blocking GC.
- // * If a previous blocking GC is in progress, all threads will wait for
- // that GC to complete, then wait for one of the threads to complete another
- // blocking GC.
- // * If a blocking GC is assigned but not in progress, a thread has been
- // assigned to run a blocking GC but has not started yet. Threads will wait
- // for the assigned blocking GC to complete.
- // * If a blocking GC is not assigned nor in progress, the first thread will
- // run a blocking GC and signal to other threads that blocking GC has been
- // assigned.
- Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
- bool native_blocking_gc_is_assigned_ GUARDED_BY(native_blocking_gc_lock_);
- bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
- uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
-
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
// rosalloc thread-local buffers. It is temporarily accumulated
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 251d94ca25..ca5a3eeb17 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -36,6 +36,7 @@
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/time_utils.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
#include "gc/accounting/space_bitmap-inl.h"
@@ -1828,6 +1829,7 @@ std::string ImageSpace::GetMultiImageBootClassPath(
}
bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg) {
+ const ArtDexFileLoader dex_file_loader;
for (const OatFile::OatDexFile* oat_dex_file : oat_file.GetOatDexFiles()) {
const std::string& dex_file_location = oat_dex_file->GetDexFileLocation();
@@ -1838,7 +1840,7 @@ bool ImageSpace::ValidateOatFile(const OatFile& oat_file, std::string* error_msg
}
std::vector<uint32_t> checksums;
- if (!DexFileLoader::GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) {
+ if (!dex_file_loader.GetMultiDexChecksums(dex_file_location.c_str(), &checksums, error_msg)) {
*error_msg = StringPrintf("ValidateOatFile failed to get checksums of dex file '%s' "
"referenced by oat file %s: %s",
dex_file_location.c_str(),
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index ea2168fe9c..e74e9b169f 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -115,7 +115,7 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* byte
}
template<RegionSpace::RegionType kRegionType>
-uint64_t RegionSpace::GetBytesAllocatedInternal() {
+inline uint64_t RegionSpace::GetBytesAllocatedInternal() {
uint64_t bytes = 0;
MutexLock mu(Thread::Current(), region_lock_);
for (size_t i = 0; i < num_regions_; ++i) {
@@ -150,7 +150,7 @@ uint64_t RegionSpace::GetBytesAllocatedInternal() {
}
template<RegionSpace::RegionType kRegionType>
-uint64_t RegionSpace::GetObjectsAllocatedInternal() {
+inline uint64_t RegionSpace::GetObjectsAllocatedInternal() {
uint64_t bytes = 0;
MutexLock mu(Thread::Current(), region_lock_);
for (size_t i = 0; i < num_regions_; ++i) {
@@ -185,7 +185,7 @@ uint64_t RegionSpace::GetObjectsAllocatedInternal() {
}
template<bool kToSpaceOnly, typename Visitor>
-void RegionSpace::WalkInternal(Visitor&& visitor) {
+inline void RegionSpace::WalkInternal(Visitor&& visitor) {
// TODO: MutexLock on region_lock_ won't work due to lock order
// issues (the classloader classes lock and the monitor lock). We
// call this with threads suspended.
@@ -237,9 +237,10 @@ inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
}
template<bool kForEvac>
-mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
DCHECK_ALIGNED(num_bytes, kAlignment);
DCHECK_GT(num_bytes, kRegionSize);
size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
@@ -274,7 +275,11 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
Region* first_reg = &regions_[left];
DCHECK(first_reg->IsFree());
first_reg->UnfreeLarge(this, time_);
- ++num_non_free_regions_;
+ if (kForEvac) {
+ ++num_evac_regions_;
+ } else {
+ ++num_non_free_regions_;
+ }
size_t allocated = num_regs * kRegionSize;
// We make 'top' all usable bytes, as the caller of this
// allocation may use all of 'usable_size' (see mirror::Array::Alloc).
@@ -283,7 +288,11 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
DCHECK_LT(p, num_regions_);
DCHECK(regions_[p].IsFree());
regions_[p].UnfreeLargeTail(this, time_);
- ++num_non_free_regions_;
+ if (kForEvac) {
+ ++num_evac_regions_;
+ } else {
+ ++num_non_free_regions_;
+ }
}
*bytes_allocated = allocated;
if (usable_size != nullptr) {
@@ -299,6 +308,35 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
return nullptr;
}
+template<bool kForEvac>
+inline void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
+ DCHECK(Contains(large_obj));
+ DCHECK_ALIGNED(large_obj, kRegionSize);
+ MutexLock mu(Thread::Current(), region_lock_);
+ uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
+ uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
+ CHECK_LT(begin_addr, end_addr);
+ for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
+ Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
+ if (addr == begin_addr) {
+ DCHECK(reg->IsLarge());
+ } else {
+ DCHECK(reg->IsLargeTail());
+ }
+ reg->Clear(/*zero_and_release_pages*/true);
+ if (kForEvac) {
+ --num_evac_regions_;
+ } else {
+ --num_non_free_regions_;
+ }
+ }
+ if (end_addr < Limit()) {
+ // If we aren't at the end of the space, check that the next region is not a large tail.
+ Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
+ DCHECK(!following_reg->IsLargeTail());
+ }
+}
+
inline size_t RegionSpace::Region::BytesAllocated() const {
if (IsLarge()) {
DCHECK_LT(begin_ + kRegionSize, Top());
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index a51df7c783..45cfff90cc 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -84,14 +84,18 @@ RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
: ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
kGcRetentionPolicyAlwaysCollect),
- region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
- size_t mem_map_size = mem_map->Size();
- CHECK_ALIGNED(mem_map_size, kRegionSize);
+ region_lock_("Region lock", kRegionSpaceRegionLock),
+ time_(1U),
+ num_regions_(mem_map->Size() / kRegionSize),
+ num_non_free_regions_(0U),
+ num_evac_regions_(0U),
+ max_peak_num_non_free_regions_(0U),
+ non_free_region_index_limit_(0U),
+ current_region_(&full_region_),
+ evac_region_(nullptr) {
+ CHECK_ALIGNED(mem_map->Size(), kRegionSize);
CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
- num_regions_ = mem_map_size / kRegionSize;
- num_non_free_regions_ = 0U;
DCHECK_GT(num_regions_, 0U);
- non_free_region_index_limit_ = 0U;
regions_.reset(new Region[num_regions_]);
uint8_t* region_addr = mem_map->Begin();
for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
@@ -112,8 +116,6 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
}
DCHECK(!full_region_.IsFree());
DCHECK(full_region_.IsAllocated());
- current_region_ = &full_region_;
- evac_region_ = nullptr;
size_t ignored;
DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}
@@ -267,6 +269,9 @@ void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_obje
VerifyNonFreeRegionLimit();
size_t new_non_free_region_index_limit = 0;
+ // Update max of peak non free region count before reclaiming evacuated regions.
+ max_peak_num_non_free_regions_ = std::max(max_peak_num_non_free_regions_,
+ num_non_free_regions_);
// Combine zeroing and releasing pages to reduce how often madvise is called. This helps
// reduce contention on the mmap semaphore. b/62194020
// clear_region adds a region to the current block. If the region is not adjacent, the
@@ -350,6 +355,8 @@ void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_obje
// Update non_free_region_index_limit_.
SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
+ num_non_free_regions_ += num_evac_regions_;
+ num_evac_regions_ = 0;
}
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
@@ -411,30 +418,6 @@ void RegionSpace::Dump(std::ostream& os) const {
<< reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}
-void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
- DCHECK(Contains(large_obj));
- DCHECK_ALIGNED(large_obj, kRegionSize);
- MutexLock mu(Thread::Current(), region_lock_);
- uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
- uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
- CHECK_LT(begin_addr, end_addr);
- for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
- Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
- if (addr == begin_addr) {
- DCHECK(reg->IsLarge());
- } else {
- DCHECK(reg->IsLargeTail());
- }
- reg->Clear(/*zero_and_release_pages*/true);
- --num_non_free_regions_;
- }
- if (end_addr < Limit()) {
- // If we aren't at the end of the space, check that the next region is not a large tail.
- Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
- DCHECK(!following_reg->IsLargeTail());
- }
-}
-
void RegionSpace::DumpRegions(std::ostream& os) {
MutexLock mu(Thread::Current(), region_lock_);
for (size_t i = 0; i < num_regions_; ++i) {
@@ -572,10 +555,12 @@ RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
Region* r = &regions_[i];
if (r->IsFree()) {
r->Unfree(this, time_);
- ++num_non_free_regions_;
- if (!for_evac) {
+ if (for_evac) {
+ ++num_evac_regions_;
// Evac doesn't count as newly allocated.
+ } else {
r->SetNewlyAllocated();
+ ++num_non_free_regions_;
}
return r;
}
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index c9c9136c27..ef8aa52a03 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -64,6 +64,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
template<bool kForEvac>
mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
+ template<bool kForEvac>
void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
// Return the storage space required by obj.
@@ -138,9 +139,8 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
uint64_t GetObjectsAllocatedInUnevacFromSpace() REQUIRES(!region_lock_) {
return GetObjectsAllocatedInternal<RegionType::kRegionTypeUnevacFromSpace>();
}
- // It is OK to do a racy read here as it's only for performance dump.
- size_t GetNumNonFreeRegions() const {
- return num_non_free_regions_;
+ size_t GetMaxPeakNumNonFreeRegions() const {
+ return max_peak_num_non_free_regions_;
}
size_t GetNumRegions() const {
return num_regions_;
@@ -530,8 +530,18 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
uint32_t time_; // The time as the number of collections since the startup.
- size_t num_regions_; // The number of regions in this space.
- size_t num_non_free_regions_; // The number of non-free regions in this space.
+ const size_t num_regions_; // The number of regions in this space.
+ // The number of non-free regions in this space.
+ size_t num_non_free_regions_ GUARDED_BY(region_lock_);
+
+ // The number of evac regions allocated during collection. 0 when GC not running.
+ size_t num_evac_regions_ GUARDED_BY(region_lock_);
+
+ // Maintain the maximum of the number of non-free regions just before
+ // reclaim in each GC cycle. At that point in the cycle, the number of
+ // non-free regions is at its highest.
+ size_t max_peak_num_non_free_regions_;
+
std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
// The pointer to the region array.
// The upper-bound index of the non-free regions. Used to avoid scanning all regions in
diff --git a/runtime/globals.h b/runtime/globals.h
index f14d6e95a6..ca4040d777 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -62,6 +62,12 @@ static constexpr bool kIsDebugBuild = GlobalsReturnSelf(false);
static constexpr bool kIsDebugBuild = GlobalsReturnSelf(true);
#endif
+#if defined(ART_PGO_INSTRUMENTATION)
+static constexpr bool kIsPGOInstrumentation = true;
+#else
+static constexpr bool kIsPGOInstrumentation = false;
+#endif
+
// ART_TARGET - Defined for target builds of ART.
// ART_TARGET_LINUX - Defined for target Linux builds of ART.
// ART_TARGET_ANDROID - Defined for target Android builds of ART.
diff --git a/runtime/hidden_api_access_flags.h b/runtime/hidden_api_access_flags.h
new file mode 100644
index 0000000000..80a002d96e
--- /dev/null
+++ b/runtime/hidden_api_access_flags.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_HIDDEN_API_ACCESS_FLAGS_H_
+#define ART_RUNTIME_HIDDEN_API_ACCESS_FLAGS_H_
+
+#include "base/bit_utils.h"
+#include "modifiers.h"
+
+namespace art {
+
+/* This class is used for encoding and decoding access flags of class members
+ * from the boot class path. These access flags might contain two additional bits
+ * of information on whether the given class member should be hidden from apps
+ * and under what circumstances.
+ *
+ * The encoding is different inside DexFile, where we are concerned with size,
+ * and at runtime where we want to optimize for speed of access. The class
+ * provides helper functions to decode/encode both of them.
+ *
+ * Encoding in DexFile
+ * ===================
+ *
+ * First bit is encoded as inversion of visibility flags (public/private/protected).
+ * At most one can be set for any given class member. If two or three are set,
+ * this is interpreted as the first bit being set and actual visibility flags
+ * being the complement of the encoded flags.
+ *
+ * The second bit is encoded as bit 5 for fields and non-native methods, where
+ * it carries no other meaning. If a method is native (bit 8 set), bit 9 is used.
+ *
+ * Bits were selected so that they never increase the length of unsigned LEB-128
+ * encoding of the access flags.
+ *
+ * Encoding at runtime
+ * ===================
+ *
+ * Two bits are set aside in the uint32_t access flags in the intrinsics ordinal
+ * space (thus intrinsics need to be special-cased). These are two consecutive
+ * bits and they are directly used to store the integer value of the ApiList
+ * enum values.
+ *
+ */
+class HiddenApiAccessFlags {
+ public:
+ enum ApiList {
+ kWhitelist = 0,
+ kLightGreylist,
+ kDarkGreylist,
+ kBlacklist,
+ };
+
+ static ALWAYS_INLINE ApiList DecodeFromDex(uint32_t dex_access_flags) {
+ DexHiddenAccessFlags flags(dex_access_flags);
+ uint32_t int_value = (flags.IsFirstBitSet() ? 1 : 0) + (flags.IsSecondBitSet() ? 2 : 0);
+ return static_cast<ApiList>(int_value);
+ }
+
+ static ALWAYS_INLINE uint32_t RemoveFromDex(uint32_t dex_access_flags) {
+ DexHiddenAccessFlags flags(dex_access_flags);
+ flags.SetFirstBit(false);
+ flags.SetSecondBit(false);
+ return flags.GetEncoding();
+ }
+
+ static ALWAYS_INLINE uint32_t EncodeForDex(uint32_t dex_access_flags, ApiList value) {
+ DexHiddenAccessFlags flags(RemoveFromDex(dex_access_flags));
+ uint32_t int_value = static_cast<uint32_t>(value);
+ flags.SetFirstBit((int_value & 1) != 0);
+ flags.SetSecondBit((int_value & 2) != 0);
+ return flags.GetEncoding();
+ }
+
+ static ALWAYS_INLINE ApiList DecodeFromRuntime(uint32_t runtime_access_flags) {
+ if ((runtime_access_flags & kAccIntrinsic) != 0) {
+ return kWhitelist;
+ } else {
+ uint32_t int_value = (runtime_access_flags & kAccHiddenApiBits) >> kAccFlagsShift;
+ return static_cast<ApiList>(int_value);
+ }
+ }
+
+ static ALWAYS_INLINE uint32_t EncodeForRuntime(uint32_t runtime_access_flags, ApiList value) {
+ CHECK_EQ(runtime_access_flags & kAccIntrinsic, 0u);
+
+ uint32_t hidden_api_flags = static_cast<uint32_t>(value) << kAccFlagsShift;
+ CHECK_EQ(hidden_api_flags & ~kAccHiddenApiBits, 0u);
+
+ runtime_access_flags &= ~kAccHiddenApiBits;
+ return runtime_access_flags | hidden_api_flags;
+ }
+
+ private:
+ static const int kAccFlagsShift = CTZ(kAccHiddenApiBits);
+ static_assert(IsPowerOfTwo((kAccHiddenApiBits >> kAccFlagsShift) + 1),
+ "kAccHiddenApiBits are not continuous");
+
+ struct DexHiddenAccessFlags {
+ explicit DexHiddenAccessFlags(uint32_t access_flags) : access_flags_(access_flags) {}
+
+ ALWAYS_INLINE uint32_t GetSecondFlag() {
+ return ((access_flags_ & kAccNative) != 0) ? kAccDexHiddenBitNative : kAccDexHiddenBit;
+ }
+
+ ALWAYS_INLINE bool IsFirstBitSet() {
+ static_assert(IsPowerOfTwo(0u), "Following statement checks if *at most* one bit is set");
+ return !IsPowerOfTwo(access_flags_ & kAccVisibilityFlags);
+ }
+
+ ALWAYS_INLINE void SetFirstBit(bool value) {
+ if (IsFirstBitSet() != value) {
+ access_flags_ ^= kAccVisibilityFlags;
+ }
+ }
+
+ ALWAYS_INLINE bool IsSecondBitSet() {
+ return (access_flags_ & GetSecondFlag()) != 0;
+ }
+
+ ALWAYS_INLINE void SetSecondBit(bool value) {
+ if (value) {
+ access_flags_ |= GetSecondFlag();
+ } else {
+ access_flags_ &= ~GetSecondFlag();
+ }
+ }
+
+ ALWAYS_INLINE uint32_t GetEncoding() const {
+ return access_flags_;
+ }
+
+ uint32_t access_flags_;
+ };
+};
+
+} // namespace art
+
+
+#endif // ART_RUNTIME_HIDDEN_API_ACCESS_FLAGS_H_
diff --git a/runtime/image.cc b/runtime/image.cc
index b9d955c08c..8e3615ffcf 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '2', '\0' }; // 4-bit ClassStatus.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '4', '\0' }; // Math.pow() intrinsic.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index 99a4f763c9..681a582b5d 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -478,6 +478,7 @@ bool MterpHandleIntrinsic(ShadowFrame* shadow_frame,
UNIMPLEMENTED_CASE(MathLog /* (D)D */)
UNIMPLEMENTED_CASE(MathLog10 /* (D)D */)
UNIMPLEMENTED_CASE(MathNextAfter /* (DD)D */)
+ UNIMPLEMENTED_CASE(MathPow /* (DD)D */)
UNIMPLEMENTED_CASE(MathSinh /* (D)D */)
INTRINSIC_CASE(MathTan)
UNIMPLEMENTED_CASE(MathTanh /* (D)D */)
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index d007728750..da08793f59 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -136,6 +136,7 @@
V(MathAsin, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "asin", "(D)D") \
V(MathAtan, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "atan", "(D)D") \
V(MathAtan2, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "atan2", "(DD)D") \
+ V(MathPow, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "pow", "(DD)D") \
V(MathCbrt, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "cbrt", "(D)D") \
V(MathCosh, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "cosh", "(D)D") \
V(MathExp, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/lang/Math;", "exp", "(D)D") \
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 4d1c85a1c2..0e295e2442 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -42,6 +42,7 @@ extern "C" {
JITCodeEntry* prev_;
const uint8_t *symfile_addr_;
uint64_t symfile_size_;
+ uint32_t ref_count; // ART internal field.
};
struct JITDescriptor {
@@ -69,10 +70,11 @@ extern "C" {
JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
}
-static Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
+Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
-static JITCodeEntry* CreateJITCodeEntryInternal(std::vector<uint8_t> symfile)
- REQUIRES(g_jit_debug_mutex) {
+static size_t g_jit_debug_mem_usage = 0;
+
+JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile) {
DCHECK_NE(symfile.size(), 0u);
// Make a copy of the buffer. We want to shrink it anyway.
@@ -85,20 +87,20 @@ static JITCodeEntry* CreateJITCodeEntryInternal(std::vector<uint8_t> symfile)
entry->symfile_addr_ = symfile_copy;
entry->symfile_size_ = symfile.size();
entry->prev_ = nullptr;
-
+ entry->ref_count = 0;
entry->next_ = __jit_debug_descriptor.first_entry_;
if (entry->next_ != nullptr) {
entry->next_->prev_ = entry;
}
+ g_jit_debug_mem_usage += sizeof(JITCodeEntry) + entry->symfile_size_;
__jit_debug_descriptor.first_entry_ = entry;
__jit_debug_descriptor.relevant_entry_ = entry;
-
__jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
(*__jit_debug_register_code_ptr)();
return entry;
}
-static void DeleteJITCodeEntryInternal(JITCodeEntry* entry) REQUIRES(g_jit_debug_mutex) {
+void DeleteJITCodeEntry(JITCodeEntry* entry) {
if (entry->prev_ != nullptr) {
entry->prev_->next_ = entry->next_;
} else {
@@ -109,6 +111,7 @@ static void DeleteJITCodeEntryInternal(JITCodeEntry* entry) REQUIRES(g_jit_debug
entry->next_->prev_ = entry->prev_;
}
+ g_jit_debug_mem_usage -= sizeof(JITCodeEntry) + entry->symfile_size_;
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
(*__jit_debug_register_code_ptr)();
@@ -116,41 +119,33 @@ static void DeleteJITCodeEntryInternal(JITCodeEntry* entry) REQUIRES(g_jit_debug
delete entry;
}
-JITCodeEntry* CreateJITCodeEntry(std::vector<uint8_t> symfile) {
- Thread* self = Thread::Current();
- MutexLock mu(self, g_jit_debug_mutex);
- return CreateJITCodeEntryInternal(std::move(symfile));
+// Mapping from code address to entry. Used to manage life-time of the entries.
+static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries
+ GUARDED_BY(g_jit_debug_mutex);
+
+void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address) {
+ DCHECK(entry != nullptr);
+ DCHECK_EQ(g_jit_code_entries.count(code_address), 0u);
+ entry->ref_count++;
+ g_jit_code_entries.emplace(code_address, entry);
}
-void DeleteJITCodeEntry(JITCodeEntry* entry) {
- Thread* self = Thread::Current();
- MutexLock mu(self, g_jit_debug_mutex);
- DeleteJITCodeEntryInternal(entry);
+void DecrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address) {
+ DCHECK(entry != nullptr);
+ DCHECK(g_jit_code_entries[code_address] == entry);
+ if (--entry->ref_count == 0) {
+ DeleteJITCodeEntry(entry);
+ }
+ g_jit_code_entries.erase(code_address);
}
-// Mapping from address to entry. It takes ownership of the entries
-// so that the user of the JIT interface does not have to store them.
-static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries;
-
-void CreateJITCodeEntryForAddress(uintptr_t address, std::vector<uint8_t> symfile) {
- Thread* self = Thread::Current();
- MutexLock mu(self, g_jit_debug_mutex);
- DCHECK_NE(address, 0u);
- DCHECK(g_jit_code_entries.find(address) == g_jit_code_entries.end());
- JITCodeEntry* entry = CreateJITCodeEntryInternal(std::move(symfile));
- g_jit_code_entries.emplace(address, entry);
+JITCodeEntry* GetJITCodeEntry(uintptr_t code_address) {
+ auto it = g_jit_code_entries.find(code_address);
+ return it == g_jit_code_entries.end() ? nullptr : it->second;
}
-bool DeleteJITCodeEntryForAddress(uintptr_t address) {
- Thread* self = Thread::Current();
- MutexLock mu(self, g_jit_debug_mutex);
- const auto it = g_jit_code_entries.find(address);
- if (it == g_jit_code_entries.end()) {
- return false;
- }
- DeleteJITCodeEntryInternal(it->second);
- g_jit_code_entries.erase(it);
- return true;
+size_t GetJITCodeEntryMemUsage() {
+ return g_jit_debug_mem_usage + g_jit_code_entries.size() * 2 * sizeof(void*);
}
} // namespace art
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index d9bf331aab..9aec988f67 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -21,28 +21,45 @@
#include <memory>
#include <vector>
+#include "base/array_ref.h"
+#include "base/mutex.h"
+
namespace art {
extern "C" {
struct JITCodeEntry;
}
+extern Mutex g_jit_debug_mutex;
+
// Notify native debugger about new JITed code by passing in-memory ELF.
// It takes ownership of the in-memory ELF file.
-JITCodeEntry* CreateJITCodeEntry(std::vector<uint8_t> symfile);
+JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile)
+ REQUIRES(g_jit_debug_mutex);
// Notify native debugger that JITed code has been removed.
// It also releases the associated in-memory ELF file.
-void DeleteJITCodeEntry(JITCodeEntry* entry);
+void DeleteJITCodeEntry(JITCodeEntry* entry)
+ REQUIRES(g_jit_debug_mutex);
-// Notify native debugger about new JITed code by passing in-memory ELF.
-// The address is used only to uniquely identify the entry.
-// It takes ownership of the in-memory ELF file.
-void CreateJITCodeEntryForAddress(uintptr_t address, std::vector<uint8_t> symfile);
+// Helper method to track life-time of JITCodeEntry.
+// It registers given code address as being described by the given entry.
+void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
+ REQUIRES(g_jit_debug_mutex);
-// Notify native debugger that JITed code has been removed.
-// Returns false if entry for the given address was not found.
-bool DeleteJITCodeEntryForAddress(uintptr_t address);
+// Helper method to track life-time of JITCodeEntry.
+// It de-registers given code address as being described by the given entry.
+void DecrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
+ REQUIRES(g_jit_debug_mutex);
+
+// Find the registered JITCodeEntry for given code address.
+// There can be only one entry per address at any given time.
+JITCodeEntry* GetJITCodeEntry(uintptr_t code_address)
+ REQUIRES(g_jit_debug_mutex);
+
+// Returns approximate memory used by all JITCodeEntries.
+size_t GetJITCodeEntryMemUsage()
+ REQUIRES(g_jit_debug_mutex);
} // namespace art
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 659c55a289..e41667a5bf 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -549,7 +549,11 @@ void JitCodeCache::FreeCode(const void* code_ptr) {
uintptr_t allocation = FromCodeToAllocation(code_ptr);
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.
- DeleteJITCodeEntryForAddress(reinterpret_cast<uintptr_t>(code_ptr));
+ MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ JITCodeEntry* entry = GetJITCodeEntry(reinterpret_cast<uintptr_t>(code_ptr));
+ if (entry != nullptr) {
+ DecrementJITCodeEntryRefcount(entry, reinterpret_cast<uintptr_t>(code_ptr));
+ }
if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
FreeData(GetRootTable(code_ptr));
} // else this is a JNI stub without any data.
diff --git a/runtime/leb128.h b/runtime/leb128.h
index 2bfed7f539..9fb09d8fc2 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -241,7 +241,7 @@ static inline void EncodeUnsignedLeb128(Vector* dest, uint32_t value) {
static inline void UpdateUnsignedLeb128(uint8_t* dest, uint32_t value) {
const uint8_t* old_end = dest;
uint32_t old_value = DecodeUnsignedLeb128(&old_end);
- DCHECK_LE(value, old_value);
+ DCHECK_LE(UnsignedLeb128Size(value), UnsignedLeb128Size(old_value));
for (uint8_t* end = EncodeUnsignedLeb128(dest, value); end < old_end; end++) {
// Use longer encoding than necessary to fill the allocated space.
end[-1] |= 0x80;
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 8abf8a6003..55e9c390cd 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -927,9 +927,6 @@ void* MemMap::MapInternal(void* addr,
UNUSED(low_4gb);
#endif
DCHECK_ALIGNED(length, kPageSize);
- if (low_4gb) {
- DCHECK_EQ(flags & MAP_FIXED, 0);
- }
// TODO:
// A page allocator would be a useful abstraction here, as
// 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 55c588930e..84b032620f 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -81,9 +81,9 @@ class MANAGED Class FINAL : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ClassStatus GetStatus() REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid including "subtype_check_bits_and_status.h" to get the field.
- // The ClassStatus is always in the least-significant bits of status_.
+ // The ClassStatus is always in the 4 most-significant bits of status_.
return enum_cast<ClassStatus>(
- static_cast<uint32_t>(GetField32Volatile<kVerifyFlags>(StatusOffset())) & 0xff);
+ static_cast<uint32_t>(GetField32Volatile<kVerifyFlags>(StatusOffset())) >> (32 - 4));
}
// This is static because 'this' may be moved by GC.
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index d7d647b8fd..0e2db932bb 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -42,6 +42,12 @@ static constexpr uint32_t kAccEnum = 0x4000; // class, field, ic (1.5)
static constexpr uint32_t kAccJavaFlagsMask = 0xffff; // bits set from Java sources (low 16)
+// The following flags are used to insert hidden API access flags into boot
+// class path dex files. They are decoded by DexFile::ClassDataItemIterator and
+// removed from the access flags before being used by the runtime.
+static constexpr uint32_t kAccDexHiddenBit = 0x00000020; // field, method (not native)
+static constexpr uint32_t kAccDexHiddenBitNative = 0x00000200; // method (native)
+
static constexpr uint32_t kAccConstructor = 0x00010000; // method (dex only) <(cl)init>
static constexpr uint32_t kAccDeclaredSynchronized = 0x00020000; // method (dex only)
static constexpr uint32_t kAccClassIsProxy = 0x00040000; // class (dex only)
@@ -83,9 +89,11 @@ static constexpr uint32_t kAccMustCountLocks = 0x04000000; // method (ru
// virtual call.
static constexpr uint32_t kAccSingleImplementation = 0x08000000; // method (runtime)
+static constexpr uint32_t kAccHiddenApiBits = 0x30000000; // field, method
+
// Not currently used, except for intrinsic methods where these bits
// are part of the intrinsic ordinal.
-static constexpr uint32_t kAccMayBeUnusedBits = 0x70000000;
+static constexpr uint32_t kAccMayBeUnusedBits = 0x40000000;
// Set by the compiler driver when compiling boot classes with instrinsic methods.
static constexpr uint32_t kAccIntrinsic = 0x80000000; // method (runtime)
@@ -100,8 +108,9 @@ static constexpr uint32_t kAccClassIsFinalizable = 0x80000000;
// Continuous sequence of bits used to hold the ordinal of an intrinsic method. Flags
// which overlap are not valid when kAccIntrinsic is set.
-static constexpr uint32_t kAccIntrinsicBits = kAccMayBeUnusedBits | kAccSingleImplementation |
- kAccMustCountLocks | kAccCompileDontBother | kAccDefaultConflict | kAccPreviouslyWarm;
+static constexpr uint32_t kAccIntrinsicBits = kAccMayBeUnusedBits | kAccHiddenApiBits |
+ kAccSingleImplementation | kAccMustCountLocks | kAccCompileDontBother | kAccDefaultConflict |
+ kAccPreviouslyWarm;
// Valid (meaningful) bits for a field.
static constexpr uint32_t kAccValidFieldFlags = kAccPublic | kAccPrivate | kAccProtected |
@@ -127,6 +136,8 @@ static constexpr uint32_t kAccValidClassFlags = kAccPublic | kAccFinal | kAccSup
static constexpr uint32_t kAccValidInterfaceFlags = kAccPublic | kAccInterface |
kAccAbstract | kAccSynthetic | kAccAnnotation;
+static constexpr uint32_t kAccVisibilityFlags = kAccPublic | kAccPrivate | kAccProtected;
+
} // namespace art
#endif // ART_RUNTIME_MODIFIERS_H_
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index a992b5cb5b..0f430874cf 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -27,6 +27,7 @@
#include <class_loader_context.h>
#include "common_throws.h"
#include "compiler_filter.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "jni_internal.h"
@@ -188,12 +189,13 @@ static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem
dex_mem_map->Begin(),
dex_mem_map->End());
std::string error_message;
- std::unique_ptr<const DexFile> dex_file(DexFileLoader::Open(location,
- 0,
- std::move(dex_mem_map),
- /* verify */ true,
- /* verify_location */ true,
- &error_message));
+ const ArtDexFileLoader dex_file_loader;
+ std::unique_ptr<const DexFile> dex_file(dex_file_loader.Open(location,
+ 0,
+ std::move(dex_mem_map),
+ /* verify */ true,
+ /* verify_location */ true,
+ &error_message));
if (dex_file == nullptr) {
ScopedObjectAccess soa(env);
ThrowWrappedIOException("%s", error_message.c_str());
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 3e8040bfa5..ed0eb97da1 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -160,12 +160,22 @@ static jobjectArray VMStack_getThreadStackTrace(JNIEnv* env, jclass, jobject jav
return Thread::InternalStackTraceToStackTraceElementArray(soa, trace);
}
+static jobjectArray VMStack_getAnnotatedThreadStackTrace(JNIEnv* env, jclass, jobject javaThread) {
+ ScopedFastNativeObjectAccess soa(env);
+ auto fn = [](Thread* thread, const ScopedFastNativeObjectAccess& soaa)
+ REQUIRES_SHARED(Locks::mutator_lock_) -> jobjectArray {
+ return thread->CreateAnnotatedStackTrace(soaa);
+ };
+ return GetThreadStack(soa, javaThread, fn);
+}
+
static JNINativeMethod gMethods[] = {
FAST_NATIVE_METHOD(VMStack, fillStackTraceElements, "(Ljava/lang/Thread;[Ljava/lang/StackTraceElement;)I"),
FAST_NATIVE_METHOD(VMStack, getCallingClassLoader, "()Ljava/lang/ClassLoader;"),
FAST_NATIVE_METHOD(VMStack, getClosestUserClassLoader, "()Ljava/lang/ClassLoader;"),
FAST_NATIVE_METHOD(VMStack, getStackClass2, "()Ljava/lang/Class;"),
FAST_NATIVE_METHOD(VMStack, getThreadStackTrace, "(Ljava/lang/Thread;)[Ljava/lang/StackTraceElement;"),
+ FAST_NATIVE_METHOD(VMStack, getAnnotatedThreadStackTrace, "(Ljava/lang/Thread;)[Ldalvik/system/AnnotatedStackTraceElement;"),
};
void register_dalvik_system_VMStack(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index fd80aaeaf7..e22726b79b 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -173,6 +173,7 @@ enum {
DEBUG_JAVA_DEBUGGABLE = 1 << 8,
DISABLE_VERIFIER = 1 << 9,
ONLY_USE_SYSTEM_OAT_FILES = 1 << 10,
+ DISABLE_HIDDEN_API_CHECKS = 1 << 11,
};
static uint32_t EnableDebugFeatures(uint32_t runtime_flags) {
@@ -284,6 +285,11 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
runtime_flags &= ~ONLY_USE_SYSTEM_OAT_FILES;
}
+ if ((runtime_flags & DISABLE_HIDDEN_API_CHECKS) != 0) {
+ Runtime::Current()->DisableHiddenApiChecks();
+ runtime_flags &= ~DISABLE_HIDDEN_API_CHECKS;
+ }
+
if (runtime_flags != 0) {
LOG(ERROR) << StringPrintf("Unknown bits set in runtime_flags: %#x", runtime_flags);
}
@@ -331,6 +337,9 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
}
}
+ DCHECK(!is_system_server || !Runtime::Current()->AreHiddenApiChecksEnabled())
+ << "SystemServer should be forked with DISABLE_HIDDEN_API_CHECKS";
+
if (instruction_set != nullptr && !is_system_server) {
ScopedUtfChars isa_string(env, instruction_set);
InstructionSet isa = GetInstructionSetFromString(isa_string.c_str());
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index ec9455289f..2fef70b2ae 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -333,27 +333,35 @@ void DumpNativeStack(std::ostream& os,
os << prefix << StringPrintf("#%02zu pc ", it->num);
bool try_addr2line = false;
if (!BacktraceMap::IsValid(it->map)) {
- os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " ???"
- : "%08" PRIxPTR " ???",
+ os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIx64 " ???"
+ : "%08" PRIx64 " ???",
it->pc);
} else {
- os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIxPTR " "
- : "%08" PRIxPTR " ",
+ os << StringPrintf(Is64BitInstructionSet(kRuntimeISA) ? "%016" PRIx64 " "
+ : "%08" PRIx64 " ",
it->rel_pc);
- os << it->map.name;
+ if (it->map.name.empty()) {
+ os << StringPrintf("<anonymous:%" PRIx64 ">", it->map.start);
+ } else {
+ os << it->map.name;
+ }
os << " (";
if (!it->func_name.empty()) {
os << it->func_name;
if (it->func_offset != 0) {
os << "+" << it->func_offset;
}
- try_addr2line = true;
+ // Functions found using the gdb jit interface will be in an empty
+ // map that cannot be found using addr2line.
+ if (!it->map.name.empty()) {
+ try_addr2line = true;
+ }
} else if (current_method != nullptr &&
Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
PcIsWithinQuickCode(current_method, it->pc)) {
const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
os << current_method->JniLongName() << "+"
- << (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
+ << (it->pc - reinterpret_cast<uint64_t>(start_of_code));
} else {
os << "???";
}
diff --git a/runtime/oat.h b/runtime/oat.h
index 6d4f18bdb1..8f81010a06 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: 4-bit ClassStatus.
- static constexpr uint8_t kOatVersion[] = { '1', '3', '6', '\0' };
+ // Last oat version changed reason: Math.pow() intrinsic.
+ static constexpr uint8_t kOatVersion[] = { '1', '3', '8', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index df07a191bc..c03dbccbc4 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -43,6 +43,7 @@
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_types.h"
#include "dex/standard_dex_file.h"
@@ -194,10 +195,6 @@ OatFileBase* OatFileBase::OpenOatFile(const std::string& vdex_filename,
ret->PreLoad();
- if (!ret->LoadVdex(vdex_filename, writable, low_4gb, error_msg)) {
- return nullptr;
- }
-
if (!ret->Load(elf_filename,
oat_file_begin,
writable,
@@ -211,6 +208,10 @@ OatFileBase* OatFileBase::OpenOatFile(const std::string& vdex_filename,
return nullptr;
}
+ if (!ret->LoadVdex(vdex_filename, writable, low_4gb, error_msg)) {
+ return nullptr;
+ }
+
ret->PreSetup(elf_filename);
if (!ret->Setup(abs_dex_location, error_msg)) {
@@ -234,10 +235,6 @@ OatFileBase* OatFileBase::OpenOatFile(int vdex_fd,
std::string* error_msg) {
std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(oat_location, executable));
- if (!ret->LoadVdex(vdex_fd, vdex_location, writable, low_4gb, error_msg)) {
- return nullptr;
- }
-
if (!ret->Load(oat_fd,
oat_file_begin,
writable,
@@ -251,6 +248,10 @@ OatFileBase* OatFileBase::OpenOatFile(int vdex_fd,
return nullptr;
}
+ if (!ret->LoadVdex(vdex_fd, vdex_location, writable, low_4gb, error_msg)) {
+ return nullptr;
+ }
+
ret->PreSetup(oat_location);
if (!ret->Setup(abs_dex_location, error_msg)) {
@@ -264,7 +265,14 @@ bool OatFileBase::LoadVdex(const std::string& vdex_filename,
bool writable,
bool low_4gb,
std::string* error_msg) {
- vdex_ = VdexFile::Open(vdex_filename, writable, low_4gb, /* unquicken*/ false, error_msg);
+ vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
+ vdex_end_ - vdex_begin_,
+ vdex_begin_ != nullptr /* mmap_reuse */,
+ vdex_filename,
+ writable,
+ low_4gb,
+ /* unquicken*/ false,
+ error_msg);
if (vdex_.get() == nullptr) {
*error_msg = StringPrintf("Failed to load vdex file '%s' %s",
vdex_filename.c_str(),
@@ -285,13 +293,16 @@ bool OatFileBase::LoadVdex(int vdex_fd,
if (rc == -1) {
PLOG(WARNING) << "Failed getting length of vdex file";
} else {
- vdex_ = VdexFile::Open(vdex_fd,
- s.st_size,
- vdex_filename,
- writable,
- low_4gb,
- false /* unquicken */,
- error_msg);
+ vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
+ vdex_end_ - vdex_begin_,
+ vdex_begin_ != nullptr /* mmap_reuse */,
+ vdex_fd,
+ s.st_size,
+ vdex_filename,
+ writable,
+ low_4gb,
+ false /* unquicken */,
+ error_msg);
if (vdex_.get() == nullptr) {
*error_msg = "Failed opening vdex file.";
return false;
@@ -339,7 +350,7 @@ bool OatFileBase::ComputeFields(uint8_t* requested_base,
} else {
bss_end_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatbsslastword", &symbol_error_msg));
if (bss_end_ == nullptr) {
- *error_msg = StringPrintf("Failed to find oatbasslastword symbol in '%s'", file_path.c_str());
+ *error_msg = StringPrintf("Failed to find oatbsslastword symbol in '%s'", file_path.c_str());
return false;
}
// Readjust to be non-inclusive upper bound.
@@ -351,6 +362,20 @@ bool OatFileBase::ComputeFields(uint8_t* requested_base,
bss_roots_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatbssroots", &symbol_error_msg));
}
+ vdex_begin_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatdex", &symbol_error_msg));
+ if (vdex_begin_ == nullptr) {
+ // No .vdex section.
+ vdex_end_ = nullptr;
+ } else {
+ vdex_end_ = const_cast<uint8_t*>(FindDynamicSymbolAddress("oatdexlastword", &symbol_error_msg));
+ if (vdex_end_ == nullptr) {
+ *error_msg = StringPrintf("Failed to find oatdexlastword symbol in '%s'", file_path.c_str());
+ return false;
+ }
+ // Readjust to be non-inclusive upper bound.
+ vdex_end_ += sizeof(uint32_t);
+ }
+
return true;
}
@@ -1441,6 +1466,8 @@ OatFile::OatFile(const std::string& location, bool is_executable)
bss_methods_(nullptr),
bss_roots_(nullptr),
is_executable_(is_executable),
+ vdex_begin_(nullptr),
+ vdex_end_(nullptr),
secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
}
@@ -1471,6 +1498,14 @@ const uint8_t* OatFile::BssEnd() const {
return bss_end_;
}
+const uint8_t* OatFile::VdexBegin() const {
+ return vdex_begin_;
+}
+
+const uint8_t* OatFile::VdexEnd() const {
+ return vdex_end_;
+}
+
const uint8_t* OatFile::DexBegin() const {
return vdex_->Begin();
}
@@ -1500,21 +1535,6 @@ ArrayRef<GcRoot<mirror::Object>> OatFile::GetBssGcRoots() const {
}
}
-uint32_t OatFile::GetDebugInfoOffset(const DexFile& dex_file, uint32_t debug_info_off) {
- // Note that although the specification says that 0 should be used if there
- // is no debug information, some applications incorrectly use 0xFFFFFFFF.
- // The following check also handles debug_info_off == 0.
- if (debug_info_off < dex_file.Size() || debug_info_off == 0xFFFFFFFF) {
- return debug_info_off;
- }
- const OatFile::OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
- if (oat_dex_file == nullptr || (oat_dex_file->GetOatFile() == nullptr)) {
- return debug_info_off;
- }
- return oat_dex_file->GetOatFile()->GetVdexFile()->GetDebugInfoOffset(
- dex_file, debug_info_off);
-}
-
const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location,
const uint32_t* dex_location_checksum,
std::string* error_msg) const {
@@ -1632,14 +1652,15 @@ std::unique_ptr<const DexFile> OatFile::OatDexFile::OpenDexFile(std::string* err
ScopedTrace trace(__PRETTY_FUNCTION__);
static constexpr bool kVerify = false;
static constexpr bool kVerifyChecksum = false;
- return DexFileLoader::Open(dex_file_pointer_,
- FileSize(),
- dex_file_location_,
- dex_file_location_checksum_,
- this,
- kVerify,
- kVerifyChecksum,
- error_msg);
+ const ArtDexFileLoader dex_file_loader;
+ return dex_file_loader.Open(dex_file_pointer_,
+ FileSize(),
+ dex_file_location_,
+ dex_file_location_checksum_,
+ this,
+ kVerify,
+ kVerifyChecksum,
+ error_msg);
}
uint32_t OatFile::OatDexFile::GetOatClassOffset(uint16_t class_def_index) const {
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 02318b68b7..bf22e0b88b 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -115,10 +115,6 @@ class OatFile {
const char* abs_dex_location,
std::string* error_msg);
- // Return the actual debug info offset for an offset that might be actually pointing to
- // dequickening info. The returned debug info offset is the one originally in the the dex file.
- static uint32_t GetDebugInfoOffset(const DexFile& dex_file, uint32_t debug_info_off);
-
virtual ~OatFile();
bool IsExecutable() const {
@@ -279,6 +275,10 @@ class OatFile {
return BssEnd() - BssBegin();
}
+ size_t VdexSize() const {
+ return VdexEnd() - VdexBegin();
+ }
+
size_t BssMethodsOffset() const {
// Note: This is used only for symbolizer and needs to return a valid .bss offset.
return (bss_methods_ != nullptr) ? bss_methods_ - BssBegin() : BssRootsOffset();
@@ -299,6 +299,9 @@ class OatFile {
const uint8_t* BssBegin() const;
const uint8_t* BssEnd() const;
+ const uint8_t* VdexBegin() const;
+ const uint8_t* VdexEnd() const;
+
const uint8_t* DexBegin() const;
const uint8_t* DexEnd() const;
@@ -358,6 +361,12 @@ class OatFile {
// Was this oat_file loaded executable?
const bool is_executable_;
+ // Pointer to the .vdex section, if present, otherwise null.
+ uint8_t* vdex_begin_;
+
+ // Pointer to the end of the .vdex section, if present, otherwise null.
+ uint8_t* vdex_end_;
+
// Owning storage for the OatDexFile objects.
std::vector<const OatDexFile*> oat_dex_files_storage_;
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 240030cf5b..73ca19a363 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -28,6 +28,7 @@
#include "base/stl_util.h"
#include "class_linker.h"
#include "compiler_filter.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
#include "gc/heap.h"
@@ -869,10 +870,11 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() {
required_dex_checksums_found_ = false;
cached_required_dex_checksums_.clear();
std::string error_msg;
- if (DexFileLoader::GetMultiDexChecksums(dex_location_.c_str(),
- &cached_required_dex_checksums_,
- &error_msg,
- zip_fd_)) {
+ const ArtDexFileLoader dex_file_loader;
+ if (dex_file_loader.GetMultiDexChecksums(dex_location_.c_str(),
+ &cached_required_dex_checksums_,
+ &error_msg,
+ zip_fd_)) {
required_dex_checksums_found_ = true;
has_original_dex_files_ = true;
} else {
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 29b9bfcf7f..9503360167 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -31,6 +31,7 @@
#include "base/systrace.h"
#include "class_linker.h"
#include "class_loader_context.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_tracking_registrar.h"
@@ -40,6 +41,7 @@
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
+#include "oat_file.h"
#include "oat_file_assistant.h"
#include "obj_ptr-inl.h"
#include "scoped_thread_state_change-inl.h"
@@ -527,8 +529,14 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (source_oat_file != nullptr) {
bool added_image_space = false;
if (source_oat_file->IsExecutable()) {
- std::unique_ptr<gc::space::ImageSpace> image_space =
- kEnableAppImage ? oat_file_assistant.OpenImageSpace(source_oat_file) : nullptr;
+ // We need to throw away the image space if we are debuggable but the oat-file source of the
+ // image is not, otherwise we might get classes with inlined methods or other such things.
+ std::unique_ptr<gc::space::ImageSpace> image_space;
+ if (kEnableAppImage && (!runtime->IsJavaDebuggable() || source_oat_file->IsDebuggable())) {
+ image_space = oat_file_assistant.OpenImageSpace(source_oat_file);
+ } else {
+ image_space = nullptr;
+ }
if (image_space != nullptr) {
ScopedObjectAccess soa(self);
StackHandleScope<1> hs(self);
@@ -606,12 +614,13 @@ std::vector<std::unique_ptr<const DexFile>> OatFileManager::OpenDexFilesFromOat(
if (oat_file_assistant.HasOriginalDexFiles()) {
if (Runtime::Current()->IsDexFileFallbackEnabled()) {
static constexpr bool kVerifyChecksum = true;
- if (!DexFileLoader::Open(dex_location,
- dex_location,
- Runtime::Current()->IsVerificationEnabled(),
- kVerifyChecksum,
- /*out*/ &error_msg,
- &dex_files)) {
+ const ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(dex_location,
+ dex_location,
+ Runtime::Current()->IsVerificationEnabled(),
+ kVerifyChecksum,
+ /*out*/ &error_msg,
+ &dex_files)) {
LOG(WARNING) << error_msg;
error_msgs->push_back("Failed to open dex files from " + std::string(dex_location)
+ " because: " + error_msg);
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index 7bf0f84596..8d864018ab 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -21,11 +21,13 @@
#include <gtest/gtest.h>
#include "common_runtime_test.h"
+#include "dexopt_test.h"
#include "scoped_thread_state_change-inl.h"
+#include "vdex_file.h"
namespace art {
-class OatFileTest : public CommonRuntimeTest {
+class OatFileTest : public DexoptTest {
};
TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation) {
@@ -62,4 +64,28 @@ TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation) {
"/data/app/foo/base.apk", "o/base.apk"));
}
+TEST_F(OatFileTest, LoadOat) {
+ std::string dex_location = GetScratchDir() + "/LoadOat.jar";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str(), CompilerFilter::kSpeed);
+
+ std::string oat_location;
+ std::string error_msg;
+ ASSERT_TRUE(OatFileAssistant::DexLocationToOatFilename(
+ dex_location, kRuntimeISA, &oat_location, &error_msg)) << error_msg;
+ std::unique_ptr<OatFile> odex_file(OatFile::Open(oat_location.c_str(),
+ oat_location.c_str(),
+ nullptr,
+ nullptr,
+ false,
+ /*low_4gb*/false,
+ dex_location.c_str(),
+ &error_msg));
+ ASSERT_TRUE(odex_file.get() != nullptr);
+
+ // Check that the vdex file was loaded in the reserved space of odex file.
+ EXPECT_EQ(odex_file->GetVdexFile()->Begin(), odex_file->VdexBegin());
+}
+
} // namespace art
diff --git a/runtime/obj_ptr.h b/runtime/obj_ptr.h
index 70e767acf6..14fdba31d9 100644
--- a/runtime/obj_ptr.h
+++ b/runtime/obj_ptr.h
@@ -37,7 +37,7 @@ constexpr bool kObjPtrPoisoningValidateOnCopy = false;
template<class MirrorType>
class ObjPtr {
static constexpr size_t kCookieShift =
- sizeof(kHeapReferenceSize) * kBitsPerByte - kObjectAlignmentShift;
+ kHeapReferenceSize * kBitsPerByte - kObjectAlignmentShift;
static constexpr size_t kCookieBits = sizeof(uintptr_t) * kBitsPerByte - kCookieShift;
static constexpr uintptr_t kCookieMask = (static_cast<uintptr_t>(1u) << kCookieBits) - 1;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 3ac3d03e90..92eb703338 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -327,6 +327,11 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.WithType<bool>()
.WithValueMap({{"false", false}, {"true", true}})
.IntoKey(M::SlowDebug)
+ .Define("-Xtarget-sdk-version:_")
+ .WithType<int>()
+ .IntoKey(M::TargetSdkVersion)
+ .Define("-Xno-hidden-api-checks")
+ .IntoKey(M::NoHiddenApiChecks)
.Ignore({
"-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
"-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
diff --git a/runtime/quicken_info.h b/runtime/quicken_info.h
index ce11f3c19b..52eca61c06 100644
--- a/runtime/quicken_info.h
+++ b/runtime/quicken_info.h
@@ -17,15 +17,93 @@
#ifndef ART_RUNTIME_QUICKEN_INFO_H_
#define ART_RUNTIME_QUICKEN_INFO_H_
+#include "base/array_ref.h"
#include "dex/dex_instruction.h"
+#include "leb128.h"
namespace art {
-// QuickenInfoTable is a table of 16 bit dex indices. There is one slot fo every instruction that is
-// possibly dequickenable.
+// Table for getting the offset of quicken info. Doesn't have one slot for each index, so a
+// combination of iteration and indexing is required to get the quicken info for a given dex method
+// index.
+class QuickenInfoOffsetTableAccessor {
+ public:
+ using TableType = uint32_t;
+ static constexpr uint32_t kElementsPerIndex = 16;
+
+ class Builder {
+ public:
+ explicit Builder(std::vector<uint8_t>* out_data) : out_data_(out_data) {}
+
+ void AddOffset(uint32_t index) {
+ out_data_->insert(out_data_->end(),
+ reinterpret_cast<const uint8_t*>(&index),
+ reinterpret_cast<const uint8_t*>(&index + 1));
+ }
+
+ private:
+ std::vector<uint8_t>* const out_data_;
+ };
+
+ // The table only covers every kElementsPerIndex indices.
+ static bool IsCoveredIndex(uint32_t index) {
+ return index % kElementsPerIndex == 0;
+ }
+
+ explicit QuickenInfoOffsetTableAccessor(const uint8_t* data, uint32_t max_index)
+ : table_(reinterpret_cast<const uint32_t*>(data)),
+ num_indices_(RoundUp(max_index, kElementsPerIndex) / kElementsPerIndex) {}
+
+ size_t SizeInBytes() const {
+ return NumIndices() * sizeof(table_[0]);
+ }
+
+ uint32_t NumIndices() const {
+ return num_indices_;
+ }
+
+ // Returns the offset for the index at or before the desired index. If the offset is for an index
+ // before the desired one, remainder is how many elements to traverse to reach the desired index.
+ TableType ElementOffset(uint32_t index, uint32_t* remainder) const {
+ *remainder = index % kElementsPerIndex;
+ return table_[index / kElementsPerIndex];
+ }
+
+ const uint8_t* DataEnd() const {
+ return reinterpret_cast<const uint8_t*>(table_ + NumIndices());
+ }
+
+ static uint32_t Alignment() {
+ return alignof(TableType);
+ }
+
+ private:
+ const TableType* table_;
+ uint32_t num_indices_;
+};
+
+// QuickenInfoTable is a table of 16 bit dex indices. There is one slot for every instruction that
+// is possibly dequickenable.
class QuickenInfoTable {
public:
- explicit QuickenInfoTable(const uint8_t* data) : data_(data) {}
+ class Builder {
+ public:
+ Builder(std::vector<uint8_t>* out_data, size_t num_elements) : out_data_(out_data) {
+ EncodeUnsignedLeb128(out_data_, num_elements);
+ }
+
+ void AddIndex(uint16_t index) {
+ out_data_->push_back(static_cast<uint8_t>(index));
+ out_data_->push_back(static_cast<uint8_t>(index >> 8));
+ }
+
+ private:
+ std::vector<uint8_t>* const out_data_;
+ };
+
+ explicit QuickenInfoTable(ArrayRef<const uint8_t> data)
+ : data_(data.data()),
+ num_elements_(!data.empty() ? DecodeUnsignedLeb128(&data_) : 0u) {}
bool IsNull() const {
return data_ == nullptr;
@@ -44,8 +122,18 @@ class QuickenInfoTable {
return bytes / sizeof(uint16_t);
}
+ static size_t SizeInBytes(ArrayRef<const uint8_t> data) {
+ QuickenInfoTable table(data);
+ return table.data_ + table.NumIndices() * 2 - data.data();
+ }
+
+ uint32_t NumIndices() const {
+ return num_elements_;
+ }
+
private:
- const uint8_t* const data_;
+ const uint8_t* data_;
+ const uint32_t num_elements_;
DISALLOW_COPY_AND_ASSIGN(QuickenInfoTable);
};
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 2f45b100d7..33bebe0887 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -69,6 +69,7 @@
#include "class_linker-inl.h"
#include "compiler_callbacks.h"
#include "debugger.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "elf_file.h"
#include "entrypoints/runtime_asm_entrypoints.h"
@@ -249,7 +250,7 @@ Runtime::Runtime()
preinitialization_transactions_(),
verify_(verifier::VerifyMode::kNone),
allow_dex_file_fallback_(true),
- target_sdk_version_(0),
+ target_sdk_version_(kUnsetSdkVersion),
implicit_null_checks_(false),
implicit_so_checks_(false),
implicit_suspend_checks_(false),
@@ -264,6 +265,7 @@ Runtime::Runtime()
oat_file_manager_(nullptr),
is_low_memory_mode_(false),
safe_mode_(false),
+ do_hidden_api_checks_(false),
dump_native_stack_on_sig_quit_(true),
pruned_dalvik_cache_(false),
// Initially assume we perceive jank in case the process state is never updated.
@@ -291,11 +293,21 @@ Runtime::~Runtime() {
const bool attach_shutdown_thread = self == nullptr;
if (attach_shutdown_thread) {
// We can only create a peer if the runtime is actually started. This is only not true during
- // some tests.
- CHECK(AttachCurrentThread("Shutdown thread",
- false,
- GetSystemThreadGroup(),
- /* Create peer */IsStarted()));
+ // some tests. If there is extreme memory pressure the allocation of the thread peer can fail.
+ // In this case we will just try again without allocating a peer so that shutdown can continue.
+ // Very few things are actually capable of distinguishing between the peer & peerless states so
+ // this should be fine.
+ bool thread_attached = AttachCurrentThread("Shutdown thread",
+ /* as_daemon */ false,
+ GetSystemThreadGroup(),
+ /* Create peer */ IsStarted());
+ if (UNLIKELY(!thread_attached)) {
+ LOG(WARNING) << "Failed to attach shutdown thread. Trying again without a peer.";
+ CHECK(AttachCurrentThread("Shutdown thread (no java peer)",
+ /* as_daemon */ false,
+ /* thread_group*/ nullptr,
+ /* Create peer */ false));
+ }
self = Thread::Current();
} else {
LOG(WARNING) << "Current thread not detached in Runtime shutdown";
@@ -1031,6 +1043,7 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
return failure_count;
}
+ const ArtDexFileLoader dex_file_loader;
failure_count = 0;
for (size_t i = 0; i < dex_filenames.size(); i++) {
const char* dex_filename = dex_filenames[i].c_str();
@@ -1041,12 +1054,12 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
continue;
}
- if (!DexFileLoader::Open(dex_filename,
- dex_location,
- Runtime::Current()->IsVerificationEnabled(),
- kVerifyChecksum,
- &error_msg,
- dex_files)) {
+ if (!dex_file_loader.Open(dex_filename,
+ dex_location,
+ Runtime::Current()->IsVerificationEnabled(),
+ kVerifyChecksum,
+ &error_msg,
+ dex_files)) {
LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
++failure_count;
}
@@ -1154,6 +1167,12 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
verify_ = runtime_options.GetOrDefault(Opt::Verify);
allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
+ target_sdk_version_ = runtime_options.GetOrDefault(Opt::TargetSdkVersion);
+
+ if (runtime_options.Exists(Opt::NoHiddenApiChecks)) {
+ do_hidden_api_checks_ = false;
+ }
+
no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
@@ -1541,6 +1560,7 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
}
static bool EnsureJvmtiPlugin(Runtime* runtime,
+ bool allow_non_debuggable_tooling,
std::vector<Plugin>* plugins,
std::string* error_msg) {
constexpr const char* plugin_name = kIsDebugBuild ? "libopenjdkjvmtid.so" : "libopenjdkjvmti.so";
@@ -1552,9 +1572,9 @@ static bool EnsureJvmtiPlugin(Runtime* runtime,
}
}
- // Is the process debuggable? Otherwise, do not attempt to load the plugin.
- // TODO Support a crimped jvmti for non-debuggable runtimes.
- if (!runtime->IsJavaDebuggable()) {
+ // Is the process debuggable? Otherwise, do not attempt to load the plugin unless we are
+ // specifically allowed.
+ if (!allow_non_debuggable_tooling && !runtime->IsJavaDebuggable()) {
*error_msg = "Process is not debuggable.";
return false;
}
@@ -1575,9 +1595,12 @@ static bool EnsureJvmtiPlugin(Runtime* runtime,
// revisit this and make sure we're doing this on the right thread
// (and we synchronize access to any shared data structures like "agents_")
//
-void Runtime::AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader) {
+void Runtime::AttachAgent(JNIEnv* env,
+ const std::string& agent_arg,
+ jobject class_loader,
+ bool allow_non_debuggable_tooling) {
std::string error_msg;
- if (!EnsureJvmtiPlugin(this, &plugins_, &error_msg)) {
+ if (!EnsureJvmtiPlugin(this, allow_non_debuggable_tooling, &plugins_, &error_msg)) {
LOG(WARNING) << "Could not load plugin: " << error_msg;
ScopedObjectAccess soa(Thread::Current());
ThrowIOException("%s", error_msg.c_str());
diff --git a/runtime/runtime.h b/runtime/runtime.h
index c8edabce09..022a1be124 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -520,6 +520,14 @@ class Runtime {
bool IsVerificationEnabled() const;
bool IsVerificationSoftFail() const;
+ void DisableHiddenApiChecks() {
+ do_hidden_api_checks_ = false;
+ }
+
+ bool AreHiddenApiChecksEnabled() const {
+ return do_hidden_api_checks_;
+ }
+
bool IsDexFileFallbackEnabled() const {
return allow_dex_file_fallback_;
}
@@ -661,7 +669,10 @@ class Runtime {
void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
- void AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader);
+ void AttachAgent(JNIEnv* env,
+ const std::string& agent_arg,
+ jobject class_loader,
+ bool allow_non_debuggable_tooling = false);
const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
return agents_;
@@ -706,6 +717,8 @@ class Runtime {
return jdwp_provider_;
}
+ static constexpr int32_t kUnsetSdkVersion = 0u;
+
private:
static void InitPlatformSignalHandlers();
@@ -952,6 +965,9 @@ class Runtime {
// Whether the application should run in safe mode, that is, interpreter only.
bool safe_mode_;
+ // Whether access checks on hidden API should be performed.
+ bool do_hidden_api_checks_;
+
// Whether threads should dump their native stack on SIGQUIT.
bool dump_native_stack_on_sig_quit_;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 1dd3de5039..6e1a68b07d 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -118,6 +118,8 @@ RUNTIME_OPTIONS_KEY (std::vector<std::string>, \
ImageCompilerOptions) // -Ximage-compiler-option ...
RUNTIME_OPTIONS_KEY (verifier::VerifyMode, \
Verify, verifier::VerifyMode::kEnable)
+RUNTIME_OPTIONS_KEY (int, TargetSdkVersion, Runtime::kUnsetSdkVersion)
+RUNTIME_OPTIONS_KEY (Unit, NoHiddenApiChecks)
RUNTIME_OPTIONS_KEY (std::string, NativeBridge)
RUNTIME_OPTIONS_KEY (unsigned int, ZygoteMaxFailedBoots, 10)
RUNTIME_OPTIONS_KEY (Unit, NoDexFileFallback)
diff --git a/runtime/subtype_check_bits.h b/runtime/subtype_check_bits.h
index 4305ff849a..462f203978 100644
--- a/runtime/subtype_check_bits.h
+++ b/runtime/subtype_check_bits.h
@@ -26,22 +26,22 @@ namespace art {
/**
* The SubtypeCheckBits memory layout (in bits):
*
- * Variable
- * |
- * <---- up to 23 bits ----> v +---> 1 bit
- * |
- * +-------------------------+--------+-----------+---++
- * | Bitstring | |
- * +-------------------------+--------+-----------+ |
- * | Path To Root | Next | (unused) | OF |
- * +---+---------------------+--------+ | |
- * | | | | | ... | | (0....0) | |
- * +---+---------------------+--------+-----------+----+
- * MSB LSB
+ * 1 bit Variable
+ * | |
+ * v v <---- up to 23 bits ---->
+ *
+ * +----+-----------+--------+-------------------------+
+ * | | Bitstring |
+ * + +-----------+--------+-------------------------+
+ * | OF | (unused) | Next | Path To Root |
+ * + | |--------+----+----------+----+----+
+ * | | (0....0) | | | ... | | |
+ * +----+-----------+--------+----+----------+----+----+
+ * MSB (most significant bit) LSB
*
* The bitstring takes up to 23 bits; anything exceeding that is truncated:
* - Path To Root is a list of chars, encoded as a BitString:
- * starting at the root (in MSB), each character is a sibling index unique to the parent,
+ * starting at the root (in LSB), each character is a sibling index unique to the parent,
* Paths longer than BitString::kCapacity are truncated to fit within the BitString.
* - Next is a single BitStringChar (immediatelly following Path To Root)
* When new children are assigned paths, they get allocated the parent's Next value.
@@ -57,8 +57,8 @@ namespace art {
* See subtype_check.h and subtype_check_info.h for more details.
*/
BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size*/ BitString::BitStructSizeOf() + 1u)
- BitStructUint</*lsb*/0, /*width*/1> overflow_;
- BitStructField<BitString, /*lsb*/1> bitstring_;
+ BitStructField<BitString, /*lsb*/ 0> bitstring_;
+ BitStructUint</*lsb*/ BitString::BitStructSizeOf(), /*width*/ 1> overflow_;
BITSTRUCT_DEFINE_END(SubtypeCheckBits);
} // namespace art
diff --git a/runtime/subtype_check_bits_and_status.h b/runtime/subtype_check_bits_and_status.h
index 11cb9b9d21..321a723985 100644
--- a/runtime/subtype_check_bits_and_status.h
+++ b/runtime/subtype_check_bits_and_status.h
@@ -19,6 +19,7 @@
#include "base/bit_struct.h"
#include "base/bit_utils.h"
+#include "base/casts.h"
#include "class_status.h"
#include "subtype_check_bits.h"
@@ -36,13 +37,13 @@ static constexpr size_t NonNumericBitSizeOf() {
}
/**
- * MSB LSB
- * +---------------------------------------------------+---------------+
- * | | |
- * | SubtypeCheckBits | ClassStatus |
- * | | |
- * +---------------------------------------------------+---------------+
- * <----- 24 bits -----> <-- 8 bits -->
+ * MSB (most significant bit) LSB
+ * +---------------+---------------------------------------------------+
+ * | | |
+ * | ClassStatus | SubtypeCheckBits |
+ * | | |
+ * +---------------+---------------------------------------------------+
+ * <-- 4 bits --> <----- 28 bits ----->
*
* Invariants:
*
@@ -53,20 +54,25 @@ static constexpr size_t NonNumericBitSizeOf() {
* This enables a highly efficient path comparison between any two labels:
*
* src <: target :=
- * src >> (32 - len(path-to-root(target))) == target >> (32 - len(path-to-root(target))
+ * (src & mask) == (target & mask) where mask := (1u << len(path-to-root(target))) - 1u
*
- * In the above example, the RHS operands are a function of the depth. Since the target
- * is known at compile time, it becomes:
- *
- * src >> #imm_target_shift == #imm
+ * In the above example, the `len()` (and thus `mask`) is a function of the depth.
+ * Since the target is known at compile time, it becomes
+ * (src & #imm_mask) == #imm
+ * or
+ * ((src - #imm) << #imm_shift_to_remove_high_bits) == 0
+ * or a similar expression chosen for the best performance or code size.
*
* (This requires that path-to-root in `target` is not truncated, i.e. it is in the Assigned state).
*/
-static constexpr size_t kClassStatusBitSize = 8u; // NonNumericBitSizeOf<ClassStatus>()
+static constexpr size_t kClassStatusBitSize = MinimumBitsToStore(enum_cast<>(ClassStatus::kLast));
+static_assert(kClassStatusBitSize == 4u, "ClassStatus should need 4 bits.");
BITSTRUCT_DEFINE_START(SubtypeCheckBitsAndStatus, BitSizeOf<BitString::StorageType>())
- BitStructField<ClassStatus, /*lsb*/0, /*width*/kClassStatusBitSize> status_;
- BitStructField<SubtypeCheckBits, /*lsb*/kClassStatusBitSize> subtype_check_info_;
- BitStructInt</*lsb*/0, /*width*/BitSizeOf<BitString::StorageType>()> int32_alias_;
+ BitStructField<SubtypeCheckBits, /*lsb*/ 0> subtype_check_info_;
+ BitStructField<ClassStatus,
+ /*lsb*/ SubtypeCheckBits::BitStructSizeOf(),
+ /*width*/ kClassStatusBitSize> status_;
+ BitStructInt</*lsb*/ 0, /*width*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
BITSTRUCT_DEFINE_END(SubtypeCheckBitsAndStatus);
// Use the spare alignment from "ClassStatus" to store all the new SubtypeCheckInfo data.
diff --git a/runtime/subtype_check_info.h b/runtime/subtype_check_info.h
index 61d590bd59..08db77030e 100644
--- a/runtime/subtype_check_info.h
+++ b/runtime/subtype_check_info.h
@@ -296,8 +296,7 @@ struct SubtypeCheckInfo {
BitString::StorageType GetEncodedPathToRoot() const {
BitString::StorageType data = static_cast<BitString::StorageType>(GetPathToRoot());
// Bit strings are logically in the least-significant memory.
- // Shift it so the bits are all most-significant.
- return data << (BitSizeOf(data) - BitStructSizeOf<BitString>());
+ return data;
}
// Retrieve the path to root bitstring mask as a plain uintN_t that is amenable to
@@ -305,17 +304,7 @@ struct SubtypeCheckInfo {
BitString::StorageType GetEncodedPathToRootMask() const {
size_t num_bitchars = GetSafeDepth();
size_t bitlength = BitString::GetBitLengthTotalAtPosition(num_bitchars);
-
- BitString::StorageType mask_all =
- std::numeric_limits<BitString::StorageType>::max();
- BitString::StorageType mask_lsb =
- MaskLeastSignificant<BitString::StorageType>(
- BitSizeOf<BitString::StorageType>() - bitlength);
-
- BitString::StorageType result = mask_all & ~mask_lsb;
-
- // TODO: refactor above code into MaskMostSignificant?
- return result;
+ return MaskLeastSignificant<BitString::StorageType>(bitlength);
}
// Get the "Next" bitchar, assuming that there is one to get.
diff --git a/runtime/subtype_check_info_test.cc b/runtime/subtype_check_info_test.cc
index 338d75a285..91fcc07d65 100644
--- a/runtime/subtype_check_info_test.cc
+++ b/runtime/subtype_check_info_test.cc
@@ -65,7 +65,7 @@ size_t AsUint(const T& value) {
return uint_value;
}
-// Make max bistring, e.g. BitString[4095,7,255] for {12,3,8}
+// Make max bitstring, e.g. BitString[4095,15,2047] for {12,4,11}
template <size_t kCount = BitString::kCapacity>
BitString MakeBitStringMax() {
BitString bs{};
@@ -258,60 +258,62 @@ size_t LenForPos() { return BitString::GetBitLengthTotalAtPosition(kPos); }
TEST_F(SubtypeCheckInfoTest, EncodedPathToRoot) {
using StorageType = BitString::StorageType;
- SubtypeCheckInfo io =
+ SubtypeCheckInfo sci =
MakeSubtypeCheckInfo(/*path_to_root*/MakeBitStringMax(),
/*next*/BitStringChar{},
/*overflow*/false,
/*depth*/BitString::kCapacity);
- // 0b11111...000 where MSB == 1, and leading 1s = the maximum bitstring representation.
- EXPECT_EQ(MaxInt<StorageType>(LenForPos()) << (BitSizeOf<StorageType>() - LenForPos()),
- io.GetEncodedPathToRoot());
-
- EXPECT_EQ(MaxInt<StorageType>(LenForPos()) << (BitSizeOf<StorageType>() - LenForPos()),
- io.GetEncodedPathToRootMask());
-
- // 0b11111...000 where MSB == 1, and leading 1s = the maximum bitstring representation.
+ // 0b000...111 where LSB == 1, and trailing 1s = the maximum bitstring representation.
+ EXPECT_EQ(MaxInt<StorageType>(LenForPos()), sci.GetEncodedPathToRoot());
// The rest of this test is written assuming kCapacity == 3 for convenience.
// Please update the test if this changes.
ASSERT_EQ(3u, BitString::kCapacity);
ASSERT_EQ(12u, BitString::kBitSizeAtPosition[0]);
- ASSERT_EQ(3u, BitString::kBitSizeAtPosition[1]);
- ASSERT_EQ(8u, BitString::kBitSizeAtPosition[2]);
+ ASSERT_EQ(4u, BitString::kBitSizeAtPosition[1]);
+ ASSERT_EQ(11u, BitString::kBitSizeAtPosition[2]);
- SubtypeCheckInfo io2 =
+ SubtypeCheckInfo sci2 =
MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
/*overflow*/false,
/*depth*/BitString::kCapacity);
-#define MAKE_ENCODED_PATH(pos0, pos1, pos2) (((pos0) << 3u << 8u << 9u) | ((pos1) << 8u << 9u) | ((pos2) << 9u))
+#define MAKE_ENCODED_PATH(pos0, pos1, pos2) \
+ (((pos0) << 0) | \
+ ((pos1) << BitString::kBitSizeAtPosition[0]) | \
+ ((pos2) << (BitString::kBitSizeAtPosition[0] + BitString::kBitSizeAtPosition[1])))
- EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b111, 0b0), io2.GetEncodedPathToRoot());
- EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b111, 0b11111111), io2.GetEncodedPathToRootMask());
+ EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0),
+ sci2.GetEncodedPathToRoot());
+ EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b11111111111),
+ sci2.GetEncodedPathToRootMask());
- SubtypeCheckInfo io3 =
+ SubtypeCheckInfo sci3 =
MakeSubtypeCheckInfoUnchecked(MakeBitStringMax<2u>(),
/*overflow*/false,
/*depth*/BitString::kCapacity - 1u);
- EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b111, 0b0), io3.GetEncodedPathToRoot());
- EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b111, 0b0), io3.GetEncodedPathToRootMask());
+ EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0),
+ sci3.GetEncodedPathToRoot());
+ EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b1111, 0b0),
+ sci3.GetEncodedPathToRootMask());
- SubtypeCheckInfo io4 =
+ SubtypeCheckInfo sci4 =
MakeSubtypeCheckInfoUnchecked(MakeBitString({0b1010101u}),
/*overflow*/false,
/*depth*/BitString::kCapacity - 2u);
- EXPECT_EQ(MAKE_ENCODED_PATH(0b1010101u, 0b000, 0b0), io4.GetEncodedPathToRoot());
- EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b000, 0b0), io4.GetEncodedPathToRootMask());
+ EXPECT_EQ(MAKE_ENCODED_PATH(0b1010101u, 0b0000, 0b0), sci4.GetEncodedPathToRoot());
+ EXPECT_EQ(MAKE_ENCODED_PATH(MaxInt<BitString::StorageType>(12), 0b0000, 0b0),
+ sci4.GetEncodedPathToRootMask());
}
TEST_F(SubtypeCheckInfoTest, NewForRoot) {
- SubtypeCheckInfo io = SubtypeCheckInfo::CreateRoot();
- EXPECT_EQ(SubtypeCheckInfo::kAssigned, io.GetState()); // Root is always assigned.
- EXPECT_EQ(0u, GetPathToRoot(io).Length()); // Root's path length is 0.
- EXPECT_TRUE(HasNext(io)); // Root always has a "Next".
- EXPECT_EQ(MakeBitStringChar(1u), io.GetNext()); // Next>=1 to disambiguate from Uninitialized.
+ SubtypeCheckInfo sci = SubtypeCheckInfo::CreateRoot();
+ EXPECT_EQ(SubtypeCheckInfo::kAssigned, sci.GetState()); // Root is always assigned.
+ EXPECT_EQ(0u, GetPathToRoot(sci).Length()); // Root's path length is 0.
+ EXPECT_TRUE(HasNext(sci)); // Root always has a "Next".
+ EXPECT_EQ(MakeBitStringChar(1u), sci.GetNext()); // Next>=1 to disambiguate from Uninitialized.
}
TEST_F(SubtypeCheckInfoTest, CopyCleared) {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 9f4e5441a5..46cb751b93 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2743,6 +2743,199 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
return result;
}
+jobjectArray Thread::CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
+ // This code allocates. Do not allow it to operate with a pending exception.
+ if (IsExceptionPending()) {
+ return nullptr;
+ }
+
+ // If flip_function is not null, it means we have run a checkpoint
+ // before the thread wakes up to execute the flip function and the
+ // thread roots haven't been forwarded. So the following access to
+ // the roots (locks or methods in the frames) would be bad. Run it
+ // here. TODO: clean up.
+ // Note: copied from DumpJavaStack.
+ {
+ Thread* this_thread = const_cast<Thread*>(this);
+ Closure* flip_func = this_thread->GetFlipFunction();
+ if (flip_func != nullptr) {
+ flip_func->Run(this_thread);
+ }
+ }
+
+ class CollectFramesAndLocksStackVisitor : public MonitorObjectsStackVisitor {
+ public:
+ CollectFramesAndLocksStackVisitor(const ScopedObjectAccessAlreadyRunnable& soaa_in,
+ Thread* self,
+ Context* context)
+ : MonitorObjectsStackVisitor(self, context),
+ wait_jobject_(soaa_in.Env(), nullptr),
+ block_jobject_(soaa_in.Env(), nullptr),
+ soaa_(soaa_in) {}
+
+ protected:
+ VisitMethodResult StartMethod(ArtMethod* m, size_t frame_nr ATTRIBUTE_UNUSED)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ObjPtr<mirror::StackTraceElement> obj = CreateStackTraceElement(
+ soaa_, m, GetDexPc(/* abort on error */ false));
+ if (obj == nullptr) {
+ return VisitMethodResult::kEndStackWalk;
+ }
+ stack_trace_elements_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj.Ptr()));
+ return VisitMethodResult::kContinueMethod;
+ }
+
+ VisitMethodResult EndMethod(ArtMethod* m ATTRIBUTE_UNUSED) OVERRIDE {
+ lock_objects_.push_back({});
+ lock_objects_[lock_objects_.size() - 1].swap(frame_lock_objects_);
+
+ DCHECK_EQ(lock_objects_.size(), stack_trace_elements_.size());
+
+ return VisitMethodResult::kContinueMethod;
+ }
+
+ void VisitWaitingObject(mirror::Object* obj, ThreadState state ATTRIBUTE_UNUSED)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
+ }
+ void VisitSleepingObject(mirror::Object* obj)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ wait_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
+ }
+ void VisitBlockedOnObject(mirror::Object* obj,
+ ThreadState state ATTRIBUTE_UNUSED,
+ uint32_t owner_tid ATTRIBUTE_UNUSED)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ block_jobject_.reset(soaa_.AddLocalReference<jobject>(obj));
+ }
+ void VisitLockedObject(mirror::Object* obj)
+ OVERRIDE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ frame_lock_objects_.emplace_back(soaa_.Env(), soaa_.AddLocalReference<jobject>(obj));
+ }
+
+ public:
+ std::vector<ScopedLocalRef<jobject>> stack_trace_elements_;
+ ScopedLocalRef<jobject> wait_jobject_;
+ ScopedLocalRef<jobject> block_jobject_;
+ std::vector<std::vector<ScopedLocalRef<jobject>>> lock_objects_;
+
+ private:
+ const ScopedObjectAccessAlreadyRunnable& soaa_;
+
+ std::vector<ScopedLocalRef<jobject>> frame_lock_objects_;
+ };
+
+ std::unique_ptr<Context> context(Context::Create());
+ CollectFramesAndLocksStackVisitor dumper(soa, const_cast<Thread*>(this), context.get());
+ dumper.WalkStack();
+
+ // There should not be a pending exception. Otherwise, return with it pending.
+ if (IsExceptionPending()) {
+ return nullptr;
+ }
+
+ // Now go and create Java arrays.
+
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+ StackHandleScope<6> hs(soa.Self());
+ mirror::Class* aste_array_class = class_linker->FindClass(
+ soa.Self(),
+ "[Ldalvik/system/AnnotatedStackTraceElement;",
+ ScopedNullHandle<mirror::ClassLoader>());
+ if (aste_array_class == nullptr) {
+ return nullptr;
+ }
+ Handle<mirror::Class> h_aste_array_class(hs.NewHandle<mirror::Class>(aste_array_class));
+
+ mirror::Class* o_array_class = class_linker->FindClass(soa.Self(),
+ "[Ljava/lang/Object;",
+ ScopedNullHandle<mirror::ClassLoader>());
+ if (o_array_class == nullptr) {
+ // This should not fail in a healthy runtime.
+ soa.Self()->AssertPendingException();
+ return nullptr;
+ }
+ Handle<mirror::Class> h_o_array_class(hs.NewHandle<mirror::Class>(o_array_class));
+
+ Handle<mirror::Class> h_aste_class(hs.NewHandle<mirror::Class>(
+ h_aste_array_class->GetComponentType()));
+ ArtField* stack_trace_element_field = h_aste_class->FindField(
+ soa.Self(), h_aste_class.Get(), "stackTraceElement", "Ljava/lang/StackTraceElement;");
+ DCHECK(stack_trace_element_field != nullptr);
+ ArtField* held_locks_field = h_aste_class->FindField(
+ soa.Self(), h_aste_class.Get(), "heldLocks", "[Ljava/lang/Object;");
+ DCHECK(held_locks_field != nullptr);
+ ArtField* blocked_on_field = h_aste_class->FindField(
+ soa.Self(), h_aste_class.Get(), "blockedOn", "Ljava/lang/Object;");
+ DCHECK(blocked_on_field != nullptr);
+
+ size_t length = dumper.stack_trace_elements_.size();
+ ObjPtr<mirror::ObjectArray<mirror::Object>> array =
+ mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), aste_array_class, length);
+ if (array == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ ScopedLocalRef<jobjectArray> result(soa.Env(), soa.Env()->AddLocalReference<jobjectArray>(array));
+
+ MutableHandle<mirror::Object> handle(hs.NewHandle<mirror::Object>(nullptr));
+ MutableHandle<mirror::ObjectArray<mirror::Object>> handle2(
+ hs.NewHandle<mirror::ObjectArray<mirror::Object>>(nullptr));
+ for (size_t i = 0; i != length; ++i) {
+ handle.Assign(h_aste_class->AllocObject(soa.Self()));
+ if (handle == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+
+ // Set stack trace element.
+ stack_trace_element_field->SetObject<false>(
+ handle.Get(), soa.Decode<mirror::Object>(dumper.stack_trace_elements_[i].get()));
+
+ // Create locked-on array.
+ if (!dumper.lock_objects_[i].empty()) {
+ handle2.Assign(mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(),
+ h_o_array_class.Get(),
+ dumper.lock_objects_[i].size()));
+ if (handle2 == nullptr) {
+ soa.Self()->AssertPendingOOMException();
+ return nullptr;
+ }
+ int32_t j = 0;
+ for (auto& scoped_local : dumper.lock_objects_[i]) {
+ if (scoped_local == nullptr) {
+ continue;
+ }
+ handle2->Set(j, soa.Decode<mirror::Object>(scoped_local.get()));
+ DCHECK(!soa.Self()->IsExceptionPending());
+ j++;
+ }
+ held_locks_field->SetObject<false>(handle.Get(), handle2.Get());
+ }
+
+ // Set blocked-on object.
+ if (i == 0) {
+ if (dumper.block_jobject_ != nullptr) {
+ blocked_on_field->SetObject<false>(
+ handle.Get(), soa.Decode<mirror::Object>(dumper.block_jobject_.get()));
+ }
+ }
+
+ ScopedLocalRef<jobject> elem(soa.Env(), soa.AddLocalReference<jobject>(handle.Get()));
+ soa.Env()->SetObjectArrayElement(result.get(), i, elem.get());
+ DCHECK(!soa.Self()->IsExceptionPending());
+ }
+
+ return result.release();
+}
+
void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
diff --git a/runtime/thread.h b/runtime/thread.h
index 1e89887c3e..426d27d1b4 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -599,6 +599,9 @@ class Thread {
jobjectArray output_array = nullptr, int* stack_depth = nullptr)
REQUIRES_SHARED(Locks::mutator_lock_);
+ jobjectArray CreateAnnotatedStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
bool HasDebuggerShadowFrames() const {
return tlsPtr_.frame_id_to_shadow_frame != nullptr;
}
diff --git a/runtime/ti/agent.cc b/runtime/ti/agent.cc
index 62bdde6790..15c514e593 100644
--- a/runtime/ti/agent.cc
+++ b/runtime/ti/agent.cc
@@ -117,15 +117,18 @@ std::unique_ptr<Agent> AgentSpec::DoDlOpen(JNIEnv* env,
: JavaVMExt::GetLibrarySearchPath(env, class_loader));
bool needs_native_bridge = false;
+ std::string nativeloader_error_msg;
void* dlopen_handle = android::OpenNativeLibrary(env,
Runtime::Current()->GetTargetSdkVersion(),
name_.c_str(),
class_loader,
library_path.get(),
&needs_native_bridge,
- error_msg);
+ &nativeloader_error_msg);
if (dlopen_handle == nullptr) {
- *error_msg = StringPrintf("Unable to dlopen %s: %s", name_.c_str(), dlerror());
+ *error_msg = StringPrintf("Unable to dlopen %s: %s",
+ name_.c_str(),
+ nativeloader_error_msg.c_str());
*error = kLoadingError;
return nullptr;
}
diff --git a/runtime/utils.cc b/runtime/utils.cc
index bd4175f5fd..79ddcb9bff 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -26,10 +26,10 @@
#include <memory>
+#include "android-base/file.h"
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
-#include "base/file_utils.h"
#include "dex/dex_file-inl.h"
#include "os.h"
#include "utf-inl.h"
@@ -46,6 +46,7 @@
namespace art {
+using android::base::ReadFileToString;
using android::base::StringAppendF;
using android::base::StringPrintf;
@@ -63,6 +64,7 @@ pid_t GetTid() {
std::string GetThreadName(pid_t tid) {
std::string result;
+ // TODO: make this less Linux-specific.
if (ReadFileToString(StringPrintf("/proc/self/task/%d/comm", tid), &result)) {
result.resize(result.size() - 1); // Lose the trailing '\n'.
} else {
@@ -611,6 +613,7 @@ void SetThreadName(const char* thread_name) {
void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu) {
*utime = *stime = *task_cpu = 0;
std::string stats;
+ // TODO: make this less Linux-specific.
if (!ReadFileToString(StringPrintf("/proc/self/task/%d/stat", tid), &stats)) {
return;
}
diff --git a/runtime/utils.h b/runtime/utils.h
index 789498ce09..7402c12280 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -289,6 +289,20 @@ static inline void CheckedCall(const Func& function, const char* what, Args... a
}
}
+// Hash bytes using a relatively fast hash.
+static inline size_t HashBytes(const uint8_t* data, size_t len) {
+ size_t hash = 0x811c9dc5;
+ for (uint32_t i = 0; i < len; ++i) {
+ hash = (hash * 16777619) ^ data[i];
+ }
+ hash += hash << 13;
+ hash ^= hash >> 7;
+ hash += hash << 3;
+ hash ^= hash >> 17;
+ hash += hash << 5;
+ return hash;
+}
+
} // namespace art
#endif // ART_RUNTIME_UTILS_H_
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index a53556ffcc..cab91dfe76 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -19,15 +19,18 @@
#include <sys/mman.h> // For the PROT_* and MAP_* constants.
#include <memory>
+#include <unordered_set>
#include <android-base/logging.h>
#include "base/bit_utils.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "dex_to_dex_decompiler.h"
+#include "quicken_info.h"
namespace art {
@@ -57,11 +60,14 @@ VdexFile::Header::Header(uint32_t number_of_dex_files,
DCHECK(IsVersionValid());
}
-std::unique_ptr<VdexFile> VdexFile::Open(const std::string& vdex_filename,
- bool writable,
- bool low_4gb,
- bool unquicken,
- std::string* error_msg) {
+std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
+ size_t mmap_size,
+ bool mmap_reuse,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ bool unquicken,
+ std::string* error_msg) {
if (!OS::FileExists(vdex_filename.c_str())) {
*error_msg = "File " + vdex_filename + " does not exist.";
return nullptr;
@@ -85,23 +91,43 @@ std::unique_ptr<VdexFile> VdexFile::Open(const std::string& vdex_filename,
return nullptr;
}
- return Open(vdex_file->Fd(), vdex_length, vdex_filename, writable, low_4gb, unquicken, error_msg);
+ return OpenAtAddress(mmap_addr,
+ mmap_size,
+ mmap_reuse,
+ vdex_file->Fd(),
+ vdex_length,
+ vdex_filename,
+ writable,
+ low_4gb,
+ unquicken,
+ error_msg);
}
-std::unique_ptr<VdexFile> VdexFile::Open(int file_fd,
- size_t vdex_length,
- const std::string& vdex_filename,
- bool writable,
- bool low_4gb,
- bool unquicken,
- std::string* error_msg) {
- std::unique_ptr<MemMap> mmap(MemMap::MapFile(
+std::unique_ptr<VdexFile> VdexFile::OpenAtAddress(uint8_t* mmap_addr,
+ size_t mmap_size,
+ bool mmap_reuse,
+ int file_fd,
+ size_t vdex_length,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ bool unquicken,
+ std::string* error_msg) {
+ if (mmap_addr != nullptr && mmap_size < vdex_length) {
+ LOG(WARNING) << "Insufficient pre-allocated space to mmap vdex.";
+ mmap_addr = nullptr;
+ mmap_reuse = false;
+ }
+ CHECK(!mmap_reuse || mmap_addr != nullptr);
+ std::unique_ptr<MemMap> mmap(MemMap::MapFileAtAddress(
+ mmap_addr,
vdex_length,
(writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
unquicken ? MAP_PRIVATE : MAP_SHARED,
file_fd,
0 /* start offset */,
low_4gb,
+ mmap_reuse,
vdex_filename.c_str(),
error_msg));
if (mmap == nullptr) {
@@ -120,9 +146,8 @@ std::unique_ptr<VdexFile> VdexFile::Open(int file_fd,
if (!vdex->OpenAllDexFiles(&unique_ptr_dex_files, error_msg)) {
return nullptr;
}
- Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
- vdex->GetQuickeningInfo(),
- /* decompile_return_instruction */ false);
+ vdex->Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
+ /* decompile_return_instruction */ false);
// Update the quickening info size to pretend there isn't any.
reinterpret_cast<Header*>(vdex->mmap_->Begin())->quickening_info_size_ = 0;
}
@@ -135,19 +160,21 @@ const uint8_t* VdexFile::GetNextDexFileData(const uint8_t* cursor) const {
DCHECK(cursor == nullptr || (cursor > Begin() && cursor <= End()));
if (cursor == nullptr) {
// Beginning of the iteration, return the first dex file if there is one.
- return HasDexSection() ? DexBegin() : nullptr;
+ return HasDexSection() ? DexBegin() + sizeof(QuickeningTableOffsetType) : nullptr;
} else {
// Fetch the next dex file. Return null if there is none.
const uint8_t* data = cursor + reinterpret_cast<const DexFile::Header*>(cursor)->file_size_;
// Dex files are required to be 4 byte aligned. the OatWriter makes sure they are, see
// OatWriter::SeekToDexFiles.
data = AlignUp(data, 4);
- return (data == DexEnd()) ? nullptr : data;
+
+ return (data == DexEnd()) ? nullptr : data + sizeof(QuickeningTableOffsetType);
}
}
bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_files,
std::string* error_msg) {
+ const ArtDexFileLoader dex_file_loader;
size_t i = 0;
for (const uint8_t* dex_file_start = GetNextDexFileData(nullptr);
dex_file_start != nullptr;
@@ -156,14 +183,14 @@ bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_
// TODO: Supply the location information for a vdex file.
static constexpr char kVdexLocation[] = "";
std::string location = DexFileLoader::GetMultiDexLocation(i, kVdexLocation);
- std::unique_ptr<const DexFile> dex(DexFileLoader::Open(dex_file_start,
- size,
- location,
- GetLocationChecksum(i),
- nullptr /*oat_dex_file*/,
- false /*verify*/,
- false /*verify_checksum*/,
- error_msg));
+ std::unique_ptr<const DexFile> dex(dex_file_loader.Open(dex_file_start,
+ size,
+ location,
+ GetLocationChecksum(i),
+ nullptr /*oat_dex_file*/,
+ false /*verify*/,
+ false /*verify_checksum*/,
+ error_msg));
if (dex == nullptr) {
return false;
}
@@ -172,69 +199,75 @@ bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_
return true;
}
-void VdexFile::Unquicken(const std::vector<const DexFile*>& dex_files,
- ArrayRef<const uint8_t> quickening_info,
- bool decompile_return_instruction) {
- if (quickening_info.size() == 0 && !decompile_return_instruction) {
- // Bail early if there is no quickening info and no need to decompile
- // RETURN_VOID_NO_BARRIER instructions to RETURN_VOID instructions.
- return;
- }
-
- for (uint32_t i = 0; i < dex_files.size(); ++i) {
- UnquickenDexFile(*dex_files[i], quickening_info, decompile_return_instruction);
+void VdexFile::Unquicken(const std::vector<const DexFile*>& target_dex_files,
+ bool decompile_return_instruction) const {
+ const uint8_t* source_dex = GetNextDexFileData(nullptr);
+ for (const DexFile* target_dex : target_dex_files) {
+ UnquickenDexFile(*target_dex, source_dex, decompile_return_instruction);
+ source_dex = GetNextDexFileData(source_dex);
}
+ DCHECK(source_dex == nullptr);
}
-typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
-
-static uint32_t GetDebugInfoOffsetInternal(const DexFile& dex_file,
- uint32_t offset_in_code_item,
- const ArrayRef<const uint8_t>& quickening_info) {
- if (quickening_info.size() == 0) {
- // No quickening info: offset is the right one, return it.
- return offset_in_code_item;
- }
- uint32_t quickening_offset = offset_in_code_item - dex_file.Size();
- return *reinterpret_cast<const unaligned_uint32_t*>(quickening_info.data() + quickening_offset);
+uint32_t VdexFile::GetQuickeningInfoTableOffset(const uint8_t* source_dex_begin) const {
+ DCHECK_GE(source_dex_begin, DexBegin());
+ DCHECK_LT(source_dex_begin, DexEnd());
+ return reinterpret_cast<const QuickeningTableOffsetType*>(source_dex_begin)[-1];
}
-static uint32_t GetQuickeningInfoOffsetFrom(const DexFile& dex_file,
- uint32_t offset_in_code_item,
- const ArrayRef<const uint8_t>& quickening_info) {
- if (offset_in_code_item < dex_file.Size()) {
- return VdexFile::kNoQuickeningInfoOffset;
- }
- if (quickening_info.size() == 0) {
- // No quickening info.
- return VdexFile::kNoQuickeningInfoOffset;
- }
- uint32_t quickening_offset = offset_in_code_item - dex_file.Size();
+QuickenInfoOffsetTableAccessor VdexFile::GetQuickenInfoOffsetTable(
+ const uint8_t* source_dex_begin,
+ uint32_t num_method_ids,
+ const ArrayRef<const uint8_t>& quickening_info) const {
+ // The offset a is in preheader right before the dex file.
+ const uint32_t offset = GetQuickeningInfoTableOffset(source_dex_begin);
+ const uint8_t* data_ptr = quickening_info.data() + offset;
+ return QuickenInfoOffsetTableAccessor(data_ptr, num_method_ids);
+}
- // Add 2 * sizeof(uint32_t) for the debug info offset and the data offset.
- CHECK_LE(quickening_offset + 2 * sizeof(uint32_t), quickening_info.size());
- return *reinterpret_cast<const unaligned_uint32_t*>(
- quickening_info.data() + quickening_offset + sizeof(uint32_t));
+QuickenInfoOffsetTableAccessor VdexFile::GetQuickenInfoOffsetTable(
+ const DexFile& dex_file,
+ const ArrayRef<const uint8_t>& quickening_info) const {
+ return GetQuickenInfoOffsetTable(dex_file.Begin(), dex_file.NumMethodIds(), quickening_info);
}
static ArrayRef<const uint8_t> GetQuickeningInfoAt(const ArrayRef<const uint8_t>& quickening_info,
uint32_t quickening_offset) {
- return (quickening_offset == VdexFile::kNoQuickeningInfoOffset)
- ? ArrayRef<const uint8_t>(nullptr, 0)
- : quickening_info.SubArray(
- quickening_offset + sizeof(uint32_t),
- *reinterpret_cast<const unaligned_uint32_t*>(
- quickening_info.data() + quickening_offset));
+ ArrayRef<const uint8_t> remaining = quickening_info.SubArray(quickening_offset);
+ return remaining.SubArray(0u, QuickenInfoTable::SizeInBytes(remaining));
+}
+
+static uint32_t GetQuickeningInfoOffset(const QuickenInfoOffsetTableAccessor& table,
+ uint32_t dex_method_index,
+ const ArrayRef<const uint8_t>& quickening_info) {
+ DCHECK(!quickening_info.empty());
+ uint32_t remainder;
+ uint32_t offset = table.ElementOffset(dex_method_index, &remainder);
+ // Decode the sizes for the remainder offsets (not covered by the table).
+ while (remainder != 0) {
+ offset += GetQuickeningInfoAt(quickening_info, offset).size();
+ --remainder;
+ }
+ return offset;
}
void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
- ArrayRef<const uint8_t> quickening_info,
- bool decompile_return_instruction) {
+ const DexFile& source_dex_file,
+ bool decompile_return_instruction) const {
+ UnquickenDexFile(target_dex_file, source_dex_file.Begin(), decompile_return_instruction);
+}
+
+void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
+ const uint8_t* source_dex_begin,
+ bool decompile_return_instruction) const {
+ ArrayRef<const uint8_t> quickening_info = GetQuickeningInfo();
if (quickening_info.size() == 0 && !decompile_return_instruction) {
// Bail early if there is no quickening info and no need to decompile
// RETURN_VOID_NO_BARRIER instructions to RETURN_VOID instructions.
return;
}
+ // Make sure to not unquicken the same code item multiple times.
+ std::unordered_set<const DexFile::CodeItem*> unquickened_code_item;
for (uint32_t i = 0; i < target_dex_file.NumClassDefs(); ++i) {
const DexFile::ClassDef& class_def = target_dex_file.GetClassDef(i);
const uint8_t* class_data = target_dex_file.GetClassData(class_def);
@@ -244,19 +277,24 @@ void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
class_it.Next()) {
if (class_it.IsAtMethod() && class_it.GetMethodCodeItem() != nullptr) {
const DexFile::CodeItem* code_item = class_it.GetMethodCodeItem();
- uint32_t quickening_offset = GetQuickeningInfoOffsetFrom(
- target_dex_file, code_item->debug_info_off_, quickening_info);
- if (quickening_offset != VdexFile::kNoQuickeningInfoOffset) {
- // If we have quickening data, put back the original debug_info_off.
- const_cast<DexFile::CodeItem*>(code_item)->SetDebugInfoOffset(
- GetDebugInfoOffsetInternal(target_dex_file,
- code_item->debug_info_off_,
- quickening_info));
+ if (!unquickened_code_item.emplace(code_item).second) {
+ // Already unquickened this code item, do not do it again.
+ continue;
+ }
+ ArrayRef<const uint8_t> quicken_data;
+ if (!quickening_info.empty()) {
+ const uint32_t quickening_offset = GetQuickeningInfoOffset(
+ GetQuickenInfoOffsetTable(source_dex_begin,
+ target_dex_file.NumMethodIds(),
+ quickening_info),
+ class_it.GetMemberIndex(),
+ quickening_info);
+ quicken_data = GetQuickeningInfoAt(quickening_info, quickening_offset);
}
optimizer::ArtDecompileDEX(
target_dex_file,
*code_item,
- GetQuickeningInfoAt(quickening_info, quickening_offset),
+ quicken_data,
decompile_return_instruction);
}
}
@@ -264,25 +302,17 @@ void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
}
}
-uint32_t VdexFile::GetDebugInfoOffset(const DexFile& dex_file, uint32_t offset_in_code_item) const {
- return GetDebugInfoOffsetInternal(dex_file, offset_in_code_item, GetQuickeningInfo());
-}
-
-const uint8_t* VdexFile::GetQuickenedInfoOf(const DexFile& dex_file,
- uint32_t code_item_offset) const {
+ArrayRef<const uint8_t> VdexFile::GetQuickenedInfoOf(const DexFile& dex_file,
+ uint32_t dex_method_idx) const {
ArrayRef<const uint8_t> quickening_info = GetQuickeningInfo();
- uint32_t quickening_offset = GetQuickeningInfoOffsetFrom(
- dex_file, dex_file.GetCodeItem(code_item_offset)->debug_info_off_, quickening_info);
-
- return GetQuickeningInfoAt(quickening_info, quickening_offset).data();
-}
-
-bool VdexFile::CanEncodeQuickenedData(const DexFile& dex_file) {
- // We are going to use the debug_info_off_ to signal there is
- // quickened data, by putting a value greater than dex_file.Size(). So
- // make sure we have some room in the offset by checking that we have at least
- // half of the range of a uint32_t.
- return dex_file.Size() <= (std::numeric_limits<uint32_t>::max() >> 1);
+ if (quickening_info.empty()) {
+ return ArrayRef<const uint8_t>();
+ }
+ const uint32_t quickening_offset = GetQuickeningInfoOffset(
+ GetQuickenInfoOffsetTable(dex_file, quickening_info),
+ dex_method_idx,
+ quickening_info);
+ return GetQuickeningInfoAt(quickening_info, quickening_offset);
}
} // namespace art
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 2d9fcab59c..4e45128420 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -24,6 +24,7 @@
#include "base/macros.h"
#include "mem_map.h"
#include "os.h"
+#include "quicken_info.h"
namespace art {
@@ -35,18 +36,17 @@ class DexFile;
// File format:
// VdexFile::Header fixed-length header
//
-// DEX[0] array of the input DEX files
-// DEX[1] the bytecode may have been quickened
+// quicken_table_off[0] offset into QuickeningInfo section for offset table for DEX[0].
+// DEX[0] array of the input DEX files, the bytecode may have been quickened.
+// quicken_table_off[1]
+// DEX[1]
// ...
// DEX[D]
// VerifierDeps
// uint8[D][] verification dependencies
// QuickeningInfo
// uint8[D][] quickening data
-// unaligned_uint32_t[D][2][] table of offsets pair:
-// uint32_t[0] contains original CodeItem::debug_info_off_
-// uint32_t[1] contains quickening data offset from the start
-// of QuickeningInfo
+// uint32[D][] quickening data offset tables
class VdexFile {
public:
@@ -84,8 +84,8 @@ class VdexFile {
private:
static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' };
- // Last update: Lookup-friendly encoding for quickening info.
- static constexpr uint8_t kVdexVersion[] = { '0', '1', '1', '\0' };
+ // Last update: Use efficient encoding for compact dex code item fields
+ static constexpr uint8_t kVdexVersion[] = { '0', '1', '5', '\0' };
uint8_t magic_[4];
uint8_t version_[4];
@@ -98,15 +98,49 @@ class VdexFile {
};
typedef uint32_t VdexChecksum;
+ using QuickeningTableOffsetType = uint32_t;
explicit VdexFile(MemMap* mmap) : mmap_(mmap) {}
// Returns nullptr if the vdex file cannot be opened or is not valid.
+ // The mmap_* parameters can be left empty (nullptr/0/false) to allocate at random address.
+ static std::unique_ptr<VdexFile> OpenAtAddress(uint8_t* mmap_addr,
+ size_t mmap_size,
+ bool mmap_reuse,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ bool unquicken,
+ std::string* error_msg);
+
+ // Returns nullptr if the vdex file cannot be opened or is not valid.
+ // The mmap_* parameters can be left empty (nullptr/0/false) to allocate at random address.
+ static std::unique_ptr<VdexFile> OpenAtAddress(uint8_t* mmap_addr,
+ size_t mmap_size,
+ bool mmap_reuse,
+ int file_fd,
+ size_t vdex_length,
+ const std::string& vdex_filename,
+ bool writable,
+ bool low_4gb,
+ bool unquicken,
+ std::string* error_msg);
+
+ // Returns nullptr if the vdex file cannot be opened or is not valid.
static std::unique_ptr<VdexFile> Open(const std::string& vdex_filename,
bool writable,
bool low_4gb,
bool unquicken,
- std::string* error_msg);
+ std::string* error_msg) {
+ return OpenAtAddress(nullptr,
+ 0,
+ false,
+ vdex_filename,
+ writable,
+ low_4gb,
+ unquicken,
+ error_msg);
+ }
// Returns nullptr if the vdex file cannot be opened or is not valid.
static std::unique_ptr<VdexFile> Open(int file_fd,
@@ -115,7 +149,18 @@ class VdexFile {
bool writable,
bool low_4gb,
bool unquicken,
- std::string* error_msg);
+ std::string* error_msg) {
+ return OpenAtAddress(nullptr,
+ 0,
+ false,
+ file_fd,
+ vdex_length,
+ vdex_filename,
+ writable,
+ low_4gb,
+ unquicken,
+ error_msg);
+ }
const uint8_t* Begin() const { return mmap_->Begin(); }
const uint8_t* End() const { return mmap_->End(); }
@@ -160,29 +205,42 @@ class VdexFile {
// `decompile_return_instruction` controls if RETURN_VOID_BARRIER instructions are
// decompiled to RETURN_VOID instructions using the slower ClassDataItemIterator
// instead of the faster QuickeningInfoIterator.
- static void Unquicken(const std::vector<const DexFile*>& dex_files,
- ArrayRef<const uint8_t> quickening_info,
- bool decompile_return_instruction);
+ // Always unquickens using the vdex dex files as the source for quicken tables.
+ void Unquicken(const std::vector<const DexFile*>& target_dex_files,
+ bool decompile_return_instruction) const;
// Fully unquicken `target_dex_file` based on `quickening_info`.
- static void UnquickenDexFile(const DexFile& target_dex_file,
- ArrayRef<const uint8_t> quickening_info,
- bool decompile_return_instruction);
+ void UnquickenDexFile(const DexFile& target_dex_file,
+ const DexFile& source_dex_file,
+ bool decompile_return_instruction) const;
- // Return the quickening info of the given code item.
- const uint8_t* GetQuickenedInfoOf(const DexFile& dex_file, uint32_t code_item_offset) const;
+ // Return the quickening info of a given method index (or null if it's empty).
+ ArrayRef<const uint8_t> GetQuickenedInfoOf(const DexFile& dex_file,
+ uint32_t dex_method_idx) const;
- uint32_t GetDebugInfoOffset(const DexFile& dex_file, uint32_t offset_in_code_item) const;
+ private:
+ uint32_t GetQuickeningInfoTableOffset(const uint8_t* source_dex_begin) const;
- static bool CanEncodeQuickenedData(const DexFile& dex_file);
+ // Source dex must be in the vdex file.
+ void UnquickenDexFile(const DexFile& target_dex_file,
+ const uint8_t* source_dex_begin,
+ bool decompile_return_instruction) const;
- static constexpr uint32_t kNoQuickeningInfoOffset = -1;
+ QuickenInfoOffsetTableAccessor GetQuickenInfoOffsetTable(
+ const DexFile& dex_file,
+ const ArrayRef<const uint8_t>& quickening_info) const;
+
+ QuickenInfoOffsetTableAccessor GetQuickenInfoOffsetTable(
+ const uint8_t* source_dex_begin,
+ uint32_t num_method_ids,
+ const ArrayRef<const uint8_t>& quickening_info) const;
- private:
bool HasDexSection() const {
return GetHeader().GetDexSize() != 0;
}
+ bool ContainsDexFile(const DexFile& dex_file) const;
+
const uint8_t* DexBegin() const {
return Begin() + sizeof(Header) + GetHeader().GetSizeOfChecksumsSection();
}
@@ -191,8 +249,6 @@ class VdexFile {
return DexBegin() + GetHeader().GetDexSize();
}
- uint32_t GetDexFileIndex(const DexFile& dex_file) const;
-
std::unique_ptr<MemMap> mmap_;
DISALLOW_COPY_AND_ASSIGN(VdexFile);
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 2183b60b1e..afb3224944 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -569,7 +569,7 @@ MethodVerifier::MethodVerifier(Thread* self,
dex_cache_(dex_cache),
class_loader_(class_loader),
class_def_(class_def),
- code_item_accessor_(dex_file, code_item),
+ code_item_accessor_(*dex_file, code_item),
declaring_class_(nullptr),
interesting_dex_pc_(-1),
monitor_enter_dex_pcs_(nullptr),
@@ -1685,10 +1685,15 @@ void MethodVerifier::Dump(VariableIndentationOutputStream* vios) {
for (const DexInstructionPcPair& inst : code_item_accessor_) {
const size_t dex_pc = inst.DexPc();
- RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
- if (reg_line != nullptr) {
- vios->Stream() << reg_line->Dump(this) << "\n";
+
+ // Might be asked to dump before the table is initialized.
+ if (reg_table_.IsInitialized()) {
+ RegisterLine* reg_line = reg_table_.GetLine(dex_pc);
+ if (reg_line != nullptr) {
+ vios->Stream() << reg_line->Dump(this) << "\n";
+ }
}
+
vios->Stream()
<< StringPrintf("0x%04zx", dex_pc) << ": " << GetInstructionFlags(dex_pc).ToString() << " ";
const bool kDumpHexOfInstruction = false;
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index cadf4eb0ba..26c598f224 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -77,6 +77,10 @@ class PcToRegisterLineTable {
void Init(RegisterTrackingMode mode, InstructionFlags* flags, uint32_t insns_size,
uint16_t registers_size, MethodVerifier* verifier);
+ bool IsInitialized() const {
+ return !register_lines_.empty();
+ }
+
RegisterLine* GetLine(size_t idx) const {
return register_lines_[idx].get();
}
diff --git a/test/004-NativeAllocations/src-art/Main.java b/test/004-NativeAllocations/src-art/Main.java
index 8712755125..6b1c48d286 100644
--- a/test/004-NativeAllocations/src-art/Main.java
+++ b/test/004-NativeAllocations/src-art/Main.java
@@ -14,82 +14,109 @@
* limitations under the License.
*/
-import java.lang.reflect.*;
import java.lang.Runtime;
+import java.lang.ref.ReferenceQueue;
+import java.lang.ref.PhantomReference;
import dalvik.system.VMRuntime;
public class Main {
- static Object nativeLock = new Object();
static Object deadlockLock = new Object();
- static boolean aboutToDeadlockLock = false;
- static int nativeBytes = 0;
- static Object runtime;
- static Method register_native_allocation;
- static Method register_native_free;
- static long maxMem = 0;
-
- static class NativeAllocation {
- private int bytes;
-
- NativeAllocation(int bytes, boolean testingDeadlock) throws Exception {
- this.bytes = bytes;
- register_native_allocation.invoke(runtime, bytes);
-
- // Register native allocation can only provide guarantees bounding
- // the maximum outstanding allocations if finalizers don't time
- // out. In case finalizers have timed out, wait longer for them
- // now to complete so we can test the guarantees.
- if (!testingDeadlock) {
- VMRuntime.runFinalization(0);
- }
+ static VMRuntime runtime = VMRuntime.getRuntime();
+ static volatile boolean aboutToDeadlock = false;
- synchronized (nativeLock) {
- if (!testingDeadlock) {
- nativeBytes += bytes;
- if (nativeBytes > 2 * maxMem) {
- throw new OutOfMemoryError();
- }
- }
- }
- }
+ // Save ref as a static field to ensure it doesn't get GC'd before the
+ // referent is enqueued.
+ static PhantomReference ref = null;
+ static class DeadlockingFinalizer {
protected void finalize() throws Exception {
- synchronized (nativeLock) {
- nativeBytes -= bytes;
- }
- register_native_free.invoke(runtime, bytes);
- aboutToDeadlockLock = true;
- synchronized (deadlockLock) {
- }
+ aboutToDeadlock = true;
+ synchronized (deadlockLock) { }
+ }
+ }
+
+ private static void allocateDeadlockingFinalizer() {
+ new DeadlockingFinalizer();
+ }
+
+ public static PhantomReference allocPhantom(ReferenceQueue<Object> queue) {
+ return new PhantomReference(new Object(), queue);
+ }
+
+ // Test that calling registerNativeAllocation triggers a GC eventually
+ // after a substantial number of registered native bytes.
+ private static void checkRegisterNativeAllocation() throws Exception {
+ long maxMem = Runtime.getRuntime().maxMemory();
+ int size = (int)(maxMem / 32);
+ int allocationCount = 256;
+ int maxExpectedGcDurationMs = 2000;
+
+ ReferenceQueue<Object> queue = new ReferenceQueue<Object>();
+ ref = allocPhantom(queue);
+ long total = 0;
+ for (int i = 0; !ref.isEnqueued() && i < allocationCount; ++i) {
+ runtime.registerNativeAllocation(size);
+ total += size;
+
+ // Sleep a little bit to ensure not all of the calls to
+ // registerNativeAllocation complete while GC is in the process of
+ // running.
+ Thread.sleep(maxExpectedGcDurationMs / allocationCount);
+ }
+
+ // Wait up to maxExpectedGcDurationMs to give GC a chance to finish
+ // running. If the reference isn't enqueued after that, then it is
+ // pretty unlikely (though technically still possible) that GC was
+ // triggered as intended.
+ if (queue.remove(maxExpectedGcDurationMs) == null) {
+ throw new RuntimeException("GC failed to complete");
+ }
+
+ while (total > 0) {
+ runtime.registerNativeFree(size);
+ total -= size;
+ }
+ }
+
+ // Call registerNativeAllocation repeatedly at a high rate to trigger the
+ // case of blocking registerNativeAllocation.
+ private static void triggerBlockingRegisterNativeAllocation() throws Exception {
+ long maxMem = Runtime.getRuntime().maxMemory();
+ int size = (int)(maxMem / 5);
+ int allocationCount = 10;
+
+ long total = 0;
+ for (int i = 0; i < allocationCount; ++i) {
+ runtime.registerNativeAllocation(size);
+ total += size;
+ }
+
+ while (total > 0) {
+ runtime.registerNativeFree(size);
+ total -= size;
}
}
public static void main(String[] args) throws Exception {
- Class<?> vm_runtime = Class.forName("dalvik.system.VMRuntime");
- Method get_runtime = vm_runtime.getDeclaredMethod("getRuntime");
- runtime = get_runtime.invoke(null);
- register_native_allocation = vm_runtime.getDeclaredMethod("registerNativeAllocation", Integer.TYPE);
- register_native_free = vm_runtime.getDeclaredMethod("registerNativeFree", Integer.TYPE);
- maxMem = Runtime.getRuntime().maxMemory();
- int count = 16;
- int size = (int)(maxMem / 2 / count);
- int allocation_count = 256;
- NativeAllocation[] allocations = new NativeAllocation[count];
- for (int i = 0; i < allocation_count; ++i) {
- allocations[i % count] = new NativeAllocation(size, false);
+ // Test that registerNativeAllocation triggers GC.
+ // Run this a few times in a loop to reduce the chances that the test
+ // is flaky and make sure registerNativeAllocation continues to work
+ // after the first GC is triggered.
+ for (int i = 0; i < 20; ++i) {
+ checkRegisterNativeAllocation();
}
- // Test that we don't get a deadlock if we are holding nativeLock. If there is no timeout,
- // then we will get a finalizer timeout exception.
- aboutToDeadlockLock = false;
+
+ // Test that we don't get a deadlock if we call
+ // registerNativeAllocation with a blocked finalizer.
synchronized (deadlockLock) {
- for (int i = 0; aboutToDeadlockLock != true; ++i) {
- allocations[i % count] = new NativeAllocation(size, true);
+ allocateDeadlockingFinalizer();
+ while (!aboutToDeadlock) {
+ Runtime.getRuntime().gc();
}
+
// Do more allocations now that the finalizer thread is deadlocked so that we force
- // finalization and timeout.
- for (int i = 0; i < 10; ++i) {
- allocations[i % count] = new NativeAllocation(size, true);
- }
+ // finalization and timeout.
+ triggerBlockingRegisterNativeAllocation();
}
System.out.println("Test complete");
}
diff --git a/test/004-ThreadStress/check b/test/004-ThreadStress/check
index 77e4cdbda0..ecc5ea8a37 100755
--- a/test/004-ThreadStress/check
+++ b/test/004-ThreadStress/check
@@ -15,4 +15,9 @@
# limitations under the License.
# Do not compare numbers, so replace numbers with 'N'.
-sed '-es/[0-9][0-9]*/N/g' "$2" | diff --strip-trailing-cr -q "$1" - >/dev/null \ No newline at end of file
+# Remove all messages relating to failing to allocate a java-peer for the
+# shutdown thread. This can occasionally happen with this test but it is not
+# something we really need to worry about here.
+sed '-es/[0-9][0-9]*/N/g' "$2" \
+ | sed "/Exception creating thread peer:/,+3d" \
+ | diff --strip-trailing-cr -q "$1" - >/dev/null
diff --git a/test/071-dexfile-get-static-size/build b/test/071-dexfile-get-static-size/build
index 0bba66d065..412ee6dd46 100755
--- a/test/071-dexfile-get-static-size/build
+++ b/test/071-dexfile-get-static-size/build
@@ -16,15 +16,13 @@
./default-build "$@"
-# Create and add as resources to the test jar file:
+# Bundle with the test the following resources:
# 1. test1.dex
# 2. test2.dex
# 3. test-jar.jar, containing test1.dex as classes.dex
# 4. multi-jar.jar, containing test1.dex as classes.dex and test2.dex as classes2.dex
mkdir test-jar
-cp test1.dex test-jar/classes.dex
-cp test2.dex test-jar/classes2.dex
-zip -j test-jar.jar test-jar/classes.dex
-zip -j multi-jar.jar test-jar/classes.dex test-jar/classes2.dex
-jar uf ${TEST_NAME}.jar test1.dex test2.dex test-jar.jar multi-jar.jar
-
+cp res/test1.dex test-jar/classes.dex
+cp res/test2.dex test-jar/classes2.dex
+zip -j res/test-jar.jar test-jar/classes.dex
+zip -j res/multi-jar.jar test-jar/classes.dex test-jar/classes2.dex
diff --git a/test/071-dexfile-get-static-size/test1.dex b/test/071-dexfile-get-static-size/res/test1.dex
index 84602d03c2..84602d03c2 100644
--- a/test/071-dexfile-get-static-size/test1.dex
+++ b/test/071-dexfile-get-static-size/res/test1.dex
Binary files differ
diff --git a/test/071-dexfile-get-static-size/test2.dex b/test/071-dexfile-get-static-size/res/test2.dex
index a07c46ef59..a07c46ef59 100644
--- a/test/071-dexfile-get-static-size/test2.dex
+++ b/test/071-dexfile-get-static-size/res/test2.dex
Binary files differ
diff --git a/test/071-dexfile-get-static-size/src/Main.java b/test/071-dexfile-get-static-size/src/Main.java
index 4bf453801e..8dbbba56c3 100644
--- a/test/071-dexfile-get-static-size/src/Main.java
+++ b/test/071-dexfile-get-static-size/src/Main.java
@@ -14,26 +14,9 @@
* limitations under the License.
*/
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.FileOutputStream;
-import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
public class Main {
- private static void extractResource(String resource, String filename) throws Exception {
- ClassLoader loader = Main.class.getClassLoader();
- InputStream is = loader.getResourceAsStream(resource);
- OutputStream os = new FileOutputStream(filename);
- int read;
- byte[] buf = new byte[4096];
- while ((read = is.read(buf)) >= 0) {
- os.write(buf, 0, read);
- }
- is.close();
- os.close();
- }
-
private static long getDexFileSize(String filename) throws Exception {
ClassLoader loader = Main.class.getClassLoader();
Class<?> DexFile = loader.loadClass("dalvik.system.DexFile");
@@ -47,8 +30,7 @@ public class Main {
}
private static void test(String resource) throws Exception {
- String filename = System.getenv("DEX_LOCATION") + "/" + resource;
- extractResource(resource, filename);
+ String filename = System.getenv("DEX_LOCATION") + "/res/" + resource;
long size = getDexFileSize(filename);
System.out.println("Size for " + resource + ": " + size);
}
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index ef758e86e1..bdfb44a87e 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -71,7 +71,7 @@ static bool CheckStack(Backtrace* bt, const std::vector<std::string>& seq) {
for (Backtrace::const_iterator it = bt->begin(); it != bt->end(); ++it) {
if (BacktraceMap::IsValid(it->map)) {
LOG(INFO) << "Got " << it->func_name << ", looking for " << seq[cur_search_index];
- if (it->func_name == seq[cur_search_index]) {
+ if (it->func_name.find(seq[cur_search_index]) != std::string::npos) {
cur_search_index++;
if (cur_search_index == seq.size()) {
return true;
@@ -107,7 +107,7 @@ static void MoreErrorInfo(pid_t pid, bool sig_quit_on_fail) {
extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(
JNIEnv*,
jobject,
- jboolean full_signatrues,
+ jboolean,
jint,
jboolean) {
#if __linux__
@@ -129,17 +129,11 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(
std::vector<std::string> seq = {
"Java_Main_unwindInProcess", // This function.
"Main.unwindInProcess", // The corresponding Java native method frame.
- "int java.util.Arrays.binarySearch(java.lang.Object[], int, int, java.lang.Object, java.util.Comparator)", // Framework method.
+ "java.util.Arrays.binarySearch0", // Framework method.
"Main.main" // The Java entry method.
};
- std::vector<std::string> full_seq = {
- "Java_Main_unwindInProcess", // This function.
- "boolean Main.unwindInProcess(boolean, int, boolean)", // The corresponding Java native method frame.
- "int java.util.Arrays.binarySearch(java.lang.Object[], int, int, java.lang.Object, java.util.Comparator)", // Framework method.
- "void Main.main(java.lang.String[])" // The Java entry method.
- };
- bool result = CheckStack(bt.get(), full_signatrues ? full_seq : seq);
+ bool result = CheckStack(bt.get(), seq);
if (!kCauseSegfault) {
return result ? JNI_TRUE : JNI_FALSE;
} else {
@@ -191,7 +185,7 @@ int wait_for_sigstop(pid_t tid, int* total_sleep_time_usec, bool* detach_failed
extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(
JNIEnv*,
jobject,
- jboolean full_signatrues,
+ jboolean,
jint pid_int) {
#if __linux__
// TODO: What to do on Valgrind?
@@ -235,20 +229,11 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(
// Note: For some reason, the name isn't
// resolved, so don't look for it right now.
"Main.sleep", // The corresponding Java native method frame.
- "int java.util.Arrays.binarySearch(java.lang.Object[], int, int, java.lang.Object, java.util.Comparator)", // Framework method.
+ "java.util.Arrays.binarySearch0", // Framework method.
"Main.main" // The Java entry method.
};
- std::vector<std::string> full_seq = {
- // "Java_Main_sleep", // The sleep function being executed in the
- // other runtime.
- // Note: For some reason, the name isn't
- // resolved, so don't look for it right now.
- "boolean Main.sleep(int, boolean, double)", // The corresponding Java native method frame.
- "int java.util.Arrays.binarySearch(java.lang.Object[], int, int, java.lang.Object, java.util.Comparator)", // Framework method.
- "void Main.main(java.lang.String[])" // The Java entry method.
- };
- result = CheckStack(bt.get(), full_signatrues ? full_seq : seq);
+ result = CheckStack(bt.get(), seq);
}
constexpr bool kSigQuitOnFail = true;
diff --git a/test/168-vmstack-annotated/expected.txt b/test/168-vmstack-annotated/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/168-vmstack-annotated/expected.txt
diff --git a/test/168-vmstack-annotated/info.txt b/test/168-vmstack-annotated/info.txt
new file mode 100644
index 0000000000..d849bc31ed
--- /dev/null
+++ b/test/168-vmstack-annotated/info.txt
@@ -0,0 +1 @@
+Regression test for b/68703210
diff --git a/test/168-vmstack-annotated/run b/test/168-vmstack-annotated/run
new file mode 100644
index 0000000000..93654113e6
--- /dev/null
+++ b/test/168-vmstack-annotated/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use a smaller heap so it's easier to potentially fill up.
+exec ${RUN} $@ --runtime-option -Xmx2m
diff --git a/test/168-vmstack-annotated/src/Main.java b/test/168-vmstack-annotated/src/Main.java
new file mode 100644
index 0000000000..8234f945c0
--- /dev/null
+++ b/test/168-vmstack-annotated/src/Main.java
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.Thread.State;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+
+public class Main {
+
+ static class Runner implements Runnable {
+ List<Object> locks;
+ List<CyclicBarrier> barriers;
+
+ public Runner(List<Object> locks, List<CyclicBarrier> barriers) {
+ this.locks = locks;
+ this.barriers = barriers;
+ }
+
+ @Override
+ public void run() {
+ step(locks, barriers);
+ }
+
+ private void step(List<Object> l, List<CyclicBarrier> b) {
+ if (l.isEmpty()) {
+ // Nothing to do, sleep indefinitely.
+ try {
+ Thread.sleep(100000000);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ } else {
+ Object lockObject = l.remove(0);
+ CyclicBarrier barrierObject = b.remove(0);
+
+ if (lockObject == null) {
+ // No lock object: only take barrier, recurse.
+ try {
+ barrierObject.await();
+ } catch (InterruptedException | BrokenBarrierException e) {
+ throw new RuntimeException(e);
+ }
+ step(l, b);
+ } else if (barrierObject != null) {
+ // Have barrier: sync, wait and recurse.
+ synchronized(lockObject) {
+ try {
+ barrierObject.await();
+ } catch (InterruptedException | BrokenBarrierException e) {
+ throw new RuntimeException(e);
+ }
+ step(l, b);
+ }
+ } else {
+ // Sync, and get next step (which is assumed to have object and barrier).
+ synchronized (lockObject) {
+ Object lockObject2 = l.remove(0);
+ CyclicBarrier barrierObject2 = b.remove(0);
+ synchronized(lockObject2) {
+ try {
+ barrierObject2.await();
+ } catch (InterruptedException | BrokenBarrierException e) {
+ throw new RuntimeException(e);
+ }
+ step(l, b);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ try {
+ testCluster1();
+ } catch (Exception e) {
+ Map<Thread,StackTraceElement[]> stacks = Thread.getAllStackTraces();
+ for (Map.Entry<Thread,StackTraceElement[]> entry : stacks.entrySet()) {
+ System.out.println(entry.getKey());
+ System.out.println(Arrays.toString(entry.getValue()));
+ }
+ throw e;
+ }
+ }
+
+ private static void testCluster1() throws Exception {
+ // Test setup (at deadlock):
+ //
+ // Thread 1:
+ // #0 step: synchronized(o3) { synchronized(o2) }
+ // #1 step: synchronized(o1)
+ //
+ // Thread 2:
+ // #0 step: synchronized(o1)
+ // #1 step: synchronized(o4) { synchronized(o2) }
+ //
+ LinkedList<Object> l1 = new LinkedList<>();
+ LinkedList<CyclicBarrier> b1 = new LinkedList<>();
+ LinkedList<Object> l2 = new LinkedList<>();
+ LinkedList<CyclicBarrier> b2 = new LinkedList<>();
+
+ Object o1 = new Object();
+ Object o2 = new Object();
+ Object o3 = new Object();
+ Object o4 = new Object();
+
+ l1.add(o1);
+ l1.add(o3);
+ l1.add(o2);
+ l2.add(o4);
+ l2.add(o2);
+ l2.add(o1);
+
+ CyclicBarrier c1 = new CyclicBarrier(3);
+ CyclicBarrier c2 = new CyclicBarrier(2);
+ b1.add(c1);
+ b1.add(null);
+ b1.add(c2);
+ b2.add(null);
+ b2.add(c1);
+ b2.add(c2);
+
+ Thread t1 = new Thread(new Runner(l1, b1));
+ t1.setDaemon(true);
+ t1.start();
+ Thread t2 = new Thread(new Runner(l2, b2));
+ t2.setDaemon(true);
+ t2.start();
+
+ c1.await();
+
+ waitNotRunnable(t1);
+ waitNotRunnable(t2);
+ Thread.sleep(250); // Unfortunately this seems necessary. :-(
+
+ // Thread 1.
+ {
+ Object[] stack1 = getAnnotatedStack(t1);
+ assertBlockedOn(stack1[0], o2); // Blocked on o2.
+ assertLocks(stack1[0], o3); // Locked o3.
+ assertStackTraceElementStep(stack1[0]);
+
+ assertBlockedOn(stack1[1], null); // Frame can't be blocked.
+ assertLocks(stack1[1], o1); // Locked o1.
+ assertStackTraceElementStep(stack1[1]);
+ }
+
+ // Thread 2.
+ {
+ Object[] stack2 = getAnnotatedStack(t2);
+ assertBlockedOn(stack2[0], o1); // Blocked on o1.
+ assertLocks(stack2[0]); // Nothing locked.
+ assertStackTraceElementStep(stack2[0]);
+
+ assertBlockedOn(stack2[1], null); // Frame can't be blocked.
+ assertLocks(stack2[1], o4, o2); // Locked o4, o2.
+ assertStackTraceElementStep(stack2[1]);
+ }
+ }
+
+ private static void waitNotRunnable(Thread t) throws InterruptedException {
+ while (t.getState() == State.RUNNABLE) {
+ Thread.sleep(100);
+ }
+ }
+
+ private static Object[] getAnnotatedStack(Thread t) throws Exception {
+ Class<?> vmStack = Class.forName("dalvik.system.VMStack");
+ Method m = vmStack.getDeclaredMethod("getAnnotatedThreadStackTrace", Thread.class);
+ return (Object[]) m.invoke(null, t);
+ }
+
+ private static void assertEquals(Object o1, Object o2) {
+ if (o1 != o2) {
+ throw new RuntimeException("Expected " + o1 + " == " + o2);
+ }
+ }
+ private static void assertLocks(Object fromTrace, Object... locks) throws Exception {
+ Object fieldValue = fromTrace.getClass().getDeclaredMethod("getHeldLocks").
+ invoke(fromTrace);
+ assertEquals((Object[]) fieldValue,
+ (locks == null) ? null : (locks.length == 0 ? null : locks));
+ }
+ private static void assertBlockedOn(Object fromTrace, Object block) throws Exception {
+ Object fieldValue = fromTrace.getClass().getDeclaredMethod("getBlockedOn").
+ invoke(fromTrace);
+ assertEquals(fieldValue, block);
+ }
+ private static void assertEquals(Object[] o1, Object[] o2) {
+ if (!Arrays.equals(o1, o2)) {
+ throw new RuntimeException(
+ "Expected " + Arrays.toString(o1) + " == " + Arrays.toString(o2));
+ }
+ }
+ private static void assertStackTraceElementStep(Object o) throws Exception {
+ Object fieldValue = o.getClass().getDeclaredMethod("getStackTraceElement").invoke(o);
+ if (fieldValue instanceof StackTraceElement) {
+ StackTraceElement elem = (StackTraceElement) fieldValue;
+ if (!elem.getMethodName().equals("step")) {
+ throw new RuntimeException("Expected step method");
+ }
+ return;
+ }
+ throw new RuntimeException("Expected StackTraceElement " + fieldValue + " / " + o);
+ }
+}
+
diff --git a/test/305-other-fault-handler/expected.txt b/test/305-other-fault-handler/expected.txt
new file mode 100644
index 0000000000..6221e8e853
--- /dev/null
+++ b/test/305-other-fault-handler/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+Passed!
diff --git a/test/305-other-fault-handler/fault_handler.cc b/test/305-other-fault-handler/fault_handler.cc
new file mode 100644
index 0000000000..f04832613b
--- /dev/null
+++ b/test/305-other-fault-handler/fault_handler.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <atomic>
+#include <memory>
+
+#include <jni.h>
+#include <signal.h>
+#include <stdint.h>
+#include <sys/mman.h>
+
+#include "fault_handler.h"
+#include "globals.h"
+#include "mem_map.h"
+
+namespace art {
+
+class TestFaultHandler FINAL : public FaultHandler {
+ public:
+ explicit TestFaultHandler(FaultManager* manager)
+ : FaultHandler(manager),
+ map_error_(""),
+ target_map_(MemMap::MapAnonymous("test-305-mmap",
+ /* addr */ nullptr,
+ /* byte_count */ kPageSize,
+ /* prot */ PROT_NONE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ /* error_msg */ &map_error_,
+ /* use_ashmem */ false)),
+ was_hit_(false) {
+ CHECK(target_map_ != nullptr) << "Unable to create segfault target address " << map_error_;
+ manager_->AddHandler(this, /*in_generated_code*/false);
+ }
+
+ virtual ~TestFaultHandler() {
+ manager_->RemoveHandler(this);
+ }
+
+ bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+ CHECK_EQ(sig, SIGSEGV);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(siginfo->si_addr),
+ GetTargetPointer()) << "Segfault on unexpected address!";
+ CHECK(!was_hit_) << "Recursive signal!";
+ was_hit_ = true;
+
+ LOG(INFO) << "SEGV Caught. mprotecting map.";
+ CHECK(target_map_->Protect(PROT_READ | PROT_WRITE)) << "Failed to mprotect R/W";
+ LOG(INFO) << "Setting value to be read.";
+ *GetTargetPointer() = kDataValue;
+ LOG(INFO) << "Changing prot to be read-only.";
+ CHECK(target_map_->Protect(PROT_READ)) << "Failed to mprotect R-only";
+ return true;
+ }
+
+ void CauseSegfault() {
+ CHECK_EQ(target_map_->GetProtect(), PROT_NONE);
+
+ // This will segfault. The handler should deal with it though and we will get a value out of it.
+ uint32_t data = *GetTargetPointer();
+
+ // Prevent re-ordering around the *GetTargetPointer by the compiler
+ std::atomic_signal_fence(std::memory_order_seq_cst);
+
+ CHECK(was_hit_);
+ CHECK_EQ(data, kDataValue) << "Unexpected read value from mmap";
+ CHECK_EQ(target_map_->GetProtect(), PROT_READ);
+ LOG(INFO) << "Success!";
+ }
+
+ private:
+ uint32_t* GetTargetPointer() {
+ return reinterpret_cast<uint32_t*>(target_map_->Begin() + 8);
+ }
+
+ static constexpr uint32_t kDataValue = 0xDEADBEEF;
+
+ std::string map_error_;
+ std::unique_ptr<MemMap> target_map_;
+ bool was_hit_;
+};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_runFaultHandlerTest(JNIEnv*, jclass) {
+ std::unique_ptr<TestFaultHandler> handler(new TestFaultHandler(&fault_manager));
+ handler->CauseSegfault();
+}
+
+} // namespace art
diff --git a/test/305-other-fault-handler/info.txt b/test/305-other-fault-handler/info.txt
new file mode 100644
index 0000000000..656c8bd406
--- /dev/null
+++ b/test/305-other-fault-handler/info.txt
@@ -0,0 +1,3 @@
+Test that we correctly handle basic non-generated-code fault handlers
+
+Tests that we can use and remove these handlers and they can change mappings.
diff --git a/test/305-other-fault-handler/src/Main.java b/test/305-other-fault-handler/src/Main.java
new file mode 100644
index 0000000000..13a6fef730
--- /dev/null
+++ b/test/305-other-fault-handler/src/Main.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ runFaultHandlerTest();
+ System.out.println("Passed!");
+ }
+
+ public static native void runFaultHandlerTest();
+}
diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java
index 60e653c72f..3506649d3c 100644
--- a/test/449-checker-bce/src/Main.java
+++ b/test/449-checker-bce/src/Main.java
@@ -1057,6 +1057,64 @@ public class Main {
}
}
+ /// CHECK-START: void Main.lengthAlias1(int[], int) BCE (before)
+ /// CHECK-DAG: <<Arr:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Par:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Nul:l\d+>> NullCheck [<<Arr>>] loop:none
+ /// CHECK-DAG: <<Len:i\d+>> ArrayLength [<<Nul>>] loop:none
+ /// CHECK-DAG: NotEqual [<<Par>>,<<Len>>] loop:none
+ /// CHECK-DAG: <<Idx:i\d+>> Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: BoundsCheck [<<Idx>>,<<Len>>] loop:<<Loop>>
+ //
+ /// CHECK-START: void Main.lengthAlias1(int[], int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ public static void lengthAlias1(int[] a, int len) {
+ if (len == a.length) {
+ for (int i = 0; i < len; i++) {
+ a[i] = 1;
+ }
+ }
+ }
+
+ /// CHECK-START: void Main.lengthAlias2(int[], int) BCE (before)
+ /// CHECK-DAG: <<Arr:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Par:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Nul:l\d+>> NullCheck [<<Arr>>] loop:none
+ /// CHECK-DAG: <<Len:i\d+>> ArrayLength [<<Nul>>] loop:none
+ /// CHECK-DAG: Equal [<<Par>>,<<Len>>] loop:none
+ /// CHECK-DAG: <<Idx:i\d+>> Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: BoundsCheck [<<Idx>>,<<Len>>] loop:<<Loop>>
+ //
+ /// CHECK-START: void Main.lengthAlias2(int[], int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ public static void lengthAlias2(int[] a, int len) {
+ if (len != a.length) {
+ return;
+ }
+ for (int i = 0; i < len; i++) {
+ a[i] = 2;
+ }
+ }
+
+ /// CHECK-START: void Main.lengthAlias3(int[], int) BCE (before)
+ /// CHECK-DAG: <<Arr:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Par:i\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Nul:l\d+>> NullCheck [<<Arr>>] loop:none
+ /// CHECK-DAG: <<Len:i\d+>> ArrayLength [<<Nul>>] loop:none
+ /// CHECK-DAG: NotEqual [<<Par>>,<<Len>>] loop:none
+ /// CHECK-DAG: <<Idx:i\d+>> Phi loop:<<Loop:B\d+>>
+ /// CHECK-DAG: BoundsCheck [<<Idx>>,<<Len>>] loop:<<Loop>>
+ //
+ /// CHECK-START: void Main.lengthAlias3(int[], int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ public static void lengthAlias3(int[] a, int len) {
+ if (a.length == len) {
+ for (int i = 0; i < len; i++) {
+ a[i] = 3;
+ }
+ }
+ }
+
static int[][] mA;
/// CHECK-START: void Main.dynamicBCEAndIntrinsic(int) BCE (before)
@@ -1747,10 +1805,40 @@ public class Main {
System.out.println("nonzero length failed!");
}
+ array = new int[8];
+ lengthAlias1(array, 8);
+ for (int i = 0; i < 8; i++) {
+ if (array[i] != 1) {
+ System.out.println("alias1 failed!");
+ }
+ }
+ lengthAlias2(array, 8);
+ for (int i = 0; i < 8; i++) {
+ if (array[i] != 2) {
+ System.out.println("alias2 failed!");
+ }
+ }
+ lengthAlias3(array, 8);
+ for (int i = 0; i < 8; i++) {
+ if (array[i] != 3) {
+ System.out.println("alias3 failed!");
+ }
+ }
+
+ lengthAlias1(array, /*mismatched value*/ 32);
+ for (int i = 0; i < 8; i++) {
+ if (array[i] != 3) {
+ System.out.println("mismatch failed!");
+ }
+ }
+
// Zero length array does not break.
array = new int[0];
nonzeroLength(array);
knownLength(array);
+ lengthAlias1(array, 0);
+ lengthAlias2(array, 0);
+ lengthAlias3(array, 0);
mA = new int[4][4];
for (int i = 0; i < 4; i++) {
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index f6332b5503..98838c5089 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -398,7 +398,6 @@ public class Main {
/// CHECK-START: int Main.test15() load_store_elimination (after)
/// CHECK: <<Const2:i\d+>> IntConstant 2
/// CHECK: StaticFieldSet
- /// CHECK: StaticFieldSet
/// CHECK-NOT: StaticFieldGet
/// CHECK: Return [<<Const2>>]
@@ -773,6 +772,127 @@ public class Main {
return obj;
}
+ /// CHECK-START: void Main.testStoreStore2(TestClass2) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: void Main.testStoreStore2(TestClass2) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ private static void testStoreStore2(TestClass2 obj) {
+ obj.i = 41;
+ obj.j = 42;
+ obj.i = 43;
+ obj.j = 44;
+ }
+
+ /// CHECK-START: void Main.testStoreStore3(TestClass2, boolean) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: void Main.testStoreStore3(TestClass2, boolean) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ private static void testStoreStore3(TestClass2 obj, boolean flag) {
+ obj.i = 41;
+ obj.j = 42; // redundant since it's overwritten in both branches below.
+ if (flag) {
+ obj.j = 43;
+ } else {
+ obj.j = 44;
+ }
+ }
+
+ /// CHECK-START: void Main.testStoreStore4() load_store_elimination (before)
+ /// CHECK: StaticFieldSet
+ /// CHECK: StaticFieldSet
+
+ /// CHECK-START: void Main.testStoreStore4() load_store_elimination (after)
+ /// CHECK: StaticFieldSet
+ /// CHECK-NOT: StaticFieldSet
+
+ private static void testStoreStore4() {
+ TestClass.si = 61;
+ TestClass.si = 62;
+ }
+
+ /// CHECK-START: int Main.testStoreStore5(TestClass2, TestClass2) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: int Main.testStoreStore5(TestClass2, TestClass2) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ private static int testStoreStore5(TestClass2 obj1, TestClass2 obj2) {
+ obj1.i = 71; // This store is needed since obj2.i may load from it.
+ int i = obj2.i;
+ obj1.i = 72;
+ return i;
+ }
+
+ /// CHECK-START: int Main.testStoreStore6(TestClass2, TestClass2) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: int Main.testStoreStore6(TestClass2, TestClass2) load_store_elimination (after)
+ /// CHECK-NOT: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ private static int testStoreStore6(TestClass2 obj1, TestClass2 obj2) {
+ obj1.i = 81; // This store is not needed since obj2.j cannot load from it.
+ int j = obj2.j;
+ obj1.i = 82;
+ return j;
+ }
+
+ /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (before)
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (after)
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK-NOT: ArraySet
+ /// CHECK-NOT: ArrayGet
+
+ private static int testNoSideEffects(int[] array) {
+ array[0] = 101;
+ array[1] = 102;
+ int bitCount = Integer.bitCount(0x3456);
+ array[1] = 103;
+ return array[0] + bitCount;
+ }
+
+ /// CHECK-START: void Main.testThrow(TestClass2, java.lang.Exception) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testThrow(TestClass2, java.lang.Exception) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: Throw
+
+ // Make sure throw keeps the store.
+ private static void testThrow(TestClass2 obj, Exception e) throws Exception {
+ obj.i = 55;
+ throw e;
+ }
+
/// CHECK-START: int Main.testStoreStoreWithDeoptimize(int[]) load_store_elimination (before)
/// CHECK: NewInstance
/// CHECK: InstanceFieldSet
@@ -814,23 +934,6 @@ public class Main {
return arr[0] + arr[1] + arr[2] + arr[3];
}
- /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (before)
- /// CHECK: ArraySet
- /// CHECK: ArraySet
- /// CHECK: ArrayGet
-
- /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (after)
- /// CHECK: ArraySet
- /// CHECK: ArraySet
- /// CHECK-NOT: ArrayGet
-
- private static int testNoSideEffects(int[] array) {
- array[0] = 101;
- int bitCount = Integer.bitCount(0x3456);
- array[1] = array[0] + 1;
- return array[0] + bitCount;
- }
-
/// CHECK-START: double Main.getCircleArea(double, boolean) load_store_elimination (before)
/// CHECK: NewInstance
@@ -1105,16 +1208,46 @@ public class Main {
assertIntEquals(testStoreStore().i, 41);
assertIntEquals(testStoreStore().j, 43);
- assertIntEquals(testStoreStoreWithDeoptimize(new int[4]), 4);
assertIntEquals(testExitMerge(true), 2);
assertIntEquals(testExitMerge2(true), 2);
assertIntEquals(testExitMerge2(false), 2);
- int ret = testNoSideEffects(iarray);
+ TestClass2 testclass2 = new TestClass2();
+ testStoreStore2(testclass2);
+ assertIntEquals(testclass2.i, 43);
+ assertIntEquals(testclass2.j, 44);
+
+ testStoreStore3(testclass2, true);
+ assertIntEquals(testclass2.i, 41);
+ assertIntEquals(testclass2.j, 43);
+ testStoreStore3(testclass2, false);
+ assertIntEquals(testclass2.i, 41);
+ assertIntEquals(testclass2.j, 44);
+
+ testStoreStore4();
+ assertIntEquals(TestClass.si, 62);
+
+ int ret = testStoreStore5(testclass2, testclass2);
+ assertIntEquals(testclass2.i, 72);
+ assertIntEquals(ret, 71);
+
+ testclass2.j = 88;
+ ret = testStoreStore6(testclass2, testclass2);
+ assertIntEquals(testclass2.i, 82);
+ assertIntEquals(ret, 88);
+
+ ret = testNoSideEffects(iarray);
assertIntEquals(iarray[0], 101);
- assertIntEquals(iarray[1], 102);
+ assertIntEquals(iarray[1], 103);
assertIntEquals(ret, 108);
+
+ try {
+ testThrow(testclass2, new Exception());
+ } catch (Exception e) {}
+ assertIntEquals(testclass2.i, 55);
+
+ assertIntEquals(testStoreStoreWithDeoptimize(new int[4]), 4);
}
static boolean sFlag;
diff --git a/test/608-checker-unresolved-lse/src/Main.java b/test/608-checker-unresolved-lse/src/Main.java
index c6f8854b49..a39dd51bdf 100644
--- a/test/608-checker-unresolved-lse/src/Main.java
+++ b/test/608-checker-unresolved-lse/src/Main.java
@@ -88,7 +88,6 @@ public class Main extends MissingSuperClass {
/// CHECK-START: void Main.staticFieldTest() load_store_elimination (after)
/// CHECK: StaticFieldSet
- /// CHECK: StaticFieldSet
/// CHECK: UnresolvedStaticFieldGet
public static void staticFieldTest() {
// Ensure Foo is initialized.
diff --git a/test/623-checker-loop-regressions/expected.txt b/test/623-checker-loop-regressions/expected.txt
index b0aad4deb5..805857dc65 100644
--- a/test/623-checker-loop-regressions/expected.txt
+++ b/test/623-checker-loop-regressions/expected.txt
@@ -1 +1,2 @@
+JNI_OnLoad called
passed
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 29f3817afb..4e2b241fd7 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -19,6 +19,8 @@
*/
public class Main {
+ private static native void ensureJitCompiled(Class<?> cls, String methodName);
+
/// CHECK-START: int Main.earlyExitFirst(int) loop_optimization (before)
/// CHECK-DAG: Phi loop:<<Loop:B\d+>> outer_loop:none
/// CHECK-DAG: Phi loop:<<Loop>> outer_loop:none
@@ -583,6 +585,8 @@ public class Main {
}
public static void main(String[] args) {
+ System.loadLibrary(args[0]);
+
expectEquals(10, earlyExitFirst(-1));
for (int i = 0; i <= 10; i++) {
expectEquals(i, earlyExitFirst(i));
@@ -746,6 +750,9 @@ public class Main {
expectEquals(153, doNotMoveSIMD());
+ // This test exposed SIMDization issues on x86 and x86_64
+ // so we make sure the test runs with JIT enabled.
+ ensureJitCompiled(Main.class, "reduction32Values");
{
int[] a1 = new int[100];
int[] a2 = new int[100];
diff --git a/test/639-checker-code-sinking/expected.txt b/test/639-checker-code-sinking/expected.txt
index 52e756c231..5d4833aca8 100644
--- a/test/639-checker-code-sinking/expected.txt
+++ b/test/639-checker-code-sinking/expected.txt
@@ -1,3 +1,3 @@
0
class java.lang.Object
-43
+42
diff --git a/test/639-checker-code-sinking/src/Main.java b/test/639-checker-code-sinking/src/Main.java
index 7496925adc..a1c30f7b4e 100644
--- a/test/639-checker-code-sinking/src/Main.java
+++ b/test/639-checker-code-sinking/src/Main.java
@@ -337,7 +337,7 @@ public class Main {
public static void testStoreStore(boolean doThrow) {
Main m = new Main();
m.intField = 42;
- m.intField = 43;
+ m.intField2 = 43;
if (doThrow) {
throw new Error(m.$opt$noinline$toString());
}
@@ -349,6 +349,7 @@ public class Main {
volatile int volatileField;
int intField;
+ int intField2;
Object objectField;
static boolean doThrow;
static boolean doLoop;
diff --git a/test/672-checker-throw-method/expected.txt b/test/672-checker-throw-method/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/672-checker-throw-method/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/672-checker-throw-method/info.txt b/test/672-checker-throw-method/info.txt
new file mode 100644
index 0000000000..250810be15
--- /dev/null
+++ b/test/672-checker-throw-method/info.txt
@@ -0,0 +1 @@
+Test detecting throwing methods for code sinking.
diff --git a/test/672-checker-throw-method/src/Main.java b/test/672-checker-throw-method/src/Main.java
new file mode 100644
index 0000000000..ceb5eb784c
--- /dev/null
+++ b/test/672-checker-throw-method/src/Main.java
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for detecting throwing methods for code sinking.
+ */
+public class Main {
+
+ //
+ // Some "runtime library" methods.
+ //
+
+ static private void doThrow(String par) {
+ throw new Error("you are null: " + par);
+ }
+
+ static private void checkNotNullDirect(Object obj, String par) {
+ if (obj == null)
+ throw new Error("you are null: " + par);
+ }
+
+ static private void checkNotNullSplit(Object obj, String par) {
+ if (obj == null)
+ doThrow(par);
+ }
+
+ //
+ // Various ways of enforcing non-null parameter.
+ // In all cases, par should be subject to code sinking.
+ //
+
+ /// CHECK-START: void Main.doit1(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit1(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ static public void doit1(int[] a) {
+ String par = "a";
+ if (a == null)
+ throw new Error("you are null: " + par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 1;
+ }
+ }
+
+ /// CHECK-START: void Main.doit2(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeStaticOrDirect [<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit2(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeStaticOrDirect [<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ static public void doit2(int[] a) {
+ String par = "a";
+ if (a == null)
+ doThrow(par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 2;
+ }
+ }
+
+ /// CHECK-START: void Main.doit3(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit3(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ static public void doit3(int[] a) {
+ String par = "a";
+ checkNotNullDirect(a, par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 3;
+ }
+ }
+
+ /// CHECK-START: void Main.doit4(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeStaticOrDirect [<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit4(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeStaticOrDirect [<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ static public void doit4(int[] a) {
+ String par = "a";
+ checkNotNullSplit(a, par); // resembles Kotlin runtime lib
+ // (test is lined, doThrow is not)
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 4;
+ }
+ }
+
+ // Ensures Phi values are merged properly.
+ static public int doit5(int[] a) {
+ int t = 100;
+ String par = "a";
+ if (a == null) {
+ doThrow(par);
+ } else {
+ t = 1000;
+ }
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 5;
+ }
+ // Phi on t, even though doThrow never reaches.
+ return t;
+ }
+
+ //
+ // Test driver.
+ //
+
+ static public void main(String[] args) {
+ int[] a = new int[100];
+ for (int i = 0; i < 100; i++) {
+ a[i] = 0;
+ }
+
+ try {
+ doit1(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ doit1(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(1, a[i]);
+ }
+
+ try {
+ doit2(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ doit2(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(2, a[i]);
+ }
+
+ try {
+ doit3(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ doit3(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(3, a[i]);
+ }
+
+ try {
+ doit4(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ doit4(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(4, a[i]);
+ }
+
+ try {
+ doit5(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ expectEquals(1000, doit5(a));
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(5, a[i]);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/673-checker-throw-vmethod/expected.txt b/test/673-checker-throw-vmethod/expected.txt
new file mode 100644
index 0000000000..b0aad4deb5
--- /dev/null
+++ b/test/673-checker-throw-vmethod/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/673-checker-throw-vmethod/info.txt b/test/673-checker-throw-vmethod/info.txt
new file mode 100644
index 0000000000..250810be15
--- /dev/null
+++ b/test/673-checker-throw-vmethod/info.txt
@@ -0,0 +1 @@
+Test detecting throwing methods for code sinking.
diff --git a/test/673-checker-throw-vmethod/src/Main.java b/test/673-checker-throw-vmethod/src/Main.java
new file mode 100644
index 0000000000..d0e1591bdb
--- /dev/null
+++ b/test/673-checker-throw-vmethod/src/Main.java
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for detecting throwing methods for code sinking.
+ */
+public class Main {
+
+ //
+ // Some "runtime library" methods.
+ //
+
+ public final void doThrow(String par) {
+ throw new Error("you are null: " + par);
+ }
+
+ public final void checkNotNullDirect(Object obj, String par) {
+ if (obj == null)
+ throw new Error("you are null: " + par);
+ }
+
+ public final void checkNotNullSplit(Object obj, String par) {
+ if (obj == null)
+ doThrow(par);
+ }
+
+ //
+ // Various ways of enforcing non-null parameter.
+ // In all cases, par should be subject to code sinking.
+ //
+
+ /// CHECK-START: void Main.doit1(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit1(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ public void doit1(int[] a) {
+ String par = "a";
+ if (a == null)
+ throw new Error("you are null: " + par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 1;
+ }
+ }
+
+ /// CHECK-START: void Main.doit2(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit2(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ public void doit2(int[] a) {
+ String par = "a";
+ if (a == null)
+ doThrow(par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 2;
+ }
+ }
+
+ /// CHECK-START: void Main.doit3(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit3(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>]
+ /// CHECK: Throw
+ /// CHECK: end_block
+ public void doit3(int[] a) {
+ String par = "a";
+ checkNotNullDirect(a, par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 3;
+ }
+ }
+
+ /// CHECK-START: void Main.doit4(int[]) code_sinking (before)
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ //
+ /// CHECK-START: void Main.doit4(int[]) code_sinking (after)
+ /// CHECK: begin_block
+ /// CHECK: <<Tst:z\d+>> NotEqual
+ /// CHECK: If [<<Tst>>]
+ /// CHECK: end_block
+ /// CHECK: begin_block
+ /// CHECK: <<Str:l\d+>> LoadString
+ /// CHECK: InvokeVirtual [{{l\d+}},<<Str>>] method_name:Main.doThrow
+ /// CHECK: end_block
+ public void doit4(int[] a) {
+ String par = "a";
+ checkNotNullSplit(a, par);
+ for (int i = 0; i < a.length; i++) {
+ a[i] = 4;
+ }
+ }
+
+ //
+ // Test driver.
+ //
+
+ static public void main(String[] args) {
+ int[] a = new int[100];
+ for (int i = 0; i < 100; i++) {
+ a[i] = 0;
+ }
+
+ Main m = new Main();
+
+ try {
+ m.doit1(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ m.doit1(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(1, a[i]);
+ }
+
+ try {
+ m.doit2(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ m.doit2(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(2, a[i]);
+ }
+
+ try {
+ m.doit3(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ m.doit3(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(3, a[i]);
+ }
+
+ try {
+ m.doit4(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ m.doit4(a);
+ }
+ for (int i = 0; i < 100; i++) {
+ expectEquals(4, a[i]);
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
diff --git a/test/983-source-transform-verify/source_transform.cc b/test/983-source-transform-verify/source_transform.cc
index 55dc603c4f..c076d1521f 100644
--- a/test/983-source-transform-verify/source_transform.cc
+++ b/test/983-source-transform-verify/source_transform.cc
@@ -28,6 +28,7 @@
#include "base/macros.h"
#include "bytecode_utils.h"
#include "dex/code_item_accessors-inl.h"
+#include "dex/art_dex_file_loader.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_instruction.h"
@@ -66,15 +67,16 @@ void JNICALL CheckDexFileHook(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
if (IsJVM()) {
return;
}
+ const ArtDexFileLoader dex_file_loader;
std::string error;
- std::unique_ptr<const DexFile> dex(DexFileLoader::Open(class_data,
- class_data_len,
- "fake_location.dex",
- /*location_checksum*/ 0,
- /*oat_dex_file*/ nullptr,
- /*verify*/ true,
- /*verify_checksum*/ true,
- &error));
+ std::unique_ptr<const DexFile> dex(dex_file_loader.Open(class_data,
+ class_data_len,
+ "fake_location.dex",
+ /*location_checksum*/ 0,
+ /*oat_dex_file*/ nullptr,
+ /*verify*/ true,
+ /*verify_checksum*/ true,
+ &error));
if (dex.get() == nullptr) {
std::cout << "Failed to verify dex file for " << name << " because " << error << std::endl;
return;
@@ -90,7 +92,7 @@ void JNICALL CheckDexFileHook(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
continue;
}
for (const DexInstructionPcPair& pair :
- art::CodeItemInstructionAccessor(dex.get(), it.GetMethodCodeItem())) {
+ art::CodeItemInstructionAccessor(*dex, it.GetMethodCodeItem())) {
const Instruction& inst = pair.Inst();
int forbiden_flags = (Instruction::kVerifyError | Instruction::kVerifyRuntimeOnly);
if (inst.Opcode() == Instruction::RETURN_VOID_NO_BARRIER ||
diff --git a/test/Android.bp b/test/Android.bp
index f5ca2f0338..49a34a1246 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -369,6 +369,7 @@ cc_defaults {
"154-gc-loop/heap_interface.cc",
"167-visit-locks/visit_locks.cc",
"203-multi-checkpoint/multi_checkpoint.cc",
+ "305-other-fault-handler/fault_handler.cc",
"454-get-vreg/get_vreg_jni.cc",
"457-regs/regs_jni.cc",
"461-get-reference-vreg/get_reference_vreg_jni.cc",
diff --git a/test/HiddenApi/Main.java b/test/HiddenApi/Main.java
new file mode 100644
index 0000000000..187dd6e599
--- /dev/null
+++ b/test/HiddenApi/Main.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+ public int ifield;
+ private static Object sfield;
+
+ void imethod(long x) {}
+ public static void smethod(Object x) {}
+
+ public native void inmethod(char x);
+ protected native static void snmethod(Integer x);
+}
diff --git a/test/README.md b/test/README.md
index c68b40b135..350350e9e6 100644
--- a/test/README.md
+++ b/test/README.md
@@ -9,6 +9,8 @@ directory are compiled separately but to the same output directory;
this can be used to exercise "API mismatch" situations by replacing
class files created in the first pass. The "src-ex" directory is
built separately, and is intended for exercising class loaders.
+Resources can be stored in the "res" directory, which is distributed
+together with the executable files.
The gtests are in named directories and contain a .java source
file.
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 132099a45d..5e40b86aa0 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -466,10 +466,9 @@ if [ "$USE_JVMTI" = "y" ]; then
if [[ "$TEST_IS_NDEBUG" = "y" ]]; then
plugin=libopenjdkjvmti.so
fi
+ # We used to add flags here that made the runtime debuggable but that is not
+ # needed anymore since the plugin can do it for us now.
FLAGS="${FLAGS} -Xplugin:${plugin}"
- FLAGS="${FLAGS} -Xcompiler-option --debuggable"
- # Always make the compilation be debuggable.
- COMPILE_FLAGS="${COMPILE_FLAGS} --debuggable"
fi
fi
@@ -807,6 +806,10 @@ if [ "$HOST" = "n" ]; then
if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
adb push profile $DEX_LOCATION
fi
+ # Copy resource folder
+ if [ -d res ]; then
+ adb push res $DEX_LOCATION
+ fi
else
adb shell rm -r $DEX_LOCATION >/dev/null 2>&1
adb shell mkdir -p $DEX_LOCATION >/dev/null 2>&1
@@ -815,7 +818,10 @@ if [ "$HOST" = "n" ]; then
if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
adb push profile $DEX_LOCATION >/dev/null 2>&1
fi
-
+ # Copy resource folder
+ if [ -d res ]; then
+ adb push res $DEX_LOCATION >/dev/null 2>&1
+ fi
fi
LD_LIBRARY_PATH=/data/$TEST_DIRECTORY/art/$ISA
diff --git a/test/knownfailures.json b/test/knownfailures.json
index a12510c9dc..41d976a174 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -257,10 +257,8 @@
},
{
"tests": "137-cfi",
- "description": ["CFI unwinding expects managed frames, and the test",
- "does not iterate enough to even compile. JIT also",
- "uses Generic JNI instead of the JNI compiler."],
- "variant": "interpreter | jit"
+ "description": ["CFI unwinding expects managed frames"],
+ "variant": "interpreter"
},
{
"tests": "906-iterate-heap",
@@ -412,7 +410,8 @@
{
"tests": [
"961-default-iface-resolution-gen",
- "964-default-iface-init-gen"
+ "964-default-iface-init-gen",
+ "968-default-partial-compile-gen"
],
"description": ["Tests that just take too long with jvmti-stress"],
"variant": "jvmti-stress | redefine-stress | trace-stress | step-stress"
@@ -442,6 +441,7 @@
"957-methodhandle-transforms",
"958-methodhandle-stackframe",
"959-invoke-polymorphic-accessors",
+ "979-const-method-handle",
"990-method-handle-and-mr"
],
"description": [
@@ -646,8 +646,15 @@
"bug": "b/64683522"
},
{
+ "tests": ["628-vdex",
+ "629-vdex-speed",
+ "634-vdex-duplicate"],
+ "variant": "cdex-fast",
+ "description": ["Tests that depend on input-vdex are not supported with compact dex"]
+ },
+ {
"tests": "661-oat-writer-layout",
- "variant": "interp-ac | interpreter | jit | no-dex2oat | no-prebuild | no-image | trace",
+ "variant": "interp-ac | interpreter | jit | no-dex2oat | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
"description": ["Test is designed to only check --compiler-filter=speed"]
}
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 297ce08bee..2bb407db58 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -234,8 +234,8 @@ target_config = {
'env' : {
'ART_USE_READ_BARRIER' : 'false',
'ART_HEAP_POISONING' : 'true',
- # Get some extra automated testing coverage for compact dex.
- 'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'fast'
+ # Disable compact dex to get coverage of standard dex file usage.
+ 'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'none'
}
},
'art-preopt' : {
@@ -280,8 +280,8 @@ target_config = {
'env': {
'ART_DEFAULT_GC_TYPE' : 'SS',
'ART_USE_READ_BARRIER' : 'false',
- # Get some extra automated testing coverage for compact dex.
- 'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'fast'
+ # Disable compact dex to get coverage of standard dex file usage.
+ 'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'none'
}
},
'art-gtest-gss-gc': {
diff --git a/tools/hiddenapi/Android.bp b/tools/hiddenapi/Android.bp
new file mode 100644
index 0000000000..a78bc43aa4
--- /dev/null
+++ b/tools/hiddenapi/Android.bp
@@ -0,0 +1,64 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_defaults {
+ name: "hiddenapi-defaults",
+ host_supported: true,
+ device_supported: false,
+ defaults: ["art_defaults"],
+ srcs: [
+ "hiddenapi.cc",
+ ],
+
+ target: {
+ android: {
+ compile_multilib: "prefer32",
+ },
+ },
+
+ shared_libs: [
+ "libbase",
+ ],
+}
+
+art_cc_binary {
+ name: "hiddenapi",
+ defaults: ["hiddenapi-defaults"],
+ shared_libs: [
+ "libart",
+ ],
+}
+
+art_cc_binary {
+ name: "hiddenapid",
+ defaults: [
+ "art_debug_defaults",
+ "hiddenapi-defaults",
+ ],
+ shared_libs: [
+ "libartd",
+ ],
+}
+
+art_cc_test {
+ name: "art_hiddenapi_tests",
+ host_supported: true,
+ device_supported: false,
+ defaults: [
+ "art_gtest_defaults",
+ ],
+ srcs: ["hiddenapi_test.cc"],
+}
diff --git a/tools/hiddenapi/README.md b/tools/hiddenapi/README.md
new file mode 100644
index 0000000000..cad12126dd
--- /dev/null
+++ b/tools/hiddenapi/README.md
@@ -0,0 +1,54 @@
+HiddenApi
+=========
+
+This tool iterates over all class members inside given DEX files and modifies
+their access flags if their signatures appear on one of two lists - greylist and
+blacklist - provided as text file inputs. These access flags denote to the
+runtime that the marked methods/fields should be treated as internal APIs with
+access restricted only to platform code. Methods/fields not mentioned on the two
+lists are assumed to be on a whitelist and left accessible by all code.
+
+API signatures
+==============
+
+The methods/fields to be marked are specified in two text files (greylist,
+blacklist) provided as input. Only one signature per line is allowed.
+
+Types are expected in their DEX format - class descriptors are to be provided in
+"slash" form, e.g. "Ljava/lang/Object;", primitive types in their shorty form,
+e.g. "I" for "int", and a "[" prefix denotes an array type. Lists of types do
+not use any separators, e.g. "ILxyz;F" for "int, xyz, float".
+
+Methods are encoded as:
+ `class_descriptor->method_name(parameter_types)return_type`
+
+Fields are encoded as:
+ `class_descriptor->field_name:field_type`
+
+Bit encoding
+============
+
+Two bits of information are encoded in the DEX access flags. These are encoded
+as unsigned LEB128 values in DEX and so as to not increase the size of the DEX,
+different modifiers were chosen for different kinds of methods/fields.
+
+First bit is encoded as the inversion of visibility access flags (bits 2:0).
+At most one of these flags can be set at any given time. Inverting these bits
+therefore produces a value where at least two bits are set and there is never
+any loss of information.
+
+Second bit is encoded differently for each given type of class member as there
+is no single unused bit such that setting it would not increase the size of the
+LEB128 encoding. The following bits are used:
+
+ * bit 5 for fields as it carries no other meaning
+ * bit 5 for non-native methods, as `synchronized` can only be set on native
+ methods (the Java `synchronized` modifier is bit 17)
+ * bit 9 for native methods, as it carries no meaning and bit 8 (`native`) will
+ make the LEB128 encoding at least two bytes long
+
+The following bit encoding is used to denote the membership of a method/field:
+
+ * whitelist: `false`, `false`
+ * greylist: `true`, `false`
+ * blacklist: `true`, `true`
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
new file mode 100644
index 0000000000..a755fdb40b
--- /dev/null
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+#include <iostream>
+#include <unordered_set>
+
+#include "android-base/stringprintf.h"
+#include "android-base/strings.h"
+
+#include "base/unix_file/fd_file.h"
+#include "dex/art_dex_file_loader.h"
+#include "dex/dex_file-inl.h"
+#include "hidden_api_access_flags.h"
+#include "mem_map.h"
+#include "os.h"
+
+namespace art {
+
+static int original_argc;
+static char** original_argv;
+
+static std::string CommandLine() {
+ std::vector<std::string> command;
+ for (int i = 0; i < original_argc; ++i) {
+ command.push_back(original_argv[i]);
+ }
+ return android::base::Join(command, ' ');
+}
+
+static void UsageErrorV(const char* fmt, va_list ap) {
+ std::string error;
+ android::base::StringAppendV(&error, fmt, ap);
+ LOG(ERROR) << error;
+}
+
+static void UsageError(const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UsageErrorV(fmt, ap);
+ va_end(ap);
+}
+
+NO_RETURN static void Usage(const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UsageErrorV(fmt, ap);
+ va_end(ap);
+
+ UsageError("Command: %s", CommandLine().c_str());
+ UsageError("Usage: hiddenapi [options]...");
+ UsageError("");
+ UsageError(" --dex=<filename>: specify dex file whose members' access flags are to be set.");
+ UsageError(" At least one --dex parameter must be specified.");
+ UsageError("");
+ UsageError(" --light-greylist=<filename>:");
+ UsageError(" --dark-greylist=<filename>:");
+ UsageError(" --blacklist=<filename>: text files with signatures of methods/fields to be marked");
+ UsageError(" greylisted/blacklisted respectively. At least one list must be provided.");
+ UsageError("");
+ UsageError(" --print-hidden-api: dump a list of marked methods/fields to the standard output.");
+ UsageError(" There is no indication which API category they belong to.");
+ UsageError("");
+
+ exit(EXIT_FAILURE);
+}
+
+class DexClass {
+ public:
+ DexClass(const DexFile& dex_file, uint32_t idx)
+ : dex_file_(dex_file), class_def_(dex_file.GetClassDef(idx)) {}
+
+ const DexFile& GetDexFile() const { return dex_file_; }
+
+ const dex::TypeIndex GetClassIndex() const { return class_def_.class_idx_; }
+
+ const uint8_t* GetData() const { return dex_file_.GetClassData(class_def_); }
+
+ const char* GetDescriptor() const { return dex_file_.GetClassDescriptor(class_def_); }
+
+ private:
+ const DexFile& dex_file_;
+ const DexFile::ClassDef& class_def_;
+};
+
+class DexMember {
+ public:
+ DexMember(const DexClass& klass, const ClassDataItemIterator& it)
+ : klass_(klass), it_(it) {
+ DCHECK_EQ(it_.IsAtMethod() ? GetMethodId().class_idx_ : GetFieldId().class_idx_,
+ klass_.GetClassIndex());
+ }
+
+ // Sets hidden bits in access flags and writes them back into the DEX in memory.
+ // Note that this will not update the cached data of ClassDataItemIterator
+ // until it iterates over this item again and therefore will fail a CHECK if
+ // it is called multiple times on the same DexMember.
+ void SetHidden(HiddenApiAccessFlags::ApiList value) {
+ const uint32_t old_flags = it_.GetRawMemberAccessFlags();
+ const uint32_t new_flags = HiddenApiAccessFlags::EncodeForDex(old_flags, value);
+ CHECK_EQ(UnsignedLeb128Size(new_flags), UnsignedLeb128Size(old_flags));
+
+ // Locate the LEB128-encoded access flags in class data.
+ // `ptr` initially points to the next ClassData item. We iterate backwards
+ // until we hit the terminating byte of the previous Leb128 value.
+ const uint8_t* ptr = it_.DataPointer();
+ if (it_.IsAtMethod()) {
+ ptr = ReverseSearchUnsignedLeb128(ptr, it_.GetMethodCodeItemOffset());
+ }
+ ptr = ReverseSearchUnsignedLeb128(ptr, old_flags);
+
+ // Overwrite the access flags.
+ UpdateUnsignedLeb128(const_cast<uint8_t*>(ptr), new_flags);
+ }
+
+ // Returns true if this member's API entry is in `list`.
+ bool IsOnApiList(const std::unordered_set<std::string>& list) const {
+ return list.find(GetApiEntry()) != list.end();
+ }
+
+ // Constructs a string with a unique signature of this class member.
+ std::string GetApiEntry() const {
+ std::stringstream ss;
+ ss << klass_.GetDescriptor() << "->";
+ if (it_.IsAtMethod()) {
+ const DexFile::MethodId& mid = GetMethodId();
+ ss << klass_.GetDexFile().GetMethodName(mid)
+ << klass_.GetDexFile().GetMethodSignature(mid).ToString();
+ } else {
+ const DexFile::FieldId& fid = GetFieldId();
+ ss << klass_.GetDexFile().GetFieldName(fid) << ":"
+ << klass_.GetDexFile().GetFieldTypeDescriptor(fid);
+ }
+ return ss.str();
+ }
+
+ private:
+ inline const DexFile::MethodId& GetMethodId() const {
+ DCHECK(it_.IsAtMethod());
+ return klass_.GetDexFile().GetMethodId(it_.GetMemberIndex());
+ }
+
+ inline const DexFile::FieldId& GetFieldId() const {
+ DCHECK(!it_.IsAtMethod());
+ return klass_.GetDexFile().GetFieldId(it_.GetMemberIndex());
+ }
+
+ static inline bool IsLeb128Terminator(const uint8_t* ptr) {
+ return *ptr <= 0x7f;
+ }
+
+ // Returns the first byte of a Leb128 value assuming that:
+ // (1) `end_ptr` points to the first byte after the Leb128 value, and
+ // (2) there is another Leb128 value before this one.
+ // The function will fail after reading 5 bytes (the longest supported Leb128
+ // encoding) to protect against situations when (2) is not satisfied.
+ // When a Leb128 value is discovered, it is decoded and CHECKed against `value`.
+ static const uint8_t* ReverseSearchUnsignedLeb128(const uint8_t* end_ptr, uint32_t expected) {
+ const uint8_t* ptr = end_ptr;
+
+ // Move one byte back, check that this is the terminating byte.
+ ptr--;
+ CHECK(IsLeb128Terminator(ptr));
+
+ // Keep moving back while the previous byte is not a terminating byte.
+ // Fail after reading five bytes in case there isn't another Leb128 value
+ // before this one.
+ while (!IsLeb128Terminator(ptr - 1)) {
+ ptr--;
+ CHECK_LE((size_t) (end_ptr - ptr), 5u);
+ }
+
+ // Check that the decoded value matches the `expected` value.
+ const uint8_t* tmp_ptr = ptr;
+ CHECK_EQ(DecodeUnsignedLeb128(&tmp_ptr), expected);
+
+ return ptr;
+ }
+
+ const DexClass& klass_;
+ const ClassDataItemIterator& it_;
+};
+
+class HiddenApi FINAL {
+ public:
+ HiddenApi() : print_hidden_api_(false) {}
+
+ void ParseArgs(int argc, char** argv) {
+ original_argc = argc;
+ original_argv = argv;
+
+ android::base::InitLogging(argv);
+
+ // Skip over the command name.
+ argv++;
+ argc--;
+
+ if (argc == 0) {
+ Usage("No arguments specified");
+ }
+
+ for (int i = 0; i < argc; ++i) {
+ const StringPiece option(argv[i]);
+ const bool log_options = false;
+ if (log_options) {
+ LOG(INFO) << "hiddenapi: option[" << i << "]=" << argv[i];
+ }
+ if (option == "--print-hidden-api") {
+ print_hidden_api_ = true;
+ } else if (option.starts_with("--dex=")) {
+ dex_paths_.push_back(option.substr(strlen("--dex=")).ToString());
+ } else if (option.starts_with("--light-greylist=")) {
+ light_greylist_path_ = option.substr(strlen("--light-greylist=")).ToString();
+ } else if (option.starts_with("--dark-greylist=")) {
+ dark_greylist_path_ = option.substr(strlen("--dark-greylist=")).ToString();
+ } else if (option.starts_with("--blacklist=")) {
+ blacklist_path_ = option.substr(strlen("--blacklist=")).ToString();
+ } else {
+ Usage("Unknown argument '%s'", option.data());
+ }
+ }
+ }
+
+ bool ProcessDexFiles() {
+ if (dex_paths_.empty()) {
+ Usage("No DEX files specified");
+ }
+
+ if (light_greylist_path_.empty() && dark_greylist_path_.empty() && blacklist_path_.empty()) {
+ Usage("No API file specified");
+ }
+
+ if (!light_greylist_path_.empty() && !OpenApiFile(light_greylist_path_, &light_greylist_)) {
+ return false;
+ }
+
+ if (!dark_greylist_path_.empty() && !OpenApiFile(dark_greylist_path_, &dark_greylist_)) {
+ return false;
+ }
+
+ if (!blacklist_path_.empty() && !OpenApiFile(blacklist_path_, &blacklist_)) {
+ return false;
+ }
+
+ MemMap::Init();
+ if (!OpenDexFiles()) {
+ return false;
+ }
+
+ DCHECK(!dex_files_.empty());
+ for (auto& dex_file : dex_files_) {
+ CategorizeAllClasses(*dex_file.get());
+ }
+
+ UpdateDexChecksums();
+ return true;
+ }
+
+ private:
+ bool OpenApiFile(const std::string& path, std::unordered_set<std::string>* list) {
+ DCHECK(list->empty());
+ DCHECK(!path.empty());
+
+ std::ifstream api_file(path, std::ifstream::in);
+ if (api_file.fail()) {
+ LOG(ERROR) << "Unable to open file '" << path << "' " << strerror(errno);
+ return false;
+ }
+
+ for (std::string line; std::getline(api_file, line);) {
+ list->insert(line);
+ }
+
+ api_file.close();
+ return true;
+ }
+
+ bool OpenDexFiles() {
+ ArtDexFileLoader dex_loader;
+ DCHECK(dex_files_.empty());
+
+ for (const std::string& filename : dex_paths_) {
+ std::string error_msg;
+
+ File fd(filename.c_str(), O_RDWR, /* check_usage */ false);
+ if (fd.Fd() == -1) {
+ LOG(ERROR) << "Unable to open file '" << filename << "': " << strerror(errno);
+ return false;
+ }
+
+ // Memory-map the dex file with MAP_SHARED flag so that changes in memory
+ // propagate to the underlying file. We run dex file verification as if
+ // the dex file was not in boot class path to check basic assumptions,
+ // such as that at most one of public/private/protected flag is set.
+ // We do those checks here and skip them when loading the processed file
+ // into boot class path.
+ std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(fd.Release(),
+ /* location */ filename,
+ /* verify */ true,
+ /* verify_checksum */ true,
+ /* mmap_shared */ true,
+ &error_msg));
+ if (dex_file.get() == nullptr) {
+ LOG(ERROR) << "Open failed for '" << filename << "' " << error_msg;
+ return false;
+ }
+
+ if (!dex_file->IsStandardDexFile()) {
+ LOG(ERROR) << "Expected a standard dex file '" << filename << "'";
+ return false;
+ }
+
+ // Change the protection of the memory mapping to read-write.
+ if (!dex_file->EnableWrite()) {
+ LOG(ERROR) << "Failed to enable write permission for '" << filename << "'";
+ return false;
+ }
+
+ dex_files_.push_back(std::move(dex_file));
+ }
+ return true;
+ }
+
+ void CategorizeAllClasses(const DexFile& dex_file) {
+ for (uint32_t class_idx = 0; class_idx < dex_file.NumClassDefs(); ++class_idx) {
+ DexClass klass(dex_file, class_idx);
+ const uint8_t* klass_data = klass.GetData();
+ if (klass_data == nullptr) {
+ continue;
+ }
+
+ for (ClassDataItemIterator it(klass.GetDexFile(), klass_data); it.HasNext(); it.Next()) {
+ DexMember member(klass, it);
+
+ // Categorize member and overwrite its access flags.
+ // Note that if a member appears on multiple API lists, it will be categorized
+ // as the strictest.
+ bool is_hidden = true;
+ if (member.IsOnApiList(blacklist_)) {
+ member.SetHidden(HiddenApiAccessFlags::kBlacklist);
+ } else if (member.IsOnApiList(dark_greylist_)) {
+ member.SetHidden(HiddenApiAccessFlags::kDarkGreylist);
+ } else if (member.IsOnApiList(light_greylist_)) {
+ member.SetHidden(HiddenApiAccessFlags::kLightGreylist);
+ } else {
+ member.SetHidden(HiddenApiAccessFlags::kWhitelist);
+ is_hidden = false;
+ }
+
+ if (print_hidden_api_ && is_hidden) {
+ std::cout << member.GetApiEntry() << std::endl;
+ }
+ }
+ }
+ }
+
+ void UpdateDexChecksums() {
+ for (auto& dex_file : dex_files_) {
+ // Obtain a writeable pointer to the dex header.
+ DexFile::Header* header = const_cast<DexFile::Header*>(&dex_file->GetHeader());
+ // Recalculate checksum and overwrite the value in the header.
+ header->checksum_ = dex_file->CalculateChecksum();
+ }
+ }
+
+ // Print signatures of APIs which have been grey-/blacklisted.
+ bool print_hidden_api_;
+
+ // Paths to DEX files which should be processed.
+ std::vector<std::string> dex_paths_;
+
+ // Paths to text files which contain the lists of API members.
+ std::string light_greylist_path_;
+ std::string dark_greylist_path_;
+ std::string blacklist_path_;
+
+ // Opened DEX files. Note that these are opened as `const` but eventually will be written into.
+ std::vector<std::unique_ptr<const DexFile>> dex_files_;
+
+ // Signatures of DEX members loaded from `light_greylist_path_`, `dark_greylist_path_`,
+ // `blacklist_path_`.
+ std::unordered_set<std::string> light_greylist_;
+ std::unordered_set<std::string> dark_greylist_;
+ std::unordered_set<std::string> blacklist_;
+};
+
+} // namespace art
+
+int main(int argc, char** argv) {
+ art::HiddenApi hiddenapi;
+
+ // Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
+ hiddenapi.ParseArgs(argc, argv);
+ return hiddenapi.ProcessDexFiles() ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/tools/hiddenapi/hiddenapi_test.cc b/tools/hiddenapi/hiddenapi_test.cc
new file mode 100644
index 0000000000..af1439520f
--- /dev/null
+++ b/tools/hiddenapi/hiddenapi_test.cc
@@ -0,0 +1,601 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <fstream>
+
+#include "base/unix_file/fd_file.h"
+#include "common_runtime_test.h"
+#include "dex/art_dex_file_loader.h"
+#include "dex/dex_file-inl.h"
+#include "exec_utils.h"
+#include "zip_archive.h"
+
+namespace art {
+
+class HiddenApiTest : public CommonRuntimeTest {
+ protected:
+ std::string GetHiddenApiCmd() {
+ std::string file_path = GetTestAndroidRoot();
+ file_path += "/bin/hiddenapi";
+ if (kIsDebugBuild) {
+ file_path += "d";
+ }
+ if (!OS::FileExists(file_path.c_str())) {
+ LOG(FATAL) << "Could not find binary " << file_path;
+ UNREACHABLE();
+ }
+ return file_path;
+ }
+
+ std::unique_ptr<const DexFile> RunHiddenApi(const ScratchFile& light_greylist,
+ const ScratchFile& dark_greylist,
+ const ScratchFile& blacklist,
+ const std::vector<std::string>& extra_args,
+ ScratchFile* out_dex) {
+ std::string error;
+ std::unique_ptr<ZipArchive> jar(
+ ZipArchive::Open(GetTestDexFileName("HiddenApi").c_str(), &error));
+ if (jar == nullptr) {
+ LOG(FATAL) << "Could not open test file " << GetTestDexFileName("HiddenApi") << ": " << error;
+ UNREACHABLE();
+ }
+ std::unique_ptr<ZipEntry> jar_classes_dex(jar->Find("classes.dex", &error));
+ if (jar_classes_dex == nullptr) {
+ LOG(FATAL) << "Could not find classes.dex in test file " << GetTestDexFileName("HiddenApi")
+ << ": " << error;
+ UNREACHABLE();
+ } else if (!jar_classes_dex->ExtractToFile(*out_dex->GetFile(), &error)) {
+ LOG(FATAL) << "Could not extract classes.dex from test file "
+ << GetTestDexFileName("HiddenApi") << ": " << error;
+ UNREACHABLE();
+ }
+
+ std::vector<std::string> argv_str;
+ argv_str.push_back(GetHiddenApiCmd());
+ argv_str.insert(argv_str.end(), extra_args.begin(), extra_args.end());
+ argv_str.push_back("--dex=" + out_dex->GetFilename());
+ argv_str.push_back("--light-greylist=" + light_greylist.GetFilename());
+ argv_str.push_back("--dark-greylist=" + dark_greylist.GetFilename());
+ argv_str.push_back("--blacklist=" + blacklist.GetFilename());
+ int return_code = ExecAndReturnCode(argv_str, &error);
+ if (return_code != 0) {
+ LOG(FATAL) << "HiddenApi binary exited with unexpected return code " << return_code;
+ }
+ return OpenDex(*out_dex);
+ }
+
+ std::unique_ptr<const DexFile> OpenDex(const ScratchFile& file) {
+ ArtDexFileLoader dex_loader;
+ std::string error_msg;
+
+ File fd(file.GetFilename(), O_RDONLY, /* check_usage */ false);
+ if (fd.Fd() == -1) {
+ LOG(FATAL) << "Unable to open file '" << file.GetFilename() << "': " << strerror(errno);
+ UNREACHABLE();
+ }
+
+ std::unique_ptr<const DexFile> dex_file(dex_loader.OpenDex(
+ fd.Release(), /* location */ file.GetFilename(), /* verify */ false,
+ /* verify_checksum */ true, /* mmap_shared */ false, &error_msg));
+ if (dex_file.get() == nullptr) {
+ LOG(FATAL) << "Open failed for '" << file.GetFilename() << "' " << error_msg;
+ UNREACHABLE();
+ } else if (!dex_file->IsStandardDexFile()) {
+ LOG(FATAL) << "Expected a standard dex file '" << file.GetFilename() << "'";
+ UNREACHABLE();
+ }
+
+ return dex_file;
+ }
+
+ std::ofstream OpenStream(const ScratchFile& file) {
+ std::ofstream ofs(file.GetFilename(), std::ofstream::out);
+ if (ofs.fail()) {
+ LOG(FATAL) << "Open failed for '" << file.GetFilename() << "' " << strerror(errno);
+ UNREACHABLE();
+ }
+ return ofs;
+ }
+
+ const DexFile::ClassDef& FindClass(const char* desc, const DexFile& dex_file) {
+ for (uint32_t i = 0; i < dex_file.NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file.GetClassDef(i);
+ if (strcmp(desc, dex_file.GetClassDescriptor(class_def)) == 0) {
+ return class_def;
+ }
+ }
+ LOG(FATAL) << "Could not find class " << desc;
+ UNREACHABLE();
+ }
+
+ HiddenApiAccessFlags::ApiList GetFieldHiddenFlags(const char* name,
+ uint32_t expected_visibility,
+ const DexFile::ClassDef& class_def,
+ const DexFile& dex_file) {
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ LOG(FATAL) << "Class " << dex_file.GetClassDescriptor(class_def) << " has no data";
+ UNREACHABLE();
+ }
+
+ for (ClassDataItemIterator it(dex_file, class_data); it.HasNext(); it.Next()) {
+ if (it.IsAtMethod()) {
+ break;
+ }
+ const DexFile::FieldId& fid = dex_file.GetFieldId(it.GetMemberIndex());
+ if (strcmp(name, dex_file.GetFieldName(fid)) == 0) {
+ uint32_t actual_visibility = it.GetFieldAccessFlags() & kAccVisibilityFlags;
+ if (actual_visibility != expected_visibility) {
+ LOG(FATAL) << "Field " << name << " in class " << dex_file.GetClassDescriptor(class_def)
+ << " does not have the expected visibility flags (" << expected_visibility
+ << " != " << actual_visibility << ")";
+ UNREACHABLE();
+ }
+ return it.DecodeHiddenAccessFlags();
+ }
+ }
+
+ LOG(FATAL) << "Could not find field " << name << " in class "
+ << dex_file.GetClassDescriptor(class_def);
+ UNREACHABLE();
+ }
+
+ HiddenApiAccessFlags::ApiList GetMethodHiddenFlags(const char* name,
+ uint32_t expected_visibility,
+ bool expected_native,
+ const DexFile::ClassDef& class_def,
+ const DexFile& dex_file) {
+ const uint8_t* class_data = dex_file.GetClassData(class_def);
+ if (class_data == nullptr) {
+ LOG(FATAL) << "Class " << dex_file.GetClassDescriptor(class_def) << " has no data";
+ UNREACHABLE();
+ }
+
+ for (ClassDataItemIterator it(dex_file, class_data); it.HasNext(); it.Next()) {
+ if (!it.IsAtMethod()) {
+ continue;
+ }
+ const DexFile::MethodId& mid = dex_file.GetMethodId(it.GetMemberIndex());
+ if (strcmp(name, dex_file.GetMethodName(mid)) == 0) {
+ if (expected_native != it.MemberIsNative()) {
+ LOG(FATAL) << "Expected native=" << expected_native << " for method " << name
+ << " in class " << dex_file.GetClassDescriptor(class_def);
+ UNREACHABLE();
+ }
+ uint32_t actual_visibility = it.GetMethodAccessFlags() & kAccVisibilityFlags;
+ if (actual_visibility != expected_visibility) {
+ LOG(FATAL) << "Method " << name << " in class " << dex_file.GetClassDescriptor(class_def)
+ << " does not have the expected visibility flags (" << expected_visibility
+ << " != " << actual_visibility << ")";
+ UNREACHABLE();
+ }
+ return it.DecodeHiddenAccessFlags();
+ }
+ }
+
+ LOG(FATAL) << "Could not find method " << name << " in class "
+ << dex_file.GetClassDescriptor(class_def);
+ UNREACHABLE();
+ }
+
+ HiddenApiAccessFlags::ApiList GetIFieldHiddenFlags(const DexFile& dex_file) {
+ return GetFieldHiddenFlags("ifield", kAccPublic, FindClass("LMain;", dex_file), dex_file);
+ }
+
+ HiddenApiAccessFlags::ApiList GetSFieldHiddenFlags(const DexFile& dex_file) {
+ return GetFieldHiddenFlags("sfield", kAccPrivate, FindClass("LMain;", dex_file), dex_file);
+ }
+
+ HiddenApiAccessFlags::ApiList GetIMethodHiddenFlags(const DexFile& dex_file) {
+ return GetMethodHiddenFlags(
+ "imethod", 0, /* native */ false, FindClass("LMain;", dex_file), dex_file);
+ }
+
+ HiddenApiAccessFlags::ApiList GetSMethodHiddenFlags(const DexFile& dex_file) {
+ return GetMethodHiddenFlags(
+ "smethod", kAccPublic, /* native */ false, FindClass("LMain;", dex_file), dex_file);
+ }
+
+ HiddenApiAccessFlags::ApiList GetINMethodHiddenFlags(const DexFile& dex_file) {
+ return GetMethodHiddenFlags(
+ "inmethod", kAccPublic, /* native */ true, FindClass("LMain;", dex_file), dex_file);
+ }
+
+ HiddenApiAccessFlags::ApiList GetSNMethodHiddenFlags(const DexFile& dex_file) {
+ return GetMethodHiddenFlags(
+ "snmethod", kAccProtected, /* native */ true, FindClass("LMain;", dex_file), dex_file);
+ }
+};
+
+TEST_F(HiddenApiTest, InstanceFieldNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:I" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->ifield:I" << std::endl;
+ OpenStream(blacklist) << "LMain;->ifield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:LBadType1;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:LBadType2;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticFieldTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->sfield:Ljava/lang/Object;" << std::endl;
+ OpenStream(blacklist) << "LMain;->sfield:LBadType3;" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(J)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceMethodTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->imethod(J)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->imethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetIMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticMethodTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->smethod(Ljava/lang/Object;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->smethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(C)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceNativeMethodTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->inmethod(C)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->inmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetINMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodNoMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kWhitelist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodLightGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kLightGreylist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodDarkGreylistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodBlacklistMatch) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch1) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(LBadType1;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch2) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(LBadType2;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kBlacklist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, StaticNativeMethodTwoListsMatch3) {
+ ScratchFile dex, light_greylist, dark_greylist, blacklist;
+ OpenStream(light_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(dark_greylist) << "LMain;->snmethod(Ljava/lang/Integer;)V" << std::endl;
+ OpenStream(blacklist) << "LMain;->snmethod(LBadType3;)V" << std::endl;
+ auto dex_file = RunHiddenApi(light_greylist, dark_greylist, blacklist, {}, &dex);
+ ASSERT_EQ(HiddenApiAccessFlags::kDarkGreylist, GetSNMethodHiddenFlags(*dex_file));
+}
+
+} // namespace art
diff --git a/tools/jfuzz/run_dex_fuzz_test.py b/tools/jfuzz/run_dex_fuzz_test.py
index a25ef3fca5..203e03d678 100755
--- a/tools/jfuzz/run_dex_fuzz_test.py
+++ b/tools/jfuzz/run_dex_fuzz_test.py
@@ -106,14 +106,14 @@ class DexFuzzTester(object):
self.RunDexFuzz()
def CompileOnHost(self):
- """Compiles Test.java into classes.dex using either javac/dx or jack.
+ """Compiles Test.java into classes.dex using either javac/dx, d8, or jack.
Raises:
FatalError: error when compilation fails
"""
if self._dexer == 'dx' or self._dexer == 'd8':
dbg = '-g' if self._debug_info else '-g:none'
- if RunCommand(['javac', dbg, 'Test.java'],
+ if RunCommand(['javac', '--release=8', dbg, 'Test.java'],
out=None, err='jerr.txt', timeout=30) != RetCode.SUCCESS:
print('Unexpected error while running javac')
raise FatalError('Unexpected error while running javac')
diff --git a/tools/jfuzz/run_jfuzz_test.py b/tools/jfuzz/run_jfuzz_test.py
index 34180d993f..bddce32ad5 100755
--- a/tools/jfuzz/run_jfuzz_test.py
+++ b/tools/jfuzz/run_jfuzz_test.py
@@ -133,7 +133,7 @@ class TestRunnerWithHostCompilation(TestRunner):
def CompileOnHost(self):
if self._dexer == 'dx' or self._dexer == 'd8':
dbg = '-g' if self._debug_info else '-g:none'
- if RunCommand(['javac', dbg, 'Test.java'],
+ if RunCommand(['javac', '--release=8', dbg, 'Test.java'],
out=None, err=None, timeout=30) == RetCode.SUCCESS:
dx = 'dx' if self._dexer == 'dx' else 'd8-compat-dx'
retc = RunCommand([dx, '--dex', '--output=classes.dex'] + glob('*.class'),
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 6a846aee17..2cf614d795 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -26,8 +26,6 @@ if [ -z "$ANDROID_HOST_OUT" ] ; then
ANDROID_HOST_OUT=${OUT_DIR-$ANDROID_BUILD_TOP/out}/host/linux-x86
fi
-using_jack=$(get_build_var ANDROID_COMPILE_WITH_JACK)
-
java_lib_location="${ANDROID_HOST_OUT}/../common/obj/JAVA_LIBRARIES"
make_target_name="apache-harmony-jdwp-tests-hostdex"
@@ -184,7 +182,6 @@ if [[ $has_gdb = "yes" ]]; then
fi
if [[ $mode == "ri" ]]; then
- using_jack="false"
if [[ "x$with_jdwp_path" != "x" ]]; then
vm_args="${vm_args} --vm-arg -Djpda.settings.debuggeeAgentArgument=-agentpath:${agent_wrapper}"
vm_args="${vm_args} --vm-arg -Djpda.settings.debuggeeAgentName=$with_jdwp_path"
@@ -225,10 +222,7 @@ function jlib_name {
local str="classes"
local suffix="jar"
if [[ $mode == "ri" ]]; then
- suffix="jar"
str="javalib"
- elif [[ $using_jack == "true" ]]; then
- suffix="jack"
fi
echo "$path/$str.$suffix"
}
@@ -290,10 +284,8 @@ if [[ $verbose == "yes" ]]; then
art_debugee="$art_debugee -verbose:jdwp"
fi
-if [[ $using_jack == "true" ]]; then
- toolchain_args="--toolchain jack --language JN --jack-arg -g"
-elif [[ $mode != "ri" ]]; then
- toolchain_args="--toolchain dx --language CUR"
+if [[ $mode != "ri" ]]; then
+ toolchain_args="--toolchain d8 --language CUR"
else
toolchain_args="--toolchain javac --language CUR"
fi
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 673eea8cd9..739646a754 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -28,16 +28,10 @@ else
JAVA_LIBRARIES=${ANDROID_PRODUCT_OUT}/../../common/obj/JAVA_LIBRARIES
fi
-using_jack=$(get_build_var ANDROID_COMPILE_WITH_JACK)
-
function classes_jar_path {
local var="$1"
local suffix="jar"
- if [[ $using_jack == "true" ]]; then
- suffix="jack"
- fi
-
echo "${JAVA_LIBRARIES}/${var}_intermediates/classes.${suffix}"
}
@@ -145,12 +139,8 @@ done
# the default timeout.
vogar_args="$vogar_args --timeout 480"
-# Switch between using jack or javac+desugar+dx
-if [[ $using_jack == "true" ]]; then
- vogar_args="$vogar_args --toolchain jack --language JO"
-else
- vogar_args="$vogar_args --toolchain dx --language CUR"
-fi
+# Set the toolchain to use.
+vogar_args="$vogar_args --toolchain d8 --language CUR"
# JIT settings.
if $use_jit; then