summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build/Android.bp13
-rw-r--r--build/Android.oat.mk1
-rw-r--r--cmdline/detail/cmdline_parse_argument_detail.h2
-rw-r--r--compiler/common_compiler_test.cc7
-rw-r--r--compiler/compiled_method.cc12
-rw-r--r--compiler/compiled_method.h24
-rw-r--r--compiler/debug/method_debug_info.h2
-rw-r--r--compiler/dex/dex_to_dex_compiler.cc3
-rw-r--r--compiler/driver/compiled_method_storage_test.cc6
-rw-r--r--compiler/driver/compiler_driver.cc23
-rw-r--r--compiler/driver/compiler_driver.h8
-rw-r--r--compiler/exception_test.cc16
-rw-r--r--compiler/linker/elf_builder.h52
-rw-r--r--compiler/linker/linker_patch.h28
-rw-r--r--compiler/linker/linker_patch_test.cc24
-rw-r--r--compiler/optimizing/code_generator_arm64.cc64
-rw-r--r--compiler/optimizing/code_generator_arm64.h1
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc45
-rw-r--r--compiler/optimizing/code_generator_mips.cc78
-rw-r--r--compiler/optimizing/code_generator_mips64.cc54
-rw-r--r--compiler/optimizing/code_generator_x86.cc42
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc42
-rw-r--r--compiler/optimizing/instruction_builder.cc274
-rw-r--r--compiler/optimizing/instruction_builder.h15
-rw-r--r--compiler/optimizing/instruction_simplifier.cc2
-rw-r--r--compiler/optimizing/intrinsics_arm_vixl.cc4
-rw-r--r--compiler/optimizing/loop_analysis.cc10
-rw-r--r--compiler/optimizing/loop_analysis.h7
-rw-r--r--compiler/optimizing/loop_optimization.cc54
-rw-r--r--compiler/optimizing/loop_optimization.h6
-rw-r--r--compiler/optimizing/nodes.cc16
-rw-r--r--compiler/optimizing/nodes.h48
-rw-r--r--compiler/optimizing/optimizing_cfi_test.cc3
-rw-r--r--compiler/optimizing/optimizing_compiler.cc15
-rw-r--r--compiler/optimizing/pc_relative_fixups_mips.cc4
-rw-r--r--compiler/optimizing/pc_relative_fixups_x86.cc3
-rw-r--r--compiler/optimizing/reference_type_propagation.cc4
-rw-r--r--compiler/optimizing/sharpening.cc26
-rw-r--r--compiler/optimizing/stack_map_stream.cc7
-rw-r--r--compiler/utils/arm/assembler_arm_vixl.cc3
-rw-r--r--dex2oat/dex2oat.cc61
-rw-r--r--dex2oat/dex2oat_image_test.cc47
-rw-r--r--dex2oat/dex2oat_options.cc2
-rw-r--r--dex2oat/dex2oat_options.def1
-rw-r--r--dex2oat/dex2oat_test.cc133
-rw-r--r--dex2oat/linker/arm64/relative_patcher_arm64.cc1
-rw-r--r--dex2oat/linker/arm64/relative_patcher_arm64_test.cc19
-rw-r--r--dex2oat/linker/elf_writer.h1
-rw-r--r--dex2oat/linker/elf_writer_quick.cc16
-rw-r--r--dex2oat/linker/image_writer.cc726
-rw-r--r--dex2oat/linker/image_writer.h103
-rw-r--r--dex2oat/linker/oat_writer.cc59
-rw-r--r--dex2oat/linker/oat_writer.h3
-rw-r--r--dex2oat/linker/oat_writer_test.cc5
-rw-r--r--dex2oat/linker/relative_patcher_test.h3
-rw-r--r--dexdump/dexdump.cc175
-rw-r--r--dexdump/dexdump_cfg.cc49
-rw-r--r--dexdump/dexdump_cfg.h6
-rw-r--r--dexlist/dexlist.cc33
-rw-r--r--imgdiag/imgdiag.cc33
-rw-r--r--libartbase/base/bit_table.h2
-rw-r--r--libartbase/base/casts.h23
-rw-r--r--libartbase/base/common_art_test.cc80
-rw-r--r--libartbase/base/common_art_test.h26
-rw-r--r--libartbase/base/file_utils.cc1
-rw-r--r--libartbase/base/safe_map.h2
-rw-r--r--libartbase/base/utils.cc1
-rw-r--r--libartbase/base/utils.h1
-rw-r--r--libdexfile/dex/art_dex_file_loader.cc22
-rw-r--r--libdexfile/dex/art_dex_file_loader.h2
-rw-r--r--libdexfile/dex/art_dex_file_loader_test.cc27
-rw-r--r--libdexfile/dex/class_accessor.h24
-rw-r--r--libdexfile/dex/descriptors_names.cc1
-rw-r--r--libdexfile/dex/dex_file_loader.cc38
-rw-r--r--libdexfile/dex/dex_file_loader.h24
-rw-r--r--libdexfile/dex/dex_file_loader_test.cc19
-rw-r--r--libdexfile/dex/dex_file_tracking_registrar.cc103
-rw-r--r--libdexfile/dex/dex_file_verifier_test.cc2
-rw-r--r--libdexfile/dex/utf.cc1
-rw-r--r--oatdump/Android.mk1
-rw-r--r--oatdump/oatdump.cc58
-rw-r--r--oatdump/oatdump_app_test.cc24
-rw-r--r--oatdump/oatdump_image_test.cc21
-rw-r--r--oatdump/oatdump_test.cc53
-rw-r--r--oatdump/oatdump_test.h240
-rw-r--r--openjdkjvm/OpenjdkJvm.cc5
-rw-r--r--openjdkjvmti/ti_extension.cc20
-rw-r--r--openjdkjvmti/ti_method.cc2
-rw-r--r--openjdkjvmti/ti_monitor.cc30
-rw-r--r--openjdkjvmti/ti_monitor.h2
-rw-r--r--openjdkjvmti/ti_redefine.cc151
-rw-r--r--openjdkjvmti/ti_redefine.h2
-rw-r--r--openjdkjvmti/ti_thread.cc154
-rw-r--r--patchoat/patchoat.cc146
-rw-r--r--runtime/Android.bp1
-rw-r--r--runtime/arch/instruction_set_features_test.cc3
-rw-r--r--runtime/art_method-inl.h55
-rw-r--r--runtime/art_method.cc30
-rw-r--r--runtime/art_method.h67
-rw-r--r--runtime/backtrace_helper.cc98
-rw-r--r--runtime/backtrace_helper.h23
-rw-r--r--runtime/base/mutex.cc6
-rw-r--r--runtime/base/mutex.h11
-rw-r--r--runtime/cha.cc13
-rw-r--r--runtime/class_linker.cc4
-rw-r--r--runtime/common_runtime_test.h18
-rw-r--r--runtime/debugger.cc2
-rw-r--r--runtime/dex/dex_file_annotations.cc2
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h6
-rw-r--r--runtime/entrypoints/entrypoint_utils.cc2
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc4
-rw-r--r--runtime/fault_handler.cc4
-rw-r--r--runtime/gc/space/image_space.cc19
-rw-r--r--runtime/hprof/hprof.cc2
-rw-r--r--runtime/image.cc7
-rw-r--r--runtime/image.h6
-rw-r--r--runtime/intern_table.h4
-rw-r--r--runtime/jit/jit_code_cache.cc21
-rw-r--r--runtime/jit/jit_code_cache.h6
-rw-r--r--runtime/jni/check_jni.cc2
-rw-r--r--runtime/jni/java_vm_ext.cc2
-rw-r--r--runtime/jni/jni_internal.cc2
-rw-r--r--runtime/mirror/class-inl.h26
-rw-r--r--runtime/mirror/dex_cache-inl.h7
-rw-r--r--runtime/mirror/dex_cache.h6
-rw-r--r--runtime/mirror/executable.h7
-rw-r--r--runtime/mirror/method.cc33
-rw-r--r--runtime/mirror/method.h7
-rw-r--r--runtime/mirror/object.h10
-rw-r--r--runtime/mirror/var_handle.h5
-rw-r--r--runtime/native/java_lang_Class.cc8
-rw-r--r--runtime/oat.h4
-rw-r--r--runtime/oat_quick_method_header.cc15
-rw-r--r--runtime/oat_quick_method_header.h23
-rw-r--r--runtime/stack.cc6
-rw-r--r--runtime/stack_map.cc11
-rw-r--r--runtime/stack_map.h19
-rw-r--r--runtime/suspend_reason.h2
-rw-r--r--runtime/thread.cc60
-rw-r--r--runtime/thread.h48
-rw-r--r--runtime/thread_list.cc2
-rw-r--r--runtime/thread_pool.cc5
-rw-r--r--runtime/vdex_file.h4
-rw-r--r--runtime/verifier/method_verifier.cc49
-rw-r--r--test/163-app-image-methods/src/Main.java2
-rw-r--r--test/1951-monitor-enter-no-suspend/expected.txt1
-rw-r--r--test/1951-monitor-enter-no-suspend/info.txt1
-rw-r--r--test/1951-monitor-enter-no-suspend/raw_monitor.cc104
-rwxr-xr-xtest/1951-monitor-enter-no-suspend/run17
-rw-r--r--test/1951-monitor-enter-no-suspend/src/Main.java21
-rw-r--r--test/1951-monitor-enter-no-suspend/src/art/Main.java32
-rw-r--r--test/1951-monitor-enter-no-suspend/src/art/Suspension.java30
-rw-r--r--test/1951-monitor-enter-no-suspend/src/art/Test1951.java65
-rw-r--r--test/411-optimizing-arith/src/RemTest.java48
-rw-r--r--test/442-checker-constant-folding/src/Main.java54
-rw-r--r--test/478-checker-clinit-check-pruning/expected.txt8
-rw-r--r--test/478-checker-clinit-check-pruning/src/Main.java227
-rw-r--r--test/497-inlining-and-class-loader/clear_dex_cache.cc4
-rw-r--r--test/527-checker-array-access-split/src/Main.java8
-rw-r--r--test/530-checker-peel-unroll/src/Main.java40
-rw-r--r--test/551-checker-clinit/src/Main.java20
-rw-r--r--test/552-checker-sharpening/src/Main.java9
-rw-r--r--test/706-checker-scheduler/src/Main.java2
-rw-r--r--test/800-smali/expected.txt2
-rw-r--r--test/800-smali/jni.cc41
-rw-r--r--test/800-smali/smali/ConstClassAliasing.smali12
-rw-r--r--test/800-smali/src/Main.java22
-rw-r--r--test/911-get-stack-trace/expected.txt76
-rw-r--r--test/983-source-transform-verify/source_transform_art.cc19
-rw-r--r--test/Android.bp2
-rw-r--r--test/common/runtime_state.cc6
-rw-r--r--test/knownfailures.json35
-rw-r--r--tools/ahat/README.txt19
-rw-r--r--tools/ahat/etc/ahat.mf2
-rw-r--r--tools/ahat/etc/ahat_api.txt23
-rw-r--r--tools/ahat/src/main/com/android/ahat/Main.java31
-rw-r--r--tools/ahat/src/main/com/android/ahat/OverviewHandler.java8
-rw-r--r--tools/ahat/src/main/com/android/ahat/dominators/Dominators.java476
-rw-r--r--tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java354
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java26
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java35
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java8
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Parser.java28
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Reachability.java11
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/Site.java19
-rw-r--r--tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java3
-rw-r--r--tools/ahat/src/test/com/android/ahat/DiffTest.java3
-rw-r--r--tools/ahat/src/test/com/android/ahat/DominatorsTest.java350
-rw-r--r--tools/ahat/src/test/com/android/ahat/InstanceTest.java32
-rw-r--r--tools/ahat/src/test/com/android/ahat/OverviewHandlerTest.java4
-rw-r--r--tools/ahat/src/test/com/android/ahat/RiTest.java4
-rw-r--r--tools/ahat/src/test/com/android/ahat/SiteTest.java50
-rw-r--r--tools/ahat/src/test/com/android/ahat/TestDump.java28
-rw-r--r--tools/class2greylist/Android.bp33
-rw-r--r--tools/class2greylist/src/class2greylist.mf1
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java118
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java99
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/JarReader.java65
-rw-r--r--tools/class2greylist/src/com/android/class2greylist/Status.java58
-rw-r--r--tools/class2greylist/test/Android.mk32
-rw-r--r--tools/class2greylist/test/AndroidTest.xml21
-rw-r--r--tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java202
-rw-r--r--tools/class2greylist/test/src/com/android/javac/Javac.java103
-rw-r--r--tools/dexanalyze/dexanalyze.cc20
-rw-r--r--tools/dexanalyze/dexanalyze_bytecode.cc32
-rw-r--r--tools/dexanalyze/dexanalyze_bytecode.h4
-rw-r--r--tools/dexanalyze/dexanalyze_experiments.cc116
-rw-r--r--tools/dexanalyze/dexanalyze_experiments.h13
-rw-r--r--tools/dexfuzz/Android.mk1
-rwxr-xr-xtools/run-jdwp-tests.sh10
-rwxr-xr-xtools/run-libjdwp-tests.sh37
-rw-r--r--tools/veridex/veridex.cc6
212 files changed, 5258 insertions, 2744 deletions
diff --git a/build/Android.bp b/build/Android.bp
index b7d2cbc070..62f71ff275 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -20,9 +20,17 @@ bootstrap_go_package {
art_clang_tidy_errors = [
// Protect scoped things like MutexLock.
"bugprone-unused-raii",
+ "performance-for-range-copy",
+ "performance-unnecessary-copy-initialization",
+ "performance-unnecessary-value-param",
+ "misc-unused-using-decls",
]
// Should be: strings.Join(art_clang_tidy_errors, ",").
art_clang_tidy_errors_str = "bugprone-unused-raii"
+ + ",performance-for-range-copy"
+ + ",performance-unnecessary-copy-initialization"
+ + ",performance-unnecessary-value-param"
+ + ",misc-unused-using-decls"
art_clang_tidy_disabled = [
"-google-default-arguments",
@@ -161,6 +169,11 @@ art_global_defaults {
// void foo() { CHECK(kIsFooEnabled); /* do foo... */ }
// not being marked noreturn if kIsFooEnabled is false.
"-extra-arg=-Wno-missing-noreturn",
+ // Because tidy doesn't like our flow checks for compile-time configuration and thinks that
+ // the following code is dead (it is, but not for all configurations), disable unreachable
+ // code detection in Clang for tidy builds. It is still on for regular build steps, so we
+ // will still get the "real" errors.
+ "-extra-arg=-Wno-unreachable-code",
// Use art_clang_tidy_errors for build errors.
"-warnings-as-errors=" + art_clang_tidy_errors_str,
],
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 08b1e10268..c4ae593a5b 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -135,6 +135,7 @@ $(eval $(call create-core-oat-host-rule-combination,optimizing,true))
$(eval $(call create-core-oat-host-rule-combination,interpreter,true))
$(eval $(call create-core-oat-host-rule-combination,interp-ac,true))
+.PHONY: test-art-host-dex2oat-host
test-art-host-dex2oat-host: $(HOST_CORE_IMG_OUTS)
# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
diff --git a/cmdline/detail/cmdline_parse_argument_detail.h b/cmdline/detail/cmdline_parse_argument_detail.h
index 2fee27c067..65c11146aa 100644
--- a/cmdline/detail/cmdline_parse_argument_detail.h
+++ b/cmdline/detail/cmdline_parse_argument_detail.h
@@ -443,7 +443,7 @@ struct CmdlineParseArgument : CmdlineParseArgumentAny {
assert(!argument_info_.has_range_);
- return result;
+ return std::move(result);
}
CmdlineParseResult<TArg> result = type_parser.Parse(argument);
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index e8e1d408ef..4824763288 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -70,12 +70,7 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) {
const uint32_t method_info_offset = method_info.empty() ? 0u
: vmap_table_offset + method_info.size();
- OatQuickMethodHeader method_header(vmap_table_offset,
- method_info_offset,
- compiled_method->GetFrameSizeInBytes(),
- compiled_method->GetCoreSpillMask(),
- compiled_method->GetFpSpillMask(),
- code_size);
+ OatQuickMethodHeader method_header(vmap_table_offset, method_info_offset, code_size);
header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index e41371855d..5b93316b87 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -102,17 +102,11 @@ const void* CompiledCode::CodePointer(const void* code_pointer, InstructionSet i
CompiledMethod::CompiledMethod(CompilerDriver* driver,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
const ArrayRef<const uint8_t>& method_info,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const linker::LinkerPatch>& patches)
: CompiledCode(driver, instruction_set, quick_code),
- frame_size_in_bytes_(frame_size_in_bytes),
- core_spill_mask_(core_spill_mask),
- fp_spill_mask_(fp_spill_mask),
method_info_(driver->GetCompiledMethodStorage()->DeduplicateMethodInfo(method_info)),
vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)),
cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)),
@@ -123,9 +117,6 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
CompilerDriver* driver,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
const ArrayRef<const uint8_t>& method_info,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
@@ -136,9 +127,6 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
driver,
instruction_set,
quick_code,
- frame_size_in_bytes,
- core_spill_mask,
- fp_spill_mask,
method_info,
vmap_table,
cfi_info, patches);
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index acdce260e5..aa6fd3e655 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -112,9 +112,6 @@ class CompiledMethod FINAL : public CompiledCode {
CompiledMethod(CompilerDriver* driver,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
const ArrayRef<const uint8_t>& method_info,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
@@ -126,9 +123,6 @@ class CompiledMethod FINAL : public CompiledCode {
CompilerDriver* driver,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
const ArrayRef<const uint8_t>& method_info,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& cfi_info,
@@ -148,18 +142,6 @@ class CompiledMethod FINAL : public CompiledCode {
SetPackedField<IsIntrinsicField>(/* value */ true);
}
- size_t GetFrameSizeInBytes() const {
- return frame_size_in_bytes_;
- }
-
- uint32_t GetCoreSpillMask() const {
- return core_spill_mask_;
- }
-
- uint32_t GetFpSpillMask() const {
- return fp_spill_mask_;
- }
-
ArrayRef<const uint8_t> GetMethodInfo() const;
ArrayRef<const uint8_t> GetVmapTable() const;
@@ -177,12 +159,6 @@ class CompiledMethod FINAL : public CompiledCode {
using IsIntrinsicField = BitField<bool, kIsIntrinsicLsb, kIsIntrinsicSize>;
- // For quick code, the size of the activation used by the code.
- const size_t frame_size_in_bytes_;
- // For quick code, a bit mask describing spilled GPR callee-save registers.
- const uint32_t core_spill_mask_;
- // For quick code, a bit mask describing spilled FPR callee-save registers.
- const uint32_t fp_spill_mask_;
// For quick code, method specific information that is not very dedupe friendly (method indices).
const LengthPrefixedArray<uint8_t>* const method_info_;
// For quick code, holds code infos which contain stack maps, inline information, and etc.
diff --git a/compiler/debug/method_debug_info.h b/compiler/debug/method_debug_info.h
index d0b03ec441..729c403f00 100644
--- a/compiler/debug/method_debug_info.h
+++ b/compiler/debug/method_debug_info.h
@@ -41,7 +41,7 @@ struct MethodDebugInfo {
uint64_t code_address;
uint32_t code_size;
uint32_t frame_size_in_bytes;
- const void* code_info;
+ const uint8_t* code_info;
ArrayRef<const uint8_t> cfi;
};
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index fcaa0cdd07..0800ab3d41 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -619,9 +619,6 @@ CompiledMethod* DexToDexCompiler::CompileMethod(
driver_,
instruction_set,
ArrayRef<const uint8_t>(), // no code
- 0,
- 0,
- 0,
ArrayRef<const uint8_t>(), // method_info
ArrayRef<const uint8_t>(quicken_data), // vmap_table
ArrayRef<const uint8_t>(), // cfi data
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index aed04f9c75..14d1e191ca 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -64,11 +64,11 @@ TEST(CompiledMethodStorage, Deduplicate) {
ArrayRef<const uint8_t>(raw_cfi_info2),
};
const linker::LinkerPatch raw_patches1[] = {
- linker::LinkerPatch::CodePatch(0u, nullptr, 1u),
+ linker::LinkerPatch::IntrinsicReferencePatch(0u, 0u, 0u),
linker::LinkerPatch::RelativeMethodPatch(4u, nullptr, 0u, 1u),
};
const linker::LinkerPatch raw_patches2[] = {
- linker::LinkerPatch::CodePatch(0u, nullptr, 1u),
+ linker::LinkerPatch::IntrinsicReferencePatch(0u, 0u, 0u),
linker::LinkerPatch::RelativeMethodPatch(4u, nullptr, 0u, 2u),
};
ArrayRef<const linker::LinkerPatch> patches[] = {
@@ -84,7 +84,7 @@ TEST(CompiledMethodStorage, Deduplicate) {
for (auto&& f : cfi_info) {
for (auto&& p : patches) {
compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
- &driver, InstructionSet::kNone, c, 0u, 0u, 0u, s, v, f, p));
+ &driver, InstructionSet::kNone, c, s, v, f, p));
}
}
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index b24b0362a3..6eca304223 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -255,7 +255,6 @@ CompilerDriver::CompilerDriver(
compiler_(Compiler::Create(this, compiler_kind)),
compiler_kind_(compiler_kind),
requires_constructor_barrier_lock_("constructor barrier lock"),
- non_relative_linker_patch_count_(0u),
image_classes_(std::move(image_classes)),
number_of_soft_verifier_failures_(0),
had_hard_verifier_failure_(false),
@@ -463,18 +462,7 @@ static void CompileMethodHarness(
}
if (compiled_method != nullptr) {
- // Count non-relative linker patches.
- size_t non_relative_linker_patch_count = 0u;
- for (const linker::LinkerPatch& patch : compiled_method->GetPatches()) {
- if (!patch.IsPcRelative()) {
- ++non_relative_linker_patch_count;
- }
- }
- bool compile_pic = driver->GetCompilerOptions().GetCompilePic(); // Off by default
- // When compiling with PIC, there should be zero non-relative linker patches
- CHECK(!compile_pic || non_relative_linker_patch_count == 0u);
-
- driver->AddCompiledMethod(method_ref, compiled_method, non_relative_linker_patch_count);
+ driver->AddCompiledMethod(method_ref, compiled_method);
}
if (self->IsExceptionPending()) {
@@ -2697,15 +2685,12 @@ void CompilerDriver::Compile(jobject class_loader,
}
void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref,
- CompiledMethod* const compiled_method,
- size_t non_relative_linker_patch_count) {
+ CompiledMethod* const compiled_method) {
DCHECK(GetCompiledMethod(method_ref) == nullptr) << method_ref.PrettyMethod();
MethodTable::InsertResult result = compiled_methods_.Insert(method_ref,
/*expected*/ nullptr,
compiled_method);
CHECK(result == MethodTable::kInsertResultSuccess);
- non_relative_linker_patch_count_.fetch_add(non_relative_linker_patch_count,
- std::memory_order_relaxed);
DCHECK(GetCompiledMethod(method_ref) != nullptr) << method_ref.PrettyMethod();
}
@@ -2815,10 +2800,6 @@ bool CompilerDriver::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
return is_system_class;
}
-size_t CompilerDriver::GetNonRelativeLinkerPatchCount() const {
- return non_relative_linker_patch_count_.load(std::memory_order_relaxed);
-}
-
void CompilerDriver::SetRequiresConstructorBarrier(Thread* self,
const DexFile* dex_file,
uint16_t class_def_index,
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 3d3583cb9b..343f67c6d5 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -148,9 +148,7 @@ class CompilerDriver {
CompiledMethod* GetCompiledMethod(MethodReference ref) const;
size_t GetNonRelativeLinkerPatchCount() const;
// Add a compiled method.
- void AddCompiledMethod(const MethodReference& method_ref,
- CompiledMethod* const compiled_method,
- size_t non_relative_linker_patch_count);
+ void AddCompiledMethod(const MethodReference& method_ref, CompiledMethod* const compiled_method);
CompiledMethod* RemoveCompiledMethod(const MethodReference& method_ref);
void SetRequiresConstructorBarrier(Thread* self,
@@ -435,10 +433,6 @@ class CompilerDriver {
// All method references that this compiler has compiled.
MethodTable compiled_methods_;
- // Number of non-relative patches in all compiled methods. These patches need space
- // in the .oat_patches ELF section if requested in the compiler options.
- Atomic<size_t> non_relative_linker_patch_count_;
-
// Image classes to be updated by PreCompile().
// TODO: Remove this member which is a non-const pointer to the CompilerOptions' data.
// Pass this explicitly to the PreCompile() which should be called directly from
diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc
index b56a991e74..6d952035c9 100644
--- a/compiler/exception_test.cc
+++ b/compiler/exception_test.cc
@@ -84,18 +84,18 @@ class ExceptionTest : public CommonRuntimeTest {
const size_t stack_maps_size = stack_maps.PrepareForFillIn();
const size_t header_size = sizeof(OatQuickMethodHeader);
const size_t code_alignment = GetInstructionSetAlignment(kRuntimeISA);
- const size_t code_offset = RoundUp(stack_maps_size + header_size, code_alignment);
- fake_header_code_and_maps_.resize(code_offset + fake_code_.size());
+ fake_header_code_and_maps_.resize(stack_maps_size + header_size + code_size + code_alignment);
+ // NB: The start of the vector might not have been allocated the desired alignment.
+ uint8_t* code_ptr =
+ AlignUp(&fake_header_code_and_maps_[stack_maps_size + header_size], code_alignment);
+
MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size);
stack_maps.FillInCodeInfo(stack_maps_region);
- OatQuickMethodHeader method_header(code_offset, 0u, 4 * sizeof(void*), 0u, 0u, code_size);
+ OatQuickMethodHeader method_header(code_ptr - stack_maps_region.begin(), 0u, code_size);
static_assert(std::is_trivially_copyable<OatQuickMethodHeader>::value, "Cannot use memcpy");
- memcpy(&fake_header_code_and_maps_[code_offset - header_size], &method_header, header_size);
- std::copy(fake_code_.begin(),
- fake_code_.end(),
- fake_header_code_and_maps_.begin() + code_offset);
- const void* code_ptr = fake_header_code_and_maps_.data() + code_offset;
+ memcpy(code_ptr - header_size, &method_header, header_size);
+ memcpy(code_ptr, fake_code_.data(), fake_code_.size());
if (kRuntimeISA == InstructionSet::kArm) {
// Check that the Thumb2 adjustment will be a NOP, see EntryPointToCodePointer().
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 3da7a43762..974c590a65 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -308,9 +308,14 @@ class ElfBuilder FINAL {
/* link */ nullptr,
/* info */ 0,
align,
- /* entsize */ 0),
- current_offset_(0),
- last_offset_(0) {
+ /* entsize */ 0) {
+ Reset();
+ }
+
+ void Reset() {
+ current_offset_ = 0;
+ last_name_ = "";
+ last_offset_ = 0;
}
Elf_Word Write(const std::string& name) {
@@ -550,6 +555,7 @@ class ElfBuilder FINAL {
build_id_(this, ".note.gnu.build-id", SHT_NOTE, SHF_ALLOC, nullptr, 0, 4, 0),
current_section_(nullptr),
started_(false),
+ finished_(false),
write_program_headers_(false),
loaded_size_(0u),
virtual_address_(0) {
@@ -627,8 +633,10 @@ class ElfBuilder FINAL {
write_program_headers_ = write_program_headers;
}
- void End() {
+ off_t End() {
DCHECK(started_);
+ DCHECK(!finished_);
+ finished_ = true;
// Note: loaded_size_ == 0 for tests that don't write .rodata, .text, .bss,
// .dynstr, dynsym, .hash and .dynamic. These tests should not read loaded_size_.
@@ -662,6 +670,7 @@ class ElfBuilder FINAL {
Elf_Off section_headers_offset;
section_headers_offset = AlignFileOffset(sizeof(Elf_Off));
stream_.WriteFully(shdrs.data(), shdrs.size() * sizeof(shdrs[0]));
+ off_t file_size = stream_.Seek(0, kSeekCurrent);
// Flush everything else before writing the program headers. This should prevent
// the OS from reordering writes, so that we don't end up with valid headers
@@ -687,6 +696,39 @@ class ElfBuilder FINAL {
stream_.WriteFully(&elf_header, sizeof(elf_header));
stream_.WriteFully(phdrs.data(), phdrs.size() * sizeof(phdrs[0]));
stream_.Flush();
+
+ return file_size;
+ }
+
+ // This has the same effect as running the "strip" command line tool.
+ // It removes all debugging sections (but it keeps mini-debug-info).
+ // It returns the ELF file size (as the caller needs to truncate it).
+ off_t Strip() {
+ DCHECK(finished_);
+ finished_ = false;
+ Elf_Off end = 0;
+ std::vector<Section*> non_debug_sections;
+ for (Section* section : sections_) {
+ if (section == &shstrtab_ || // Section names will be recreated.
+ section == &symtab_ ||
+ section == &strtab_ ||
+ section->name_.find(".debug_") == 0) {
+ section->header_.sh_offset = 0;
+ section->header_.sh_size = 0;
+ section->section_index_ = 0;
+ } else {
+ if (section->header_.sh_type != SHT_NOBITS) {
+ DCHECK_LE(section->header_.sh_offset, end + kPageSize) << "Large gap between sections";
+ end = std::max<off_t>(end, section->header_.sh_offset + section->header_.sh_size);
+ }
+ non_debug_sections.push_back(section);
+ }
+ }
+ shstrtab_.Reset();
+ // Write the non-debug section headers, program headers, and ELF header again.
+ sections_ = std::move(non_debug_sections);
+ stream_.Seek(end, kSeekSet);
+ return End();
}
// The running program does not have access to section headers
@@ -861,6 +903,7 @@ class ElfBuilder FINAL {
void WriteBuildId(uint8_t build_id[kBuildIdLen]) {
stream_.Seek(build_id_.GetDigestStart(), kSeekSet);
stream_.WriteFully(build_id, kBuildIdLen);
+ stream_.Flush();
}
// Returns true if all writes and seeks on the output stream succeeded.
@@ -1060,6 +1103,7 @@ class ElfBuilder FINAL {
Section* current_section_; // The section which is currently being written.
bool started_;
+ bool finished_;
bool write_program_headers_;
// The size of the memory taken by the ELF file when loaded.
diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h
index b7beb7bdb4..5e1615fbd2 100644
--- a/compiler/linker/linker_patch.h
+++ b/compiler/linker/linker_patch.h
@@ -47,7 +47,6 @@ class LinkerPatch {
kDataBimgRelRo,
kMethodRelative,
kMethodBssEntry,
- kCall, // TODO: Remove. (Deprecated, non-PIC.)
kCallRelative,
kTypeRelative,
kTypeBssEntry,
@@ -94,14 +93,6 @@ class LinkerPatch {
return patch;
}
- static LinkerPatch CodePatch(size_t literal_offset,
- const DexFile* target_dex_file,
- uint32_t target_method_idx) {
- LinkerPatch patch(literal_offset, Type::kCall, target_dex_file);
- patch.method_idx_ = target_method_idx;
- return patch;
- }
-
static LinkerPatch RelativeCodePatch(size_t literal_offset,
const DexFile* target_dex_file,
uint32_t target_method_idx) {
@@ -170,24 +161,6 @@ class LinkerPatch {
return patch_type_;
}
- bool IsPcRelative() const {
- switch (GetType()) {
- case Type::kIntrinsicReference:
- case Type::kDataBimgRelRo:
- case Type::kMethodRelative:
- case Type::kMethodBssEntry:
- case Type::kCallRelative:
- case Type::kTypeRelative:
- case Type::kTypeBssEntry:
- case Type::kStringRelative:
- case Type::kStringBssEntry:
- case Type::kBakerReadBarrierBranch:
- return true;
- default:
- return false;
- }
- }
-
uint32_t IntrinsicData() const {
DCHECK(patch_type_ == Type::kIntrinsicReference);
return intrinsic_data_;
@@ -201,7 +174,6 @@ class LinkerPatch {
MethodReference TargetMethod() const {
DCHECK(patch_type_ == Type::kMethodRelative ||
patch_type_ == Type::kMethodBssEntry ||
- patch_type_ == Type::kCall ||
patch_type_ == Type::kCallRelative);
return MethodReference(target_dex_file_, method_idx_);
}
diff --git a/compiler/linker/linker_patch_test.cc b/compiler/linker/linker_patch_test.cc
index e87dc8de6b..997418c4f7 100644
--- a/compiler/linker/linker_patch_test.cc
+++ b/compiler/linker/linker_patch_test.cc
@@ -25,10 +25,14 @@ TEST(LinkerPatch, LinkerPatchOperators) {
const DexFile* dex_file1 = reinterpret_cast<const DexFile*>(1);
const DexFile* dex_file2 = reinterpret_cast<const DexFile*>(2);
LinkerPatch patches[] = {
+ LinkerPatch::IntrinsicReferencePatch(16u, 3000u, 1000u),
+ LinkerPatch::IntrinsicReferencePatch(16u, 3001u, 1000u),
+ LinkerPatch::IntrinsicReferencePatch(16u, 3000u, 1001u),
+ LinkerPatch::IntrinsicReferencePatch(16u, 3001u, 1001u),
LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3000u, 1000u),
LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1000u),
LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3000u, 1001u),
- LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1001u), // Index 3.
+ LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1001u), // Index 7.
LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3000u, 1000u),
LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3001u, 1000u),
LinkerPatch::RelativeMethodPatch(16u, dex_file2, 3000u, 1001u),
@@ -41,10 +45,6 @@ TEST(LinkerPatch, LinkerPatchOperators) {
LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3001u, 1000u),
LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3000u, 1001u),
LinkerPatch::MethodBssEntryPatch(16u, dex_file2, 3001u, 1001u),
- LinkerPatch::CodePatch(16u, dex_file1, 1000u),
- LinkerPatch::CodePatch(16u, dex_file1, 1001u),
- LinkerPatch::CodePatch(16u, dex_file2, 1000u),
- LinkerPatch::CodePatch(16u, dex_file2, 1001u),
LinkerPatch::RelativeCodePatch(16u, dex_file1, 1000u),
LinkerPatch::RelativeCodePatch(16u, dex_file1, 1001u),
LinkerPatch::RelativeCodePatch(16u, dex_file2, 1000u),
@@ -86,6 +86,10 @@ TEST(LinkerPatch, LinkerPatchOperators) {
LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 0u),
LinkerPatch::BakerReadBarrierBranchPatch(16u, 1u, 1u),
+ LinkerPatch::IntrinsicReferencePatch(32u, 3000u, 1000u),
+ LinkerPatch::IntrinsicReferencePatch(32u, 3001u, 1000u),
+ LinkerPatch::IntrinsicReferencePatch(32u, 3000u, 1001u),
+ LinkerPatch::IntrinsicReferencePatch(32u, 3001u, 1001u),
LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3000u, 1000u),
LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3001u, 1000u),
LinkerPatch::RelativeMethodPatch(32u, dex_file1, 3000u, 1001u),
@@ -102,10 +106,6 @@ TEST(LinkerPatch, LinkerPatchOperators) {
LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3001u, 1000u),
LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3000u, 1001u),
LinkerPatch::MethodBssEntryPatch(32u, dex_file2, 3001u, 1001u),
- LinkerPatch::CodePatch(32u, dex_file1, 1000u),
- LinkerPatch::CodePatch(32u, dex_file1, 1001u),
- LinkerPatch::CodePatch(32u, dex_file2, 1000u),
- LinkerPatch::CodePatch(32u, dex_file2, 1001u),
LinkerPatch::RelativeCodePatch(32u, dex_file1, 1000u),
LinkerPatch::RelativeCodePatch(32u, dex_file1, 1001u),
LinkerPatch::RelativeCodePatch(32u, dex_file2, 1000u),
@@ -147,20 +147,20 @@ TEST(LinkerPatch, LinkerPatchOperators) {
LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 0u),
LinkerPatch::BakerReadBarrierBranchPatch(32u, 1u, 1u),
- LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1001u), // Same as patch at index 3.
+ LinkerPatch::RelativeMethodPatch(16u, dex_file1, 3001u, 1001u), // Same as patch at index 7.
};
constexpr size_t last_index = arraysize(patches) - 1u;
for (size_t i = 0; i != arraysize(patches); ++i) {
for (size_t j = 0; j != arraysize(patches); ++j) {
- bool expected = (i != last_index ? i : 3u) == (j != last_index ? j : 3u);
+ bool expected = (i != last_index ? i : 7u) == (j != last_index ? j : 7u);
EXPECT_EQ(expected, patches[i] == patches[j]) << i << " " << j;
}
}
for (size_t i = 0; i != arraysize(patches); ++i) {
for (size_t j = 0; j != arraysize(patches); ++j) {
- bool expected = (i != last_index ? i : 3u) < (j != last_index ? j : 3u);
+ bool expected = (i != last_index ? i : 7u) < (j != last_index ? j : 7u);
EXPECT_EQ(expected, patches[i] < patches[j]) << i << " " << j;
}
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d1c83ce625..760b1dd09b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -64,7 +64,6 @@ using helpers::DRegisterFrom;
using helpers::FPRegisterFrom;
using helpers::HeapOperand;
using helpers::HeapOperandFrom;
-using helpers::InputCPURegisterAt;
using helpers::InputCPURegisterOrZeroRegAt;
using helpers::InputFPRegisterAt;
using helpers::InputOperandAt;
@@ -78,7 +77,6 @@ using helpers::OutputFPRegister;
using helpers::OutputRegister;
using helpers::QRegisterFrom;
using helpers::RegisterFrom;
-using helpers::SRegisterFrom;
using helpers::StackOperandFrom;
using helpers::VIXLRegCodeFromART;
using helpers::WRegisterFrom;
@@ -4555,10 +4553,6 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(
EmitAddPlaceholder(add_label, XRegisterFrom(temp), XRegisterFrom(temp));
break;
}
- case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- // Load method address from literal pool.
- __ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
- break;
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
// Add ADRP with its PC-relative .data.bimg.rel.ro patch.
uint32_t boot_image_offset = GetBootImageOffset(invoke);
@@ -4581,6 +4575,10 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(
EmitLdrOffsetPlaceholder(ldr_label, XRegisterFrom(temp), XRegisterFrom(temp));
break;
}
+ case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
+ // Load method address from literal pool.
+ __ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
+ break;
case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
@@ -4812,8 +4810,7 @@ void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
// Add ADD with its PC-relative type patch.
vixl::aarch64::Label* add_label = NewBootImageIntrinsicPatch(boot_image_reference, adrp_label);
EmitAddPlaceholder(add_label, reg.X(), reg.X());
- } else if (GetCompilerOptions().GetCompilePic()) {
- DCHECK(Runtime::Current()->IsAotCompiler());
+ } else if (Runtime::Current()->IsAotCompiler()) {
// Add ADRP with its PC-relative .data.bimg.rel.ro patch.
vixl::aarch64::Label* adrp_label = NewBootImageRelRoPatch(boot_image_reference);
EmitAdrpPlaceholder(adrp_label, reg.X());
@@ -4821,6 +4818,7 @@ void CodeGeneratorARM64::LoadBootImageAddress(vixl::aarch64::Register reg,
vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_reference, adrp_label);
EmitLdrOffsetPlaceholder(ldr_label, reg.W(), reg.X());
} else {
+ DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK(!heap->GetBootImageSpaces().empty());
const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
@@ -5016,10 +5014,10 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kJitBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kRuntimeCall:
break;
}
@@ -5113,14 +5111,6 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
codegen_->EmitAddPlaceholder(add_label, out.X(), out.X());
break;
}
- case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
- DCHECK_NE(address, 0u);
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
- break;
- }
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
uint32_t boot_image_offset = codegen_->GetBootImageOffset(cls);
@@ -5153,6 +5143,13 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
generate_null_check = true;
break;
}
+ case HLoadClass::LoadKind::kJitBootImageAddress: {
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
+ uint32_t address = reinterpret_cast32<uint32_t>(cls->GetClass().Get());
+ DCHECK_NE(address, 0u);
+ __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
@@ -5239,10 +5236,10 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kJitBootImageAddress:
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kRuntimeCall:
break;
}
@@ -5294,13 +5291,6 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
codegen_->EmitAddPlaceholder(add_label, out.X(), out.X());
return;
}
- case HLoadString::LoadKind::kBootImageAddress: {
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(load->GetString().Get()));
- DCHECK_NE(address, 0u);
- __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
- return;
- }
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
// Add ADRP with its PC-relative .data.bimg.rel.ro patch.
@@ -5339,6 +5329,12 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ __LINE__);
return;
}
+ case HLoadString::LoadKind::kJitBootImageAddress: {
+ uint32_t address = reinterpret_cast32<uint32_t>(load->GetString().Get());
+ DCHECK_NE(address, 0u);
+ __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(address));
+ return;
+ }
case HLoadString::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
load->GetStringIndex(),
@@ -5678,14 +5674,6 @@ void InstructionCodeGeneratorARM64::GenerateIntRemForPower2Denom(HRem *instructi
}
}
-void InstructionCodeGeneratorARM64::GenerateIntRemForOneOrMinusOneDenom(HRem *instruction) {
- int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1));
- DCHECK(imm == 1 || imm == -1) << imm;
-
- Register out = OutputRegister(instruction);
- __ Mov(out, 0);
-}
-
void InstructionCodeGeneratorARM64::GenerateIntRemForConstDenom(HRem *instruction) {
int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1));
@@ -5695,10 +5683,12 @@ void InstructionCodeGeneratorARM64::GenerateIntRemForConstDenom(HRem *instructio
return;
}
- if (imm == 1 || imm == -1) {
- // TODO: These cases need to be optimized in InstructionSimplifier
- GenerateIntRemForOneOrMinusOneDenom(instruction);
- } else if (IsPowerOfTwo(AbsOrMin(imm))) {
+ if (IsPowerOfTwo(AbsOrMin(imm))) {
+ // Cases imm == -1 or imm == 1 are handled in constant folding by
+ // InstructionWithAbsorbingInputSimplifier.
+ // If the cases have survived till code generation they are handled in
+ // GenerateIntRemForPower2Denom because -1 and 1 are a power of 2 (2^0).
+ // The correct code is generated for them, just more instructions.
GenerateIntRemForPower2Denom(instruction);
} else {
DCHECK(imm < -2 || imm > 2) << imm;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index c44fa48066..93bab3180c 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -327,7 +327,6 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
void GenerateIntDivForPower2Denom(HDiv *instruction);
void GenerateIntRem(HRem* instruction);
void GenerateIntRemForConstDenom(HRem *instruction);
- void GenerateIntRemForOneOrMinusOneDenom(HRem *instruction);
void GenerateIntRemForPower2Denom(HRem *instruction);
void HandleGoto(HInstruction* got, HBasicBlock* successor);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index deab239362..6d6d1a2aa9 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -48,7 +48,6 @@ using namespace vixl32; // NOLINT(build/namespaces)
using helpers::DRegisterFrom;
using helpers::DWARFReg;
-using helpers::HighDRegisterFrom;
using helpers::HighRegisterFrom;
using helpers::InputDRegisterAt;
using helpers::InputOperandAt;
@@ -7374,10 +7373,10 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kJitBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kRuntimeCall:
break;
}
@@ -7465,14 +7464,6 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
codegen_->EmitMovwMovtPlaceholder(labels, out);
break;
}
- case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
- DCHECK_NE(address, 0u);
- __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
- break;
- }
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
@@ -7489,6 +7480,13 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
generate_null_check = true;
break;
}
+ case HLoadClass::LoadKind::kJitBootImageAddress: {
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
+ uint32_t address = reinterpret_cast32<uint32_t>(cls->GetClass().Get());
+ DCHECK_NE(address, 0u);
+ __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
cls->GetTypeIndex(),
@@ -7649,10 +7647,10 @@ HLoadString::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kJitBootImageAddress:
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kRuntimeCall:
break;
}
@@ -7699,13 +7697,6 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
codegen_->EmitMovwMovtPlaceholder(labels, out);
return;
}
- case HLoadString::LoadKind::kBootImageAddress: {
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(load->GetString().Get()));
- DCHECK_NE(address, 0u);
- __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
- return;
- }
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
@@ -7729,6 +7720,12 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
codegen_->MaybeGenerateMarkingRegisterCheck(/* code */ 16);
return;
}
+ case HLoadString::LoadKind::kJitBootImageAddress: {
+ uint32_t address = reinterpret_cast32<uint32_t>(load->GetString().Get());
+ DCHECK_NE(address, 0u);
+ __ Ldr(out, codegen_->DeduplicateBootImageAddressLiteral(address));
+ return;
+ }
case HLoadString::LoadKind::kJitTableAddress: {
__ Ldr(out, codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
load->GetStringIndex(),
@@ -9336,9 +9333,6 @@ void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
EmitMovwMovtPlaceholder(labels, temp_reg);
break;
}
- case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- __ Mov(RegisterFrom(temp), Operand::From(invoke->GetMethodAddress()));
- break;
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
uint32_t boot_image_offset = GetBootImageOffset(invoke);
PcRelativePatchInfo* labels = NewBootImageRelRoPatch(boot_image_offset);
@@ -9355,6 +9349,9 @@ void CodeGeneratorARMVIXL::GenerateStaticOrDirectCall(
GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, temp_reg, /* offset*/ 0);
break;
}
+ case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
+ __ Mov(RegisterFrom(temp), Operand::From(invoke->GetMethodAddress()));
+ break;
case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
@@ -9500,7 +9497,7 @@ vixl32::Label* CodeGeneratorARMVIXL::NewBakerReadBarrierPatch(uint32_t custom_da
}
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateBootImageAddressLiteral(uint32_t address) {
- return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
+ return DeduplicateUint32Literal(address, &uint32_literals_);
}
VIXLUInt32Literal* CodeGeneratorARMVIXL::DeduplicateJitStringLiteral(
@@ -9532,13 +9529,13 @@ void CodeGeneratorARMVIXL::LoadBootImageAddress(vixl32::Register reg,
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
NewBootImageIntrinsicPatch(boot_image_reference);
EmitMovwMovtPlaceholder(labels, reg);
- } else if (GetCompilerOptions().GetCompilePic()) {
- DCHECK(Runtime::Current()->IsAotCompiler());
+ } else if (Runtime::Current()->IsAotCompiler()) {
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
NewBootImageRelRoPatch(boot_image_reference);
EmitMovwMovtPlaceholder(labels, reg);
__ Ldr(reg, MemOperand(reg, /* offset */ 0));
} else {
+ DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK(!heap->GetBootImageSpaces().empty());
uintptr_t address =
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c7295e4db1..4aed2c091c 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1758,13 +1758,13 @@ void CodeGeneratorMIPS::LoadBootImageAddress(Register reg, uint32_t boot_image_r
PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base */ ZERO);
__ Addiu(reg, TMP, /* placeholder */ 0x5678, &info_low->label);
- } else if (GetCompilerOptions().GetCompilePic()) {
- DCHECK(Runtime::Current()->IsAotCompiler());
+ } else if (Runtime::Current()->IsAotCompiler()) {
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base */ ZERO);
__ Lw(reg, reg, /* placeholder */ 0x5678, &info_low->label);
} else {
+ DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK(!heap->GetBootImageSpaces().empty());
const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
@@ -7879,10 +7879,10 @@ HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kJitBootImageAddress:
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kRuntimeCall:
break;
}
@@ -7902,10 +7902,10 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kJitBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kRuntimeCall:
break;
}
@@ -7982,9 +7982,6 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
__ Addiu(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
break;
}
- case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
- break;
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
uint32_t boot_image_offset = GetBootImageOffset(invoke);
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
@@ -8004,6 +8001,9 @@ void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
__ Lw(temp_reg, TMP, /* placeholder */ 0x5678, &info_low->label);
break;
}
+ case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
+ __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
+ break;
case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
@@ -8114,14 +8114,14 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageRelRo:
case HLoadClass::LoadKind::kBssEntry:
+ case HLoadClass::LoadKind::kJitBootImageAddress:
if (isR6) {
break;
}
if (has_irreducible_loops) {
- if (load_kind != HLoadClass::LoadKind::kBootImageAddress) {
+ if (load_kind != HLoadClass::LoadKind::kJitBootImageAddress) {
codegen_->ClobberRA();
}
break;
@@ -8166,9 +8166,9 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageRelRo:
case HLoadClass::LoadKind::kBssEntry:
+ case HLoadClass::LoadKind::kJitBootImageAddress:
base_or_current_method_reg =
(isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
break;
@@ -8210,20 +8210,6 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
__ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
break;
}
- case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
- DCHECK_NE(address, 0u);
- if (isR6 || !has_irreducible_loops) {
- __ LoadLiteral(out,
- base_or_current_method_reg,
- codegen_->DeduplicateBootImageAddressLiteral(address));
- } else {
- __ LoadConst32(out, address);
- }
- break;
- }
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
uint32_t boot_image_offset = codegen_->GetBootImageOffset(cls);
@@ -8254,6 +8240,19 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
generate_null_check = true;
break;
}
+ case HLoadClass::LoadKind::kJitBootImageAddress: {
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
+ uint32_t address = reinterpret_cast32<uint32_t>(cls->GetClass().Get());
+ DCHECK_NE(address, 0u);
+ if (isR6 || !has_irreducible_loops) {
+ __ LoadLiteral(out,
+ base_or_current_method_reg,
+ codegen_->DeduplicateBootImageAddressLiteral(address));
+ } else {
+ __ LoadConst32(out, address);
+ }
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
CodeGeneratorMIPS::JitPatchInfo* info = codegen_->NewJitRootClassPatch(cls->GetDexFile(),
cls->GetTypeIndex(),
@@ -8343,15 +8342,15 @@ void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
const bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops();
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
- case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
case HLoadString::LoadKind::kBootImageRelRo:
case HLoadString::LoadKind::kBssEntry:
+ case HLoadString::LoadKind::kJitBootImageAddress:
if (isR6) {
break;
}
if (has_irreducible_loops) {
- if (load_kind != HLoadString::LoadKind::kBootImageAddress) {
+ if (load_kind != HLoadString::LoadKind::kJitBootImageAddress) {
codegen_->ClobberRA();
}
break;
@@ -8395,10 +8394,10 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
switch (load_kind) {
// We need an extra register for PC-relative literals on R2.
- case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
case HLoadString::LoadKind::kBootImageRelRo:
case HLoadString::LoadKind::kBssEntry:
+ case HLoadString::LoadKind::kJitBootImageAddress:
base_or_current_method_reg =
(isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
break;
@@ -8420,19 +8419,6 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
__ Addiu(out, out, /* placeholder */ 0x5678, &info_low->label);
return;
}
- case HLoadString::LoadKind::kBootImageAddress: {
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(load->GetString().Get()));
- DCHECK_NE(address, 0u);
- if (isR6 || !has_irreducible_loops) {
- __ LoadLiteral(out,
- base_or_current_method_reg,
- codegen_->DeduplicateBootImageAddressLiteral(address));
- } else {
- __ LoadConst32(out, address);
- }
- return;
- }
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
uint32_t boot_image_offset = codegen_->GetBootImageOffset(load);
@@ -8468,6 +8454,18 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
__ Bind(slow_path->GetExitLabel());
return;
}
+ case HLoadString::LoadKind::kJitBootImageAddress: {
+ uint32_t address = reinterpret_cast32<uint32_t>(load->GetString().Get());
+ DCHECK_NE(address, 0u);
+ if (isR6 || !has_irreducible_loops) {
+ __ LoadLiteral(out,
+ base_or_current_method_reg,
+ codegen_->DeduplicateBootImageAddressLiteral(address));
+ } else {
+ __ LoadConst32(out, address);
+ }
+ return;
+ }
case HLoadString::LoadKind::kJitTableAddress: {
CodeGeneratorMIPS::JitPatchInfo* info =
codegen_->NewJitRootStringPatch(load->GetDexFile(),
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index ffde45e95e..75169139cd 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1657,14 +1657,14 @@ void CodeGeneratorMIPS64::LoadBootImageAddress(GpuRegister reg, uint32_t boot_im
PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
__ Daddiu(reg, AT, /* placeholder */ 0x5678);
- } else if (GetCompilerOptions().GetCompilePic()) {
- DCHECK(Runtime::Current()->IsAotCompiler());
+ } else if (Runtime::Current()->IsAotCompiler()) {
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
__ Lwu(reg, AT, /* placeholder */ 0x5678);
} else {
+ DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK(!heap->GetBootImageSpaces().empty());
uintptr_t address =
@@ -5995,10 +5995,10 @@ HLoadString::LoadKind CodeGeneratorMIPS64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kJitBootImageAddress:
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kRuntimeCall:
break;
}
@@ -6022,10 +6022,10 @@ HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kJitBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kRuntimeCall:
break;
}
@@ -6073,11 +6073,6 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(
__ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
break;
}
- case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- __ LoadLiteral(temp.AsRegister<GpuRegister>(),
- kLoadDoubleword,
- DeduplicateUint64Literal(invoke->GetMethodAddress()));
- break;
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
uint32_t boot_image_offset = GetBootImageOffset(invoke);
PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
@@ -6096,6 +6091,11 @@ void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(
__ Ld(temp.AsRegister<GpuRegister>(), AT, /* placeholder */ 0x5678);
break;
}
+ case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
+ __ LoadLiteral(temp.AsRegister<GpuRegister>(),
+ kLoadDoubleword,
+ DeduplicateUint64Literal(invoke->GetMethodAddress()));
+ break;
case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
@@ -6264,16 +6264,6 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
__ Daddiu(out, AT, /* placeholder */ 0x5678);
break;
}
- case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
- DCHECK_NE(address, 0u);
- __ LoadLiteral(out,
- kLoadUnsignedWord,
- codegen_->DeduplicateBootImageAddressLiteral(address));
- break;
- }
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
uint32_t boot_image_offset = codegen_->GetBootImageOffset(cls);
@@ -6300,6 +6290,15 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
generate_null_check = true;
break;
}
+ case HLoadClass::LoadKind::kJitBootImageAddress: {
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
+ uint32_t address = reinterpret_cast32<uint32_t>(cls->GetClass().Get());
+ DCHECK_NE(address, 0u);
+ __ LoadLiteral(out,
+ kLoadUnsignedWord,
+ codegen_->DeduplicateBootImageAddressLiteral(address));
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress:
__ LoadLiteral(out,
kLoadUnsignedWord,
@@ -6415,15 +6414,6 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
__ Daddiu(out, AT, /* placeholder */ 0x5678);
return;
}
- case HLoadString::LoadKind::kBootImageAddress: {
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(load->GetString().Get()));
- DCHECK_NE(address, 0u);
- __ LoadLiteral(out,
- kLoadUnsignedWord,
- codegen_->DeduplicateBootImageAddressLiteral(address));
- return;
- }
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
uint32_t boot_image_offset = codegen_->GetBootImageOffset(load);
@@ -6455,6 +6445,14 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
__ Bind(slow_path->GetExitLabel());
return;
}
+ case HLoadString::LoadKind::kJitBootImageAddress: {
+ uint32_t address = reinterpret_cast32<uint32_t>(load->GetString().Get());
+ DCHECK_NE(address, 0u);
+ __ LoadLiteral(out,
+ kLoadUnsignedWord,
+ codegen_->DeduplicateBootImageAddressLiteral(address));
+ return;
+ }
case HLoadString::LoadKind::kJitTableAddress:
__ LoadLiteral(out,
kLoadUnsignedWord,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1c0d283ef6..30436eef9c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -4828,9 +4828,6 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(
RecordBootImageMethodPatch(invoke);
break;
}
- case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- __ movl(temp.AsRegister<Register>(), Immediate(invoke->GetMethodAddress()));
- break;
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
temp.AsRegister<Register>());
@@ -4847,6 +4844,9 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(
RecordMethodBssEntryPatch(invoke);
break;
}
+ case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
+ __ movl(temp.AsRegister<Register>(), Immediate(invoke->GetMethodAddress()));
+ break;
case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
@@ -4979,8 +4979,7 @@ void CodeGeneratorX86::LoadBootImageAddress(Register reg,
invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()).AsRegister<Register>();
__ leal(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
RecordBootImageIntrinsicPatch(method_address, boot_image_reference);
- } else if (GetCompilerOptions().GetCompilePic()) {
- DCHECK(Runtime::Current()->IsAotCompiler());
+ } else if (Runtime::Current()->IsAotCompiler()) {
DCHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
HX86ComputeBaseMethodAddress* method_address =
invoke->InputAt(invoke->GetSpecialInputIndex())->AsX86ComputeBaseMethodAddress();
@@ -4990,6 +4989,7 @@ void CodeGeneratorX86::LoadBootImageAddress(Register reg,
__ movl(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
RecordBootImageRelRoPatch(method_address, boot_image_reference);
} else {
+ DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK(!heap->GetBootImageSpaces().empty());
const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
@@ -6447,10 +6447,10 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kJitBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kRuntimeCall:
break;
}
@@ -6549,14 +6549,6 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
codegen_->RecordBootImageTypePatch(cls);
break;
}
- case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
- DCHECK_NE(address, 0u);
- __ movl(out, Immediate(address));
- break;
- }
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
Register method_address = locations->InAt(0).AsRegister<Register>();
@@ -6573,6 +6565,13 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFE
generate_null_check = true;
break;
}
+ case HLoadClass::LoadKind::kJitBootImageAddress: {
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
+ uint32_t address = reinterpret_cast32<uint32_t>(cls->GetClass().Get());
+ DCHECK_NE(address, 0u);
+ __ movl(out, Immediate(address));
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewJitRootClassPatch(
@@ -6686,10 +6685,10 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kJitBootImageAddress:
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kRuntimeCall:
break;
}
@@ -6748,13 +6747,6 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S
codegen_->RecordBootImageStringPatch(load);
return;
}
- case HLoadString::LoadKind::kBootImageAddress: {
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(load->GetString().Get()));
- DCHECK_NE(address, 0u);
- __ movl(out, Immediate(address));
- return;
- }
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
Register method_address = locations->InAt(0).AsRegister<Register>();
@@ -6776,6 +6768,12 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) NO_THREAD_S
__ Bind(slow_path->GetExitLabel());
return;
}
+ case HLoadString::LoadKind::kJitBootImageAddress: {
+ uint32_t address = reinterpret_cast32<uint32_t>(load->GetString().Get());
+ DCHECK_NE(address, 0u);
+ __ movl(out, Immediate(address));
+ return;
+ }
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
Label* fixup_label = codegen_->NewJitRootStringPatch(
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 3073be6ca7..0d7837e70f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -996,9 +996,6 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
Address::Absolute(kDummy32BitOffset, /* no_rip */ false));
RecordBootImageMethodPatch(invoke);
break;
- case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- Load64BitValue(temp.AsRegister<CpuRegister>(), invoke->GetMethodAddress());
- break;
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
// Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
__ movl(temp.AsRegister<CpuRegister>(),
@@ -1012,6 +1009,9 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(
RecordMethodBssEntryPatch(invoke);
break;
}
+ case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
+ Load64BitValue(temp.AsRegister<CpuRegister>(), invoke->GetMethodAddress());
+ break;
case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
return; // No code pointer retrieval; the runtime performs the call directly.
@@ -1117,11 +1117,11 @@ void CodeGeneratorX86_64::LoadBootImageAddress(CpuRegister reg, uint32_t boot_im
if (GetCompilerOptions().IsBootImage()) {
__ leal(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
RecordBootImageIntrinsicPatch(boot_image_reference);
- } else if (GetCompilerOptions().GetCompilePic()) {
- DCHECK(Runtime::Current()->IsAotCompiler());
+ } else if (Runtime::Current()->IsAotCompiler()) {
__ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
RecordBootImageRelRoPatch(boot_image_reference);
} else {
+ DCHECK(Runtime::Current()->UseJitCompilation());
gc::Heap* heap = Runtime::Current()->GetHeap();
DCHECK(!heap->GetBootImageSpaces().empty());
const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
@@ -5793,10 +5793,10 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
case HLoadClass::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadClass::LoadKind::kJitBootImageAddress:
case HLoadClass::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kRuntimeCall:
break;
}
@@ -5889,14 +5889,6 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
__ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
codegen_->RecordBootImageTypePatch(cls);
break;
- case HLoadClass::LoadKind::kBootImageAddress: {
- DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(cls->GetClass().Get()));
- DCHECK_NE(address, 0u);
- __ movl(out, Immediate(static_cast<int32_t>(address))); // Zero-extended.
- break;
- }
case HLoadClass::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
__ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
@@ -5912,6 +5904,13 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
generate_null_check = true;
break;
}
+ case HLoadClass::LoadKind::kJitBootImageAddress: {
+ DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
+ uint32_t address = reinterpret_cast32<uint32_t>(cls->GetClass().Get());
+ DCHECK_NE(address, 0u);
+ __ movl(out, Immediate(static_cast<int32_t>(address))); // Zero-extended.
+ break;
+ }
case HLoadClass::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ true);
@@ -5989,10 +5988,10 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind(
case HLoadString::LoadKind::kBssEntry:
DCHECK(!Runtime::Current()->UseJitCompilation());
break;
+ case HLoadString::LoadKind::kJitBootImageAddress:
case HLoadString::LoadKind::kJitTableAddress:
DCHECK(Runtime::Current()->UseJitCompilation());
break;
- case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kRuntimeCall:
break;
}
@@ -6044,13 +6043,6 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
codegen_->RecordBootImageStringPatch(load);
return;
}
- case HLoadString::LoadKind::kBootImageAddress: {
- uint32_t address = dchecked_integral_cast<uint32_t>(
- reinterpret_cast<uintptr_t>(load->GetString().Get()));
- DCHECK_NE(address, 0u);
- __ movl(out, Immediate(static_cast<int32_t>(address))); // Zero-extended.
- return;
- }
case HLoadString::LoadKind::kBootImageRelRo: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
__ movl(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false));
@@ -6070,6 +6062,12 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA
__ Bind(slow_path->GetExitLabel());
return;
}
+ case HLoadString::LoadKind::kJitBootImageAddress: {
+ uint32_t address = reinterpret_cast32<uint32_t>(load->GetString().Get());
+ DCHECK_NE(address, 0u);
+ __ movl(out, Immediate(static_cast<int32_t>(address))); // Zero-extended.
+ return;
+ }
case HLoadString::LoadKind::kJitTableAddress: {
Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
/* no_rip */ true);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 771e066d2f..e555d0d890 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -854,7 +854,7 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in
// make this an invoke-unresolved to handle cross-dex invokes or abstract super methods, both of
// which require runtime handling.
if (invoke_type == kSuper) {
- ObjPtr<mirror::Class> compiling_class = GetCompilingClass();
+ ObjPtr<mirror::Class> compiling_class = ResolveCompilingClass(soa);
if (compiling_class == nullptr) {
// We could not determine the method's class we need to wait until runtime.
DCHECK(Runtime::Current()->IsAotCompiler());
@@ -973,8 +973,8 @@ bool HInstructionBuilder::BuildInvoke(const Instruction& instruction,
= HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit;
ScopedObjectAccess soa(Thread::Current());
if (invoke_type == kStatic) {
- clinit_check = ProcessClinitCheckForInvoke(
- dex_pc, resolved_method, &clinit_check_requirement);
+ clinit_check =
+ ProcessClinitCheckForInvoke(soa, dex_pc, resolved_method, &clinit_check_requirement);
} else if (invoke_type == kSuper) {
if (IsSameDexFile(*resolved_method->GetDexFile(), *dex_compilation_unit_->GetDexFile())) {
// Update the method index to the one resolved. Note that this may be a no-op if
@@ -1063,7 +1063,7 @@ HNewInstance* HInstructionBuilder::BuildNewInstance(dex::TypeIndex type_index, u
HInstruction* cls = load_class;
Handle<mirror::Class> klass = load_class->GetClass();
- if (!IsInitialized(klass)) {
+ if (!IsInitialized(soa, klass)) {
cls = new (allocator_) HClinitCheck(load_class, dex_pc);
AppendInstruction(cls);
}
@@ -1147,7 +1147,152 @@ void HInstructionBuilder::BuildConstructorFenceForAllocation(HInstruction* alloc
MethodCompilationStat::kConstructorFenceGeneratedNew);
}
-bool HInstructionBuilder::IsInitialized(Handle<mirror::Class> cls) const {
+static bool IsInBootImage(ObjPtr<mirror::Class> cls, const CompilerOptions& compiler_options)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (compiler_options.IsBootImage()) {
+ std::string temp;
+ const char* descriptor = cls->GetDescriptor(&temp);
+ return compiler_options.IsImageClass(descriptor);
+ } else {
+ return Runtime::Current()->GetHeap()->FindSpaceFromObject(cls, false)->IsImageSpace();
+ }
+}
+
+static bool IsSubClass(ObjPtr<mirror::Class> to_test, ObjPtr<mirror::Class> super_class)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ return to_test != nullptr && !to_test->IsInterface() && to_test->IsSubClass(super_class);
+}
+
+static bool HasTrivialClinit(ObjPtr<mirror::Class> klass, PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // Check if the class has encoded fields that trigger bytecode execution.
+ // (Encoded fields are just a different representation of <clinit>.)
+ if (klass->NumStaticFields() != 0u) {
+ DCHECK(klass->GetClassDef() != nullptr);
+ EncodedStaticFieldValueIterator it(klass->GetDexFile(), *klass->GetClassDef());
+ for (; it.HasNext(); it.Next()) {
+ switch (it.GetValueType()) {
+ case EncodedArrayValueIterator::ValueType::kBoolean:
+ case EncodedArrayValueIterator::ValueType::kByte:
+ case EncodedArrayValueIterator::ValueType::kShort:
+ case EncodedArrayValueIterator::ValueType::kChar:
+ case EncodedArrayValueIterator::ValueType::kInt:
+ case EncodedArrayValueIterator::ValueType::kLong:
+ case EncodedArrayValueIterator::ValueType::kFloat:
+ case EncodedArrayValueIterator::ValueType::kDouble:
+ case EncodedArrayValueIterator::ValueType::kNull:
+ case EncodedArrayValueIterator::ValueType::kString:
+ // Primitive, null or j.l.String initialization is permitted.
+ break;
+ case EncodedArrayValueIterator::ValueType::kType:
+ // Type initialization can load classes and execute bytecode through a class loader
+ // which can execute arbitrary bytecode. We do not optimize for known class loaders;
+ // kType is rarely used (if ever).
+ return false;
+ default:
+ // Other types in the encoded static field list are rejected by the DexFileVerifier.
+ LOG(FATAL) << "Unexpected type " << it.GetValueType();
+ UNREACHABLE();
+ }
+ }
+ }
+ // Check if the class has <clinit> that executes arbitrary code.
+ // Initialization of static fields of the class itself with constants is allowed.
+ ArtMethod* clinit = klass->FindClassInitializer(pointer_size);
+ if (clinit != nullptr) {
+ const DexFile& dex_file = *clinit->GetDexFile();
+ CodeItemInstructionAccessor accessor(dex_file, clinit->GetCodeItem());
+ for (DexInstructionPcPair it : accessor) {
+ switch (it->Opcode()) {
+ case Instruction::CONST_4:
+ case Instruction::CONST_16:
+ case Instruction::CONST:
+ case Instruction::CONST_HIGH16:
+ case Instruction::CONST_WIDE_16:
+ case Instruction::CONST_WIDE_32:
+ case Instruction::CONST_WIDE:
+ case Instruction::CONST_WIDE_HIGH16:
+ case Instruction::CONST_STRING:
+ case Instruction::CONST_STRING_JUMBO:
+ // Primitive, null or j.l.String initialization is permitted.
+ break;
+ case Instruction::RETURN_VOID:
+ case Instruction::RETURN_VOID_NO_BARRIER:
+ break;
+ case Instruction::SPUT:
+ case Instruction::SPUT_WIDE:
+ case Instruction::SPUT_OBJECT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT:
+ // Only initialization of a static field of the same class is permitted.
+ if (dex_file.GetFieldId(it->VRegB_21c()).class_idx_ != klass->GetDexTypeIndex()) {
+ return false;
+ }
+ break;
+ case Instruction::NEW_ARRAY:
+ // Only primitive arrays are permitted.
+ if (Primitive::GetType(dex_file.GetTypeDescriptor(dex_file.GetTypeId(
+ dex::TypeIndex(it->VRegC_22c())))[1]) == Primitive::kPrimNot) {
+ return false;
+ }
+ break;
+ case Instruction::APUT:
+ case Instruction::APUT_WIDE:
+ case Instruction::APUT_BOOLEAN:
+ case Instruction::APUT_BYTE:
+ case Instruction::APUT_CHAR:
+ case Instruction::APUT_SHORT:
+ case Instruction::FILL_ARRAY_DATA:
+ case Instruction::NOP:
+ // Allow initialization of primitive arrays (only constants can be stored).
+ // Note: We expect NOPs used for fill-array-data-payload but accept all NOPs
+ // (even unreferenced switch payloads if they make it through the verifier).
+ break;
+ default:
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+static bool HasTrivialInitialization(ObjPtr<mirror::Class> cls,
+ const CompilerOptions& compiler_options)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
+
+ // Check the superclass chain.
+ for (ObjPtr<mirror::Class> klass = cls; klass != nullptr; klass = klass->GetSuperClass()) {
+ if (klass->IsInitialized() && IsInBootImage(klass, compiler_options)) {
+ break; // `klass` and its superclasses are already initialized in the boot image.
+ }
+ if (!HasTrivialClinit(klass, pointer_size)) {
+ return false;
+ }
+ }
+
+ // Also check interfaces with default methods as they need to be initialized as well.
+ ObjPtr<mirror::IfTable> iftable = cls->GetIfTable();
+ DCHECK(iftable != nullptr);
+ for (int32_t i = 0, count = iftable->Count(); i != count; ++i) {
+ ObjPtr<mirror::Class> iface = iftable->GetInterface(i);
+ if (!iface->HasDefaultMethods()) {
+ continue; // Initializing `cls` does not initialize this interface.
+ }
+ if (iface->IsInitialized() && IsInBootImage(iface, compiler_options)) {
+ continue; // This interface is already initialized in the boot image.
+ }
+ if (!HasTrivialClinit(iface, pointer_size)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool HInstructionBuilder::IsInitialized(ScopedObjectAccess& soa, Handle<mirror::Class> cls) const {
if (cls == nullptr) {
return false;
}
@@ -1162,48 +1307,77 @@ bool HInstructionBuilder::IsInitialized(Handle<mirror::Class> cls) const {
}
// Assume loaded only if klass is in the boot image. App classes cannot be assumed
// loaded because we don't even know what class loader will be used to load them.
- const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions();
- if (compiler_options.IsBootImage()) {
- std::string temp;
- const char* descriptor = cls->GetDescriptor(&temp);
- if (compiler_options.IsImageClass(descriptor)) {
- return true;
- }
- } else {
- if (runtime->GetHeap()->FindSpaceFromObject(cls.Get(), false)->IsImageSpace()) {
- return true;
- }
+ if (IsInBootImage(cls.Get(), compiler_driver_->GetCompilerOptions())) {
+ return true;
}
}
- // We can avoid the class initialization check for `cls` only in static methods in the
+ // We can avoid the class initialization check for `cls` in static methods in the
// very same class. Instance methods of the same class can run on an escaped instance
// of an erroneous class. Even a superclass may need to be checked as the subclass
// can be completely initialized while the superclass is initializing and the subclass
// remains initialized when the superclass initializer throws afterwards. b/62478025
// Note: The HClinitCheck+HInvokeStaticOrDirect merging can still apply.
- if ((dex_compilation_unit_->GetAccessFlags() & kAccStatic) != 0u &&
- GetOutermostCompilingClass() == cls.Get()) {
+ ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
+ bool is_static = (dex_compilation_unit_->GetAccessFlags() & kAccStatic) != 0u;
+ if (is_static && outermost_cls == cls.Get()) {
return true;
}
+ // Remember if the compiled class is a subclass of `cls`. By the time this is used
+ // below the `outermost_cls` may be invalidated by calling ResolveCompilingClass().
+ bool is_subclass = IsSubClass(outermost_cls, cls.Get());
+ if (dex_compilation_unit_ != outer_compilation_unit_) {
+ // Check also the innermost method. Though excessive copies of ClinitCheck can be
+ // eliminated by GVN, that happens only after the decision whether to inline the
+ // graph or not and that may depend on the presence of the ClinitCheck.
+ // TODO: We should walk over the entire inlined method chain, but we don't pass that
+ // information to the builder.
+ ObjPtr<mirror::Class> innermost_cls = ResolveCompilingClass(soa);
+ if (is_static && innermost_cls == cls.Get()) {
+ return true;
+ }
+ is_subclass = is_subclass || IsSubClass(innermost_cls, cls.Get());
+ }
- // Note: We could walk over the inlined methods to avoid allocating excessive
- // `HClinitCheck`s in inlined static methods but they shall be eliminated by GVN.
+ // Otherwise, we may be able to avoid the check if `cls` is a superclass of a method being
+ // compiled here (anywhere in the inlining chain) as the `cls` must have started initializing
+ // before calling any `cls` or subclass methods. Static methods require a clinit check and
+ // instance methods require an instance which cannot be created before doing a clinit check.
+ // When a subclass of `cls` starts initializing, it starts initializing its superclass
+ // chain up to `cls` without running any bytecode, i.e. without any opportunity for circular
+ // initialization weirdness.
+ //
+ // If the initialization of `cls` is trivial (`cls` and its superclasses and superinterfaces
+ // with default methods initialize only their own static fields using constant values), it must
+ // complete, either successfully or by throwing and marking `cls` erroneous, without allocating
+ // any instances of `cls` or subclasses (or any other class) and without calling any methods.
+ // If it completes by throwing, no instances of `cls` shall be created and no subclass method
+ // bytecode shall execute (see above), therefore the instruction we're building shall be
+ // unreachable. By reaching the instruction, we know that `cls` was initialized successfully.
+ //
+ // TODO: We should walk over the entire inlined methods chain, but we don't pass that
+ // information to the builder. (We could also check if we're guaranteed a non-null instance
+ // of `cls` at this location but that's outside the scope of the instruction builder.)
+ if (is_subclass && HasTrivialInitialization(cls.Get(), compiler_driver_->GetCompilerOptions())) {
+ return true;
+ }
return false;
}
HClinitCheck* HInstructionBuilder::ProcessClinitCheckForInvoke(
- uint32_t dex_pc,
- ArtMethod* resolved_method,
- HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
+ ScopedObjectAccess& soa,
+ uint32_t dex_pc,
+ ArtMethod* resolved_method,
+ HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
Handle<mirror::Class> klass = handles_->NewHandle(resolved_method->GetDeclaringClass());
HClinitCheck* clinit_check = nullptr;
- if (IsInitialized(klass)) {
+ if (IsInitialized(soa, klass)) {
*clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
} else {
- HLoadClass* cls = BuildLoadClass(klass->GetDexTypeIndex(),
+ HLoadClass* cls = BuildLoadClass(soa,
+ klass->GetDexTypeIndex(),
klass->GetDexFile(),
klass,
dex_pc,
@@ -1438,21 +1612,23 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio
return true;
}
-static ObjPtr<mirror::Class> GetClassFrom(CompilerDriver* driver,
- const DexCompilationUnit& compilation_unit) {
- ScopedObjectAccess soa(Thread::Current());
+static ObjPtr<mirror::Class> ResolveClassFrom(ScopedObjectAccess& soa,
+ CompilerDriver* driver,
+ const DexCompilationUnit& compilation_unit)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader();
Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache();
return driver->ResolveCompilingMethodsClass(soa, dex_cache, class_loader, &compilation_unit);
}
-ObjPtr<mirror::Class> HInstructionBuilder::GetOutermostCompilingClass() const {
- return GetClassFrom(compiler_driver_, *outer_compilation_unit_);
+ObjPtr<mirror::Class> HInstructionBuilder::ResolveOutermostCompilingClass(
+ ScopedObjectAccess& soa) const {
+ return ResolveClassFrom(soa, compiler_driver_, *outer_compilation_unit_);
}
-ObjPtr<mirror::Class> HInstructionBuilder::GetCompilingClass() const {
- return GetClassFrom(compiler_driver_, *dex_compilation_unit_);
+ObjPtr<mirror::Class> HInstructionBuilder::ResolveCompilingClass(ScopedObjectAccess& soa) const {
+ return ResolveClassFrom(soa, compiler_driver_, *dex_compilation_unit_);
}
bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) const {
@@ -1462,7 +1638,7 @@ bool HInstructionBuilder::IsOutermostCompilingClass(dex::TypeIndex type_index) c
Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
Handle<mirror::Class> cls(hs.NewHandle(compiler_driver_->ResolveClass(
soa, dex_cache, class_loader, type_index, dex_compilation_unit_)));
- Handle<mirror::Class> outer_class(hs.NewHandle(GetOutermostCompilingClass()));
+ Handle<mirror::Class> outer_class(hs.NewHandle(ResolveOutermostCompilingClass(soa)));
// GetOutermostCompilingClass returns null when the class is unresolved
// (e.g. if it derives from an unresolved class). This is bogus knowing that
@@ -1496,7 +1672,7 @@ ArtField* HInstructionBuilder::ResolveField(uint16_t field_idx, bool is_static,
ClassLinker* class_linker = dex_compilation_unit_->GetClassLinker();
Handle<mirror::ClassLoader> class_loader = dex_compilation_unit_->GetClassLoader();
- Handle<mirror::Class> compiling_class(hs.NewHandle(GetCompilingClass()));
+ Handle<mirror::Class> compiling_class(hs.NewHandle(ResolveCompilingClass(soa)));
ArtField* resolved_field = class_linker->ResolveField(field_idx,
dex_compilation_unit_->GetDexCache(),
@@ -1557,7 +1733,8 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
DataType::Type field_type = GetFieldAccessType(*dex_file_, field_index);
Handle<mirror::Class> klass = handles_->NewHandle(resolved_field->GetDeclaringClass());
- HLoadClass* constant = BuildLoadClass(klass->GetDexTypeIndex(),
+ HLoadClass* constant = BuildLoadClass(soa,
+ klass->GetDexTypeIndex(),
klass->GetDexFile(),
klass,
dex_pc,
@@ -1573,7 +1750,7 @@ void HInstructionBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
HInstruction* cls = constant;
- if (!IsInitialized(klass)) {
+ if (!IsInitialized(soa, klass)) {
cls = new (allocator_) HClinitCheck(constant, dex_pc);
AppendInstruction(cls);
}
@@ -1802,11 +1979,12 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index, uint3
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
Handle<mirror::Class> klass = ResolveClass(soa, type_index);
- bool needs_access_check = LoadClassNeedsAccessCheck(klass);
- return BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
+ bool needs_access_check = LoadClassNeedsAccessCheck(soa, klass);
+ return BuildLoadClass(soa, type_index, dex_file, klass, dex_pc, needs_access_check);
}
-HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
+HLoadClass* HInstructionBuilder::BuildLoadClass(ScopedObjectAccess& soa,
+ dex::TypeIndex type_index,
const DexFile& dex_file,
Handle<mirror::Class> klass,
uint32_t dex_pc,
@@ -1823,12 +2001,17 @@ HLoadClass* HInstructionBuilder::BuildLoadClass(dex::TypeIndex type_index,
}
// Note: `klass` must be from `handles_`.
+ bool is_referrers_class = false;
+ if (klass != nullptr) {
+ ObjPtr<mirror::Class> outermost_cls = ResolveOutermostCompilingClass(soa);
+ is_referrers_class = (outermost_cls == klass.Get());
+ }
HLoadClass* load_class = new (allocator_) HLoadClass(
graph_->GetCurrentMethod(),
type_index,
*actual_dex_file,
klass,
- klass != nullptr && (klass.Get() == GetOutermostCompilingClass()),
+ is_referrers_class,
dex_pc,
needs_access_check);
@@ -1856,13 +2039,14 @@ Handle<mirror::Class> HInstructionBuilder::ResolveClass(ScopedObjectAccess& soa,
return handles_->NewHandle(klass);
}
-bool HInstructionBuilder::LoadClassNeedsAccessCheck(Handle<mirror::Class> klass) {
+bool HInstructionBuilder::LoadClassNeedsAccessCheck(ScopedObjectAccess& soa,
+ Handle<mirror::Class> klass) {
if (klass == nullptr) {
return true;
} else if (klass->IsPublic()) {
return false;
} else {
- ObjPtr<mirror::Class> compiling_class = GetCompilingClass();
+ ObjPtr<mirror::Class> compiling_class = ResolveCompilingClass(soa);
return compiling_class == nullptr || !compiling_class->CanAccess(klass.Get());
}
}
@@ -1891,7 +2075,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
ScopedObjectAccess soa(Thread::Current());
const DexFile& dex_file = *dex_compilation_unit_->GetDexFile();
Handle<mirror::Class> klass = ResolveClass(soa, type_index);
- bool needs_access_check = LoadClassNeedsAccessCheck(klass);
+ bool needs_access_check = LoadClassNeedsAccessCheck(soa, klass);
TypeCheckKind check_kind = HSharpening::ComputeTypeCheckKind(
klass.Get(), code_generator_, needs_access_check);
@@ -1909,7 +2093,7 @@ void HInstructionBuilder::BuildTypeCheck(const Instruction& instruction,
bitstring_path_to_root = graph_->GetIntConstant(static_cast<int32_t>(path_to_root), dex_pc);
bitstring_mask = graph_->GetIntConstant(static_cast<int32_t>(mask), dex_pc);
} else {
- class_or_null = BuildLoadClass(type_index, dex_file, klass, dex_pc, needs_access_check);
+ class_or_null = BuildLoadClass(soa, type_index, dex_file, klass, dex_pc, needs_access_check);
}
DCHECK(class_or_null != nullptr);
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 578172a18e..af1b86ca6f 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -219,7 +219,8 @@ class HInstructionBuilder : public ValueObject {
// Builds a `HLoadClass` loading the given `type_index`.
HLoadClass* BuildLoadClass(dex::TypeIndex type_index, uint32_t dex_pc);
- HLoadClass* BuildLoadClass(dex::TypeIndex type_index,
+ HLoadClass* BuildLoadClass(ScopedObjectAccess& soa,
+ dex::TypeIndex type_index,
const DexFile& dex_file,
Handle<mirror::Class> klass,
uint32_t dex_pc,
@@ -229,7 +230,7 @@ class HInstructionBuilder : public ValueObject {
Handle<mirror::Class> ResolveClass(ScopedObjectAccess& soa, dex::TypeIndex type_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool LoadClassNeedsAccessCheck(Handle<mirror::Class> klass)
+ bool LoadClassNeedsAccessCheck(ScopedObjectAccess& soa, Handle<mirror::Class> klass)
REQUIRES_SHARED(Locks::mutator_lock_);
// Builds a `HLoadMethodHandle` loading the given `method_handle_index`.
@@ -239,10 +240,12 @@ class HInstructionBuilder : public ValueObject {
void BuildLoadMethodType(dex::ProtoIndex proto_index, uint32_t dex_pc);
// Returns the outer-most compiling method's class.
- ObjPtr<mirror::Class> GetOutermostCompilingClass() const;
+ ObjPtr<mirror::Class> ResolveOutermostCompilingClass(ScopedObjectAccess& soa) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the class whose method is being compiled.
- ObjPtr<mirror::Class> GetCompilingClass() const;
+ ObjPtr<mirror::Class> ResolveCompilingClass(ScopedObjectAccess& soa) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns whether `type_index` points to the outer-most compiling method's class.
bool IsOutermostCompilingClass(dex::TypeIndex type_index) const;
@@ -269,6 +272,7 @@ class HInstructionBuilder : public ValueObject {
void HandleStringInitResult(HInvokeStaticOrDirect* invoke);
HClinitCheck* ProcessClinitCheckForInvoke(
+ ScopedObjectAccess& soa,
uint32_t dex_pc,
ArtMethod* method,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
@@ -282,7 +286,8 @@ class HInstructionBuilder : public ValueObject {
void BuildConstructorFenceForAllocation(HInstruction* allocation);
// Return whether the compiler can assume `cls` is initialized.
- bool IsInitialized(Handle<mirror::Class> cls) const REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsInitialized(ScopedObjectAccess& soa, Handle<mirror::Class> cls) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to resolve a method using the class linker. Return null if a method could
// not be resolved.
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 70af49f8f0..f493b66cfd 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -2268,7 +2268,7 @@ void InstructionSimplifierVisitor::SimplifySystemArrayCopy(HInvoke* instruction)
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
PointerSize image_size = class_linker->GetImagePointerSize();
HInvokeStaticOrDirect* invoke = instruction->AsInvokeStaticOrDirect();
- mirror::Class* system = invoke->GetResolvedMethod()->GetDeclaringClass();
+ ObjPtr<mirror::Class> system = invoke->GetResolvedMethod()->GetDeclaringClass();
ArtMethod* method = nullptr;
switch (source_component_type) {
case DataType::Type::kBool:
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index f11e5a1989..2963308da8 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -41,19 +41,15 @@ using helpers::HighRegisterFrom;
using helpers::InputDRegisterAt;
using helpers::InputRegisterAt;
using helpers::InputSRegisterAt;
-using helpers::InputVRegisterAt;
using helpers::Int32ConstantFrom;
using helpers::LocationFrom;
using helpers::LowRegisterFrom;
using helpers::LowSRegisterFrom;
using helpers::HighSRegisterFrom;
using helpers::OutputDRegister;
-using helpers::OutputSRegister;
using helpers::OutputRegister;
-using helpers::OutputVRegister;
using helpers::RegisterFrom;
using helpers::SRegisterFrom;
-using helpers::DRegisterFromS;
using namespace vixl::aarch32; // NOLINT(build/namespaces)
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index efb23e7d3e..d355cedb35 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -84,6 +84,8 @@ class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper {
static constexpr uint32_t kScalarHeuristicMaxBodySizeInstr = 17;
// Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
static constexpr uint32_t kScalarHeuristicMaxBodySizeBlocks = 6;
+ // Maximum number of instructions to be created as a result of full unrolling.
+ static constexpr uint32_t kScalarHeuristicFullyUnrolledMaxInstrThreshold = 35;
bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const OVERRIDE {
return analysis_info->HasLongTypeInstructions() ||
@@ -108,6 +110,14 @@ class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper {
bool IsLoopPeelingEnabled() const OVERRIDE { return true; }
+ bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const OVERRIDE {
+ int64_t trip_count = analysis_info->GetTripCount();
+ // We assume that trip count is known.
+ DCHECK_NE(trip_count, LoopAnalysisInfo::kUnknownTripCount);
+ size_t instr_num = analysis_info->GetNumberOfInstructions();
+ return (trip_count * instr_num < kScalarHeuristicFullyUnrolledMaxInstrThreshold);
+ }
+
protected:
bool IsLoopTooBig(LoopAnalysisInfo* loop_analysis_info,
size_t instr_threshold,
diff --git a/compiler/optimizing/loop_analysis.h b/compiler/optimizing/loop_analysis.h
index bcb7b70494..57509ee410 100644
--- a/compiler/optimizing/loop_analysis.h
+++ b/compiler/optimizing/loop_analysis.h
@@ -160,6 +160,13 @@ class ArchNoOptsLoopHelper : public ArenaObject<kArenaAllocOptimization> {
// Returns 'false' by default, should be overridden by particular target loop helper.
virtual bool IsLoopPeelingEnabled() const { return false; }
+ // Returns whether it is beneficial to fully unroll the loop.
+ //
+ // Returns 'false' by default, should be overridden by particular target loop helper.
+ virtual bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info ATTRIBUTE_UNUSED) const {
+ return false;
+ }
+
// Returns optimal SIMD unrolling factor for the loop.
//
// Returns kNoUnrollingFactor by default, should be overridden by particular target loop helper.
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 440cd3351e..7d66155b39 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -422,6 +422,15 @@ static void TryToEvaluateIfCondition(HIf* instruction, HGraph* graph) {
}
}
+// Peel the first 'count' iterations of the loop.
+static void PeelByCount(HLoopInformation* loop_info, int count) {
+ for (int i = 0; i < count; i++) {
+ // Perform peeling.
+ PeelUnrollSimpleHelper helper(loop_info);
+ helper.DoPeeling();
+ }
+}
+
//
// Public methods.
//
@@ -811,6 +820,45 @@ bool HLoopOptimization::TryPeelingForLoopInvariantExitsElimination(LoopAnalysisI
return true;
}
+bool HLoopOptimization::TryFullUnrolling(LoopAnalysisInfo* analysis_info, bool generate_code) {
+ // Fully unroll loops with a known and small trip count.
+ int64_t trip_count = analysis_info->GetTripCount();
+ if (!arch_loop_helper_->IsLoopPeelingEnabled() ||
+ trip_count == LoopAnalysisInfo::kUnknownTripCount ||
+ !arch_loop_helper_->IsFullUnrollingBeneficial(analysis_info)) {
+ return false;
+ }
+
+ if (generate_code) {
+ // Peeling of the first N iterations (where N equals the trip count) will effectively
+ // eliminate the loop: after peeling we will have N sequential iterations copied into the loop
+ // preheader and the original loop. The trip count of this loop will be 0 as the sequential
+ // iterations are executed first and there are exactly N of them. Thus we can statically
+ // evaluate the loop exit condition to 'false' and fully eliminate it.
+ //
+ // Here is an example of full unrolling of a loop with a trip count 2:
+ //
+ // loop_cond_1
+ // loop_body_1 <- First iteration.
+ // |
+ // \ v
+ // ==\ loop_cond_2
+ // ==/ loop_body_2 <- Second iteration.
+ // / |
+ // <- v <-
+ // loop_cond \ loop_cond \ <- This cond is always false.
+ // loop_body _/ loop_body _/
+ //
+ HLoopInformation* loop_info = analysis_info->GetLoopInfo();
+ PeelByCount(loop_info, trip_count);
+ HIf* loop_hif = loop_info->GetHeader()->GetLastInstruction()->AsIf();
+ int32_t constant = loop_info->Contains(*loop_hif->IfTrueSuccessor()) ? 0 : 1;
+ loop_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
+ }
+
+ return true;
+}
+
bool HLoopOptimization::TryPeelingAndUnrolling(LoopNode* node) {
// Don't run peeling/unrolling if compiler_options_ is nullptr (i.e., running under tests)
// as InstructionSet is needed.
@@ -828,7 +876,8 @@ bool HLoopOptimization::TryPeelingAndUnrolling(LoopNode* node) {
return false;
}
- if (!TryPeelingForLoopInvariantExitsElimination(&analysis_info, /*generate_code*/ false) &&
+ if (!TryFullUnrolling(&analysis_info, /*generate_code*/ false) &&
+ !TryPeelingForLoopInvariantExitsElimination(&analysis_info, /*generate_code*/ false) &&
!TryUnrollingForBranchPenaltyReduction(&analysis_info, /*generate_code*/ false)) {
return false;
}
@@ -838,7 +887,8 @@ bool HLoopOptimization::TryPeelingAndUnrolling(LoopNode* node) {
return false;
}
- return TryPeelingForLoopInvariantExitsElimination(&analysis_info) ||
+ return TryFullUnrolling(&analysis_info) ||
+ TryPeelingForLoopInvariantExitsElimination(&analysis_info) ||
TryUnrollingForBranchPenaltyReduction(&analysis_info);
}
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index bc4792458b..644b740ed4 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -155,6 +155,12 @@ class HLoopOptimization : public HOptimization {
bool TryPeelingForLoopInvariantExitsElimination(LoopAnalysisInfo* analysis_info,
bool generate_code = true);
+ // Tries to perform whole loop unrolling for a small loop with a small trip count to eliminate
+ // the loop check overhead and to have more opportunities for inter-iteration optimizations.
+ // Returns whether transformation happened. 'generate_code' determines whether the optimization
+ // should be actually applied.
+ bool TryFullUnrolling(LoopAnalysisInfo* analysis_info, bool generate_code = true);
+
// Tries to apply scalar loop peeling and unrolling.
bool TryPeelingAndUnrolling(LoopNode* node);
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 50ce7559f5..8f822cce5a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2930,12 +2930,12 @@ std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind
return os << "Recursive";
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
return os << "BootImageLinkTimePcRelative";
- case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
- return os << "DirectAddress";
case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo:
return os << "BootImageRelRo";
case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry:
return os << "BssEntry";
+ case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
+ return os << "JitDirectAddress";
case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall:
return os << "RuntimeCall";
default:
@@ -2967,8 +2967,8 @@ bool HLoadClass::InstructionDataEquals(const HInstruction* other) const {
return false;
}
switch (GetLoadKind()) {
- case LoadKind::kBootImageAddress:
case LoadKind::kBootImageRelRo:
+ case LoadKind::kJitBootImageAddress:
case LoadKind::kJitTableAddress: {
ScopedObjectAccess soa(Thread::Current());
return GetClass().Get() == other_load_class->GetClass().Get();
@@ -2985,12 +2985,12 @@ std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs) {
return os << "ReferrersClass";
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
return os << "BootImageLinkTimePcRelative";
- case HLoadClass::LoadKind::kBootImageAddress:
- return os << "BootImageAddress";
case HLoadClass::LoadKind::kBootImageRelRo:
return os << "BootImageRelRo";
case HLoadClass::LoadKind::kBssEntry:
return os << "BssEntry";
+ case HLoadClass::LoadKind::kJitBootImageAddress:
+ return os << "JitBootImageAddress";
case HLoadClass::LoadKind::kJitTableAddress:
return os << "JitTableAddress";
case HLoadClass::LoadKind::kRuntimeCall:
@@ -3010,8 +3010,8 @@ bool HLoadString::InstructionDataEquals(const HInstruction* other) const {
return false;
}
switch (GetLoadKind()) {
- case LoadKind::kBootImageAddress:
case LoadKind::kBootImageRelRo:
+ case LoadKind::kJitBootImageAddress:
case LoadKind::kJitTableAddress: {
ScopedObjectAccess soa(Thread::Current());
return GetString().Get() == other_load_string->GetString().Get();
@@ -3025,12 +3025,12 @@ std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs) {
switch (rhs) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
return os << "BootImageLinkTimePcRelative";
- case HLoadString::LoadKind::kBootImageAddress:
- return os << "BootImageAddress";
case HLoadString::LoadKind::kBootImageRelRo:
return os << "BootImageRelRo";
case HLoadString::LoadKind::kBssEntry:
return os << "BssEntry";
+ case HLoadString::LoadKind::kJitBootImageAddress:
+ return os << "JitBootImageAddress";
case HLoadString::LoadKind::kJitTableAddress:
return os << "JitTableAddress";
case HLoadString::LoadKind::kRuntimeCall:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index cd8d07a17a..16a7417301 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -4439,18 +4439,18 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
// Used for boot image methods referenced by boot image code.
kBootImageLinkTimePcRelative,
- // Use ArtMethod* at a known address, embed the direct address in the code.
- // Used for app->boot calls with non-relocatable image and for JIT-compiled calls.
- kDirectAddress,
-
// Load from an entry in the .data.bimg.rel.ro using a PC-relative load.
// Used for app->boot calls with relocatable image.
kBootImageRelRo,
// Load from an entry in the .bss section using a PC-relative load.
- // Used for classes outside boot image when .bss is accessible with a PC-relative load.
+ // Used for methods outside boot image referenced by AOT-compiled app and boot image code.
kBssEntry,
+ // Use ArtMethod* at a known address, embed the direct address in the code.
+ // Used for JIT-compiled calls.
+ kJitDirectAddress,
+
// Make a runtime call to resolve and call the method. This is the last-resort-kind
// used when other kinds are unimplemented on a particular architecture.
kRuntimeCall,
@@ -4576,7 +4576,7 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
bool NeedsDexCacheOfDeclaringClass() const OVERRIDE;
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
- bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kDirectAddress; }
+ bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kJitDirectAddress; }
bool HasPcRelativeMethodLoadKind() const {
return GetMethodLoadKind() == MethodLoadKind::kBootImageLinkTimePcRelative ||
GetMethodLoadKind() == MethodLoadKind::kBootImageRelRo ||
@@ -6155,18 +6155,18 @@ class HLoadClass FINAL : public HInstruction {
// Used for boot image classes referenced by boot image code.
kBootImageLinkTimePcRelative,
- // Use a known boot image Class* address, embedded in the code by the codegen.
- // Used for boot image classes referenced by apps in JIT- and AOT-compiled code (non-PIC).
- kBootImageAddress,
-
// Load from an entry in the .data.bimg.rel.ro using a PC-relative load.
- // Used for boot image classes referenced by apps in AOT-compiled code (PIC).
+ // Used for boot image classes referenced by apps in AOT-compiled code.
kBootImageRelRo,
// Load from an entry in the .bss section using a PC-relative load.
- // Used for classes outside boot image when .bss is accessible with a PC-relative load.
+ // Used for classes outside boot image referenced by AOT-compiled app and boot image code.
kBssEntry,
+ // Use a known boot image Class* address, embedded in the code by the codegen.
+ // Used for boot image classes referenced by apps in JIT-compiled code.
+ kJitBootImageAddress,
+
// Load from the root table associated with the JIT compiled method.
kJitTableAddress,
@@ -6248,8 +6248,6 @@ class HLoadClass FINAL : public HInstruction {
return NeedsAccessCheck() ||
MustGenerateClinitCheck() ||
// If the class is in the boot image, the lookup in the runtime call cannot throw.
- // This keeps CanThrow() consistent between non-PIC (using kBootImageAddress) and
- // PIC and subsequently avoids a DCE behavior dependency on the PIC option.
((GetLoadKind() == LoadKind::kRuntimeCall ||
GetLoadKind() == LoadKind::kBssEntry) &&
!IsInBootImage());
@@ -6366,9 +6364,9 @@ inline void HLoadClass::AddSpecialInput(HInstruction* special_input) {
// The special input is used for PC-relative loads on some architectures,
// including literal pool loads, which are PC-relative too.
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
- GetLoadKind() == LoadKind::kBootImageAddress ||
GetLoadKind() == LoadKind::kBootImageRelRo ||
- GetLoadKind() == LoadKind::kBssEntry) << GetLoadKind();
+ GetLoadKind() == LoadKind::kBssEntry ||
+ GetLoadKind() == LoadKind::kJitBootImageAddress) << GetLoadKind();
DCHECK(special_input_.GetInstruction() == nullptr);
special_input_ = HUserRecord<HInstruction*>(special_input);
special_input->AddUseAt(this, 0);
@@ -6382,18 +6380,18 @@ class HLoadString FINAL : public HInstruction {
// Used for boot image strings referenced by boot image code.
kBootImageLinkTimePcRelative,
- // Use a known boot image String* address, embedded in the code by the codegen.
- // Used for boot image strings referenced by apps in JIT- and AOT-compiled code (non-PIC).
- kBootImageAddress,
-
// Load from an entry in the .data.bimg.rel.ro using a PC-relative load.
- // Used for boot image strings referenced by apps in AOT-compiled code (PIC).
+ // Used for boot image strings referenced by apps in AOT-compiled code.
kBootImageRelRo,
// Load from an entry in the .bss section using a PC-relative load.
- // Used for strings outside boot image when .bss is accessible with a PC-relative load.
+ // Used for strings outside boot image referenced by AOT-compiled app and boot image code.
kBssEntry,
+ // Use a known boot image String* address, embedded in the code by the codegen.
+ // Used for boot image strings referenced by apps in JIT-compiled code.
+ kJitBootImageAddress,
+
// Load from the root table associated with the JIT compiled method.
kJitTableAddress,
@@ -6459,8 +6457,8 @@ class HLoadString FINAL : public HInstruction {
bool NeedsEnvironment() const OVERRIDE {
LoadKind load_kind = GetLoadKind();
if (load_kind == LoadKind::kBootImageLinkTimePcRelative ||
- load_kind == LoadKind::kBootImageAddress ||
load_kind == LoadKind::kBootImageRelRo ||
+ load_kind == LoadKind::kJitBootImageAddress ||
load_kind == LoadKind::kJitTableAddress) {
return false;
}
@@ -6533,9 +6531,9 @@ inline void HLoadString::AddSpecialInput(HInstruction* special_input) {
// The special input is used for PC-relative loads on some architectures,
// including literal pool loads, which are PC-relative too.
DCHECK(GetLoadKind() == LoadKind::kBootImageLinkTimePcRelative ||
- GetLoadKind() == LoadKind::kBootImageAddress ||
GetLoadKind() == LoadKind::kBootImageRelRo ||
- GetLoadKind() == LoadKind::kBssEntry) << GetLoadKind();
+ GetLoadKind() == LoadKind::kBssEntry ||
+ GetLoadKind() == LoadKind::kJitBootImageAddress) << GetLoadKind();
// HLoadString::GetInputRecords() returns an empty array at this point,
// so use the GetInputRecords() from the base class to set the input record.
DCHECK(special_input_.GetInstruction() == nullptr);
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 1c1cf28294..04301f5366 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -34,8 +34,6 @@
namespace vixl32 = vixl::aarch32;
-using vixl32::r0;
-
namespace art {
// Run the tests only on host.
@@ -194,6 +192,7 @@ TEST_ISA(kMips64)
#ifdef ART_ENABLE_CODEGEN_arm
TEST_F(OptimizingCFITest, kThumb2Adjust) {
+ using vixl32::r0;
std::vector<uint8_t> expected_asm(
expected_asm_kThumb2_adjust,
expected_asm_kThumb2_adjust + arraysize(expected_asm_kThumb2_adjust));
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 2f530a911a..939802626c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -726,12 +726,6 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
GetCompilerDriver(),
codegen->GetInstructionSet(),
code_allocator->GetMemory(),
- // Follow Quick's behavior and set the frame size to zero if it is
- // considered "empty" (see the definition of
- // art::CodeGenerator::HasEmptyFrame).
- codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
- codegen->GetCoreSpillMask(),
- codegen->GetFpuSpillMask(),
ArrayRef<const uint8_t>(method_info),
ArrayRef<const uint8_t>(stack_map),
ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
@@ -1181,9 +1175,6 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
GetCompilerDriver(),
jni_compiled_method.GetInstructionSet(),
jni_compiled_method.GetCode(),
- jni_compiled_method.GetFrameSize(),
- jni_compiled_method.GetCoreSpillMask(),
- jni_compiled_method.GetFpSpillMask(),
ArrayRef<const uint8_t>(method_info),
ArrayRef<const uint8_t>(stack_map),
jni_compiled_method.GetCfi(),
@@ -1275,9 +1266,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
stack_map_data,
method_info_data,
roots_data,
- jni_compiled_method.GetFrameSize(),
- jni_compiled_method.GetCoreSpillMask(),
- jni_compiled_method.GetFpSpillMask(),
jni_compiled_method.GetCode().data(),
jni_compiled_method.GetCode().size(),
data_size,
@@ -1394,9 +1382,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
stack_map_data,
method_info_data,
roots_data,
- codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
- codegen->GetCoreSpillMask(),
- codegen->GetFpuSpillMask(),
code_allocator.GetMemory().data(),
code_allocator.GetMemory().size(),
data_size,
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
index f18ecc1458..a7e97a1ce5 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ b/compiler/optimizing/pc_relative_fixups_mips.cc
@@ -74,9 +74,9 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
switch (load_kind) {
case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadClass::LoadKind::kBootImageAddress:
case HLoadClass::LoadKind::kBootImageRelRo:
case HLoadClass::LoadKind::kBssEntry:
+ case HLoadClass::LoadKind::kJitBootImageAddress:
// Add a base register for PC-relative literals on R2.
InitializePCRelativeBasePointer();
load_class->AddSpecialInput(base_);
@@ -90,9 +90,9 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
HLoadString::LoadKind load_kind = load_string->GetLoadKind();
switch (load_kind) {
case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
- case HLoadString::LoadKind::kBootImageAddress:
case HLoadString::LoadKind::kBootImageRelRo:
case HLoadString::LoadKind::kBssEntry:
+ case HLoadString::LoadKind::kJitBootImageAddress:
// Add a base register for PC-relative literals on R2.
InitializePCRelativeBasePointer();
load_string->AddSpecialInput(base_);
diff --git a/compiler/optimizing/pc_relative_fixups_x86.cc b/compiler/optimizing/pc_relative_fixups_x86.cc
index 05ec765b19..41f2f776fc 100644
--- a/compiler/optimizing/pc_relative_fixups_x86.cc
+++ b/compiler/optimizing/pc_relative_fixups_x86.cc
@@ -17,6 +17,7 @@
#include "pc_relative_fixups_x86.h"
#include "code_generator_x86.h"
#include "intrinsics_x86.h"
+#include "runtime.h"
namespace art {
namespace x86 {
@@ -238,7 +239,7 @@ class PCRelativeHandlerVisitor : public HGraphVisitor {
case Intrinsics::kIntegerValueOf:
// This intrinsic can be call free if it loads the address of the boot image object.
// If we're compiling PIC, we need the address base for loading from .data.bimg.rel.ro.
- if (!codegen_->GetCompilerOptions().GetCompilePic()) {
+ if (Runtime::Current()->UseJitCompilation()) {
break;
}
FALLTHROUGH_INTENDED;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f3fe62561f..0d622484ee 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -563,7 +563,7 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
ArtMethod* method = cl->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
dex_method_index, dex_cache, loader, /* referrer */ nullptr, kDirect);
DCHECK(method != nullptr);
- mirror::Class* declaring_class = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
DCHECK(declaring_class != nullptr);
DCHECK(declaring_class->IsStringClass())
<< "Expected String class: " << declaring_class->PrettyDescriptor();
@@ -572,7 +572,7 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst
}
instr->SetReferenceTypeInfo(
ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true));
- } else if (IsAdmissible(klass.Ptr())) {
+ } else if (IsAdmissible(klass)) {
ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass);
is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes();
instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact));
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 27482ac5bf..5c2f57e314 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -63,10 +63,6 @@ static bool IsInBootImage(ArtMethod* method) {
return false;
}
-static bool AOTCanEmbedMethod(ArtMethod* method, const CompilerOptions& options) {
- return IsInBootImage(method) && !options.GetCompilePic();
-}
-
static bool BootImageAOTCanEmbedMethod(ArtMethod* method, const CompilerOptions& compiler_options) {
DCHECK(compiler_options.IsBootImage());
ScopedObjectAccess soa(Thread::Current());
@@ -120,11 +116,10 @@ void HSharpening::SharpenInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke,
method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBssEntry;
}
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
- } else if (Runtime::Current()->UseJitCompilation() ||
- AOTCanEmbedMethod(callee, compiler_options)) {
+ } else if (Runtime::Current()->UseJitCompilation()) {
// JIT or on-device AOT compilation referencing a boot image method.
// Use the method address directly.
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
method_load_data = reinterpret_cast<uintptr_t>(callee);
code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
} else if (IsInBootImage(callee)) {
@@ -199,8 +194,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
if (runtime->UseJitCompilation()) {
DCHECK(!compiler_options.GetCompilePic());
if (is_in_boot_image) {
- // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
- desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
+ desired_load_kind = HLoadClass::LoadKind::kJitBootImageAddress;
} else if (klass != nullptr) {
desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
} else {
@@ -212,11 +206,7 @@ HLoadClass::LoadKind HSharpening::ComputeLoadClassKind(
}
} else if (is_in_boot_image) {
// AOT app compilation, boot image class.
- if (codegen->GetCompilerOptions().GetCompilePic()) {
- desired_load_kind = HLoadClass::LoadKind::kBootImageRelRo;
- } else {
- desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
- }
+ desired_load_kind = HLoadClass::LoadKind::kBootImageRelRo;
} else {
// Not JIT and the klass is not in boot image.
desired_load_kind = HLoadClass::LoadKind::kBssEntry;
@@ -348,7 +338,7 @@ void HSharpening::ProcessLoadString(
string = class_linker->LookupString(string_index, dex_cache.Get());
if (string != nullptr) {
if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
- desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
+ desired_load_kind = HLoadString::LoadKind::kJitBootImageAddress;
} else {
desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
}
@@ -359,11 +349,7 @@ void HSharpening::ProcessLoadString(
// AOT app compilation. Try to lookup the string without allocating if not found.
string = class_linker->LookupString(string_index, dex_cache.Get());
if (string != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
- if (codegen->GetCompilerOptions().GetCompilePic()) {
- desired_load_kind = HLoadString::LoadKind::kBootImageRelRo;
- } else {
- desired_load_kind = HLoadString::LoadKind::kBootImageAddress;
- }
+ desired_load_kind = HLoadString::LoadKind::kBootImageRelRo;
} else {
desired_load_kind = HLoadString::LoadKind::kBssEntry;
}
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index a65fbcc514..da6c711659 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -179,7 +179,7 @@ void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
ScopedObjectAccess soa(Thread::Current());
DCHECK(IsSameDexFile(*outer_dex_file, *method->GetDexFile()));
}
- uint32_t dex_method_index = method->GetDexMethodIndexUnchecked();
+ uint32_t dex_method_index = method->GetDexMethodIndex();
entry[InlineInfo::kMethodInfoIndex] = method_infos_.Dedup({dex_method_index});
}
current_inline_infos_.push_back(entry);
@@ -196,8 +196,7 @@ void StackMapStream::BeginInlineInfoEntry(ArtMethod* method,
if (encode_art_method) {
CHECK_EQ(inline_info.GetArtMethod(), method);
} else {
- CHECK_EQ(method_infos_[inline_info.GetMethodInfoIndex()][0],
- method->GetDexMethodIndexUnchecked());
+ CHECK_EQ(method_infos_[inline_info.GetMethodInfoIndex()][0], method->GetDexMethodIndex());
}
});
}
@@ -311,9 +310,9 @@ size_t StackMapStream::PrepareForFillIn() {
EncodeUnsignedLeb128(&out_, num_dex_registers_);
BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&out_, out_.size() * kBitsPerByte);
stack_maps_.Encode(out);
+ inline_infos_.Encode(out);
register_masks_.Encode(out);
stack_masks_.Encode(out);
- inline_infos_.Encode(out);
dex_register_masks_.Encode(out);
dex_register_maps_.Encode(out);
dex_register_catalog_.Encode(out);
diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc
index 05250a4157..ebb631e33c 100644
--- a/compiler/utils/arm/assembler_arm_vixl.cc
+++ b/compiler/utils/arm/assembler_arm_vixl.cc
@@ -26,9 +26,6 @@
using namespace vixl::aarch32; // NOLINT(build/namespaces)
-using vixl::ExactAssemblyScope;
-using vixl::CodeBufferCheckScope;
-
namespace art {
namespace arm {
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index cbc6424466..0b68620e6e 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -253,7 +253,14 @@ NO_RETURN static void Usage(const char* fmt, ...) {
UsageError(" to the file descriptor specified by --oat-fd.");
UsageError(" Example: --oat-location=/data/dalvik-cache/system@app@Calculator.apk.oat");
UsageError("");
- UsageError(" --oat-symbols=<file.oat>: specifies an oat output destination with full symbols.");
+ UsageError(" --oat-symbols=<file.oat>: specifies a destination where the oat file is copied.");
+ UsageError(" This is equivalent to file copy as build post-processing step.");
+ UsageError(" It is intended to be used with --strip; the copy happens before stripping.");
+ UsageError(" Example: --oat-symbols=/symbols/system/framework/boot.oat");
+ UsageError("");
+ UsageError(" --strip: remove all debugging sections at the end (but keep mini-debug-info).");
+ UsageError(" This is equivalent to the \"strip\" command as build post-processing step.");
+ UsageError(" It is intended to be used with --oat-symbols; stripping happens after the copy.");
UsageError(" Example: --oat-symbols=/symbols/system/framework/boot.oat");
UsageError("");
UsageError(" --image=<file.art>: specifies an output image filename.");
@@ -1180,6 +1187,7 @@ class Dex2Oat FINAL {
AssignIfExists(args, M::DexLocations, &dex_locations_);
AssignIfExists(args, M::OatFiles, &oat_filenames_);
AssignIfExists(args, M::OatSymbols, &parser_options->oat_symbols);
+ AssignTrueIfExists(args, M::Strip, &strip_);
AssignIfExists(args, M::ImageFilenames, &image_filenames_);
AssignIfExists(args, M::ZipFd, &zip_fd_);
AssignIfExists(args, M::ZipLocation, &zip_location_);
@@ -2175,7 +2183,7 @@ class Dex2Oat FINAL {
VLOG(compiler) << "Oat file written successfully: " << oat_filenames_[i];
oat_writer.reset();
- elf_writer.reset();
+ // We may still need the ELF writer later for stripping.
}
}
@@ -2194,21 +2202,16 @@ class Dex2Oat FINAL {
return true;
}
- // Create a copy from stripped to unstripped.
- bool CopyStrippedToUnstripped() {
+ // Copy the full oat files to the symbols directory and then strip the originals.
+ bool CopyOatFilesToSymbolsDirectoryAndStrip() {
for (size_t i = 0; i < oat_unstripped_.size(); ++i) {
// If we don't want to strip in place, copy from stripped location to unstripped location.
// We need to strip after image creation because FixupElf needs to use .strtab.
if (strcmp(oat_unstripped_[i], oat_filenames_[i]) != 0) {
- // If the oat file is still open, flush it.
- if (oat_files_[i].get() != nullptr && oat_files_[i]->IsOpened()) {
- if (!FlushCloseOutputFile(&oat_files_[i])) {
- return false;
- }
- }
+ DCHECK(oat_files_[i].get() != nullptr && oat_files_[i]->IsOpened());
TimingLogger::ScopedTiming t("dex2oat OatFile copy", timings_);
- std::unique_ptr<File> in(OS::OpenFileForReading(oat_filenames_[i]));
+ std::unique_ptr<File>& in = oat_files_[i];
std::unique_ptr<File> out(OS::CreateEmptyFile(oat_unstripped_[i]));
int64_t in_length = in->GetLength();
if (in_length < 0) {
@@ -2224,6 +2227,14 @@ class Dex2Oat FINAL {
return false;
}
VLOG(compiler) << "Oat file copied successfully (unstripped): " << oat_unstripped_[i];
+
+ if (strip_) {
+ TimingLogger::ScopedTiming t2("dex2oat OatFile strip", timings_);
+ if (!elf_writers_[i]->StripDebugInfo()) {
+ PLOG(ERROR) << "Failed to strip oat file: " << in->GetPath();
+ return false;
+ }
+ }
}
}
return true;
@@ -2239,11 +2250,10 @@ class Dex2Oat FINAL {
return true;
}
- bool FlushCloseOutputFile(std::unique_ptr<File>* file) {
- if (file->get() != nullptr) {
- std::unique_ptr<File> tmp(file->release());
- if (tmp->FlushCloseOrErase() != 0) {
- PLOG(ERROR) << "Failed to flush and close output file: " << tmp->GetPath();
+ bool FlushCloseOutputFile(File* file) {
+ if (file != nullptr) {
+ if (file->FlushCloseOrErase() != 0) {
+ PLOG(ERROR) << "Failed to flush and close output file: " << file->GetPath();
return false;
}
}
@@ -2266,7 +2276,7 @@ class Dex2Oat FINAL {
bool result = true;
for (auto& files : { &vdex_files_, &oat_files_ }) {
for (size_t i = 0; i < files->size(); ++i) {
- result &= FlushCloseOutputFile(&(*files)[i]);
+ result &= FlushCloseOutputFile((*files)[i].get());
}
}
return result;
@@ -2825,6 +2835,7 @@ class Dex2Oat FINAL {
std::string oat_location_;
std::vector<const char*> oat_filenames_;
std::vector<const char*> oat_unstripped_;
+ bool strip_;
int oat_fd_;
int input_vdex_fd_;
int output_vdex_fd_;
@@ -2947,15 +2958,9 @@ static dex2oat::ReturnCode CompileImage(Dex2Oat& dex2oat) {
return dex2oat::ReturnCode::kOther;
}
- // Flush boot.oat. We always expect the output file by name, and it will be re-opened from the
- // unstripped name. Do not close the file if we are compiling the image with an oat fd since the
- // image writer will require this fd to generate the image.
- if (dex2oat.ShouldKeepOatFileOpen()) {
- if (!dex2oat.FlushOutputFiles()) {
- dex2oat.EraseOutputFiles();
- return dex2oat::ReturnCode::kOther;
- }
- } else if (!dex2oat.FlushCloseOutputFiles()) {
+ // Flush boot.oat. Keep it open as we might still modify it later (strip it).
+ if (!dex2oat.FlushOutputFiles()) {
+ dex2oat.EraseOutputFiles();
return dex2oat::ReturnCode::kOther;
}
@@ -2974,7 +2979,7 @@ static dex2oat::ReturnCode CompileImage(Dex2Oat& dex2oat) {
}
// Copy stripped to unstripped location, if necessary.
- if (!dex2oat.CopyStrippedToUnstripped()) {
+ if (!dex2oat.CopyOatFilesToSymbolsDirectoryAndStrip()) {
return dex2oat::ReturnCode::kOther;
}
@@ -3012,7 +3017,7 @@ static dex2oat::ReturnCode CompileApp(Dex2Oat& dex2oat) {
// Copy stripped to unstripped location, if necessary. This will implicitly flush & close the
// stripped versions. If this is given, we expect to be able to open writable files by name.
- if (!dex2oat.CopyStrippedToUnstripped()) {
+ if (!dex2oat.CopyOatFilesToSymbolsDirectoryAndStrip()) {
return dex2oat::ReturnCode::kOther;
}
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index ae8e1b7597..4247e176aa 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -186,48 +186,15 @@ class Dex2oatImageTest : public CommonRuntimeTest {
return RunDex2Oat(argv, error_msg);
}
- int RunDex2Oat(const std::vector<std::string>& args, std::string* error_msg) {
- int link[2];
-
- if (pipe(link) == -1) {
- return false;
- }
-
- pid_t pid = fork();
- if (pid == -1) {
- return false;
- }
-
- if (pid == 0) {
- // We need dex2oat to actually log things.
- setenv("ANDROID_LOG_TAGS", "*:f", 1);
- dup2(link[1], STDERR_FILENO);
- close(link[0]);
- close(link[1]);
- std::vector<const char*> c_args;
- for (const std::string& str : args) {
- c_args.push_back(str.c_str());
- }
- c_args.push_back(nullptr);
- execv(c_args[0], const_cast<char* const*>(c_args.data()));
- exit(1);
- UNREACHABLE();
- } else {
- close(link[1]);
- char buffer[128];
- memset(buffer, 0, 128);
- ssize_t bytes_read = 0;
-
- while (TEMP_FAILURE_RETRY(bytes_read = read(link[0], buffer, 128)) > 0) {
- *error_msg += std::string(buffer, bytes_read);
- }
- close(link[0]);
- int status = -1;
- if (waitpid(pid, &status, 0) != -1) {
- return (status == 0);
- }
+ bool RunDex2Oat(const std::vector<std::string>& args, std::string* error_msg) {
+ // We only want fatal logging for the error message.
+ auto post_fork_fn = []() { return setenv("ANDROID_LOG_TAGS", "*:f", 1) == 0; };
+ ForkAndExecResult res = ForkAndExec(args, post_fork_fn, error_msg);
+ if (res.stage != ForkAndExecResult::kFinished) {
+ *error_msg = strerror(errno);
return false;
}
+ return res.StandardSuccess();
}
};
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
index bf9edf7384..710f14c4c5 100644
--- a/dex2oat/dex2oat_options.cc
+++ b/dex2oat/dex2oat_options.cc
@@ -98,6 +98,8 @@ static void AddGeneratedArtifactMappings(Builder& builder) {
.Define("--oat-symbols=_")
.WithType<std::vector<std::string>>().AppendValues()
.IntoKey(M::OatSymbols)
+ .Define("--strip")
+ .IntoKey(M::Strip)
.Define("--oat-fd=_")
.WithType<int>()
.IntoKey(M::OatFd)
diff --git a/dex2oat/dex2oat_options.def b/dex2oat/dex2oat_options.def
index fe5c4e69a7..c8cb7e7b72 100644
--- a/dex2oat/dex2oat_options.def
+++ b/dex2oat/dex2oat_options.def
@@ -47,6 +47,7 @@ DEX2OAT_OPTIONS_KEY (int, DmFd)
DEX2OAT_OPTIONS_KEY (std::string, DmFile)
DEX2OAT_OPTIONS_KEY (std::vector<std::string>, OatFiles)
DEX2OAT_OPTIONS_KEY (std::vector<std::string>, OatSymbols)
+DEX2OAT_OPTIONS_KEY (Unit, Strip)
DEX2OAT_OPTIONS_KEY (int, OatFd)
DEX2OAT_OPTIONS_KEY (std::string, OatLocation)
DEX2OAT_OPTIONS_KEY (bool, Watchdog)
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index ad44624f76..2b96684fdd 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -33,6 +33,7 @@
#include "dex/art_dex_file_loader.h"
#include "dex/base64_test_util.h"
#include "dex/bytecode_utils.h"
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
@@ -230,47 +231,15 @@ class Dex2oatTest : public Dex2oatEnvironmentTest {
LOG(ERROR) << all_args;
}
- int link[2];
-
- if (pipe(link) == -1) {
- return false;
- }
-
- pid_t pid = fork();
- if (pid == -1) {
- return false;
- }
-
- if (pid == 0) {
- // We need dex2oat to actually log things.
- setenv("ANDROID_LOG_TAGS", "*:d", 1);
- dup2(link[1], STDERR_FILENO);
- close(link[0]);
- close(link[1]);
- std::vector<const char*> c_args;
- for (const std::string& str : argv) {
- c_args.push_back(str.c_str());
- }
- c_args.push_back(nullptr);
- execv(c_args[0], const_cast<char* const*>(c_args.data()));
- exit(1);
- UNREACHABLE();
- } else {
- close(link[1]);
- char buffer[128];
- memset(buffer, 0, 128);
- ssize_t bytes_read = 0;
-
- while (TEMP_FAILURE_RETRY(bytes_read = read(link[0], buffer, 128)) > 0) {
- output_ += std::string(buffer, bytes_read);
- }
- close(link[0]);
- int status = -1;
- if (waitpid(pid, &status, 0) != -1) {
- success_ = (status == 0);
- }
- return status;
+ // We need dex2oat to actually log things.
+ auto post_fork_fn = []() { return setenv("ANDROID_LOG_TAGS", "*:d", 1) == 0; };
+ ForkAndExecResult res = ForkAndExec(argv, post_fork_fn, &output_);
+ if (res.stage != ForkAndExecResult::kFinished) {
+ *error_msg = strerror(errno);
+ return -1;
}
+ success_ = res.StandardSuccess();
+ return res.status_code;
}
std::string output_ = "";
@@ -992,19 +961,10 @@ class Dex2oatUnquickenTest : public Dex2oatTest {
// Iterate over the dex files and ensure there is no quickened instruction.
for (const OatDexFile* oat_dex_file : odex_file->GetOatDexFiles()) {
std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
- for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
- const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data != nullptr) {
- for (ClassDataItemIterator class_it(*dex_file, class_data);
- class_it.HasNext();
- class_it.Next()) {
- if (class_it.IsAtMethod() && class_it.GetMethodCodeItem() != nullptr) {
- for (const DexInstructionPcPair& inst :
- CodeItemInstructionAccessor(*dex_file, class_it.GetMethodCodeItem())) {
- ASSERT_FALSE(inst->IsQuickened()) << inst->Opcode() << " " << output_;
- }
- }
+ for (ClassAccessor accessor : dex_file->GetClasses()) {
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ for (const DexInstructionPcPair& inst : method.GetInstructions()) {
+ ASSERT_FALSE(inst->IsQuickened()) << inst->Opcode() << " " << output_;
}
}
}
@@ -1308,19 +1268,16 @@ TEST_F(Dex2oatTest, LayoutSections) {
{
const DexFile::TypeId* type_id = dex->FindTypeId("LManyMethods;");
dex::TypeIndex type_idx = dex->GetIndexForTypeId(*type_id);
- const DexFile::ClassDef* class_def = dex->FindClassDef(type_idx);
- ClassDataItemIterator it(*dex, dex->GetClassData(*class_def));
- it.SkipAllFields();
+ ClassAccessor accessor(*dex, *dex->FindClassDef(type_idx));
std::set<size_t> code_item_offsets;
- for (; it.HasNextMethod(); it.Next()) {
- const uint16_t method_idx = it.GetMemberIndex();
- const size_t code_item_offset = it.GetMethodCodeItemOffset();
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ const uint16_t method_idx = method.GetIndex();
+ const size_t code_item_offset = method.GetCodeItemOffset();
if (code_item_offsets.insert(code_item_offset).second) {
// Unique code item, add the method index.
methods.push_back(method_idx);
}
}
- DCHECK(!it.HasNext());
}
ASSERT_GE(methods.size(), 8u);
std::vector<uint16_t> hot_methods = {methods[1], methods[3], methods[5]};
@@ -1423,11 +1380,10 @@ TEST_F(Dex2oatTest, LayoutSections) {
size_t unused_count = 0;
// Visit all of the methods of the main class and cross-reference the method indices to their
- ClassDataItemIterator it(*dex_file, dex_file->GetClassData(*class_def));
- it.SkipAllFields();
- for (; it.HasNextMethod(); it.Next()) {
- const size_t method_idx = it.GetMemberIndex();
- const size_t code_item_offset = it.GetMethodCodeItemOffset();
+ ClassAccessor accessor(*dex_file, *class_def);
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ const size_t method_idx = method.GetIndex();
+ const size_t code_item_offset = method.GetCodeItemOffset();
const bool is_hot = ContainsElement(hot_methods, method_idx);
const bool is_startup = ContainsElement(startup_methods, method_idx);
const bool is_post_startup = ContainsElement(post_methods, method_idx);
@@ -1449,17 +1405,14 @@ TEST_F(Dex2oatTest, LayoutSections) {
++unused_count;
} else {
// or this method is part of the last code item and the end is 4 byte aligned.
- ClassDataItemIterator it2(*dex_file, dex_file->GetClassData(*class_def));
- it2.SkipAllFields();
- for (; it2.HasNextMethod(); it2.Next()) {
- EXPECT_LE(it2.GetMethodCodeItemOffset(), code_item_offset);
+ for (const ClassAccessor::Method& method2 : accessor.GetMethods()) {
+ EXPECT_LE(method2.GetCodeItemOffset(), code_item_offset);
}
uint32_t code_item_size = dex_file->FindCodeItemOffset(*class_def, method_idx);
EXPECT_EQ((code_item_offset + code_item_size) % 4, 0u);
}
}
}
- DCHECK(!it.HasNext());
EXPECT_GT(hot_count, 0u);
EXPECT_GT(post_startup_count, 0u);
EXPECT_GT(startup_count, 0u);
@@ -1510,14 +1463,13 @@ TEST_F(Dex2oatTest, GenerateCompactDex) {
EXPECT_LE(header.OwnedDataBegin(), header.OwnedDataEnd());
EXPECT_LE(header.OwnedDataBegin(), header.data_size_);
EXPECT_LE(header.OwnedDataEnd(), header.data_size_);
- for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
- class_def.VisitMethods(dex_file.get(), [&](const ClassDataItemIterator& it) {
- if (it.GetMethodCodeItemOffset() != 0u) {
- ASSERT_GE(it.GetMethodCodeItemOffset(), header.OwnedDataBegin());
- ASSERT_LT(it.GetMethodCodeItemOffset(), header.OwnedDataEnd());
+ for (ClassAccessor accessor : dex_file->GetClasses()) {
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ if (method.GetCodeItemOffset() != 0u) {
+ ASSERT_GE(method.GetCodeItemOffset(), header.OwnedDataBegin());
+ ASSERT_LT(method.GetCodeItemOffset(), header.OwnedDataEnd());
}
- });
+ }
}
// Test that the owned sections don't overlap.
for (const std::unique_ptr<const CompactDexFile>& other_dex : compact_dex_files) {
@@ -1948,26 +1900,15 @@ TEST_F(Dex2oatTest, QuickenedInput) {
MutateDexFile(temp_dex.GetFile(), GetTestDexFileName("ManyMethods"), [] (DexFile* dex) {
bool mutated_successfully = false;
// Change the dex instructions to make an opcode that spans past the end of the code item.
- for (size_t i = 0; i < dex->NumClassDefs(); ++i) {
- const DexFile::ClassDef& def = dex->GetClassDef(i);
- const uint8_t* data = dex->GetClassData(def);
- if (data == nullptr) {
- continue;
- }
- ClassDataItemIterator it(*dex, data);
- it.SkipAllFields();
- while (it.HasNextMethod()) {
- DexFile::CodeItem* item = const_cast<DexFile::CodeItem*>(it.GetMethodCodeItem());
- if (item != nullptr) {
- CodeItemInstructionAccessor instructions(*dex, item);
- // Make a quickened instruction that doesn't run past the end of the code item.
- if (instructions.InsnsSizeInCodeUnits() > 2) {
- const_cast<Instruction&>(instructions.InstructionAt(0)).SetOpcode(
- Instruction::IGET_BYTE_QUICK);
- mutated_successfully = true;
- }
+ for (ClassAccessor accessor : dex->GetClasses()) {
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ CodeItemInstructionAccessor instructions = method.GetInstructions();
+ // Make a quickened instruction that doesn't run past the end of the code item.
+ if (instructions.InsnsSizeInCodeUnits() > 2) {
+ const_cast<Instruction&>(instructions.InstructionAt(0)).SetOpcode(
+ Instruction::IGET_BYTE_QUICK);
+ mutated_successfully = true;
}
- it.Next();
}
}
CHECK(mutated_successfully)
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64.cc b/dex2oat/linker/arm64/relative_patcher_arm64.cc
index dd0fcfe0be..0497d4f966 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64.cc
@@ -57,7 +57,6 @@ constexpr uint32_t kAdrpThunkSize = 8u;
inline bool IsAdrpPatch(const LinkerPatch& patch) {
switch (patch.GetType()) {
- case LinkerPatch::Type::kCall:
case LinkerPatch::Type::kCallRelative:
case LinkerPatch::Type::kBakerReadBarrierBranch:
return false;
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
index 07e6860f9c..9e3bb978fb 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
@@ -726,15 +726,24 @@ TEST_F(Arm64RelativePatcherTestDefault, CallOtherJustTooFarAfter) {
ArrayRef<const LinkerPatch>(),
bl_offset_in_method1 + just_over_max_positive_disp);
ASSERT_EQ(expected_last_method_idx, last_method_idx);
+ uint32_t method_after_thunk_idx = last_method_idx;
+ if (sizeof(OatQuickMethodHeader) < kArm64Alignment) {
+ // The thunk needs to start on a kArm64Alignment-aligned address before the address where the
+ // last method would have been if there was no thunk. If the size of the OatQuickMethodHeader
+ // is at least kArm64Alignment, the thunk start shall fit between the previous filler method
+ // and that address. Otherwise, it shall be inserted before that filler method.
+ method_after_thunk_idx -= 1u;
+ }
uint32_t method1_offset = GetMethodOffset(1u);
- uint32_t last_method_offset = GetMethodOffset(last_method_idx);
- ASSERT_TRUE(IsAligned<kArm64Alignment>(last_method_offset));
- uint32_t last_method_header_offset = last_method_offset - sizeof(OatQuickMethodHeader);
+ uint32_t method_after_thunk_offset = GetMethodOffset(method_after_thunk_idx);
+ ASSERT_TRUE(IsAligned<kArm64Alignment>(method_after_thunk_offset));
+ uint32_t method_after_thunk_header_offset =
+ method_after_thunk_offset - sizeof(OatQuickMethodHeader);
uint32_t thunk_size = MethodCallThunkSize();
- uint32_t thunk_offset = RoundDown(last_method_header_offset - thunk_size, kArm64Alignment);
+ uint32_t thunk_offset = RoundDown(method_after_thunk_header_offset - thunk_size, kArm64Alignment);
DCHECK_EQ(thunk_offset + thunk_size + CodeAlignmentSize(thunk_offset + thunk_size),
- last_method_header_offset);
+ method_after_thunk_header_offset);
uint32_t diff = thunk_offset - (method1_offset + bl_offset_in_method1);
CHECK_ALIGNED(diff, 4u);
ASSERT_LT(diff, 128 * MB);
diff --git a/dex2oat/linker/elf_writer.h b/dex2oat/linker/elf_writer.h
index cd8cf4c54e..637330c835 100644
--- a/dex2oat/linker/elf_writer.h
+++ b/dex2oat/linker/elf_writer.h
@@ -77,6 +77,7 @@ class ElfWriter {
virtual void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) = 0;
virtual void WriteDynamicSection() = 0;
virtual void WriteDebugInfo(const debug::DebugInfo& debug_info) = 0;
+ virtual bool StripDebugInfo() = 0;
virtual bool End() = 0;
// Get the ELF writer's stream. This stream can be used for writing data directly
diff --git a/dex2oat/linker/elf_writer_quick.cc b/dex2oat/linker/elf_writer_quick.cc
index 8f6ff702cc..4e7d636dbf 100644
--- a/dex2oat/linker/elf_writer_quick.cc
+++ b/dex2oat/linker/elf_writer_quick.cc
@@ -117,6 +117,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
void EndDataBimgRelRo(OutputStream* data_bimg_rel_ro) OVERRIDE;
void WriteDynamicSection() OVERRIDE;
void WriteDebugInfo(const debug::DebugInfo& debug_info) OVERRIDE;
+ bool StripDebugInfo() OVERRIDE;
bool End() OVERRIDE;
virtual OutputStream* GetStream() OVERRIDE;
@@ -280,10 +281,6 @@ void ElfWriterQuick<ElfTypes>::PrepareDebugInfo(const debug::DebugInfo& debug_in
template <typename ElfTypes>
void ElfWriterQuick<ElfTypes>::WriteDebugInfo(const debug::DebugInfo& debug_info) {
if (!debug_info.Empty()) {
- if (compiler_options_.GetGenerateDebugInfo()) {
- // Generate all the debug information we can.
- debug::WriteDebugInfo(builder_.get(), debug_info, kCFIFormat, true /* write_oat_patches */);
- }
if (compiler_options_.GetGenerateMiniDebugInfo()) {
// Wait for the mini-debug-info generation to finish and write it to disk.
Thread* self = Thread::Current();
@@ -291,10 +288,21 @@ void ElfWriterQuick<ElfTypes>::WriteDebugInfo(const debug::DebugInfo& debug_info
debug_info_thread_pool_->Wait(self, true, false);
builder_->WriteSection(".gnu_debugdata", debug_info_task_->GetResult());
}
+ // The Strip method expects debug info to be last (mini-debug-info is not stripped).
+ if (compiler_options_.GetGenerateDebugInfo()) {
+ // Generate all the debug information we can.
+ debug::WriteDebugInfo(builder_.get(), debug_info, kCFIFormat, true /* write_oat_patches */);
+ }
}
}
template <typename ElfTypes>
+bool ElfWriterQuick<ElfTypes>::StripDebugInfo() {
+ off_t file_size = builder_->Strip();
+ return elf_file_->SetLength(file_size) == 0;
+}
+
+template <typename ElfTypes>
bool ElfWriterQuick<ElfTypes>::End() {
builder_->End();
if (compiler_options_.GetGenerateBuildId()) {
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index de9c3d831d..e10f9b3feb 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -27,6 +27,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/bit_memory_region.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
#include "base/globals.h"
@@ -86,6 +87,68 @@ using ::art::mirror::String;
namespace art {
namespace linker {
+static inline size_t RelocationIndex(size_t relocation_offset, PointerSize target_ptr_size) {
+ static_assert(sizeof(GcRoot<mirror::Object>) == sizeof(mirror::HeapReference<mirror::Object>),
+ "Expecting heap GC roots and references to have the same size.");
+ DCHECK_LE(sizeof(GcRoot<mirror::Object>), static_cast<size_t>(target_ptr_size));
+ DCHECK_ALIGNED(relocation_offset, sizeof(GcRoot<mirror::Object>));
+ return relocation_offset / sizeof(GcRoot<mirror::Object>);
+}
+
+static ArrayRef<const uint8_t> MaybeCompressData(ArrayRef<const uint8_t> source,
+ ImageHeader::StorageMode image_storage_mode,
+ /*out*/ std::vector<uint8_t>* storage) {
+ const uint64_t compress_start_time = NanoTime();
+
+ switch (image_storage_mode) {
+ case ImageHeader::kStorageModeLZ4: {
+ storage->resize(LZ4_compressBound(source.size()));
+ size_t data_size = LZ4_compress_default(
+ reinterpret_cast<char*>(const_cast<uint8_t*>(source.data())),
+ reinterpret_cast<char*>(storage->data()),
+ source.size(),
+ storage->size());
+ storage->resize(data_size);
+ break;
+ }
+ case ImageHeader::kStorageModeLZ4HC: {
+ // Bound is same as non HC.
+ storage->resize(LZ4_compressBound(source.size()));
+ size_t data_size = LZ4_compress_HC(
+ reinterpret_cast<const char*>(const_cast<uint8_t*>(source.data())),
+ reinterpret_cast<char*>(storage->data()),
+ source.size(),
+ storage->size(),
+ LZ4HC_CLEVEL_MAX);
+ storage->resize(data_size);
+ break;
+ }
+ case ImageHeader::kStorageModeUncompressed: {
+ return source;
+ }
+ default: {
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
+ }
+ }
+
+ DCHECK(image_storage_mode == ImageHeader::kStorageModeLZ4 ||
+ image_storage_mode == ImageHeader::kStorageModeLZ4HC);
+ VLOG(compiler) << "Compressed from " << source.size() << " to " << storage->size() << " in "
+ << PrettyDuration(NanoTime() - compress_start_time);
+ if (kIsDebugBuild) {
+ std::vector<uint8_t> decompressed(source.size());
+ const size_t decompressed_size = LZ4_decompress_safe(
+ reinterpret_cast<char*>(storage->data()),
+ reinterpret_cast<char*>(decompressed.data()),
+ storage->size(),
+ decompressed.size());
+ CHECK_EQ(decompressed_size, decompressed.size());
+ CHECK_EQ(memcmp(source.data(), decompressed.data(), source.size()), 0) << image_storage_mode;
+ }
+ return ArrayRef<const uint8_t>(*storage);
+}
+
// Separate objects into multiple bins to optimize dirty memory use.
static constexpr bool kBinObjects = true;
@@ -239,69 +302,18 @@ bool ImageWriter::Write(int image_fd,
return EXIT_FAILURE;
}
- std::unique_ptr<char[]> compressed_data;
// Image data size excludes the bitmap and the header.
ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin());
- const size_t image_data_size = image_header->GetImageSize() - sizeof(ImageHeader);
- char* image_data = reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader);
- size_t data_size;
- const char* image_data_to_write;
- const uint64_t compress_start_time = NanoTime();
+ ArrayRef<const uint8_t> raw_image_data(image_info.image_->Begin() + sizeof(ImageHeader),
+ image_header->GetImageSize() - sizeof(ImageHeader));
CHECK_EQ(image_header->storage_mode_, image_storage_mode_);
- switch (image_storage_mode_) {
- case ImageHeader::kStorageModeLZ4: {
- const size_t compressed_max_size = LZ4_compressBound(image_data_size);
- compressed_data.reset(new char[compressed_max_size]);
- data_size = LZ4_compress_default(
- reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
- &compressed_data[0],
- image_data_size,
- compressed_max_size);
- break;
- }
- case ImageHeader::kStorageModeLZ4HC: {
- // Bound is same as non HC.
- const size_t compressed_max_size = LZ4_compressBound(image_data_size);
- compressed_data.reset(new char[compressed_max_size]);
- data_size = LZ4_compress_HC(
- reinterpret_cast<char*>(image_info.image_->Begin()) + sizeof(ImageHeader),
- &compressed_data[0],
- image_data_size,
- compressed_max_size,
- LZ4HC_CLEVEL_MAX);
- break;
- }
- case ImageHeader::kStorageModeUncompressed: {
- data_size = image_data_size;
- image_data_to_write = image_data;
- break;
- }
- default: {
- LOG(FATAL) << "Unsupported";
- UNREACHABLE();
- }
- }
-
- if (compressed_data != nullptr) {
- image_data_to_write = &compressed_data[0];
- VLOG(compiler) << "Compressed from " << image_data_size << " to " << data_size << " in "
- << PrettyDuration(NanoTime() - compress_start_time);
- if (kIsDebugBuild) {
- std::unique_ptr<uint8_t[]> temp(new uint8_t[image_data_size]);
- const size_t decompressed_size = LZ4_decompress_safe(
- reinterpret_cast<char*>(&compressed_data[0]),
- reinterpret_cast<char*>(&temp[0]),
- data_size,
- image_data_size);
- CHECK_EQ(decompressed_size, image_data_size);
- CHECK_EQ(memcmp(image_data, &temp[0], image_data_size), 0) << image_storage_mode_;
- }
- }
+ std::vector<uint8_t> compressed_data;
+ ArrayRef<const uint8_t> image_data =
+ MaybeCompressData(raw_image_data, image_storage_mode_, &compressed_data);
// Write out the image + fields + methods.
- const bool is_compressed = compressed_data != nullptr;
- if (!image_file->PwriteFully(image_data_to_write, data_size, sizeof(ImageHeader))) {
+ if (!image_file->PwriteFully(image_data.data(), image_data.size(), sizeof(ImageHeader))) {
PLOG(ERROR) << "Failed to write image file data " << image_filename;
image_file->Erase();
return false;
@@ -311,14 +323,30 @@ bool ImageWriter::Write(int image_fd,
// convenience.
const ImageSection& bitmap_section = image_header->GetImageBitmapSection();
// Align up since data size may be unaligned if the image is compressed.
- size_t bitmap_position_in_file = RoundUp(sizeof(ImageHeader) + data_size, kPageSize);
- if (!is_compressed) {
+ size_t bitmap_position_in_file = RoundUp(sizeof(ImageHeader) + image_data.size(), kPageSize);
+ if (image_storage_mode_ == ImageHeader::kDefaultStorageMode) {
CHECK_EQ(bitmap_position_in_file, bitmap_section.Offset());
}
- if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_bitmap_->Begin()),
+ if (!image_file->PwriteFully(image_info.image_bitmap_->Begin(),
bitmap_section.Size(),
bitmap_position_in_file)) {
- PLOG(ERROR) << "Failed to write image file " << image_filename;
+ PLOG(ERROR) << "Failed to write image file bitmap " << image_filename;
+ image_file->Erase();
+ return false;
+ }
+
+ // Write out relocations.
+ size_t relocations_position_in_file = bitmap_position_in_file + bitmap_section.Size();
+ ArrayRef<const uint8_t> relocations = MaybeCompressData(
+ ArrayRef<const uint8_t>(image_info.relocation_bitmap_),
+ image_storage_mode_,
+ &compressed_data);
+ image_header->sections_[ImageHeader::kSectionImageRelocations] =
+ ImageSection(bitmap_section.Offset() + bitmap_section.Size(), relocations.size());
+ if (!image_file->PwriteFully(relocations.data(),
+ relocations.size(),
+ relocations_position_in_file)) {
+ PLOG(ERROR) << "Failed to write image file relocations " << image_filename;
image_file->Erase();
return false;
}
@@ -333,7 +361,7 @@ bool ImageWriter::Write(int image_fd,
// Write header last in case the compiler gets killed in the middle of image writing.
// We do not want to have a corrupted image with a valid header.
// The header is uncompressed since it contains whether the image is compressed or not.
- image_header->data_size_ = data_size;
+ image_header->data_size_ = image_data.size();
if (!image_file->PwriteFully(reinterpret_cast<char*>(image_info.image_->Begin()),
sizeof(ImageHeader),
0)) {
@@ -342,7 +370,7 @@ bool ImageWriter::Write(int image_fd,
return false;
}
- CHECK_EQ(bitmap_position_in_file + bitmap_section.Size(),
+ CHECK_EQ(relocations_position_in_file + relocations.size(),
static_cast<size_t>(image_file->GetLength()));
if (image_file->FlushCloseOrErase() != 0) {
PLOG(ERROR) << "Failed to flush and close image file " << image_filename;
@@ -472,37 +500,36 @@ void ImageWriter::PrepareDexCacheArraySlots() {
DCHECK_EQ(dex_file->NumTypeIds() != 0u, dex_cache->GetResolvedTypes() != nullptr);
AddDexCacheArrayRelocation(dex_cache->GetResolvedTypes(),
start + layout.TypesOffset(),
- dex_cache);
+ oat_index);
DCHECK_EQ(dex_file->NumMethodIds() != 0u, dex_cache->GetResolvedMethods() != nullptr);
AddDexCacheArrayRelocation(dex_cache->GetResolvedMethods(),
start + layout.MethodsOffset(),
- dex_cache);
+ oat_index);
DCHECK_EQ(dex_file->NumFieldIds() != 0u, dex_cache->GetResolvedFields() != nullptr);
AddDexCacheArrayRelocation(dex_cache->GetResolvedFields(),
start + layout.FieldsOffset(),
- dex_cache);
+ oat_index);
DCHECK_EQ(dex_file->NumStringIds() != 0u, dex_cache->GetStrings() != nullptr);
- AddDexCacheArrayRelocation(dex_cache->GetStrings(), start + layout.StringsOffset(), dex_cache);
+ AddDexCacheArrayRelocation(dex_cache->GetStrings(), start + layout.StringsOffset(), oat_index);
if (dex_cache->GetResolvedMethodTypes() != nullptr) {
AddDexCacheArrayRelocation(dex_cache->GetResolvedMethodTypes(),
start + layout.MethodTypesOffset(),
- dex_cache);
+ oat_index);
}
if (dex_cache->GetResolvedCallSites() != nullptr) {
AddDexCacheArrayRelocation(dex_cache->GetResolvedCallSites(),
start + layout.CallSitesOffset(),
- dex_cache);
+ oat_index);
}
}
}
void ImageWriter::AddDexCacheArrayRelocation(void* array,
size_t offset,
- ObjPtr<mirror::DexCache> dex_cache) {
+ size_t oat_index) {
if (array != nullptr) {
DCHECK(!IsInBootImage(array));
- size_t oat_index = GetOatIndexForDexCache(dex_cache);
native_object_relocations_.emplace(array,
NativeObjectRelocation { oat_index, offset, NativeObjectRelocationType::kDexCacheArray });
}
@@ -514,7 +541,7 @@ void ImageWriter::AddMethodPointerArray(mirror::PointerArray* arr) {
for (size_t i = 0, len = arr->GetLength(); i < len; i++) {
ArtMethod* method = arr->GetElementPtrSize<ArtMethod*>(i, target_ptr_size_);
if (method != nullptr && !method->IsRuntimeMethod()) {
- mirror::Class* klass = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
CHECK(klass == nullptr || KeepClass(klass))
<< Class::PrettyClass(klass) << " should be a kept class";
}
@@ -656,7 +683,7 @@ bool ImageWriter::WillMethodBeDirty(ArtMethod* m) const {
if (m->IsNative()) {
return true;
}
- mirror::Class* declaring_class = m->GetDeclaringClass();
+ ObjPtr<mirror::Class> declaring_class = m->GetDeclaringClass();
// Initialized is highly unlikely to dirty since there's no entry points to mutate.
return declaring_class == nullptr || declaring_class->GetStatus() != ClassStatus::kInitialized;
}
@@ -1970,6 +1997,8 @@ void ImageWriter::CreateHeader(size_t oat_index) {
const size_t bitmap_bytes = image_info.image_bitmap_->Size();
auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
*bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
+ // The relocations section shall be finished later as we do not know its actual size yet.
+
if (VLOG_IS_ON(compiler)) {
LOG(INFO) << "Creating header for " << oat_filenames_[oat_index];
size_t idx = 0;
@@ -1996,34 +2025,54 @@ void ImageWriter::CreateHeader(size_t oat_index) {
// Create the header, leave 0 for data size since we will fill this in as we are writing the
// image.
- new (image_info.image_->Begin()) ImageHeader(PointerToLowMemUInt32(image_info.image_begin_),
- image_end,
- sections,
- image_info.image_roots_address_,
- image_info.oat_checksum_,
- PointerToLowMemUInt32(oat_file_begin),
- PointerToLowMemUInt32(image_info.oat_data_begin_),
- PointerToLowMemUInt32(oat_data_end),
- PointerToLowMemUInt32(oat_file_end),
- boot_image_begin,
- boot_image_end - boot_image_begin,
- boot_oat_begin,
- boot_oat_end - boot_oat_begin,
- static_cast<uint32_t>(target_ptr_size_),
- compile_pic_,
- /*is_pic*/compile_app_image_,
- image_storage_mode_,
- /*data_size*/0u);
+ ImageHeader* header = new (image_info.image_->Begin()) ImageHeader(
+ PointerToLowMemUInt32(image_info.image_begin_),
+ image_end,
+ sections,
+ image_info.image_roots_address_,
+ image_info.oat_checksum_,
+ PointerToLowMemUInt32(oat_file_begin),
+ PointerToLowMemUInt32(image_info.oat_data_begin_),
+ PointerToLowMemUInt32(oat_data_end),
+ PointerToLowMemUInt32(oat_file_end),
+ boot_image_begin,
+ boot_image_end - boot_image_begin,
+ boot_oat_begin,
+ boot_oat_end - boot_oat_begin,
+ static_cast<uint32_t>(target_ptr_size_),
+ compile_pic_,
+ /*is_pic*/compile_app_image_,
+ image_storage_mode_,
+ /*data_size*/0u);
+
+ // Resize relocation bitmap for recording reference/pointer relocations.
+ size_t number_of_relocation_locations = RelocationIndex(image_end, target_ptr_size_);
+ DCHECK(image_info.relocation_bitmap_.empty());
+ image_info.relocation_bitmap_.resize(
+ BitsToBytesRoundUp(number_of_relocation_locations * (compile_app_image_ ? 2u : 1u)));
+ // Record header relocations.
+ RecordImageRelocation(&header->image_begin_, oat_index);
+ RecordImageRelocation(&header->oat_file_begin_, oat_index);
+ RecordImageRelocation(&header->oat_data_begin_, oat_index);
+ RecordImageRelocation(&header->oat_data_end_, oat_index);
+ RecordImageRelocation(&header->oat_file_end_, oat_index);
+ if (compile_app_image_) {
+ RecordImageRelocation(&header->boot_image_begin_, oat_index, /* app_to_boot_image */ true);
+ RecordImageRelocation(&header->boot_oat_begin_, oat_index, /* app_to_boot_image */ true);
+ } else {
+ DCHECK_EQ(header->boot_image_begin_, 0u);
+ DCHECK_EQ(header->boot_oat_begin_, 0u);
+ }
+ RecordImageRelocation(&header->image_roots_, oat_index);
+ // Skip non-null check for `patch_delta_` as it is actually 0 but still needs to be recorded.
+ RecordImageRelocation</* kCheckNotNull */ false>(&header->patch_delta_, oat_index);
}
ArtMethod* ImageWriter::GetImageMethodAddress(ArtMethod* method) {
- auto it = native_object_relocations_.find(method);
- CHECK(it != native_object_relocations_.end()) << ArtMethod::PrettyMethod(method) << " @ "
- << method;
- size_t oat_index = GetOatIndex(method->GetDexCache());
- ImageInfo& image_info = GetImageInfo(oat_index);
- CHECK_GE(it->second.offset, image_info.image_end_) << "ArtMethods should be after Objects";
- return reinterpret_cast<ArtMethod*>(image_info.image_begin_ + it->second.offset);
+ NativeObjectRelocation relocation = GetNativeRelocation(method);
+ const ImageInfo& image_info = GetImageInfo(relocation.oat_index);
+ CHECK_GE(relocation.offset, image_info.image_end_) << "ArtMethods should be after Objects";
+ return reinterpret_cast<ArtMethod*>(image_info.image_begin_ + relocation.offset);
}
const void* ImageWriter::GetIntrinsicReferenceAddress(uint32_t intrinsic_data) {
@@ -2060,11 +2109,15 @@ class ImageWriter::FixupRootVisitor : public RootVisitor {
LOG(FATAL) << "Unsupported";
}
- void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
- image_writer_->CopyReference(roots[i], roots[i]->AsMirrorPtr());
+ // Copy the reference. Since we do not have the address for recording the relocation,
+ // it needs to be recorded explicitly by the user of FixupRootVisitor.
+ ObjPtr<mirror::Object> old_ptr = roots[i]->AsMirrorPtr();
+ roots[i]->Assign(image_writer_->GetImageAddress(old_ptr.Ptr()));
}
}
@@ -2072,23 +2125,28 @@ class ImageWriter::FixupRootVisitor : public RootVisitor {
ImageWriter* const image_writer_;
};
-void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy) {
+void ImageWriter::CopyAndFixupImTable(ImTable* orig, ImTable* copy, size_t oat_index) {
for (size_t i = 0; i < ImTable::kSize; ++i) {
ArtMethod* method = orig->Get(i, target_ptr_size_);
void** address = reinterpret_cast<void**>(copy->AddressOfElement(i, target_ptr_size_));
- CopyAndFixupPointer(address, method);
+ CopyAndFixupPointer(address, method, oat_index);
DCHECK_EQ(copy->Get(i, target_ptr_size_), NativeLocationInImage(method));
}
}
-void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) {
+void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig,
+ ImtConflictTable* copy,
+ size_t oat_index) {
const size_t count = orig->NumEntries(target_ptr_size_);
for (size_t i = 0; i < count; ++i) {
ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_);
ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_);
- CopyAndFixupPointer(copy->AddressOfInterfaceMethod(i, target_ptr_size_), interface_method);
+ CopyAndFixupPointer(copy->AddressOfInterfaceMethod(i, target_ptr_size_),
+ interface_method,
+ oat_index);
CopyAndFixupPointer(copy->AddressOfImplementationMethod(i, target_ptr_size_),
- implementation_method);
+ implementation_method,
+ oat_index);
DCHECK_EQ(copy->GetInterfaceMethod(i, target_ptr_size_),
NativeLocationInImage(interface_method));
DCHECK_EQ(copy->GetImplementationMethod(i, target_ptr_size_),
@@ -2111,9 +2169,10 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
switch (relocation.type) {
case NativeObjectRelocationType::kArtField: {
memcpy(dest, pair.first, sizeof(ArtField));
- CopyReference(
+ CopyAndFixupReference(
reinterpret_cast<ArtField*>(dest)->GetDeclaringClassAddressWithoutBarrier(),
- reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass().Ptr());
+ reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass(),
+ oat_index);
break;
}
case NativeObjectRelocationType::kRuntimeMethod:
@@ -2121,7 +2180,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
case NativeObjectRelocationType::kArtMethodDirty: {
CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first),
reinterpret_cast<ArtMethod*>(dest),
- image_info);
+ oat_index);
break;
}
// For arrays, copy just the header since the elements will get copied by their corresponding
@@ -2146,14 +2205,15 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
case NativeObjectRelocationType::kIMTable: {
ImTable* orig_imt = reinterpret_cast<ImTable*>(pair.first);
ImTable* dest_imt = reinterpret_cast<ImTable*>(dest);
- CopyAndFixupImTable(orig_imt, dest_imt);
+ CopyAndFixupImTable(orig_imt, dest_imt, oat_index);
break;
}
case NativeObjectRelocationType::kIMTConflictTable: {
auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first);
CopyAndFixupImtConflictTable(
orig_table,
- new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_));
+ new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_),
+ oat_index);
break;
}
}
@@ -2163,10 +2223,10 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) {
ArtMethod* method = image_methods_[i];
CHECK(method != nullptr);
- if (!IsInBootImage(method)) {
- method = NativeLocationInImage(method);
- }
- image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), method);
+ CopyAndFixupPointer(reinterpret_cast<void**>(&image_header->image_methods_[i]),
+ method,
+ oat_index,
+ PointerSize::k32);
}
FixupRootVisitor root_visitor(this);
@@ -2187,6 +2247,13 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
temp_intern_table.AddTableFromMemory(intern_table_memory_ptr);
CHECK_EQ(temp_intern_table.Size(), intern_table->Size());
temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots);
+ // Record relocations. (The root visitor does not get to see the slot addresses.)
+ MutexLock lock(Thread::Current(), *Locks::intern_table_lock_);
+ DCHECK(!temp_intern_table.strong_interns_.tables_.empty());
+ DCHECK(!temp_intern_table.strong_interns_.tables_[0].empty()); // Inserted at the beginning.
+ for (const GcRoot<mirror::String>& slot : temp_intern_table.strong_interns_.tables_[0]) {
+ RecordImageRelocation(&slot, oat_index);
+ }
}
// Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple
// class loaders. Writing multiple class tables into the image is currently unsupported.
@@ -2194,7 +2261,8 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
const ImageSection& class_table_section = image_header->GetClassTableSection();
uint8_t* const class_table_memory_ptr =
image_info.image_->Begin() + class_table_section.Offset();
- ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+ Thread* self = Thread::Current();
+ ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_);
ClassTable* table = image_info.class_table_.get();
CHECK(table != nullptr);
@@ -2208,6 +2276,15 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) {
table->NumReferencedNonZygoteClasses() + table->NumReferencedZygoteClasses());
UnbufferedRootVisitor visitor(&root_visitor, RootInfo(kRootUnknown));
temp_class_table.VisitRoots(visitor);
+ // Record relocations. (The root visitor does not get to see the slot addresses.)
+ // Note that the low bits in the slots contain bits of the descriptors' hash codes
+ // but the relocation works fine for these "adjusted" references.
+ ReaderMutexLock lock(self, temp_class_table.lock_);
+ DCHECK(!temp_class_table.classes_.empty());
+ DCHECK(!temp_class_table.classes_[0].empty()); // The ClassSet was inserted at the beginning.
+ for (const ClassTable::TableSlot& slot : temp_class_table.classes_[0]) {
+ RecordImageRelocation(&slot, oat_index);
+ }
}
}
@@ -2224,12 +2301,15 @@ void ImageWriter::CopyAndFixupObjects() {
void ImageWriter::FixupPointerArray(mirror::Object* dst,
mirror::PointerArray* arr,
mirror::Class* klass,
- Bin array_type) {
+ Bin array_type,
+ size_t oat_index) {
CHECK(klass->IsArrayClass());
CHECK(arr->IsIntArray() || arr->IsLongArray()) << klass->PrettyClass() << " " << arr;
// Fixup int and long pointers for the ArtMethod or ArtField arrays.
const size_t num_elements = arr->GetLength();
- dst->SetClass(GetImageAddress(arr->GetClass()));
+ CopyAndFixupReference(dst->GetFieldObjectReferenceAddr<kVerifyNone>(Class::ClassOffset()),
+ arr->GetClass(),
+ oat_index);
auto* dest_array = down_cast<mirror::PointerArray*>(dst);
for (size_t i = 0, count = num_elements; i < count; ++i) {
void* elem = arr->GetElementPtrSize<void*>(i, target_ptr_size_);
@@ -2251,7 +2331,7 @@ void ImageWriter::FixupPointerArray(mirror::Object* dst,
UNREACHABLE();
}
}
- CopyAndFixupPointer(dest_array->ElementAddress(i, target_ptr_size_), elem);
+ CopyAndFixupPointer(dest_array->ElementAddress(i, target_ptr_size_), elem, oat_index);
}
}
@@ -2282,13 +2362,14 @@ void ImageWriter::CopyAndFixupObject(Object* obj) {
// safe since we mark all of the objects that may reference non immune objects as gray.
CHECK(dst->AtomicSetMarkBit(0, 1));
}
- FixupObject(obj, dst);
+ FixupObject(obj, dst, oat_index);
}
// Rewrite all the references in the copied object to point to their image address equivalent
class ImageWriter::FixupVisitor {
public:
- FixupVisitor(ImageWriter* image_writer, Object* copy) : image_writer_(image_writer), copy_(copy) {
+ FixupVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
+ : image_writer_(image_writer), copy_(copy), oat_index_(oat_index) {
}
// Ignore class roots since we don't have a way to map them to the destination. These are handled
@@ -2302,9 +2383,10 @@ class ImageWriter::FixupVisitor {
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
ObjPtr<Object> ref = obj->GetFieldObject<Object, kVerifyNone>(offset);
// Copy the reference and record the fixup if necessary.
- image_writer_->CopyReference(
+ image_writer_->CopyAndFixupReference(
copy_->GetFieldObjectReferenceAddr<kVerifyNone>(offset),
- ref.Ptr());
+ ref.Ptr(),
+ oat_index_);
}
// java.lang.ref.Reference visitor.
@@ -2317,12 +2399,13 @@ class ImageWriter::FixupVisitor {
protected:
ImageWriter* const image_writer_;
mirror::Object* const copy_;
+ size_t oat_index_;
};
class ImageWriter::FixupClassVisitor FINAL : public FixupVisitor {
public:
- FixupClassVisitor(ImageWriter* image_writer, Object* copy) : FixupVisitor(image_writer, copy) {
- }
+ FixupClassVisitor(ImageWriter* image_writer, Object* copy, size_t oat_index)
+ : FixupVisitor(image_writer, copy, oat_index) {}
void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
@@ -2337,14 +2420,13 @@ class ImageWriter::FixupClassVisitor FINAL : public FixupVisitor {
}
};
-uintptr_t ImageWriter::NativeOffsetInImage(void* obj) {
+ImageWriter::NativeObjectRelocation ImageWriter::GetNativeRelocation(void* obj) {
DCHECK(obj != nullptr);
DCHECK(!IsInBootImage(obj));
auto it = native_object_relocations_.find(obj);
CHECK(it != native_object_relocations_.end()) << obj << " spaces "
<< Runtime::Current()->GetHeap()->DumpSpaces();
- const NativeObjectRelocation& relocation = it->second;
- return relocation.offset;
+ return it->second;
}
template <typename T>
@@ -2364,45 +2446,43 @@ T* ImageWriter::NativeLocationInImage(T* obj) {
if (obj == nullptr || IsInBootImage(obj)) {
return obj;
} else {
- auto it = native_object_relocations_.find(obj);
- CHECK(it != native_object_relocations_.end()) << obj << " " << PrettyPrint(obj)
- << " spaces " << Runtime::Current()->GetHeap()->DumpSpaces();
- const NativeObjectRelocation& relocation = it->second;
- ImageInfo& image_info = GetImageInfo(relocation.oat_index);
+ NativeObjectRelocation relocation = GetNativeRelocation(obj);
+ const ImageInfo& image_info = GetImageInfo(relocation.oat_index);
return reinterpret_cast<T*>(image_info.image_begin_ + relocation.offset);
}
}
template <typename T>
-T* ImageWriter::NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) {
- if (obj == nullptr || IsInBootImage(obj)) {
- return obj;
- } else {
- size_t oat_index = GetOatIndexForDexCache(dex_cache);
- ImageInfo& image_info = GetImageInfo(oat_index);
- return reinterpret_cast<T*>(image_info.image_->Begin() + NativeOffsetInImage(obj));
- }
+T* ImageWriter::NativeCopyLocation(T* obj) {
+ const NativeObjectRelocation relocation = GetNativeRelocation(obj);
+ const ImageInfo& image_info = GetImageInfo(relocation.oat_index);
+ return reinterpret_cast<T*>(image_info.image_->Begin() + relocation.offset);
}
class ImageWriter::NativeLocationVisitor {
public:
- explicit NativeLocationVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
+ NativeLocationVisitor(ImageWriter* image_writer, size_t oat_index)
+ : image_writer_(image_writer),
+ oat_index_(oat_index) {}
template <typename T>
- T* operator()(T* ptr, void** dest_addr = nullptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
- if (dest_addr != nullptr) {
- image_writer_->CopyAndFixupPointer(dest_addr, ptr);
+ T* operator()(T* ptr, void** dest_addr) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (ptr != nullptr) {
+ image_writer_->CopyAndFixupPointer(dest_addr, ptr, oat_index_);
}
+ // TODO: The caller shall overwrite the value stored by CopyAndFixupPointer()
+ // with the value we return here. We should try to avoid the duplicate work.
return image_writer_->NativeLocationInImage(ptr);
}
private:
ImageWriter* const image_writer_;
+ const size_t oat_index_;
};
-void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
- orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this));
- FixupClassVisitor visitor(this, copy);
+void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy, size_t oat_index) {
+ orig->FixupNativePointers(copy, target_ptr_size_, NativeLocationVisitor(this, oat_index));
+ FixupClassVisitor visitor(this, copy, oat_index);
ObjPtr<mirror::Object>(orig)->VisitReferences(visitor, visitor);
if (kBitstringSubtypeCheckEnabled && compile_app_image_) {
@@ -2430,7 +2510,7 @@ void ImageWriter::FixupClass(mirror::Class* orig, mirror::Class* copy) {
copy->SetClinitThreadId(static_cast<pid_t>(0));
}
-void ImageWriter::FixupObject(Object* orig, Object* copy) {
+void ImageWriter::FixupObject(Object* orig, Object* copy, size_t oat_index) {
DCHECK(orig != nullptr);
DCHECK(copy != nullptr);
if (kUseBakerReadBarrier) {
@@ -2442,13 +2522,13 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
auto it = pointer_arrays_.find(down_cast<mirror::PointerArray*>(orig));
if (it != pointer_arrays_.end()) {
// Should only need to fixup every pointer array exactly once.
- FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second);
+ FixupPointerArray(copy, down_cast<mirror::PointerArray*>(orig), klass, it->second, oat_index);
pointer_arrays_.erase(it);
return;
}
}
if (orig->IsClass()) {
- FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy));
+ FixupClass(orig->AsClass<kVerifyNone>(), down_cast<mirror::Class*>(copy), oat_index);
} else {
ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
Runtime::Current()->GetClassLinker()->GetClassRoots();
@@ -2458,107 +2538,136 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
auto* dest = down_cast<mirror::Executable*>(copy);
auto* src = down_cast<mirror::Executable*>(orig);
ArtMethod* src_method = src->GetArtMethod();
- dest->SetArtMethod(GetImageMethodAddress(src_method));
- } else if (!klass->IsArrayClass()) {
- if (klass == GetClassRoot<mirror::DexCache>()) {
- FixupDexCache(down_cast<mirror::DexCache*>(orig), down_cast<mirror::DexCache*>(copy));
- } else if (klass->IsClassLoaderClass()) {
- mirror::ClassLoader* copy_loader = down_cast<mirror::ClassLoader*>(copy);
- // If src is a ClassLoader, set the class table to null so that it gets recreated by the
- // ClassLoader.
- copy_loader->SetClassTable(nullptr);
- // Also set allocator to null to be safe. The allocator is created when we create the class
- // table. We also never expect to unload things in the image since they are held live as
- // roots.
- copy_loader->SetAllocator(nullptr);
- }
- }
- FixupVisitor visitor(this, copy);
+ CopyAndFixupPointer(dest, mirror::Executable::ArtMethodOffset(), src_method, oat_index);
+ } else if (klass == GetClassRoot<mirror::DexCache>(class_roots)) {
+ FixupDexCache(down_cast<mirror::DexCache*>(orig),
+ down_cast<mirror::DexCache*>(copy),
+ oat_index);
+ } else if (klass->IsClassLoaderClass()) {
+ mirror::ClassLoader* copy_loader = down_cast<mirror::ClassLoader*>(copy);
+ // If src is a ClassLoader, set the class table to null so that it gets recreated by the
+ // ClassLoader.
+ copy_loader->SetClassTable(nullptr);
+ // Also set allocator to null to be safe. The allocator is created when we create the class
+ // table. We also never expect to unload things in the image since they are held live as
+ // roots.
+ copy_loader->SetAllocator(nullptr);
+ }
+ FixupVisitor visitor(this, copy, oat_index);
orig->VisitReferences(visitor, visitor);
}
}
-class ImageWriter::ImageAddressVisitorForDexCacheArray {
- public:
- explicit ImageAddressVisitorForDexCacheArray(ImageWriter* image_writer)
- : image_writer_(image_writer) {}
-
- template <typename T>
- T* operator()(T* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
- return image_writer_->GetImageAddress(ptr);
- }
-
- private:
- ImageWriter* const image_writer_;
-};
+template <typename T>
+void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* orig_array,
+ std::atomic<mirror::DexCachePair<T>>* new_array,
+ uint32_t array_index,
+ size_t oat_index) {
+ static_assert(sizeof(std::atomic<mirror::DexCachePair<T>>) == sizeof(mirror::DexCachePair<T>),
+ "Size check for removing std::atomic<>.");
+ mirror::DexCachePair<T>* orig_pair =
+ reinterpret_cast<mirror::DexCachePair<T>*>(&orig_array[array_index]);
+ mirror::DexCachePair<T>* new_pair =
+ reinterpret_cast<mirror::DexCachePair<T>*>(&new_array[array_index]);
+ CopyAndFixupReference(
+ new_pair->object.AddressWithoutBarrier(), orig_pair->object.Read(), oat_index);
+ new_pair->index = orig_pair->index;
+}
-void ImageWriter::FixupDexCache(mirror::DexCache* orig_dex_cache,
- mirror::DexCache* copy_dex_cache) {
- ImageAddressVisitorForDexCacheArray fixup_visitor(this);
- // Though the DexCache array fields are usually treated as native pointers, we set the full
- // 64-bit values here, clearing the top 32 bits for 32-bit targets. The zero-extension is
- // done by casting to the unsigned type uintptr_t before casting to int64_t, i.e.
- // static_cast<int64_t>(reinterpret_cast<uintptr_t>(image_begin_ + offset))).
- mirror::StringDexCacheType* orig_strings = orig_dex_cache->GetStrings();
- if (orig_strings != nullptr) {
- copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::StringsOffset(),
- NativeLocationInImage(orig_strings),
- PointerSize::k64);
- orig_dex_cache->FixupStrings(NativeCopyLocation(orig_strings, orig_dex_cache), fixup_visitor);
- }
- mirror::TypeDexCacheType* orig_types = orig_dex_cache->GetResolvedTypes();
- if (orig_types != nullptr) {
- copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedTypesOffset(),
- NativeLocationInImage(orig_types),
- PointerSize::k64);
- orig_dex_cache->FixupResolvedTypes(NativeCopyLocation(orig_types, orig_dex_cache),
- fixup_visitor);
- }
- mirror::MethodDexCacheType* orig_methods = orig_dex_cache->GetResolvedMethods();
- if (orig_methods != nullptr) {
- copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedMethodsOffset(),
- NativeLocationInImage(orig_methods),
- PointerSize::k64);
- mirror::MethodDexCacheType* copy_methods = NativeCopyLocation(orig_methods, orig_dex_cache);
- for (size_t i = 0, num = orig_dex_cache->NumResolvedMethods(); i != num; ++i) {
- mirror::MethodDexCachePair orig_pair =
- mirror::DexCache::GetNativePairPtrSize(orig_methods, i, target_ptr_size_);
- // NativeLocationInImage also handles runtime methods since these have relocation info.
- mirror::MethodDexCachePair copy_pair(NativeLocationInImage(orig_pair.object),
- orig_pair.index);
- mirror::DexCache::SetNativePairPtrSize(copy_methods, i, copy_pair, target_ptr_size_);
- }
- }
- mirror::FieldDexCacheType* orig_fields = orig_dex_cache->GetResolvedFields();
- if (orig_fields != nullptr) {
- copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedFieldsOffset(),
- NativeLocationInImage(orig_fields),
- PointerSize::k64);
- mirror::FieldDexCacheType* copy_fields = NativeCopyLocation(orig_fields, orig_dex_cache);
- for (size_t i = 0, num = orig_dex_cache->NumResolvedFields(); i != num; ++i) {
- mirror::FieldDexCachePair orig =
- mirror::DexCache::GetNativePairPtrSize(orig_fields, i, target_ptr_size_);
- mirror::FieldDexCachePair copy = orig;
- copy.object = NativeLocationInImage(orig.object);
- mirror::DexCache::SetNativePairPtrSize(copy_fields, i, copy, target_ptr_size_);
- }
- }
- mirror::MethodTypeDexCacheType* orig_method_types = orig_dex_cache->GetResolvedMethodTypes();
- if (orig_method_types != nullptr) {
- copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedMethodTypesOffset(),
- NativeLocationInImage(orig_method_types),
- PointerSize::k64);
- orig_dex_cache->FixupResolvedMethodTypes(NativeCopyLocation(orig_method_types, orig_dex_cache),
- fixup_visitor);
- }
- GcRoot<mirror::CallSite>* orig_call_sites = orig_dex_cache->GetResolvedCallSites();
- if (orig_call_sites != nullptr) {
- copy_dex_cache->SetFieldPtrWithSize<false>(mirror::DexCache::ResolvedCallSitesOffset(),
- NativeLocationInImage(orig_call_sites),
- PointerSize::k64);
- orig_dex_cache->FixupResolvedCallSites(NativeCopyLocation(orig_call_sites, orig_dex_cache),
- fixup_visitor);
- }
+template <typename T>
+void ImageWriter::FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* orig_array,
+ std::atomic<mirror::NativeDexCachePair<T>>* new_array,
+ uint32_t array_index,
+ size_t oat_index) {
+ static_assert(
+ sizeof(std::atomic<mirror::NativeDexCachePair<T>>) == sizeof(mirror::NativeDexCachePair<T>),
+ "Size check for removing std::atomic<>.");
+ if (target_ptr_size_ == PointerSize::k64) {
+ DexCache::ConversionPair64* orig_pair =
+ reinterpret_cast<DexCache::ConversionPair64*>(orig_array) + array_index;
+ DexCache::ConversionPair64* new_pair =
+ reinterpret_cast<DexCache::ConversionPair64*>(new_array) + array_index;
+ *new_pair = *orig_pair; // Copy original value and index.
+ if (orig_pair->first != 0u) {
+ CopyAndFixupPointer(reinterpret_cast<void**>(&new_pair->first),
+ reinterpret_cast64<void*>(orig_pair->first),
+ oat_index);
+ }
+ } else {
+ DexCache::ConversionPair32* orig_pair =
+ reinterpret_cast<DexCache::ConversionPair32*>(orig_array) + array_index;
+ DexCache::ConversionPair32* new_pair =
+ reinterpret_cast<DexCache::ConversionPair32*>(new_array) + array_index;
+ *new_pair = *orig_pair; // Copy original value and index.
+ if (orig_pair->first != 0u) {
+ CopyAndFixupPointer(reinterpret_cast<void**>(&new_pair->first),
+ reinterpret_cast32<void*>(orig_pair->first),
+ oat_index);
+ }
+ }
+}
+
+void ImageWriter::FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* orig_array,
+ GcRoot<mirror::CallSite>* new_array,
+ uint32_t array_index,
+ size_t oat_index) {
+ CopyAndFixupReference(new_array[array_index].AddressWithoutBarrier(),
+ orig_array[array_index].Read(),
+ oat_index);
+}
+
+template <typename EntryType>
+void ImageWriter::FixupDexCacheArray(DexCache* orig_dex_cache,
+ DexCache* copy_dex_cache,
+ size_t oat_index,
+ MemberOffset array_offset,
+ uint32_t size) {
+ EntryType* orig_array = orig_dex_cache->GetFieldPtr64<EntryType*>(array_offset);
+ DCHECK_EQ(orig_array != nullptr, size != 0u);
+ if (orig_array != nullptr) {
+ // Though the DexCache array fields are usually treated as native pointers, we clear
+ // the top 32 bits for 32-bit targets.
+ CopyAndFixupPointer(copy_dex_cache, array_offset, orig_array, oat_index, PointerSize::k64);
+ EntryType* new_array = NativeCopyLocation(orig_array);
+ for (uint32_t i = 0; i != size; ++i) {
+ FixupDexCacheArrayEntry(orig_array, new_array, i, oat_index);
+ }
+ }
+}
+
+void ImageWriter::FixupDexCache(DexCache* orig_dex_cache,
+ DexCache* copy_dex_cache,
+ size_t oat_index) {
+ FixupDexCacheArray<mirror::StringDexCacheType>(orig_dex_cache,
+ copy_dex_cache,
+ oat_index,
+ DexCache::StringsOffset(),
+ orig_dex_cache->NumStrings());
+ FixupDexCacheArray<mirror::TypeDexCacheType>(orig_dex_cache,
+ copy_dex_cache,
+ oat_index,
+ DexCache::ResolvedTypesOffset(),
+ orig_dex_cache->NumResolvedTypes());
+ FixupDexCacheArray<mirror::MethodDexCacheType>(orig_dex_cache,
+ copy_dex_cache,
+ oat_index,
+ DexCache::ResolvedMethodsOffset(),
+ orig_dex_cache->NumResolvedMethods());
+ FixupDexCacheArray<mirror::FieldDexCacheType>(orig_dex_cache,
+ copy_dex_cache,
+ oat_index,
+ DexCache::ResolvedFieldsOffset(),
+ orig_dex_cache->NumResolvedFields());
+ FixupDexCacheArray<mirror::MethodTypeDexCacheType>(orig_dex_cache,
+ copy_dex_cache,
+ oat_index,
+ DexCache::ResolvedMethodTypesOffset(),
+ orig_dex_cache->NumResolvedMethodTypes());
+ FixupDexCacheArray<GcRoot<mirror::CallSite>>(orig_dex_cache,
+ copy_dex_cache,
+ oat_index,
+ DexCache::ResolvedCallSitesOffset(),
+ orig_dex_cache->NumResolvedCallSites());
// Remove the DexFile pointers. They will be fixed up when the runtime loads the oat file. Leaving
// compiler pointers in here will make the output non-deterministic.
@@ -2617,7 +2726,7 @@ const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method,
method->GetEntryPointFromQuickCompiledCodePtrSize(target_ptr_size_);
const uint8_t* quick_code;
- if (UNLIKELY(IsInBootImage(method->GetDeclaringClass()))) {
+ if (UNLIKELY(IsInBootImage(method->GetDeclaringClass().Ptr()))) {
DCHECK(method->IsCopied());
// If the code is not in the oat file corresponding to this image (e.g. default methods)
quick_code = reinterpret_cast<const uint8_t*>(quick_oat_entry_point);
@@ -2652,7 +2761,7 @@ const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method,
void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
ArtMethod* copy,
- const ImageInfo& image_info) {
+ size_t oat_index) {
if (orig->IsAbstract()) {
// Ignore the single-implementation info for abstract method.
// Do this on orig instead of copy, otherwise there is a crash due to methods
@@ -2665,23 +2774,24 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
- CopyReference(copy->GetDeclaringClassAddressWithoutBarrier(), orig->GetDeclaringClassUnchecked());
+ CopyAndFixupReference(copy->GetDeclaringClassAddressWithoutBarrier(),
+ orig->GetDeclaringClassUnchecked(),
+ oat_index);
// OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
// oat_begin_
// The resolution method has a special trampoline to call.
Runtime* runtime = Runtime::Current();
+ const void* quick_code;
if (orig->IsRuntimeMethod()) {
ImtConflictTable* orig_table = orig->GetImtConflictTable(target_ptr_size_);
if (orig_table != nullptr) {
// Special IMT conflict method, normal IMT conflict method or unimplemented IMT method.
- copy->SetEntryPointFromQuickCompiledCodePtrSize(
- GetOatAddress(StubType::kQuickIMTConflictTrampoline), target_ptr_size_);
- copy->SetImtConflictTable(NativeLocationInImage(orig_table), target_ptr_size_);
+ quick_code = GetOatAddress(StubType::kQuickIMTConflictTrampoline);
+ CopyAndFixupPointer(copy, ArtMethod::DataOffset(target_ptr_size_), orig_table, oat_index);
} else if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize(
- GetOatAddress(StubType::kQuickResolutionTrampoline), target_ptr_size_);
+ quick_code = GetOatAddress(StubType::kQuickResolutionTrampoline);
} else {
bool found_one = false;
for (size_t i = 0; i < static_cast<size_t>(CalleeSaveType::kLastCalleeSaveType); ++i) {
@@ -2693,18 +2803,19 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
}
CHECK(found_one) << "Expected to find callee save method but got " << orig->PrettyMethod();
CHECK(copy->IsRuntimeMethod());
+ CHECK(copy->GetEntryPointFromQuickCompiledCode() == nullptr);
+ quick_code = nullptr;
}
} else {
// We assume all methods have code. If they don't currently then we set them to the use the
// resolution trampoline. Abstract methods never have code and so we need to make sure their
// use results in an AbstractMethodError. We use the interpreter to achieve this.
if (UNLIKELY(!orig->IsInvokable())) {
- copy->SetEntryPointFromQuickCompiledCodePtrSize(
- GetOatAddress(StubType::kQuickToInterpreterBridge), target_ptr_size_);
+ quick_code = GetOatAddress(StubType::kQuickToInterpreterBridge);
} else {
bool quick_is_interpreted;
- const uint8_t* quick_code = GetQuickCode(orig, image_info, &quick_is_interpreted);
- copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_);
+ const ImageInfo& image_info = image_infos_[oat_index];
+ quick_code = GetQuickCode(orig, image_info, &quick_is_interpreted);
// JNI entrypoint:
if (orig->IsNative()) {
@@ -2712,9 +2823,20 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
// Note this is not the code_ pointer, that is handled above.
copy->SetEntryPointFromJniPtrSize(
GetOatAddress(StubType::kJNIDlsymLookup), target_ptr_size_);
+ MemberOffset offset = ArtMethod::EntryPointFromJniOffset(target_ptr_size_);
+ const void* dest = reinterpret_cast<const uint8_t*>(copy) + offset.Uint32Value();
+ RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ compile_app_image_);
+ } else {
+ CHECK(copy->GetDataPtrSize(target_ptr_size_) == nullptr);
}
}
}
+ if (quick_code != nullptr) {
+ copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_);
+ MemberOffset offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(target_ptr_size_);
+ const void* dest = reinterpret_cast<const uint8_t*>(copy) + offset.Uint32Value();
+ RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ IsInBootOatFile(quick_code));
+ }
}
size_t ImageWriter::ImageInfo::GetBinSizeSum(Bin up_to) const {
@@ -2879,30 +3001,80 @@ ImageWriter::ImageInfo::ImageInfo()
: intern_table_(new InternTable),
class_table_(new ClassTable) {}
-void ImageWriter::CopyReference(mirror::HeapReference<mirror::Object>* dest,
- ObjPtr<mirror::Object> src) {
- dest->Assign(GetImageAddress(src.Ptr()));
-}
-
-void ImageWriter::CopyReference(mirror::CompressedReference<mirror::Object>* dest,
- ObjPtr<mirror::Object> src) {
+template <bool kCheckNotNull /* = true */>
+void ImageWriter::RecordImageRelocation(const void* dest,
+ size_t oat_index,
+ bool app_to_boot_image /* = false */) {
+ // Check that we're not recording a relocation for null.
+ if (kCheckNotNull) {
+ DCHECK(reinterpret_cast<const uint32_t*>(dest)[0] != 0u);
+ }
+ // Calculate the offset within the image.
+ ImageInfo* image_info = &image_infos_[oat_index];
+ DCHECK(image_info->image_->HasAddress(dest))
+ << "MemMap range " << static_cast<const void*>(image_info->image_->Begin())
+ << "-" << static_cast<const void*>(image_info->image_->End())
+ << " does not contain " << dest;
+ size_t offset = reinterpret_cast<const uint8_t*>(dest) - image_info->image_->Begin();
+ ImageHeader* const image_header = reinterpret_cast<ImageHeader*>(image_info->image_->Begin());
+ size_t image_end = image_header->GetClassTableSection().End();
+ DCHECK_LT(offset, image_end);
+ // Calculate the location index.
+ size_t size = RelocationIndex(image_end, target_ptr_size_);
+ size_t index = RelocationIndex(offset, target_ptr_size_);
+ if (app_to_boot_image) {
+ index += size;
+ }
+ // Mark the location in the bitmap.
+ DCHECK(compile_app_image_ || !app_to_boot_image);
+ MemoryRegion region(image_info->relocation_bitmap_.data(), image_info->relocation_bitmap_.size());
+ BitMemoryRegion bit_region(region, /* bit_offset */ 0u, compile_app_image_ ? 2u * size : size);
+ DCHECK(!bit_region.LoadBit(index));
+  bit_region.StoreBit(index, /* value */ true);
+}
+
+template <typename DestType>
+void ImageWriter::CopyAndFixupReference(DestType* dest,
+ ObjPtr<mirror::Object> src,
+ size_t oat_index) {
+ static_assert(std::is_same<DestType, mirror::CompressedReference<mirror::Object>>::value ||
+ std::is_same<DestType, mirror::HeapReference<mirror::Object>>::value,
+ "DestType must be a Compressed-/HeapReference<Object>.");
dest->Assign(GetImageAddress(src.Ptr()));
+ if (src != nullptr) {
+ RecordImageRelocation(dest, oat_index, /* app_to_boot_image */ IsInBootImage(src.Ptr()));
+ }
}
-void ImageWriter::CopyAndFixupPointer(void** target, void* value) {
- void* new_value = value;
- if (value != nullptr && !IsInBootImage(value)) {
- auto it = native_object_relocations_.find(value);
- CHECK(it != native_object_relocations_.end()) << value;
- const NativeObjectRelocation& relocation = it->second;
- ImageInfo& image_info = GetImageInfo(relocation.oat_index);
- new_value = reinterpret_cast<void*>(image_info.image_begin_ + relocation.offset);
- }
- if (target_ptr_size_ == PointerSize::k32) {
- *reinterpret_cast<uint32_t*>(target) = PointerToLowMemUInt32(new_value);
+void ImageWriter::CopyAndFixupPointer(void** target,
+ void* value,
+ size_t oat_index,
+ PointerSize pointer_size) {
+ void* new_value = NativeLocationInImage(value);
+ if (pointer_size == PointerSize::k32) {
+ *reinterpret_cast<uint32_t*>(target) = reinterpret_cast32<uint32_t>(new_value);
} else {
- *reinterpret_cast<uint64_t*>(target) = reinterpret_cast<uintptr_t>(new_value);
+ *reinterpret_cast<uint64_t*>(target) = reinterpret_cast64<uint64_t>(new_value);
}
+ DCHECK(value != nullptr);
+ RecordImageRelocation(target, oat_index, /* app_to_boot_image */ IsInBootImage(value));
+}
+
+void ImageWriter::CopyAndFixupPointer(void** target, void* value, size_t oat_index)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CopyAndFixupPointer(target, value, oat_index, target_ptr_size_);
+}
+
+void ImageWriter::CopyAndFixupPointer(
+ void* object, MemberOffset offset, void* value, size_t oat_index, PointerSize pointer_size) {
+ void** target =
+ reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(object) + offset.Uint32Value());
+ return CopyAndFixupPointer(target, value, oat_index, pointer_size);
+}
+
+void ImageWriter::CopyAndFixupPointer(
+ void* object, MemberOffset offset, void* value, size_t oat_index) {
+ return CopyAndFixupPointer(object, offset, value, oat_index, target_ptr_size_);
}
} // namespace linker
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 9097cc90c6..9ab9c3eb6f 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -369,6 +369,12 @@ class ImageWriter FINAL {
// Class table associated with this image for serialization.
std::unique_ptr<ClassTable> class_table_;
+
+ // Relocations of references/pointers. For boot image, it contains one bit
+ // for each location that can be relocated. For app image, it contains twice
+ // that many bits, first half contains relocations within this image and the
+ // second half contains relocations for references to the boot image.
+ std::vector<uint8_t> relocation_bitmap_;
};
// We use the lock word to store the offset of the object in the image.
@@ -393,15 +399,10 @@ class ImageWriter FINAL {
REQUIRES_SHARED(Locks::mutator_lock_);
BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
- void AddDexCacheArrayRelocation(void* array, size_t offset, ObjPtr<mirror::DexCache> dex_cache)
+ void AddDexCacheArrayRelocation(void* array, size_t offset, size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
void AddMethodPointerArray(mirror::PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_);
- static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
- }
-
mirror::Object* GetLocalAddress(mirror::Object* object) const
REQUIRES_SHARED(Locks::mutator_lock_) {
size_t offset = GetImageOffset(object);
@@ -469,21 +470,53 @@ class ImageWriter FINAL {
void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
- void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
+ void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, size_t oat_index)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void CopyAndFixupImTable(ImTable* orig, ImTable* copy, size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- void CopyAndFixupImTable(ImTable* orig, ImTable* copy) REQUIRES_SHARED(Locks::mutator_lock_);
- void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
+ void CopyAndFixupImtConflictTable(ImtConflictTable* orig,
+ ImtConflictTable* copy,
+ size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- void FixupClass(mirror::Class* orig, mirror::Class* copy)
+ template <bool kCheckNotNull = true>
+ void RecordImageRelocation(const void* dest, size_t oat_index, bool app_to_boot_image = false);
+ void FixupClass(mirror::Class* orig, mirror::Class* copy, size_t oat_index)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void FixupObject(mirror::Object* orig, mirror::Object* copy, size_t oat_index)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ template <typename T>
+ void FixupDexCacheArrayEntry(std::atomic<mirror::DexCachePair<T>>* orig_array,
+ std::atomic<mirror::DexCachePair<T>>* new_array,
+ uint32_t array_index,
+ size_t oat_index)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ template <typename T>
+ void FixupDexCacheArrayEntry(std::atomic<mirror::NativeDexCachePair<T>>* orig_array,
+ std::atomic<mirror::NativeDexCachePair<T>>* new_array,
+ uint32_t array_index,
+ size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- void FixupObject(mirror::Object* orig, mirror::Object* copy)
+ void FixupDexCacheArrayEntry(GcRoot<mirror::CallSite>* orig_array,
+ GcRoot<mirror::CallSite>* new_array,
+ uint32_t array_index,
+ size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
+ template <typename EntryType>
+ void FixupDexCacheArray(mirror::DexCache* orig_dex_cache,
+ mirror::DexCache* copy_dex_cache,
+ size_t oat_index,
+ MemberOffset array_offset,
+ uint32_t size)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void FixupDexCache(mirror::DexCache* orig_dex_cache,
+ mirror::DexCache* copy_dex_cache,
+ size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
void FixupPointerArray(mirror::Object* dst,
mirror::PointerArray* arr,
mirror::Class* klass,
- Bin array_type)
+ Bin array_type,
+ size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
// Get quick code for non-resolution/imt_conflict/abstract method.
@@ -531,7 +564,19 @@ class ImageWriter FINAL {
static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
- uintptr_t NativeOffsetInImage(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ struct NativeObjectRelocation {
+ size_t oat_index;
+ uintptr_t offset;
+ NativeObjectRelocationType type;
+
+ bool IsArtMethodRelocation() const {
+ return type == NativeObjectRelocationType::kArtMethodClean ||
+ type == NativeObjectRelocationType::kArtMethodDirty ||
+ type == NativeObjectRelocationType::kRuntimeMethod;
+ }
+ };
+
+ NativeObjectRelocation GetNativeRelocation(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Location of where the object will be when the image is loaded at runtime.
template <typename T>
@@ -539,7 +584,7 @@ class ImageWriter FINAL {
// Location of where the temporary copy of the object currently is.
template <typename T>
- T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
+ T* NativeCopyLocation(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Return true of obj is inside of the boot image space. This may only return true if we are
// compiling an app image.
@@ -571,13 +616,21 @@ class ImageWriter FINAL {
// Return true if there already exists a native allocation for an object.
bool NativeRelocationAssigned(void* ptr) const;
- void CopyReference(mirror::HeapReference<mirror::Object>* dest, ObjPtr<mirror::Object> src)
+ // Copy a reference and record image relocation.
+ template <typename DestType>
+ void CopyAndFixupReference(DestType* dest, ObjPtr<mirror::Object> src, size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
- void CopyReference(mirror::CompressedReference<mirror::Object>* dest, ObjPtr<mirror::Object> src)
+ // Copy a native pointer and record image relocation.
+ void CopyAndFixupPointer(void** target, void* value, size_t oat_index, PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void CopyAndFixupPointer(void** target, void* value, size_t oat_index)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void CopyAndFixupPointer(
+ void* object, MemberOffset offset, void* value, size_t oat_index, PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void CopyAndFixupPointer(void* object, MemberOffset offset, void* value, size_t oat_index)
REQUIRES_SHARED(Locks::mutator_lock_);
-
- void CopyAndFixupPointer(void** target, void* value);
const CompilerOptions& compiler_options_;
@@ -611,17 +664,6 @@ class ImageWriter FINAL {
// ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
// have one entry per art field for convenience. ArtFields are placed right after the end of the
// image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
- struct NativeObjectRelocation {
- size_t oat_index;
- uintptr_t offset;
- NativeObjectRelocationType type;
-
- bool IsArtMethodRelocation() const {
- return type == NativeObjectRelocationType::kArtMethodClean ||
- type == NativeObjectRelocationType::kArtMethodDirty ||
- type == NativeObjectRelocationType::kRuntimeMethod;
- }
- };
std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;
// Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
@@ -659,7 +701,6 @@ class ImageWriter FINAL {
class FixupRootVisitor;
class FixupVisitor;
class GetRootsVisitor;
- class ImageAddressVisitorForDexCacheArray;
class NativeLocationVisitor;
class PruneClassesVisitor;
class PruneClassLoaderClassesVisitor;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 09a0d376e0..20ae19afd8 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -442,7 +442,6 @@ OatWriter::OatWriter(const CompilerOptions& compiler_options,
size_type_bss_mappings_(0u),
size_string_bss_mappings_(0u),
relative_patcher_(nullptr),
- absolute_patch_locations_(),
profile_compilation_info_(info),
compact_dex_level_(compact_dex_level) {
// If we have a profile, always use at least the default compact dex level. The reason behind
@@ -1330,36 +1329,20 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
method_info_offset += code_offset;
DCHECK_LT(method_info_offset, code_offset);
}
- uint32_t frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
- uint32_t core_spill_mask = compiled_method->GetCoreSpillMask();
- uint32_t fp_spill_mask = compiled_method->GetFpSpillMask();
- *method_header = OatQuickMethodHeader(vmap_table_offset,
- method_info_offset,
- frame_size_in_bytes,
- core_spill_mask,
- fp_spill_mask,
- code_size);
+ *method_header = OatQuickMethodHeader(vmap_table_offset, method_info_offset, code_size);
if (!deduped) {
// Update offsets. (Checksum is updated when writing.)
offset_ += sizeof(*method_header); // Method header is prepended before code.
offset_ += code_size;
- // Record absolute patch locations.
- if (!compiled_method->GetPatches().empty()) {
- uintptr_t base_loc = offset_ - code_size - writer_->oat_header_->GetExecutableOffset();
- for (const LinkerPatch& patch : compiled_method->GetPatches()) {
- if (!patch.IsPcRelative()) {
- writer_->absolute_patch_locations_.push_back(base_loc + patch.LiteralOffset());
- }
- }
- }
}
// Exclude quickened dex methods (code_size == 0) since they have no native code.
if (generate_debug_info_ && code_size != 0) {
DCHECK(has_debug_info);
+ const uint8_t* code_info = compiled_method->GetVmapTable().data();
+ DCHECK(code_info != nullptr);
- bool has_code_info = method_header->IsOptimized();
// Record debug information for this function if we are doing that.
debug::MethodDebugInfo& info = writer_->method_info_[debug_info_idx];
DCHECK(info.custom_name.empty());
@@ -1376,8 +1359,8 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
info.is_code_address_text_relative = true;
info.code_address = code_offset - executable_offset_;
info.code_size = code_size;
- info.frame_size_in_bytes = compiled_method->GetFrameSizeInBytes();
- info.code_info = has_code_info ? compiled_method->GetVmapTable().data() : nullptr;
+ info.frame_size_in_bytes = CodeInfo::DecodeFrameInfo(code_info).FrameSizeInBytes();
+ info.code_info = code_info;
info.cfi = compiled_method->GetCFIInfo();
} else {
DCHECK(!has_debug_info);
@@ -1406,10 +1389,7 @@ class OatWriter::LayoutReserveOffsetCodeMethodVisitor : public OrderedMethodVisi
executable_offset_(writer->oat_header_->GetExecutableOffset()),
debuggable_(compiler_options.GetDebuggable()),
native_debuggable_(compiler_options.GetNativeDebuggable()),
- generate_debug_info_(compiler_options.GenerateAnyDebugInfo()) {
- writer->absolute_patch_locations_.reserve(
- writer->GetCompilerDriver()->GetNonRelativeLinkerPatchCount());
- }
+ generate_debug_info_(compiler_options.GenerateAnyDebugInfo()) {}
struct CodeOffsetsKeyComparator {
bool operator()(const CompiledMethod* lhs, const CompiledMethod* rhs) const {
@@ -1570,7 +1550,7 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
// Find origin method. Declaring class and dex_method_idx
// in the copied method should be the same as in the origin
// method.
- mirror::Class* declaring_class = method.GetDeclaringClass();
+ ObjPtr<mirror::Class> declaring_class = method.GetDeclaringClass();
ArtMethod* origin = declaring_class->FindClassMethod(
declaring_class->GetDexCache(),
method.GetDexMethodIndex(),
@@ -1868,11 +1848,6 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
target_offset);
break;
}
- case LinkerPatch::Type::kCall: {
- uint32_t target_offset = GetTargetOffset(patch);
- PatchCodeAddress(&patched_code_, literal_offset, target_offset);
- break;
- }
case LinkerPatch::Type::kMethodRelative: {
uint32_t target_offset = GetTargetMethodOffset(GetTargetMethod(patch));
writer_->relative_patcher_->PatchPcRelativeReference(&patched_code_,
@@ -2051,26 +2026,6 @@ class OatWriter::WriteCodeMethodVisitor : public OrderedMethodVisitor {
data[2] = (address >> 16) & 0xffu;
data[3] = (address >> 24) & 0xffu;
}
-
- void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- uint32_t address = target_offset;
- if (writer_->GetCompilerOptions().IsBootImage()) {
- size_t oat_index = writer_->image_writer_->GetOatIndexForDexCache(dex_cache_);
- // TODO: Clean up offset types.
- // The target_offset must be treated as signed for cross-oat patching.
- const void* target = reinterpret_cast<const void*>(
- writer_->image_writer_->GetOatDataBegin(oat_index) +
- static_cast<int32_t>(target_offset));
- address = PointerToLowMemUInt32(target);
- }
- DCHECK_LE(offset + 4, code->size());
- uint8_t* data = &(*code)[offset];
- data[0] = address & 0xffu;
- data[1] = (address >> 8) & 0xffu;
- data[2] = (address >> 16) & 0xffu;
- data[3] = (address >> 24) & 0xffu;
- }
};
class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor {
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 298859bb38..1618810e71 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -533,9 +533,6 @@ class OatWriter {
// The helper for processing relative patches is external so that we can patch across oat files.
MultiOatRelativePatcher* relative_patcher_;
- // The locations of absolute patches relative to the start of the executable section.
- dchecked_vector<uintptr_t> absolute_patch_locations_;
-
// Profile info used to generate new layout of files.
ProfileCompilationInfo* profile_compilation_info_;
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index f8370515b5..37d0a3f5ce 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -72,9 +72,6 @@ class OatTest : public CommonCompilerTest {
} else {
const void* quick_oat_code = oat_method.GetQuickCode();
EXPECT_TRUE(quick_oat_code != nullptr) << method->PrettyMethod();
- EXPECT_EQ(oat_method.GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
- EXPECT_EQ(oat_method.GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
- EXPECT_EQ(oat_method.GetFpSpillMask(), compiled_method->GetFpSpillMask());
uintptr_t oat_code_aligned = RoundDown(reinterpret_cast<uintptr_t>(quick_oat_code), 2);
quick_oat_code = reinterpret_cast<const void*>(oat_code_aligned);
ArrayRef<const uint8_t> quick_code = compiled_method->GetQuickCode();
@@ -475,7 +472,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
// it is time to update OatHeader::kOatVersion
EXPECT_EQ(76U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
- EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
+ EXPECT_EQ(12U, sizeof(OatQuickMethodHeader));
EXPECT_EQ(166 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
sizeof(QuickEntryPoints));
}
diff --git a/dex2oat/linker/relative_patcher_test.h b/dex2oat/linker/relative_patcher_test.h
index ae58b54863..075771d152 100644
--- a/dex2oat/linker/relative_patcher_test.h
+++ b/dex2oat/linker/relative_patcher_test.h
@@ -87,9 +87,6 @@ class RelativePatcherTest : public CommonCompilerTest {
compiler_driver_.get(),
instruction_set_,
code,
- /* frame_size_in_bytes */ 0u,
- /* core_spill_mask */ 0u,
- /* fp_spill_mask */ 0u,
/* method_info */ ArrayRef<const uint8_t>(),
/* vmap_table */ ArrayRef<const uint8_t>(),
/* cfi_info */ ArrayRef<const uint8_t>(),
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 82610353b4..f8274e2f9a 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -1212,18 +1212,19 @@ static void dumpCode(const DexFile* pDexFile, u4 idx, u4 flags,
/*
* Dumps a method.
*/
-static void dumpMethod(const DexFile* pDexFile, u4 idx, u4 flags,
- const DexFile::CodeItem* pCode, u4 codeOffset, int i) {
+static void dumpMethod(const ClassAccessor::Method& method, int i) {
// Bail for anything private if export only requested.
+ const uint32_t flags = method.GetRawAccessFlags();
if (gOptions.exportsOnly && (flags & (kAccPublic | kAccProtected)) == 0) {
return;
}
- const DexFile::MethodId& pMethodId = pDexFile->GetMethodId(idx);
- const char* name = pDexFile->StringDataByIdx(pMethodId.name_idx_);
- const Signature signature = pDexFile->GetMethodSignature(pMethodId);
+ const DexFile& dex_file = method.GetDexFile();
+ const DexFile::MethodId& pMethodId = dex_file.GetMethodId(method.GetIndex());
+ const char* name = dex_file.StringDataByIdx(pMethodId.name_idx_);
+ const Signature signature = dex_file.GetMethodSignature(pMethodId);
char* typeDescriptor = strdup(signature.ToString().c_str());
- const char* backDescriptor = pDexFile->StringByTypeIdx(pMethodId.class_idx_);
+ const char* backDescriptor = dex_file.StringByTypeIdx(pMethodId.class_idx_);
char* accessStr = createAccessFlagStr(flags, kAccessForMethod);
if (gOptions.outputFormat == OUTPUT_PLAIN) {
@@ -1231,11 +1232,15 @@ static void dumpMethod(const DexFile* pDexFile, u4 idx, u4 flags,
fprintf(gOutFile, " name : '%s'\n", name);
fprintf(gOutFile, " type : '%s'\n", typeDescriptor);
fprintf(gOutFile, " access : 0x%04x (%s)\n", flags, accessStr);
- if (pCode == nullptr) {
+ if (method.GetCodeItem() == nullptr) {
fprintf(gOutFile, " code : (none)\n");
} else {
fprintf(gOutFile, " code -\n");
- dumpCode(pDexFile, idx, flags, pCode, codeOffset);
+ dumpCode(&dex_file,
+ method.GetIndex(),
+ flags,
+ method.GetCodeItem(),
+ method.GetCodeItemOffset());
}
if (gOptions.disassemble) {
fputc('\n', gOutFile);
@@ -1316,18 +1321,20 @@ static void dumpMethod(const DexFile* pDexFile, u4 idx, u4 flags,
}
/*
- * Dumps a static (class) field.
+ * Dumps a static or instance (class) field.
*/
-static void dumpSField(const DexFile* pDexFile, u4 idx, u4 flags, int i, const u1** data) {
+static void dumpField(const ClassAccessor::Field& field, int i, const u1** data = nullptr) {
// Bail for anything private if export only requested.
+ const uint32_t flags = field.GetRawAccessFlags();
if (gOptions.exportsOnly && (flags & (kAccPublic | kAccProtected)) == 0) {
return;
}
- const DexFile::FieldId& pFieldId = pDexFile->GetFieldId(idx);
- const char* name = pDexFile->StringDataByIdx(pFieldId.name_idx_);
- const char* typeDescriptor = pDexFile->StringByTypeIdx(pFieldId.type_idx_);
- const char* backDescriptor = pDexFile->StringByTypeIdx(pFieldId.class_idx_);
+ const DexFile& dex_file = field.GetDexFile();
+ const DexFile::FieldId& field_id = dex_file.GetFieldId(field.GetIndex());
+ const char* name = dex_file.StringDataByIdx(field_id.name_idx_);
+ const char* typeDescriptor = dex_file.StringByTypeIdx(field_id.type_idx_);
+ const char* backDescriptor = dex_file.StringByTypeIdx(field_id.class_idx_);
char* accessStr = createAccessFlagStr(flags, kAccessForField);
if (gOptions.outputFormat == OUTPUT_PLAIN) {
@@ -1337,7 +1344,7 @@ static void dumpSField(const DexFile* pDexFile, u4 idx, u4 flags, int i, const u
fprintf(gOutFile, " access : 0x%04x (%s)\n", flags, accessStr);
if (data != nullptr) {
fputs(" value : ", gOutFile);
- dumpEncodedValue(pDexFile, data);
+ dumpEncodedValue(&dex_file, data);
fputs("\n", gOutFile);
}
} else if (gOptions.outputFormat == OUTPUT_XML) {
@@ -1353,7 +1360,7 @@ static void dumpSField(const DexFile* pDexFile, u4 idx, u4 flags, int i, const u
fprintf(gOutFile, " visibility=%s\n", quotedVisibility(flags));
if (data != nullptr) {
fputs(" value=\"", gOutFile);
- dumpEncodedValue(pDexFile, data);
+ dumpEncodedValue(&dex_file, data);
fputs("\"\n", gOutFile);
}
fputs(">\n</field>\n", gOutFile);
@@ -1363,41 +1370,16 @@ static void dumpSField(const DexFile* pDexFile, u4 idx, u4 flags, int i, const u
}
/*
- * Dumps an instance field.
+ * Dumping a CFG.
*/
-static void dumpIField(const DexFile* pDexFile, u4 idx, u4 flags, int i) {
- dumpSField(pDexFile, idx, flags, i, nullptr);
-}
-
-/*
- * Dumping a CFG. Note that this will do duplicate work. utils.h doesn't expose the code-item
- * version, so the DumpMethodCFG code will have to iterate again to find it. But dexdump is a
- * tool, so this is not performance-critical.
- */
-
-static void dumpCfg(const DexFile* dex_file,
- u4 dex_method_idx,
- const DexFile::CodeItem* code_item) {
- if (code_item != nullptr) {
- std::ostringstream oss;
- DumpMethodCFG(dex_file, dex_method_idx, oss);
- fputs(oss.str().c_str(), gOutFile);
- }
-}
-
static void dumpCfg(const DexFile* dex_file, int idx) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(idx);
- const u1* class_data = dex_file->GetClassData(class_def);
- if (class_data == nullptr) { // empty class such as a marker interface?
- return;
- }
- ClassDataItemIterator it(*dex_file, class_data);
- it.SkipAllFields();
- while (it.HasNextMethod()) {
- dumpCfg(dex_file,
- it.GetMemberIndex(),
- it.GetMethodCodeItem());
- it.Next();
+ ClassAccessor accessor(*dex_file, dex_file->GetClassDef(idx));
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ if (method.GetCodeItem() != nullptr) {
+ std::ostringstream oss;
+ DumpMethodCFG(method, oss);
+ fputs(oss.str().c_str(), gOutFile);
+ }
}
}
@@ -1512,65 +1494,50 @@ static void dumpClass(const DexFile* pDexFile, int idx, char** pLastPackage) {
}
// Fields and methods.
- const u1* pEncodedData = pDexFile->GetClassData(pClassDef);
- if (pEncodedData == nullptr) {
- if (gOptions.outputFormat == OUTPUT_PLAIN) {
- fprintf(gOutFile, " Static fields -\n");
- fprintf(gOutFile, " Instance fields -\n");
- fprintf(gOutFile, " Direct methods -\n");
- fprintf(gOutFile, " Virtual methods -\n");
- }
- } else {
- ClassDataItemIterator pClassData(*pDexFile, pEncodedData);
+ ClassAccessor accessor(*pDexFile, pClassDef);
- // Prepare data for static fields.
- const u1* sData = pDexFile->GetEncodedStaticFieldValuesArray(pClassDef);
- const u4 sSize = sData != nullptr ? DecodeUnsignedLeb128(&sData) : 0;
+ // Prepare data for static fields.
+ const u1* sData = pDexFile->GetEncodedStaticFieldValuesArray(pClassDef);
+ const u4 sSize = sData != nullptr ? DecodeUnsignedLeb128(&sData) : 0;
- // Static fields.
- if (gOptions.outputFormat == OUTPUT_PLAIN) {
- fprintf(gOutFile, " Static fields -\n");
- }
- for (u4 i = 0; pClassData.HasNextStaticField(); i++, pClassData.Next()) {
- dumpSField(pDexFile,
- pClassData.GetMemberIndex(),
- pClassData.GetRawMemberAccessFlags(),
- i,
- i < sSize ? &sData : nullptr);
- } // for
+ // Static fields.
+ if (gOptions.outputFormat == OUTPUT_PLAIN) {
+ fprintf(gOutFile, " Static fields -\n");
+ }
+ uint32_t i = 0u;
+ for (const ClassAccessor::Field& field : accessor.GetStaticFields()) {
+ dumpField(field, i, i < sSize ? &sData : nullptr);
+ ++i;
+ }
- // Instance fields.
- if (gOptions.outputFormat == OUTPUT_PLAIN) {
- fprintf(gOutFile, " Instance fields -\n");
- }
- for (u4 i = 0; pClassData.HasNextInstanceField(); i++, pClassData.Next()) {
- dumpIField(pDexFile,
- pClassData.GetMemberIndex(),
- pClassData.GetRawMemberAccessFlags(),
- i);
- } // for
+ // Instance fields.
+ if (gOptions.outputFormat == OUTPUT_PLAIN) {
+ fprintf(gOutFile, " Instance fields -\n");
+ }
+ i = 0u;
+ for (const ClassAccessor::Field& field : accessor.GetInstanceFields()) {
+ dumpField(field, i);
+ ++i;
+ }
- // Direct methods.
- if (gOptions.outputFormat == OUTPUT_PLAIN) {
- fprintf(gOutFile, " Direct methods -\n");
- }
- for (int i = 0; pClassData.HasNextDirectMethod(); i++, pClassData.Next()) {
- dumpMethod(pDexFile, pClassData.GetMemberIndex(),
- pClassData.GetRawMemberAccessFlags(),
- pClassData.GetMethodCodeItem(),
- pClassData.GetMethodCodeItemOffset(), i);
- } // for
+ // Direct methods.
+ if (gOptions.outputFormat == OUTPUT_PLAIN) {
+ fprintf(gOutFile, " Direct methods -\n");
+ }
+ i = 0u;
+ for (const ClassAccessor::Method& method : accessor.GetDirectMethods()) {
+ dumpMethod(method, i);
+ ++i;
+ }
- // Virtual methods.
- if (gOptions.outputFormat == OUTPUT_PLAIN) {
- fprintf(gOutFile, " Virtual methods -\n");
- }
- for (int i = 0; pClassData.HasNextVirtualMethod(); i++, pClassData.Next()) {
- dumpMethod(pDexFile, pClassData.GetMemberIndex(),
- pClassData.GetRawMemberAccessFlags(),
- pClassData.GetMethodCodeItem(),
- pClassData.GetMethodCodeItemOffset(), i);
- } // for
+ // Virtual methods.
+ if (gOptions.outputFormat == OUTPUT_PLAIN) {
+ fprintf(gOutFile, " Virtual methods -\n");
+ }
+ i = 0u;
+ for (const ClassAccessor::Method& method : accessor.GetVirtualMethods()) {
+ dumpMethod(method, i);
+ ++i;
}
// End of class.
@@ -1884,6 +1851,7 @@ int processFile(const char* fileName) {
return -1;
}
const DexFileLoader dex_file_loader;
+ DexFileLoaderErrorCode error_code;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
if (!dex_file_loader.OpenAll(reinterpret_cast<const uint8_t*>(content.data()),
@@ -1891,6 +1859,7 @@ int processFile(const char* fileName) {
fileName,
kVerify,
kVerifyChecksum,
+ &error_code,
&error_msg,
&dex_files)) {
// Display returned error message to user. Note that this error behavior
diff --git a/dexdump/dexdump_cfg.cc b/dexdump/dexdump_cfg.cc
index 7e534ed359..7a0eb0e088 100644
--- a/dexdump/dexdump_cfg.cc
+++ b/dexdump/dexdump_cfg.cc
@@ -25,6 +25,7 @@
#include <set>
#include <sstream>
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_exception_helpers.h"
@@ -32,15 +33,12 @@
namespace art {
-static void dumpMethodCFGImpl(const DexFile* dex_file,
- uint32_t dex_method_idx,
- const DexFile::CodeItem* code_item,
- std::ostream& os) {
+void DumpMethodCFG(const ClassAccessor::Method& method, std::ostream& os) {
+ const DexFile* dex_file = &method.GetDexFile();
os << "digraph {\n";
- os << " # /* " << dex_file->PrettyMethod(dex_method_idx, true) << " */\n";
-
- CodeItemDataAccessor accessor(*dex_file, code_item);
+ os << " # /* " << dex_file->PrettyMethod(method.GetIndex(), true) << " */\n";
+ CodeItemDataAccessor accessor(method.GetInstructionsAndData());
std::set<uint32_t> dex_pc_is_branch_target;
{
// Go and populate.
@@ -353,42 +351,5 @@ static void dumpMethodCFGImpl(const DexFile* dex_file,
os << "}\n";
}
-void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os) {
- // This is painful, we need to find the code item. That means finding the class, and then
- // iterating the table.
- if (dex_method_idx >= dex_file->NumMethodIds()) {
- os << "Could not find method-idx.";
- return;
- }
- const DexFile::MethodId& method_id = dex_file->GetMethodId(dex_method_idx);
-
- const DexFile::ClassDef* class_def = dex_file->FindClassDef(method_id.class_idx_);
- if (class_def == nullptr) {
- os << "Could not find class-def.";
- return;
- }
-
- const uint8_t* class_data = dex_file->GetClassData(*class_def);
- if (class_data == nullptr) {
- os << "No class data.";
- return;
- }
-
- ClassDataItemIterator it(*dex_file, class_data);
- it.SkipAllFields();
-
- // Find method, and dump it.
- while (it.HasNextMethod()) {
- uint32_t method_idx = it.GetMemberIndex();
- if (method_idx == dex_method_idx) {
- dumpMethodCFGImpl(dex_file, dex_method_idx, it.GetMethodCodeItem(), os);
- return;
- }
- it.Next();
- }
-
- // Otherwise complain.
- os << "Something went wrong, didn't find the method in the class data.";
-}
} // namespace art
diff --git a/dexdump/dexdump_cfg.h b/dexdump/dexdump_cfg.h
index 64e5f9af60..564eef6e68 100644
--- a/dexdump/dexdump_cfg.h
+++ b/dexdump/dexdump_cfg.h
@@ -20,11 +20,11 @@
#include <inttypes.h>
#include <ostream>
-namespace art {
+#include "dex/class_accessor.h"
-class DexFile;
+namespace art {
-void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
+void DumpMethodCFG(const ClassAccessor::Method& method, std::ostream& os);
} // namespace art
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index 6a50258570..e7eaf30b7c 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -30,6 +30,7 @@
#include <android-base/file.h>
#include <android-base/logging.h>
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
@@ -142,27 +143,21 @@ static void dumpMethod(const DexFile* pDexFile,
* Runs through all direct and virtual methods in the class.
*/
void dumpClass(const DexFile* pDexFile, u4 idx) {
- const DexFile::ClassDef& pClassDef = pDexFile->GetClassDef(idx);
+ const DexFile::ClassDef& class_def = pDexFile->GetClassDef(idx);
- const char* fileName;
- if (!pClassDef.source_file_idx_.IsValid()) {
- fileName = nullptr;
- } else {
- fileName = pDexFile->StringDataByIdx(pClassDef.source_file_idx_);
+ const char* fileName = nullptr;
+ if (class_def.source_file_idx_.IsValid()) {
+ fileName = pDexFile->StringDataByIdx(class_def.source_file_idx_);
}
- const u1* pEncodedData = pDexFile->GetClassData(pClassDef);
- if (pEncodedData != nullptr) {
- ClassDataItemIterator pClassData(*pDexFile, pEncodedData);
- pClassData.SkipAllFields();
- // Direct and virtual methods.
- for (; pClassData.HasNextMethod(); pClassData.Next()) {
- dumpMethod(pDexFile, fileName,
- pClassData.GetMemberIndex(),
- pClassData.GetRawMemberAccessFlags(),
- pClassData.GetMethodCodeItem(),
- pClassData.GetMethodCodeItemOffset());
- }
+ ClassAccessor accessor(*pDexFile, class_def);
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ dumpMethod(pDexFile,
+ fileName,
+ method.GetIndex(),
+ method.GetRawAccessFlags(),
+ method.GetCodeItem(),
+ method.GetCodeItemOffset());
}
}
@@ -180,6 +175,7 @@ static int processFile(const char* fileName) {
return -1;
}
std::vector<std::unique_ptr<const DexFile>> dex_files;
+ DexFileLoaderErrorCode error_code;
std::string error_msg;
const DexFileLoader dex_file_loader;
if (!dex_file_loader.OpenAll(reinterpret_cast<const uint8_t*>(content.data()),
@@ -187,6 +183,7 @@ static int processFile(const char* fileName) {
fileName,
/*verify*/ true,
kVerifyChecksum,
+ &error_code,
&error_msg,
&dex_files)) {
LOG(ERROR) << error_msg;
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 24ee0892dc..f54c55153a 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -167,14 +167,15 @@ static std::vector<std::pair<V, K>> SortByValueDesc(
// Fixup a remote pointer that we read from a foreign boot.art to point to our own memory.
// Returned pointer will point to inside of remote_contents.
template <typename T>
-static T* FixUpRemotePointer(T* remote_ptr,
- std::vector<uint8_t>& remote_contents,
- const backtrace_map_t& boot_map) {
+static ObjPtr<T> FixUpRemotePointer(ObjPtr<T> remote_ptr,
+ std::vector<uint8_t>& remote_contents,
+ const backtrace_map_t& boot_map)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (remote_ptr == nullptr) {
return nullptr;
}
- uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr);
+ uintptr_t remote = reinterpret_cast<uintptr_t>(remote_ptr.Ptr());
// In the case the remote pointer is out of range, it probably belongs to another image.
// Just return null for this case.
@@ -188,14 +189,15 @@ static T* FixUpRemotePointer(T* remote_ptr,
}
template <typename T>
-static T* RemoteContentsPointerToLocal(T* remote_ptr,
- std::vector<uint8_t>& remote_contents,
- const ImageHeader& image_header) {
+static ObjPtr<T> RemoteContentsPointerToLocal(ObjPtr<T> remote_ptr,
+ std::vector<uint8_t>& remote_contents,
+ const ImageHeader& image_header)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (remote_ptr == nullptr) {
return nullptr;
}
- uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr);
+ uint8_t* remote = reinterpret_cast<uint8_t*>(remote_ptr.Ptr());
ptrdiff_t boot_offset = remote - &remote_contents[0];
const uint8_t* local_ptr = reinterpret_cast<const uint8_t*>(&image_header) + boot_offset;
@@ -534,9 +536,10 @@ class RegionSpecializedBase<mirror::Object> : public RegionCommon<mirror::Object
os_ << " field contents:\n";
for (mirror::Object* object : class_data.dirty_objects) {
// remote class object
- auto remote_klass = reinterpret_cast<mirror::Class*>(object);
+ ObjPtr<mirror::Class> remote_klass =
+ ObjPtr<mirror::Class>::DownCast<mirror::Object>(object);
// local class object
- auto local_klass =
+ ObjPtr<mirror::Class> local_klass =
RemoteContentsPointerToLocal(remote_klass,
*RegionCommon<mirror::Object>::remote_contents_,
RegionCommon<mirror::Object>::image_header_);
@@ -797,12 +800,12 @@ class RegionSpecializedBase<ArtMethod> : public RegionCommon<ArtMethod> {
// remote method
auto art_method = reinterpret_cast<ArtMethod*>(method);
// remote class
- mirror::Class* remote_declaring_class =
+ ObjPtr<mirror::Class> remote_declaring_class =
FixUpRemotePointer(art_method->GetDeclaringClass(),
*RegionCommon<ArtMethod>::remote_contents_,
RegionCommon<ArtMethod>::boot_map_);
// local class
- mirror::Class* declaring_class =
+ ObjPtr<mirror::Class> declaring_class =
RemoteContentsPointerToLocal(remote_declaring_class,
*RegionCommon<ArtMethod>::remote_contents_,
RegionCommon<ArtMethod>::image_header_);
@@ -815,7 +818,7 @@ class RegionSpecializedBase<ArtMethod> : public RegionCommon<ArtMethod> {
os_ << " field contents:\n";
for (ArtMethod* method : false_dirty_entries_) {
// local class
- mirror::Class* declaring_class = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
DumpOneArtMethod(method, declaring_class, nullptr);
}
}
@@ -905,8 +908,8 @@ class RegionSpecializedBase<ArtMethod> : public RegionCommon<ArtMethod> {
}
void DumpOneArtMethod(ArtMethod* art_method,
- mirror::Class* declaring_class,
- mirror::Class* remote_declaring_class)
+ ObjPtr<mirror::Class> declaring_class,
+ ObjPtr<mirror::Class> remote_declaring_class)
REQUIRES_SHARED(Locks::mutator_lock_) {
PointerSize pointer_size = InstructionSetPointerSize(Runtime::Current()->GetInstructionSet());
os_ << " " << reinterpret_cast<const void*>(art_method) << " ";
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index ee477215e7..1c7614b695 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -93,6 +93,7 @@ class BitTableBase {
}
ALWAYS_INLINE uint32_t Get(uint32_t row, uint32_t column = 0) const {
+ DCHECK_NE(header_bit_size_, 0u) << "Table has not been loaded";
DCHECK_LT(row, num_rows_);
DCHECK_LT(column, kNumColumns);
size_t offset = row * NumRowBits() + column_offset_[column];
@@ -100,6 +101,7 @@ class BitTableBase {
}
ALWAYS_INLINE BitMemoryRegion GetBitMemoryRegion(uint32_t row, uint32_t column = 0) const {
+ DCHECK_NE(header_bit_size_, 0u) << "Table has not been loaded";
DCHECK_LT(row, num_rows_);
DCHECK_LT(column, kNumColumns);
size_t offset = row * NumRowBits() + column_offset_[column];
diff --git a/libartbase/base/casts.h b/libartbase/base/casts.h
index cbd5b67831..76ff67948b 100644
--- a/libartbase/base/casts.h
+++ b/libartbase/base/casts.h
@@ -165,6 +165,29 @@ inline Dest reinterpret_cast64(Source* ptr) {
return static_cast<Dest>(reinterpret_cast<uintptr_t>(ptr));
}
+// A version of reinterpret_cast<>() between pointers and int32_t/uint32_t that enforces
+// zero-extension and checks that the values are converted without loss of precision.
+
+template <typename Dest, typename Source>
+inline Dest reinterpret_cast32(Source source) {
+ // This is the overload for casting from int32_t/uint32_t to a pointer.
+ static_assert(std::is_same<Source, int32_t>::value || std::is_same<Source, uint32_t>::value,
+ "Source must be int32_t or uint32_t.");
+ static_assert(std::is_pointer<Dest>::value, "Dest must be a pointer.");
+ // Check that we don't lose any non-0 bits here.
+ static_assert(sizeof(uintptr_t) >= sizeof(Source), "Expecting at least 32-bit pointers.");
+ return reinterpret_cast<Dest>(static_cast<uintptr_t>(static_cast<uint32_t>(source)));
+}
+
+template <typename Dest, typename Source>
+inline Dest reinterpret_cast32(Source* ptr) {
+ // This is the overload for casting from a pointer to int32_t/uint32_t.
+ static_assert(std::is_same<Dest, int32_t>::value || std::is_same<Dest, uint32_t>::value,
+ "Dest must be int32_t or uint32_t.");
+ static_assert(sizeof(uintptr_t) >= sizeof(Dest), "Expecting at least 32-bit pointers.");
+ return static_cast<Dest>(dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr)));
+}
+
} // namespace art
#endif // ART_LIBARTBASE_BASE_CASTS_H_
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index 67413eb85c..e24b073142 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -24,6 +24,7 @@
#include "nativehelper/scoped_local_ref.h"
#include "android-base/stringprintf.h"
+#include "android-base/unique_fd.h"
#include <unicode/uvernum.h>
#include "art_field-inl.h"
@@ -423,4 +424,83 @@ std::string CommonArtTestImpl::CreateClassPathWithChecksums(
return classpath;
}
+CommonArtTestImpl::ForkAndExecResult CommonArtTestImpl::ForkAndExec(
+ const std::vector<std::string>& argv,
+ const PostForkFn& post_fork,
+ const OutputHandlerFn& handler) {
+ ForkAndExecResult result;
+ result.status_code = 0;
+ result.stage = ForkAndExecResult::kLink;
+
+ std::vector<const char*> c_args;
+ for (const std::string& str : argv) {
+ c_args.push_back(str.c_str());
+ }
+ c_args.push_back(nullptr);
+
+ android::base::unique_fd link[2];
+ {
+ int link_fd[2];
+
+ if (pipe(link_fd) == -1) {
+ return result;
+ }
+ link[0].reset(link_fd[0]);
+ link[1].reset(link_fd[1]);
+ }
+
+ result.stage = ForkAndExecResult::kFork;
+
+ pid_t pid = fork();
+ if (pid == -1) {
+ return result;
+ }
+
+ if (pid == 0) {
+ if (!post_fork()) {
+ LOG(ERROR) << "Failed post-fork function";
+ exit(1);
+ UNREACHABLE();
+ }
+
+ // Redirect stdout and stderr.
+ dup2(link[1].get(), STDOUT_FILENO);
+ dup2(link[1].get(), STDERR_FILENO);
+
+ link[0].reset();
+ link[1].reset();
+
+ execv(c_args[0], const_cast<char* const*>(c_args.data()));
+ exit(1);
+ UNREACHABLE();
+ }
+
+ result.stage = ForkAndExecResult::kWaitpid;
+ link[1].reset();
+
+ char buffer[128] = { 0 };
+ ssize_t bytes_read = 0;
+ while (TEMP_FAILURE_RETRY(bytes_read = read(link[0].get(), buffer, 128)) > 0) {
+ handler(buffer, bytes_read);
+ }
+ handler(buffer, 0u); // End with a virtual write of zero length to simplify clients.
+
+ link[0].reset();
+
+ if (waitpid(pid, &result.status_code, 0) == -1) {
+ return result;
+ }
+
+ result.stage = ForkAndExecResult::kFinished;
+ return result;
+}
+
+CommonArtTestImpl::ForkAndExecResult CommonArtTestImpl::ForkAndExec(
+ const std::vector<std::string>& argv, const PostForkFn& post_fork, std::string* output) {
+ auto string_collect_fn = [output](char* buf, size_t len) {
+ *output += std::string(buf, len);
+ };
+ return ForkAndExec(argv, post_fork, string_collect_fn);
+}
+
} // namespace art
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 0ace09de1a..62834c7d35 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -19,8 +19,11 @@
#include <gtest/gtest.h>
+#include <functional>
#include <string>
+#include <sys/wait.h>
+
#include <android-base/logging.h>
#include "base/globals.h"
@@ -125,6 +128,29 @@ class CommonArtTestImpl {
return true;
}
+ struct ForkAndExecResult {
+ enum Stage {
+ kLink,
+ kFork,
+ kWaitpid,
+ kFinished,
+ };
+ Stage stage;
+ int status_code;
+
+ bool StandardSuccess() {
+ return stage == kFinished && WIFEXITED(status_code) && WEXITSTATUS(status_code) == 0;
+ }
+ };
+ using OutputHandlerFn = std::function<void(char*, size_t)>;
+ using PostForkFn = std::function<bool()>;
+ static ForkAndExecResult ForkAndExec(const std::vector<std::string>& argv,
+ const PostForkFn& post_fork,
+ const OutputHandlerFn& handler);
+ static ForkAndExecResult ForkAndExec(const std::vector<std::string>& argv,
+ const PostForkFn& post_fork,
+ std::string* output);
+
protected:
static bool IsHost() {
return !kIsTargetBuild;
diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc
index 56934aca1f..a63f326d5a 100644
--- a/libartbase/base/file_utils.cc
+++ b/libartbase/base/file_utils.cc
@@ -60,7 +60,6 @@
namespace art {
-using android::base::StringAppendF;
using android::base::StringPrintf;
static constexpr const char* kClassesDex = "classes.dex";
diff --git a/libartbase/base/safe_map.h b/libartbase/base/safe_map.h
index e08394ea86..a4d845996d 100644
--- a/libartbase/base/safe_map.h
+++ b/libartbase/base/safe_map.h
@@ -129,7 +129,7 @@ class SafeMap {
}
template <typename CreateFn>
- V GetOrCreate(const K& k, CreateFn create) {
+ V& GetOrCreate(const K& k, CreateFn create) {
static_assert(std::is_same<V, typename std::result_of<CreateFn()>::type>::value,
"Argument `create` should return a value of type V.");
auto lb = lower_bound(k);
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index b7a542fbbe..761c6113d6 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -45,7 +45,6 @@
namespace art {
using android::base::ReadFileToString;
-using android::base::StringAppendF;
using android::base::StringPrintf;
pid_t GetTid() {
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index 6e3b78e12c..ba61e1b0a3 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -239,7 +239,6 @@ template <typename Func, typename... Args>
static inline void CheckedCall(const Func& function, const char* what, Args... args) {
int rc = function(args...);
if (UNLIKELY(rc != 0)) {
- errno = rc;
PLOG(FATAL) << "Checked call failed for " << what;
}
}
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index 392ce1e7f5..cc7d7aae34 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -352,17 +352,17 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
bool verify,
bool verify_checksum,
std::string* error_msg,
- ZipOpenErrorCode* error_code) const {
+ DexFileLoaderErrorCode* error_code) const {
ScopedTrace trace("Dex file open from Zip Archive " + std::string(location));
CHECK(!location.empty());
std::unique_ptr<ZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
if (zip_entry == nullptr) {
- *error_code = ZipOpenErrorCode::kEntryNotFound;
+ *error_code = DexFileLoaderErrorCode::kEntryNotFound;
return nullptr;
}
if (zip_entry->GetUncompressedLength() == 0) {
*error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str());
- *error_code = ZipOpenErrorCode::kDexFileError;
+ *error_code = DexFileLoaderErrorCode::kDexFileError;
return nullptr;
}
@@ -394,7 +394,7 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
if (map == nullptr) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
error_msg->c_str());
- *error_code = ZipOpenErrorCode::kExtractToMemoryError;
+ *error_code = DexFileLoaderErrorCode::kExtractToMemoryError;
return nullptr;
}
VerifyResult verify_result;
@@ -417,23 +417,23 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip(
}
if (dex_file == nullptr) {
if (verify_result == VerifyResult::kVerifyNotAttempted) {
- *error_code = ZipOpenErrorCode::kDexFileError;
+ *error_code = DexFileLoaderErrorCode::kDexFileError;
} else {
- *error_code = ZipOpenErrorCode::kVerifyError;
+ *error_code = DexFileLoaderErrorCode::kVerifyError;
}
return nullptr;
}
if (!dex_file->DisableWrite()) {
*error_msg = StringPrintf("Failed to make dex file '%s' read only", location.c_str());
- *error_code = ZipOpenErrorCode::kMakeReadOnlyError;
+ *error_code = DexFileLoaderErrorCode::kMakeReadOnlyError;
return nullptr;
}
CHECK(dex_file->IsReadOnly()) << location;
if (verify_result != VerifyResult::kVerifySucceeded) {
- *error_code = ZipOpenErrorCode::kVerifyError;
+ *error_code = DexFileLoaderErrorCode::kVerifyError;
return nullptr;
}
- *error_code = ZipOpenErrorCode::kNoError;
+ *error_code = DexFileLoaderErrorCode::kNoError;
return dex_file;
}
@@ -452,7 +452,7 @@ bool ArtDexFileLoader::OpenAllDexFilesFromZip(
std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
ScopedTrace trace("Dex file open from Zip " + std::string(location));
DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
- ZipOpenErrorCode error_code;
+ DexFileLoaderErrorCode error_code;
std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
kClassesDex,
location,
@@ -482,7 +482,7 @@ bool ArtDexFileLoader::OpenAllDexFilesFromZip(
error_msg,
&error_code));
if (next_dex_file.get() == nullptr) {
- if (error_code != ZipOpenErrorCode::kEntryNotFound) {
+ if (error_code != DexFileLoaderErrorCode::kEntryNotFound) {
LOG(WARNING) << "Zip open failed: " << *error_msg;
}
break;
diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
index a460aee60f..da2620f587 100644
--- a/libdexfile/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -119,7 +119,7 @@ class ArtDexFileLoader : public DexFileLoader {
bool verify,
bool verify_checksum,
std::string* error_msg,
- ZipOpenErrorCode* error_code) const;
+ DexFileLoaderErrorCode* error_code) const;
static std::unique_ptr<DexFile> OpenCommon(const uint8_t* base,
size_t size,
diff --git a/libdexfile/dex/art_dex_file_loader_test.cc b/libdexfile/dex/art_dex_file_loader_test.cc
index 5f3fc0266f..3f311b7451 100644
--- a/libdexfile/dex/art_dex_file_loader_test.cc
+++ b/libdexfile/dex/art_dex_file_loader_test.cc
@@ -28,6 +28,7 @@
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "dex/base64_test_util.h"
+#include "dex/class_accessor-inl.h"
#include "dex/code_item_accessors-inl.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file.h"
@@ -124,16 +125,15 @@ TEST_F(ArtDexFileLoaderTest, GetMethodSignature) {
const DexFile::ClassDef& class_def = raw->GetClassDef(0);
ASSERT_STREQ("LGetMethodSignature;", raw->GetClassDescriptor(class_def));
- const uint8_t* class_data = raw->GetClassData(class_def);
- ASSERT_TRUE(class_data != nullptr);
- ClassDataItemIterator it(*raw, class_data);
-
- EXPECT_EQ(1u, it.NumDirectMethods());
+ ClassAccessor accessor(*raw, class_def);
+ ASSERT_TRUE(accessor.HasClassData());
+ auto methods = accessor.GetMethods();
+ auto cur_method = methods.begin();
// Check the signature for the static initializer.
{
- ASSERT_EQ(1U, it.NumDirectMethods());
- const DexFile::MethodId& method_id = raw->GetMethodId(it.GetMemberIndex());
+ ASSERT_EQ(1U, accessor.NumDirectMethods());
+ const DexFile::MethodId& method_id = raw->GetMethodId(cur_method->GetIndex());
const char* name = raw->StringDataByIdx(method_id.name_idx_);
ASSERT_STREQ("<init>", name);
std::string signature(raw->GetMethodSignature(method_id).ToString());
@@ -203,10 +203,11 @@ TEST_F(ArtDexFileLoaderTest, GetMethodSignature) {
"java.lang.Object[][] GetMethodSignature.mB()"
},
};
- ASSERT_EQ(arraysize(results), it.NumVirtualMethods());
+ ASSERT_EQ(arraysize(results), accessor.NumVirtualMethods());
for (const Result& r : results) {
- it.Next();
- const DexFile::MethodId& method_id = raw->GetMethodId(it.GetMemberIndex());
+ ++cur_method;
+ ASSERT_TRUE(cur_method != methods.end());
+ const DexFile::MethodId& method_id = raw->GetMethodId(cur_method->GetIndex());
const char* name = raw->StringDataByIdx(method_id.name_idx_);
ASSERT_STREQ(r.name, name);
@@ -215,8 +216,10 @@ TEST_F(ArtDexFileLoaderTest, GetMethodSignature) {
ASSERT_EQ(r.signature, signature);
std::string plain_method = std::string("GetMethodSignature.") + r.name;
- ASSERT_EQ(plain_method, raw->PrettyMethod(it.GetMemberIndex(), /* with_signature */ false));
- ASSERT_EQ(r.pretty_method, raw->PrettyMethod(it.GetMemberIndex(), /* with_signature */ true));
+ ASSERT_EQ(plain_method,
+ raw->PrettyMethod(cur_method->GetIndex(), /* with_signature */ false));
+ ASSERT_EQ(r.pretty_method,
+ raw->PrettyMethod(cur_method->GetIndex(), /* with_signature */ true));
}
}
diff --git a/libdexfile/dex/class_accessor.h b/libdexfile/dex/class_accessor.h
index 0d87f93d60..5579be21e5 100644
--- a/libdexfile/dex/class_accessor.h
+++ b/libdexfile/dex/class_accessor.h
@@ -34,12 +34,17 @@ class ClassAccessor {
private:
class BaseItem {
public:
- explicit BaseItem(const uint8_t* ptr_pos) : ptr_pos_(ptr_pos) {}
+ explicit BaseItem(const DexFile& dex_file,
+ const uint8_t* ptr_pos) : dex_file_(dex_file), ptr_pos_(ptr_pos) {}
uint32_t GetIndex() const {
return index_;
}
+ uint32_t GetRawAccessFlags() const {
+ return access_flags_;
+ }
+
uint32_t GetAccessFlags() const {
return HiddenApiAccessFlags::RemoveFromDex(access_flags_);
}
@@ -52,8 +57,13 @@ class ClassAccessor {
return (GetAccessFlags() & kAccFinal) != 0;
}
+ const DexFile& GetDexFile() const {
+ return dex_file_;
+ }
+
protected:
// Internal data pointer for reading.
+ const DexFile& dex_file_;
const uint8_t* ptr_pos_ = nullptr;
uint32_t index_ = 0u;
uint32_t access_flags_ = 0u;
@@ -93,9 +103,7 @@ class ClassAccessor {
explicit Method(const DexFile& dex_file,
const uint8_t* ptr_pos,
bool is_static_or_direct = true)
- : BaseItem(ptr_pos),
- dex_file_(dex_file),
- is_static_or_direct_(is_static_or_direct) {}
+ : BaseItem(dex_file, ptr_pos), is_static_or_direct_(is_static_or_direct) {}
void Read();
@@ -121,7 +129,6 @@ class ClassAccessor {
index_ = 0u;
}
- const DexFile& dex_file_;
bool is_static_or_direct_ = true;
uint32_t code_off_ = 0u;
@@ -132,11 +139,7 @@ class ClassAccessor {
class Field : public BaseItem {
public:
explicit Field(const DexFile& dex_file,
- const uint8_t* ptr_pos) : BaseItem(ptr_pos), dex_file_(dex_file) {}
-
- const DexFile& GetDexFile() const {
- return dex_file_;
- }
+ const uint8_t* ptr_pos) : BaseItem(dex_file, ptr_pos) {}
bool IsStatic() const {
return is_static_;
@@ -154,7 +157,6 @@ class ClassAccessor {
is_static_ = false;
}
- const DexFile& dex_file_;
bool is_static_ = true;
friend class ClassAccessor;
};
diff --git a/libdexfile/dex/descriptors_names.cc b/libdexfile/dex/descriptors_names.cc
index e338b55a9f..206f7a09bb 100644
--- a/libdexfile/dex/descriptors_names.cc
+++ b/libdexfile/dex/descriptors_names.cc
@@ -24,7 +24,6 @@
namespace art {
using android::base::StringAppendF;
-using android::base::StringPrintf;
void AppendPrettyDescriptor(const char* descriptor, std::string* result) {
// Count the number of '['s to get the dimensionality.
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 457addf114..6d9ca4aafa 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -269,6 +269,7 @@ bool DexFileLoader::OpenAll(
const std::string& location,
bool verify,
bool verify_checksum,
+ DexFileLoaderErrorCode* error_code,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
DCHECK(dex_files != nullptr) << "DexFile::Open: out-param is nullptr";
@@ -283,6 +284,7 @@ bool DexFileLoader::OpenAll(
location,
verify,
verify_checksum,
+ error_code,
error_msg,
dex_files);
}
@@ -387,17 +389,17 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
const std::string& location,
bool verify,
bool verify_checksum,
- std::string* error_msg,
- ZipOpenErrorCode* error_code) const {
+ DexFileLoaderErrorCode* error_code,
+ std::string* error_msg) const {
CHECK(!location.empty());
std::unique_ptr<DexZipEntry> zip_entry(zip_archive.Find(entry_name, error_msg));
if (zip_entry == nullptr) {
- *error_code = ZipOpenErrorCode::kEntryNotFound;
+ *error_code = DexFileLoaderErrorCode::kEntryNotFound;
return nullptr;
}
if (zip_entry->GetUncompressedLength() == 0) {
*error_msg = StringPrintf("Dex file '%s' has zero length", location.c_str());
- *error_code = ZipOpenErrorCode::kDexFileError;
+ *error_code = DexFileLoaderErrorCode::kDexFileError;
return nullptr;
}
@@ -405,7 +407,7 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
if (map.size() == 0) {
*error_msg = StringPrintf("Failed to extract '%s' from '%s': %s", entry_name, location.c_str(),
error_msg->c_str());
- *error_code = ZipOpenErrorCode::kExtractToMemoryError;
+ *error_code = DexFileLoaderErrorCode::kExtractToMemoryError;
return nullptr;
}
VerifyResult verify_result;
@@ -422,19 +424,15 @@ std::unique_ptr<const DexFile> DexFileLoader::OpenOneDexFileFromZip(
error_msg,
std::make_unique<VectorContainer>(std::move(map)),
&verify_result);
- if (dex_file == nullptr) {
+ if (verify_result != VerifyResult::kVerifySucceeded) {
if (verify_result == VerifyResult::kVerifyNotAttempted) {
- *error_code = ZipOpenErrorCode::kDexFileError;
+ *error_code = DexFileLoaderErrorCode::kDexFileError;
} else {
- *error_code = ZipOpenErrorCode::kVerifyError;
+ *error_code = DexFileLoaderErrorCode::kVerifyError;
}
return nullptr;
}
- if (verify_result != VerifyResult::kVerifySucceeded) {
- *error_code = ZipOpenErrorCode::kVerifyError;
- return nullptr;
- }
- *error_code = ZipOpenErrorCode::kNoError;
+ *error_code = DexFileLoaderErrorCode::kNoError;
return dex_file;
}
@@ -449,18 +447,18 @@ bool DexFileLoader::OpenAllDexFilesFromZip(
const std::string& location,
bool verify,
bool verify_checksum,
+ DexFileLoaderErrorCode* error_code,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
DCHECK(dex_files != nullptr) << "DexFile::OpenFromZip: out-param is nullptr";
- ZipOpenErrorCode error_code;
std::unique_ptr<const DexFile> dex_file(OpenOneDexFileFromZip(zip_archive,
kClassesDex,
location,
verify,
verify_checksum,
- error_msg,
- &error_code));
- if (dex_file.get() == nullptr) {
+ error_code,
+ error_msg));
+ if (*error_code != DexFileLoaderErrorCode::kNoError) {
return false;
} else {
// Had at least classes.dex.
@@ -479,10 +477,10 @@ bool DexFileLoader::OpenAllDexFilesFromZip(
fake_location,
verify,
verify_checksum,
- error_msg,
- &error_code));
+ error_code,
+ error_msg));
if (next_dex_file.get() == nullptr) {
- if (error_code != ZipOpenErrorCode::kEntryNotFound) {
+ if (*error_code != DexFileLoaderErrorCode::kEntryNotFound) {
LOG(WARNING) << "Zip open failed: " << *error_msg;
}
break;
diff --git a/libdexfile/dex/dex_file_loader.h b/libdexfile/dex/dex_file_loader.h
index 01532203eb..8fc836e0f5 100644
--- a/libdexfile/dex/dex_file_loader.h
+++ b/libdexfile/dex/dex_file_loader.h
@@ -31,6 +31,15 @@ class OatDexFile;
class DexZipArchive;
+enum class DexFileLoaderErrorCode {
+ kNoError,
+ kEntryNotFound,
+ kExtractToMemoryError,
+ kDexFileError,
+ kMakeReadOnlyError,
+ kVerifyError
+};
+
// Class that is used to open dex files and deal with corresponding multidex and location logic.
class DexFileLoader {
public:
@@ -142,19 +151,11 @@ class DexFileLoader {
const std::string& location,
bool verify,
bool verify_checksum,
+ DexFileLoaderErrorCode* error_code,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
protected:
- enum class ZipOpenErrorCode {
- kNoError,
- kEntryNotFound,
- kExtractToMemoryError,
- kDexFileError,
- kMakeReadOnlyError,
- kVerifyError
- };
-
enum class VerifyResult { // private
kVerifyNotAttempted,
kVerifySucceeded,
@@ -180,6 +181,7 @@ class DexFileLoader {
const std::string& location,
bool verify,
bool verify_checksum,
+ DexFileLoaderErrorCode* error_code,
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files) const;
@@ -190,8 +192,8 @@ class DexFileLoader {
const std::string& location,
bool verify,
bool verify_checksum,
- std::string* error_msg,
- ZipOpenErrorCode* error_code) const;
+ DexFileLoaderErrorCode* error_code,
+ std::string* error_msg) const;
};
} // namespace art
diff --git a/libdexfile/dex/dex_file_loader_test.cc b/libdexfile/dex/dex_file_loader_test.cc
index ab5c3f9a26..5bb01dd7ac 100644
--- a/libdexfile/dex/dex_file_loader_test.cc
+++ b/libdexfile/dex/dex_file_loader_test.cc
@@ -210,6 +210,7 @@ static bool OpenDexFilesBase64(const char* base64,
const char* location,
std::vector<uint8_t>* dex_bytes,
std::vector<std::unique_ptr<const DexFile>>* dex_files,
+ DexFileLoaderErrorCode* error_code,
std::string* error_msg) {
DecodeDexFile(base64, dex_bytes);
@@ -222,6 +223,7 @@ static bool OpenDexFilesBase64(const char* base64,
location,
/* verify */ true,
kVerifyChecksum,
+ error_code,
error_msg,
dex_files);
return success;
@@ -231,9 +233,11 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
const char* location,
std::vector<uint8_t>* dex_bytes) {
// read dex files.
+ DexFileLoaderErrorCode error_code;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- bool success = OpenDexFilesBase64(base64, location, dex_bytes, &dex_files, &error_msg);
+ bool success = OpenDexFilesBase64(base64, location, dex_bytes, &dex_files, &error_code,
+ &error_msg);
CHECK(success) << error_msg;
EXPECT_EQ(1U, dex_files.size());
return std::move(dex_files[0]);
@@ -337,6 +341,7 @@ TEST_F(DexFileLoaderTest, Version40Rejected) {
DecodeDexFile(kRawDex40, &dex_bytes);
static constexpr bool kVerifyChecksum = true;
+ DexFileLoaderErrorCode error_code;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
const DexFileLoader dex_file_loader;
@@ -345,6 +350,7 @@ TEST_F(DexFileLoaderTest, Version40Rejected) {
kLocationString,
/* verify */ true,
kVerifyChecksum,
+ &error_code,
&error_msg,
&dex_files));
}
@@ -354,6 +360,7 @@ TEST_F(DexFileLoaderTest, Version41Rejected) {
DecodeDexFile(kRawDex41, &dex_bytes);
static constexpr bool kVerifyChecksum = true;
+ DexFileLoaderErrorCode error_code;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
const DexFileLoader dex_file_loader;
@@ -362,6 +369,7 @@ TEST_F(DexFileLoaderTest, Version41Rejected) {
kLocationString,
/* verify */ true,
kVerifyChecksum,
+ &error_code,
&error_msg,
&dex_files));
}
@@ -371,6 +379,7 @@ TEST_F(DexFileLoaderTest, ZeroLengthDexRejected) {
DecodeDexFile(kRawDexZeroLength, &dex_bytes);
static constexpr bool kVerifyChecksum = true;
+ DexFileLoaderErrorCode error_code;
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
const DexFileLoader dex_file_loader;
@@ -379,6 +388,7 @@ TEST_F(DexFileLoaderTest, ZeroLengthDexRejected) {
kLocationString,
/* verify */ true,
kVerifyChecksum,
+ &error_code,
&error_msg,
&dex_files));
}
@@ -412,11 +422,13 @@ TEST(DexFileUtilsTest, GetBaseLocationAndMultiDexSuffix) {
TEST_F(DexFileLoaderTest, ZipOpenClassesPresent) {
std::vector<uint8_t> dex_bytes;
std::vector<std::unique_ptr<const DexFile>> dex_files;
+ DexFileLoaderErrorCode error_code;
std::string error_msg;
ASSERT_TRUE(OpenDexFilesBase64(kRawZipClassesDexPresent,
kLocationString,
&dex_bytes,
&dex_files,
+ &error_code,
&error_msg));
EXPECT_EQ(dex_files.size(), 1u);
}
@@ -424,23 +436,28 @@ TEST_F(DexFileLoaderTest, ZipOpenClassesPresent) {
TEST_F(DexFileLoaderTest, ZipOpenClassesAbsent) {
std::vector<uint8_t> dex_bytes;
std::vector<std::unique_ptr<const DexFile>> dex_files;
+ DexFileLoaderErrorCode error_code;
std::string error_msg;
ASSERT_FALSE(OpenDexFilesBase64(kRawZipClassesDexAbsent,
kLocationString,
&dex_bytes,
&dex_files,
+ &error_code,
&error_msg));
+ EXPECT_EQ(error_code, DexFileLoaderErrorCode::kEntryNotFound);
EXPECT_EQ(dex_files.size(), 0u);
}
TEST_F(DexFileLoaderTest, ZipOpenThreeDexFiles) {
std::vector<uint8_t> dex_bytes;
std::vector<std::unique_ptr<const DexFile>> dex_files;
+ DexFileLoaderErrorCode error_code;
std::string error_msg;
ASSERT_TRUE(OpenDexFilesBase64(kRawZipThreeDexFiles,
kLocationString,
&dex_bytes,
&dex_files,
+ &error_code,
&error_msg));
EXPECT_EQ(dex_files.size(), 3u);
}
diff --git a/libdexfile/dex/dex_file_tracking_registrar.cc b/libdexfile/dex/dex_file_tracking_registrar.cc
index 551bea108c..29ff6be4fc 100644
--- a/libdexfile/dex/dex_file_tracking_registrar.cc
+++ b/libdexfile/dex/dex_file_tracking_registrar.cc
@@ -30,6 +30,7 @@
#endif
#include "base/memory_tool.h"
+#include "class_accessor-inl.h"
#include "code_item_accessors-inl.h"
#include "dex_file-inl.h"
@@ -155,89 +156,61 @@ void DexFileTrackingRegistrar::SetDexFileRegistration(bool should_poison) {
}
void DexFileTrackingRegistrar::SetAllCodeItemRegistration(bool should_poison) {
- for (size_t classdef_ctr = 0; classdef_ctr < dex_file_->NumClassDefs(); ++classdef_ctr) {
- const DexFile::ClassDef& cd = dex_file_->GetClassDef(classdef_ctr);
- const uint8_t* class_data = dex_file_->GetClassData(cd);
- if (class_data != nullptr) {
- ClassDataItemIterator cdit(*dex_file_, class_data);
- cdit.SkipAllFields();
- while (cdit.HasNextMethod()) {
- const DexFile::CodeItem* code_item = cdit.GetMethodCodeItem();
- if (code_item != nullptr) {
- const void* code_item_begin = reinterpret_cast<const void*>(code_item);
- size_t code_item_size = dex_file_->GetCodeItemSize(*code_item);
- range_values_.push_back(std::make_tuple(code_item_begin, code_item_size, should_poison));
- }
- cdit.Next();
+ for (ClassAccessor accessor : dex_file_->GetClasses()) {
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ const DexFile::CodeItem* code_item = method.GetCodeItem();
+ if (code_item != nullptr) {
+ const void* code_item_begin = reinterpret_cast<const void*>(code_item);
+ size_t code_item_size = dex_file_->GetCodeItemSize(*code_item);
+ range_values_.push_back(std::make_tuple(code_item_begin, code_item_size, should_poison));
}
}
}
}
void DexFileTrackingRegistrar::SetAllCodeItemStartRegistration(bool should_poison) {
- for (size_t classdef_ctr = 0; classdef_ctr < dex_file_->NumClassDefs(); ++classdef_ctr) {
- const DexFile::ClassDef& cd = dex_file_->GetClassDef(classdef_ctr);
- const uint8_t* class_data = dex_file_->GetClassData(cd);
- if (class_data != nullptr) {
- ClassDataItemIterator cdit(*dex_file_, class_data);
- cdit.SkipAllFields();
- while (cdit.HasNextMethod()) {
- const DexFile::CodeItem* code_item = cdit.GetMethodCodeItem();
- if (code_item != nullptr) {
- const void* code_item_begin = reinterpret_cast<const void*>(code_item);
- size_t code_item_start = reinterpret_cast<size_t>(code_item);
- CodeItemInstructionAccessor accessor(*dex_file_, code_item);
- size_t code_item_start_end = reinterpret_cast<size_t>(accessor.Insns());
- size_t code_item_start_size = code_item_start_end - code_item_start;
- range_values_.push_back(std::make_tuple(code_item_begin,
- code_item_start_size,
- should_poison));
- }
- cdit.Next();
+ for (ClassAccessor class_accessor : dex_file_->GetClasses()) {
+ for (const ClassAccessor::Method& method : class_accessor.GetMethods()) {
+ const DexFile::CodeItem* code_item = method.GetCodeItem();
+ if (code_item != nullptr) {
+ const void* code_item_begin = reinterpret_cast<const void*>(code_item);
+ size_t code_item_start = reinterpret_cast<size_t>(code_item);
+ CodeItemInstructionAccessor accessor(*dex_file_, code_item);
+ size_t code_item_start_end = reinterpret_cast<size_t>(accessor.Insns());
+ size_t code_item_start_size = code_item_start_end - code_item_start;
+ range_values_.push_back(std::make_tuple(code_item_begin,
+ code_item_start_size,
+ should_poison));
}
}
}
}
void DexFileTrackingRegistrar::SetAllInsnsRegistration(bool should_poison) {
- for (size_t classdef_ctr = 0; classdef_ctr < dex_file_->NumClassDefs(); ++classdef_ctr) {
- const DexFile::ClassDef& cd = dex_file_->GetClassDef(classdef_ctr);
- const uint8_t* class_data = dex_file_->GetClassData(cd);
- if (class_data != nullptr) {
- ClassDataItemIterator cdit(*dex_file_, class_data);
- cdit.SkipAllFields();
- while (cdit.HasNextMethod()) {
- const DexFile::CodeItem* code_item = cdit.GetMethodCodeItem();
- if (code_item != nullptr) {
- CodeItemInstructionAccessor accessor(*dex_file_, code_item);
- const void* insns_begin = reinterpret_cast<const void*>(accessor.Insns());
- // Member insns_size_in_code_units_ is in 2-byte units
- size_t insns_size = accessor.InsnsSizeInCodeUnits() * 2;
- range_values_.push_back(std::make_tuple(insns_begin, insns_size, should_poison));
- }
- cdit.Next();
+ for (ClassAccessor class_accessor : dex_file_->GetClasses()) {
+ for (const ClassAccessor::Method& method : class_accessor.GetMethods()) {
+ const DexFile::CodeItem* code_item = method.GetCodeItem();
+ if (code_item != nullptr) {
+ CodeItemInstructionAccessor accessor(*dex_file_, code_item);
+ const void* insns_begin = reinterpret_cast<const void*>(accessor.Insns());
+ // Member insns_size_in_code_units_ is in 2-byte units
+ size_t insns_size = accessor.InsnsSizeInCodeUnits() * 2;
+ range_values_.push_back(std::make_tuple(insns_begin, insns_size, should_poison));
}
}
}
}
void DexFileTrackingRegistrar::SetCodeItemRegistration(const char* class_name, bool should_poison) {
- for (size_t classdef_ctr = 0; classdef_ctr < dex_file_->NumClassDefs(); ++classdef_ctr) {
- const DexFile::ClassDef& cd = dex_file_->GetClassDef(classdef_ctr);
- const uint8_t* class_data = dex_file_->GetClassData(cd);
- if (class_data != nullptr) {
- ClassDataItemIterator cdit(*dex_file_, class_data);
- cdit.SkipAllFields();
- while (cdit.HasNextMethod()) {
- const DexFile::MethodId& methodid_item = dex_file_->GetMethodId(cdit.GetMemberIndex());
- const char * methodid_name = dex_file_->GetMethodName(methodid_item);
- const DexFile::CodeItem* code_item = cdit.GetMethodCodeItem();
- if (code_item != nullptr && strcmp(methodid_name, class_name) == 0) {
- const void* code_item_begin = reinterpret_cast<const void*>(code_item);
- size_t code_item_size = dex_file_->GetCodeItemSize(*code_item);
- range_values_.push_back(std::make_tuple(code_item_begin, code_item_size, should_poison));
- }
- cdit.Next();
+ for (ClassAccessor accessor : dex_file_->GetClasses()) {
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ const DexFile::MethodId& methodid_item = dex_file_->GetMethodId(method.GetIndex());
+ const char * methodid_name = dex_file_->GetMethodName(methodid_item);
+ const DexFile::CodeItem* code_item = method.GetCodeItem();
+ if (code_item != nullptr && strcmp(methodid_name, class_name) == 0) {
+ const void* code_item_begin = reinterpret_cast<const void*>(code_item);
+ size_t code_item_size = dex_file_->GetCodeItemSize(*code_item);
+ range_values_.push_back(std::make_tuple(code_item_begin, code_item_size, should_poison));
}
}
}
diff --git a/libdexfile/dex/dex_file_verifier_test.cc b/libdexfile/dex/dex_file_verifier_test.cc
index 65448cabd1..78b53a0bd4 100644
--- a/libdexfile/dex/dex_file_verifier_test.cc
+++ b/libdexfile/dex/dex_file_verifier_test.cc
@@ -102,11 +102,13 @@ static std::unique_ptr<const DexFile> OpenDexFileBase64(const char* base64,
// read dex
std::vector<std::unique_ptr<const DexFile>> tmp;
const DexFileLoader dex_file_loader;
+ DexFileLoaderErrorCode error_code;
bool success = dex_file_loader.OpenAll(dex_bytes.get(),
length,
location,
/* verify */ true,
/* verify_checksum */ true,
+ &error_code,
error_msg,
&tmp);
CHECK(success) << *error_msg;
diff --git a/libdexfile/dex/utf.cc b/libdexfile/dex/utf.cc
index 772a610140..d09da735f2 100644
--- a/libdexfile/dex/utf.cc
+++ b/libdexfile/dex/utf.cc
@@ -26,7 +26,6 @@
namespace art {
using android::base::StringAppendF;
-using android::base::StringPrintf;
// This is used only from debugger and test code.
size_t CountModifiedUtf8Chars(const char* utf8) {
diff --git a/oatdump/Android.mk b/oatdump/Android.mk
index 667c37c33d..e82cd979fe 100644
--- a/oatdump/Android.mk
+++ b/oatdump/Android.mk
@@ -72,6 +72,7 @@ dump-oat-boot-$(TARGET_ARCH): $(DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) $(OATDU
endif
ifdef TARGET_2ND_ARCH
+.PHONY: dump-oat-boot-$(TARGET_2ND_ARCH)
dump-oat-boot-$(TARGET_2ND_ARCH): $(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_FILENAME) $(OATDUMP)
$(OATDUMP) $(addprefix --image=,$(2ND_DEFAULT_DEX_PREOPT_BUILT_IMAGE_LOCATION)) \
--output=$(ART_DUMP_OAT_PATH)/boot.$(TARGET_2ND_ARCH).oatdump.txt --instruction-set=$(TARGET_2ND_ARCH)
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 21ce8c84c4..271d37dce7 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -867,28 +867,28 @@ class OatDumper {
VariableIndentationOutputStream vios(&os);
ScopedIndentation indent1(&vios);
- for (size_t class_def_index = 0;
- class_def_index < dex_file->NumClassDefs();
- class_def_index++) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(class_def_index);
- const char* descriptor = dex_file->GetClassDescriptor(class_def);
-
+ for (ClassAccessor accessor : dex_file->GetClasses()) {
// TODO: Support regex
+ const char* descriptor = accessor.GetDescriptor();
if (DescriptorToDot(descriptor).find(options_.class_filter_) == std::string::npos) {
continue;
}
+ const uint16_t class_def_index = accessor.GetClassDefIndex();
uint32_t oat_class_offset = oat_dex_file.GetOatClassOffset(class_def_index);
const OatFile::OatClass oat_class = oat_dex_file.GetOatClass(class_def_index);
os << StringPrintf("%zd: %s (offset=0x%08x) (type_idx=%d)",
- class_def_index, descriptor, oat_class_offset, class_def.class_idx_.index_)
+ static_cast<ssize_t>(class_def_index),
+ descriptor,
+ oat_class_offset,
+ accessor.GetClassIdx().index_)
<< " (" << oat_class.GetStatus() << ")"
<< " (" << oat_class.GetType() << ")\n";
// TODO: include bitmap here if type is kOatClassSomeCompiled?
if (options_.list_classes_) {
continue;
}
- if (!DumpOatClass(&vios, oat_class, *dex_file, class_def, &stop_analysis)) {
+ if (!DumpOatClass(&vios, oat_class, *dex_file, accessor, &stop_analysis)) {
success = false;
}
if (stop_analysis) {
@@ -1023,22 +1023,23 @@ class OatDumper {
}
bool DumpOatClass(VariableIndentationOutputStream* vios,
- const OatFile::OatClass& oat_class, const DexFile& dex_file,
- const DexFile::ClassDef& class_def, bool* stop_analysis) {
+ const OatFile::OatClass& oat_class,
+ const DexFile& dex_file,
+ const ClassAccessor& class_accessor,
+ bool* stop_analysis) {
bool success = true;
bool addr_found = false;
- const uint8_t* class_data = dex_file.GetClassData(class_def);
- if (class_data == nullptr) { // empty class such as a marker interface?
- vios->Stream() << std::flush;
- return success;
- }
- ClassDataItemIterator it(dex_file, class_data);
- it.SkipAllFields();
uint32_t class_method_index = 0;
- while (it.HasNextMethod()) {
- if (!DumpOatMethod(vios, class_def, class_method_index, oat_class, dex_file,
- it.GetMemberIndex(), it.GetMethodCodeItem(),
- it.GetRawMemberAccessFlags(), &addr_found)) {
+ for (const ClassAccessor::Method& method : class_accessor.GetMethods()) {
+ if (!DumpOatMethod(vios,
+ dex_file.GetClassDef(class_accessor.GetClassDefIndex()),
+ class_method_index,
+ oat_class,
+ dex_file,
+ method.GetIndex(),
+ method.GetCodeItem(),
+ method.GetRawAccessFlags(),
+ &addr_found)) {
success = false;
}
if (addr_found) {
@@ -1046,9 +1047,7 @@ class OatDumper {
return success;
}
class_method_index++;
- it.Next();
}
- DCHECK(!it.HasNext());
vios->Stream() << std::flush;
return success;
}
@@ -1311,7 +1310,7 @@ class OatDumper {
const CodeItemDataAccessor& code_item_accessor) {
if (IsMethodGeneratedByOptimizingCompiler(oat_method, code_item_accessor)) {
// The optimizing compiler outputs its CodeInfo data in the vmap table.
- const void* raw_code_info = oat_method.GetVmapTable();
+ const uint8_t* raw_code_info = oat_method.GetVmapTable();
if (raw_code_info != nullptr) {
CodeInfo code_info(raw_code_info);
DCHECK(code_item_accessor.HasCodeItem());
@@ -1928,6 +1927,7 @@ class ImageDumper {
const auto& intern_section = image_header_.GetInternedStringsSection();
const auto& class_table_section = image_header_.GetClassTableSection();
const auto& bitmap_section = image_header_.GetImageBitmapSection();
+ const auto& relocations_section = image_header_.GetImageRelocationsSection();
stats_.header_bytes = header_bytes;
@@ -1967,7 +1967,11 @@ class ImageDumper {
CHECK_ALIGNED(bitmap_section.Offset(), kPageSize);
stats_.alignment_bytes += RoundUp(bitmap_offset, kPageSize) - bitmap_offset;
+ // There should be no space between the bitmap and relocations.
+ CHECK_EQ(bitmap_section.Offset() + bitmap_section.Size(), relocations_section.Offset());
+
stats_.bitmap_bytes += bitmap_section.Size();
+ stats_.relocations_bytes += relocations_section.Size();
stats_.art_field_bytes += field_section.Size();
stats_.art_method_bytes += method_section.Size();
stats_.dex_cache_arrays_bytes += dex_cache_arrays_section.Size();
@@ -2400,6 +2404,7 @@ class ImageDumper {
size_t interned_strings_bytes;
size_t class_table_bytes;
size_t bitmap_bytes;
+ size_t relocations_bytes;
size_t alignment_bytes;
size_t managed_code_bytes;
@@ -2429,6 +2434,7 @@ class ImageDumper {
interned_strings_bytes(0),
class_table_bytes(0),
bitmap_bytes(0),
+ relocations_bytes(0),
alignment_bytes(0),
managed_code_bytes(0),
managed_code_bytes_ignoring_deduplication(0),
@@ -2592,6 +2598,7 @@ class ImageDumper {
"interned_string_bytes = %8zd (%2.0f%% of art file bytes)\n"
"class_table_bytes = %8zd (%2.0f%% of art file bytes)\n"
"bitmap_bytes = %8zd (%2.0f%% of art file bytes)\n"
+ "relocations_bytes = %8zd (%2.0f%% of art file bytes)\n"
"alignment_bytes = %8zd (%2.0f%% of art file bytes)\n\n",
header_bytes, PercentOfFileBytes(header_bytes),
object_bytes, PercentOfFileBytes(object_bytes),
@@ -2603,12 +2610,13 @@ class ImageDumper {
PercentOfFileBytes(interned_strings_bytes),
class_table_bytes, PercentOfFileBytes(class_table_bytes),
bitmap_bytes, PercentOfFileBytes(bitmap_bytes),
+ relocations_bytes, PercentOfFileBytes(relocations_bytes),
alignment_bytes, PercentOfFileBytes(alignment_bytes))
<< std::flush;
CHECK_EQ(file_bytes,
header_bytes + object_bytes + art_field_bytes + art_method_bytes +
dex_cache_arrays_bytes + interned_strings_bytes + class_table_bytes +
- bitmap_bytes + alignment_bytes);
+ bitmap_bytes + relocations_bytes + alignment_bytes);
}
os << "object_bytes breakdown:\n";
diff --git a/oatdump/oatdump_app_test.cc b/oatdump/oatdump_app_test.cc
index 34b07d2ddf..a344286259 100644
--- a/oatdump/oatdump_app_test.cc
+++ b/oatdump/oatdump_app_test.cc
@@ -19,31 +19,23 @@
namespace art {
TEST_F(OatDumpTest, TestAppWithBootImage) {
- std::string error_msg;
- ASSERT_TRUE(GenerateAppOdexFile(kDynamic, {"--runtime-arg", "-Xmx64M"}, &error_msg)) << error_msg;
- ASSERT_TRUE(Exec(kDynamic, kModeOatWithBootImage, {}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(GenerateAppOdexFile(kDynamic, {"--runtime-arg", "-Xmx64M"}));
+ ASSERT_TRUE(Exec(kDynamic, kModeOatWithBootImage, {}, kListAndCode));
}
TEST_F(OatDumpTest, TestAppWithBootImageStatic) {
TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
- std::string error_msg;
- ASSERT_TRUE(GenerateAppOdexFile(kStatic, {"--runtime-arg", "-Xmx64M"}, &error_msg)) << error_msg;
- ASSERT_TRUE(Exec(kStatic, kModeOatWithBootImage, {}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(GenerateAppOdexFile(kStatic, {"--runtime-arg", "-Xmx64M"}));
+ ASSERT_TRUE(Exec(kStatic, kModeOatWithBootImage, {}, kListAndCode));
}
TEST_F(OatDumpTest, TestPicAppWithBootImage) {
- std::string error_msg;
- ASSERT_TRUE(
- GenerateAppOdexFile(kDynamic, {"--runtime-arg", "-Xmx64M", "--compile-pic"}, &error_msg))
- << error_msg;
- ASSERT_TRUE(Exec(kDynamic, kModeOatWithBootImage, {}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(GenerateAppOdexFile(kDynamic, {"--runtime-arg", "-Xmx64M", "--compile-pic"}));
+ ASSERT_TRUE(Exec(kDynamic, kModeOatWithBootImage, {}, kListAndCode));
}
TEST_F(OatDumpTest, TestPicAppWithBootImageStatic) {
TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
- std::string error_msg;
- ASSERT_TRUE(
- GenerateAppOdexFile(kStatic, {"--runtime-arg", "-Xmx64M", "--compile-pic"}, &error_msg))
- << error_msg;
- ASSERT_TRUE(Exec(kStatic, kModeOatWithBootImage, {}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(GenerateAppOdexFile(kStatic, {"--runtime-arg", "-Xmx64M", "--compile-pic"}));
+ ASSERT_TRUE(Exec(kStatic, kModeOatWithBootImage, {}, kListAndCode));
}
} // namespace art
diff --git a/oatdump/oatdump_image_test.cc b/oatdump/oatdump_image_test.cc
index d054ecefbb..de48b04214 100644
--- a/oatdump/oatdump_image_test.cc
+++ b/oatdump/oatdump_image_test.cc
@@ -19,25 +19,34 @@
namespace art {
// Disable tests on arm and mips as they are taking too long to run. b/27824283.
-#if !defined(__arm__) && !defined(__mips__)
+#define TEST_DISABLED_FOR_ARM_AND_MIPS() \
+ TEST_DISABLED_FOR_ARM(); \
+ TEST_DISABLED_FOR_ARM64(); \
+ TEST_DISABLED_FOR_MIPS(); \
+ TEST_DISABLED_FOR_MIPS64(); \
+
TEST_F(OatDumpTest, TestImage) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
std::string error_msg;
- ASSERT_TRUE(Exec(kDynamic, kModeArt, {}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeArt, {}, kListAndCode));
}
TEST_F(OatDumpTest, TestImageStatic) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
std::string error_msg;
- ASSERT_TRUE(Exec(kStatic, kModeArt, {}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeArt, {}, kListAndCode));
}
TEST_F(OatDumpTest, TestOatImage) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
std::string error_msg;
- ASSERT_TRUE(Exec(kDynamic, kModeOat, {}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeOat, {}, kListAndCode));
}
TEST_F(OatDumpTest, TestOatImageStatic) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
std::string error_msg;
- ASSERT_TRUE(Exec(kStatic, kModeOat, {}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeOat, {}, kListAndCode));
}
-#endif
+
} // namespace art
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index b4eddb91f9..bcba18208b 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -19,75 +19,92 @@
namespace art {
// Disable tests on arm and mips as they are taking too long to run. b/27824283.
-#if !defined(__arm__) && !defined(__mips__)
+#define TEST_DISABLED_FOR_ARM_AND_MIPS() \
+ TEST_DISABLED_FOR_ARM(); \
+ TEST_DISABLED_FOR_ARM64(); \
+ TEST_DISABLED_FOR_MIPS(); \
+ TEST_DISABLED_FOR_MIPS64(); \
+
TEST_F(OatDumpTest, TestNoDumpVmap) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
std::string error_msg;
- ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--no-dump:vmap"}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--no-dump:vmap"}, kListAndCode));
}
TEST_F(OatDumpTest, TestNoDumpVmapStatic) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
std::string error_msg;
- ASSERT_TRUE(Exec(kStatic, kModeArt, {"--no-dump:vmap"}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeArt, {"--no-dump:vmap"}, kListAndCode));
}
TEST_F(OatDumpTest, TestNoDisassemble) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
std::string error_msg;
- ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--no-disassemble"}, kListAndCode, &error_msg))
- << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--no-disassemble"}, kListAndCode));
}
TEST_F(OatDumpTest, TestNoDisassembleStatic) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
std::string error_msg;
- ASSERT_TRUE(Exec(kStatic, kModeArt, {"--no-disassemble"}, kListAndCode, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeArt, {"--no-disassemble"}, kListAndCode));
}
TEST_F(OatDumpTest, TestListClasses) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
std::string error_msg;
- ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--list-classes"}, kListOnly, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--list-classes"}, kListOnly));
}
TEST_F(OatDumpTest, TestListClassesStatic) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
std::string error_msg;
- ASSERT_TRUE(Exec(kStatic, kModeArt, {"--list-classes"}, kListOnly, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeArt, {"--list-classes"}, kListOnly));
}
TEST_F(OatDumpTest, TestListMethods) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
std::string error_msg;
- ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--list-methods"}, kListOnly, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--list-methods"}, kListOnly));
}
TEST_F(OatDumpTest, TestListMethodsStatic) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
std::string error_msg;
- ASSERT_TRUE(Exec(kStatic, kModeArt, {"--list-methods"}, kListOnly, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeArt, {"--list-methods"}, kListOnly));
}
TEST_F(OatDumpTest, TestSymbolize) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
std::string error_msg;
- ASSERT_TRUE(Exec(kDynamic, kModeSymbolize, {}, kListOnly, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeSymbolize, {}, kListOnly));
}
TEST_F(OatDumpTest, TestSymbolizeStatic) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
std::string error_msg;
- ASSERT_TRUE(Exec(kStatic, kModeSymbolize, {}, kListOnly, &error_msg)) << error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeSymbolize, {}, kListOnly));
}
TEST_F(OatDumpTest, TestExportDex) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
// Test is failing on target, b/77469384.
TEST_DISABLED_FOR_TARGET();
std::string error_msg;
- ASSERT_TRUE(Exec(kDynamic, kModeOat, {"--export-dex-to=" + tmp_dir_}, kListOnly, &error_msg))
- << error_msg;
+ ASSERT_TRUE(Exec(kDynamic, kModeOat, {"--export-dex-to=" + tmp_dir_}, kListOnly));
const std::string dex_location = tmp_dir_+ "/core-oj-hostdex.jar_export.dex";
const std::string dexdump2 = GetExecutableFilePath("dexdump2",
/*is_debug*/false,
/*is_static*/false);
- ASSERT_TRUE(ForkAndExecAndWait({dexdump2, "-d", dex_location}, &error_msg)) << error_msg;
+ std::string output;
+ auto post_fork_fn = []() { return true; };
+ ForkAndExecResult res = ForkAndExec({dexdump2, "-d", dex_location}, post_fork_fn, &output);
+ ASSERT_TRUE(res.StandardSuccess());
}
TEST_F(OatDumpTest, TestExportDexStatic) {
+ TEST_DISABLED_FOR_ARM_AND_MIPS();
TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
std::string error_msg;
- ASSERT_TRUE(Exec(kStatic, kModeOat, {"--export-dex-to=" + tmp_dir_}, kListOnly, &error_msg))
- << error_msg;
+ ASSERT_TRUE(Exec(kStatic, kModeOat, {"--export-dex-to=" + tmp_dir_}, kListOnly));
}
-#endif
+
} // namespace art
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index 231163b674..2c28f06b2e 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -111,9 +111,8 @@ class OatDumpTest : public CommonRuntimeTest {
return tmp_dir_ + "/" + GetAppBaseName() + ".odex";
}
- bool GenerateAppOdexFile(Flavor flavor,
- const std::vector<std::string>& args,
- /*out*/ std::string* error_msg) {
+ ::testing::AssertionResult GenerateAppOdexFile(Flavor flavor,
+ const std::vector<std::string>& args) {
std::string dex2oat_path = GetExecutableFilePath(flavor, "dex2oat");
std::vector<std::string> exec_argv = {
dex2oat_path,
@@ -131,18 +130,32 @@ class OatDumpTest : public CommonRuntimeTest {
};
exec_argv.insert(exec_argv.end(), args.begin(), args.end());
- return ForkAndExecAndWait(exec_argv, error_msg);
+ auto post_fork_fn = []() {
+ setpgid(0, 0); // Change process groups, so we don't get reaped by ProcessManager.
+ // Ignore setpgid errors.
+ return setenv("ANDROID_LOG_TAGS", "*:e", 1) == 0; // We're only interested in errors and
+ // fatal logs.
+ };
+
+ std::string error_msg;
+ ForkAndExecResult res = ForkAndExec(exec_argv, post_fork_fn, &error_msg);
+ if (res.stage != ForkAndExecResult::kFinished) {
+ return ::testing::AssertionFailure() << strerror(errno);
+ }
+ return res.StandardSuccess() ? ::testing::AssertionSuccess()
+ : (::testing::AssertionFailure() << error_msg);
}
// Run the test with custom arguments.
- bool Exec(Flavor flavor,
- Mode mode,
- const std::vector<std::string>& args,
- Display display,
- /*out*/ std::string* error_msg) {
+ ::testing::AssertionResult Exec(Flavor flavor,
+ Mode mode,
+ const std::vector<std::string>& args,
+ Display display) {
std::string file_path = GetExecutableFilePath(flavor, "oatdump");
- EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
+ if (!OS::FileExists(file_path.c_str())) {
+ return ::testing::AssertionFailure() << file_path << " should be a valid file path";
+ }
// ScratchFile scratch;
std::vector<std::string> exec_argv = { file_path };
@@ -179,129 +192,130 @@ class OatDumpTest : public CommonRuntimeTest {
}
exec_argv.insert(exec_argv.end(), args.begin(), args.end());
- pid_t pid;
- int pipe_fd;
- bool result = ForkAndExec(exec_argv, &pid, &pipe_fd, error_msg);
- if (result) {
- static const size_t kLineMax = 256;
- char line[kLineMax] = {};
- size_t line_len = 0;
- size_t total = 0;
- std::vector<bool> found(expected_prefixes.size(), false);
- while (true) {
- while (true) {
+ std::vector<bool> found(expected_prefixes.size(), false);
+ auto line_handle_fn = [&found, &expected_prefixes](const char* line, size_t line_len) {
+ if (line_len == 0) {
+ return;
+ }
+ // Check contents.
+ for (size_t i = 0; i < expected_prefixes.size(); ++i) {
+ const std::string& expected = expected_prefixes[i];
+ if (!found[i] &&
+ line_len >= expected.length() &&
+ memcmp(line, expected.c_str(), expected.length()) == 0) {
+ found[i] = true;
+ }
+ }
+ };
+
+ static constexpr size_t kLineMax = 256;
+ char line[kLineMax] = {};
+ size_t line_len = 0;
+ size_t total = 0;
+ bool ignore_next_line = false;
+ std::vector<char> error_buf; // Buffer for debug output on error. Limited to 1M.
+ auto line_buf_fn = [&](char* buf, size_t len) {
+ total += len;
+
+ if (len == 0 && line_len > 0 && !ignore_next_line) {
+ // Everything done, handle leftovers.
+ line_handle_fn(line, line_len);
+ }
+
+ if (len > 0) {
+ size_t pos = error_buf.size();
+ if (pos < MB) {
+ error_buf.insert(error_buf.end(), buf, buf + len);
+ }
+ }
+
+ while (len > 0) {
+ // Copy buf into the free tail of the line buffer, and move input buffer along.
+ size_t copy = std::min(kLineMax - line_len, len);
+ memcpy(&line[line_len], buf, copy);
+ buf += copy;
+ len -= copy;
+
+ // Skip spaces up to len, return count of removed spaces. Declare a lambda for reuse.
+ auto trim_space = [&line](size_t len) {
size_t spaces = 0;
- // Trim spaces at the start of the line.
- for (; spaces < line_len && isspace(line[spaces]); ++spaces) {}
+ for (; spaces < len && isspace(line[spaces]); ++spaces) {}
if (spaces > 0) {
- line_len -= spaces;
- memmove(&line[0], &line[spaces], line_len);
+ memmove(&line[0], &line[spaces], len - spaces);
}
- ssize_t bytes_read =
- TEMP_FAILURE_RETRY(read(pipe_fd, &line[line_len], kLineMax - line_len));
- if (bytes_read <= 0) {
- break;
- }
- line_len += bytes_read;
- total += bytes_read;
- }
+ return spaces;
+ };
+ // There can only be spaces if we freshly started a line.
if (line_len == 0) {
- break;
+ copy -= trim_space(copy);
}
- // Check contents.
- for (size_t i = 0; i < expected_prefixes.size(); ++i) {
- const std::string& expected = expected_prefixes[i];
- if (!found[i] &&
- line_len >= expected.length() &&
- memcmp(line, expected.c_str(), expected.length()) == 0) {
- found[i] = true;
+
+ // Scan for newline characters.
+ size_t index = line_len;
+ line_len += copy;
+ while (index < line_len) {
+ if (line[index] == '\n') {
+ // Handle line.
+ if (!ignore_next_line) {
+ line_handle_fn(line, index);
+ }
+ // Move the rest to the front, but trim leading spaces.
+ line_len -= index + 1;
+ memmove(&line[0], &line[index + 1], line_len);
+ line_len -= trim_space(line_len);
+ index = 0;
+ ignore_next_line = false;
+ } else {
+ index++;
}
}
- // Skip to next line.
- size_t next_line = 0;
- for (; next_line + 1 < line_len && line[next_line] != '\n'; ++next_line) {}
- line_len -= next_line + 1;
- memmove(&line[0], &line[next_line + 1], line_len);
- }
- if (mode == kModeSymbolize) {
- EXPECT_EQ(total, 0u);
- } else {
- EXPECT_GT(total, 0u);
- }
- LOG(INFO) << "Processed bytes " << total;
- close(pipe_fd);
- int status = 0;
- if (waitpid(pid, &status, 0) != -1) {
- result = (status == 0);
- }
- for (size_t i = 0; i < expected_prefixes.size(); ++i) {
- if (!found[i]) {
- LOG(ERROR) << "Did not find prefix " << expected_prefixes[i];
- result = false;
+ // Handle a full line without newline characters. Ignore the "next" line, as it is the
+ // tail end of this.
+ if (line_len == kLineMax) {
+ if (!ignore_next_line) {
+ line_handle_fn(line, kLineMax);
+ }
+ line_len = 0;
+ ignore_next_line = true;
}
}
- }
+ };
- return result;
- }
+ auto post_fork_fn = []() {
+ setpgid(0, 0); // Change process groups, so we don't get reaped by ProcessManager.
+ return true; // Ignore setpgid failures.
+ };
- bool ForkAndExec(const std::vector<std::string>& exec_argv,
- /*out*/ pid_t* pid,
- /*out*/ int* pipe_fd,
- /*out*/ std::string* error_msg) {
- int link[2];
- if (pipe(link) == -1) {
- *error_msg = strerror(errno);
- return false;
+ ForkAndExecResult res = ForkAndExec(exec_argv, post_fork_fn, line_buf_fn);
+ if (res.stage != ForkAndExecResult::kFinished) {
+ return ::testing::AssertionFailure() << strerror(errno);
}
-
- *pid = fork();
- if (*pid == -1) {
- *error_msg = strerror(errno);
- close(link[0]);
- close(link[1]);
- return false;
+ if (!res.StandardSuccess()) {
+ return ::testing::AssertionFailure() << "Did not terminate successfully: " << res.status_code;
}
- if (*pid == 0) {
- dup2(link[1], STDOUT_FILENO);
- close(link[0]);
- close(link[1]);
- // change process groups, so we don't get reaped by ProcessManager
- setpgid(0, 0);
- // Use execv here rather than art::Exec to avoid blocking on waitpid here.
- std::vector<char*> argv;
- for (size_t i = 0; i < exec_argv.size(); ++i) {
- argv.push_back(const_cast<char*>(exec_argv[i].c_str()));
- }
- argv.push_back(nullptr);
- UNUSED(execv(argv[0], &argv[0]));
- const std::string command_line(android::base::Join(exec_argv, ' '));
- PLOG(ERROR) << "Failed to execv(" << command_line << ")";
- // _exit to avoid atexit handlers in child.
- _exit(1);
- UNREACHABLE();
+ if (mode == kModeSymbolize) {
+ EXPECT_EQ(total, 0u);
} else {
- close(link[1]);
- *pipe_fd = link[0];
- return true;
+ EXPECT_GT(total, 0u);
}
- }
- bool ForkAndExecAndWait(const std::vector<std::string>& exec_argv,
- /*out*/ std::string* error_msg) {
- pid_t pid;
- int pipe_fd;
- bool result = ForkAndExec(exec_argv, &pid, &pipe_fd, error_msg);
- if (result) {
- close(pipe_fd);
- int status = 0;
- if (waitpid(pid, &status, 0) != -1) {
- result = (status == 0);
+ bool result = true;
+ std::ostringstream oss;
+ for (size_t i = 0; i < expected_prefixes.size(); ++i) {
+ if (!found[i]) {
+ oss << "Did not find prefix " << expected_prefixes[i] << std::endl;
+ result = false;
}
}
- return result;
+ if (!result) {
+ oss << "Processed bytes " << total << ":" << std::endl;
+ error_buf.push_back(0); // Make data a C string.
+ }
+
+ return result ? ::testing::AssertionSuccess()
+ : (::testing::AssertionFailure() << oss.str() << error_buf.data());
}
std::string tmp_dir_;
diff --git a/openjdkjvm/OpenjdkJvm.cc b/openjdkjvm/OpenjdkJvm.cc
index 765225ae95..df002b6efa 100644
--- a/openjdkjvm/OpenjdkJvm.cc
+++ b/openjdkjvm/OpenjdkJvm.cc
@@ -65,11 +65,6 @@
#undef LOG_TAG
#define LOG_TAG "artopenjdk"
-using ::android::base::WARNING;
-using ::android::base::INFO;
-using ::android::base::ERROR;
-using ::android::base::FATAL;
-
/* posix open() with extensions; used by e.g. ZipFile */
JNIEXPORT jint JVM_Open(const char* fname, jint flags, jint mode) {
/*
diff --git a/openjdkjvmti/ti_extension.cc b/openjdkjvmti/ti_extension.cc
index 5b1a16c3ff..c61d6e585c 100644
--- a/openjdkjvmti/ti_extension.cc
+++ b/openjdkjvmti/ti_extension.cc
@@ -39,6 +39,7 @@
#include "ti_class.h"
#include "ti_ddms.h"
#include "ti_heap.h"
+#include "ti_monitor.h"
#include "thread-inl.h"
namespace openjdkjvmti {
@@ -252,6 +253,25 @@ jvmtiError ExtensionUtil::GetExtensionFunctions(jvmtiEnv* env,
if (error != ERR(NONE)) {
return error;
}
+
+ // Raw monitors no suspend
+ error = add_extension(
+ reinterpret_cast<jvmtiExtensionFunction>(MonitorUtil::RawMonitorEnterNoSuspend),
+ "com.android.art.concurrent.raw_monitor_enter_no_suspend",
+ "Normally entering a monitor will not return until both the monitor is locked and the"
+ " current thread is not suspended. This method will return once the monitor is locked"
+ " even if the thread is suspended. Note that using rawMonitorWait will wait until the"
+ " thread is not suspended again on wakeup and so should be avoided.",
+ {
+ { "raw_monitor", JVMTI_KIND_IN_PTR, JVMTI_TYPE_CVOID, false },
+ },
+ {
+ ERR(NULL_POINTER),
+ ERR(INVALID_MONITOR),
+ });
+ if (error != ERR(NONE)) {
+ return error;
+ }
// Copy into output buffer.
*extension_count_ptr = ext_vector.size();
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 9f9dace4f7..87d832caec 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -384,7 +384,7 @@ jvmtiError MethodUtil::GetMethodDeclaringClass(jvmtiEnv* env ATTRIBUTE_UNUSED,
// Note: No GetInterfaceMethodIfProxy, we want to actual class.
art::ScopedObjectAccess soa(art::Thread::Current());
- art::mirror::Class* klass = art_method->GetDeclaringClass();
+ art::ObjPtr<art::mirror::Class> klass = art_method->GetDeclaringClass();
*declaring_class_ptr = soa.AddLocalReference<jclass>(klass);
return ERR(NONE);
diff --git a/openjdkjvmti/ti_monitor.cc b/openjdkjvmti/ti_monitor.cc
index 1cfc64a61d..6d3a37e333 100644
--- a/openjdkjvmti/ti_monitor.cc
+++ b/openjdkjvmti/ti_monitor.cc
@@ -75,14 +75,16 @@ class JvmtiMonitor {
return true;
}
- void MonitorEnter(art::Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+ void MonitorEnter(art::Thread* self, bool suspend) NO_THREAD_SAFETY_ANALYSIS {
// Perform a suspend-check. The spec doesn't require this but real-world agents depend on this
// behavior. We do this by performing a suspend-check then retrying if the thread is suspended
// before or after locking the internal mutex.
do {
- ThreadUtil::SuspendCheck(self);
- if (ThreadUtil::WouldSuspendForUserCode(self)) {
- continue;
+ if (suspend) {
+ ThreadUtil::SuspendCheck(self);
+ if (ThreadUtil::WouldSuspendForUserCode(self)) {
+ continue;
+ }
}
// Check for recursive enter.
@@ -100,7 +102,7 @@ class JvmtiMonitor {
// Lock with sleep. We will need to check for suspension after this to make sure that agents
// won't deadlock.
mutex_.lock();
- if (!ThreadUtil::WouldSuspendForUserCode(self)) {
+ if (!suspend || !ThreadUtil::WouldSuspendForUserCode(self)) {
break;
} else {
// We got suspended in the middle of waiting for the mutex. We should release the mutex
@@ -187,7 +189,8 @@ class JvmtiMonitor {
}
// Reaquire the mutex/monitor, also go to sleep if we were suspended.
- MonitorEnter(self);
+ // TODO Give an extension to wait without suspension as well.
+ MonitorEnter(self, /*suspend*/ true);
CHECK(owner_.load(std::memory_order_relaxed) == self);
DCHECK_EQ(1u, count_);
// Reset the count.
@@ -249,6 +252,19 @@ jvmtiError MonitorUtil::DestroyRawMonitor(jvmtiEnv* env ATTRIBUTE_UNUSED, jrawMo
return ERR(NONE);
}
+jvmtiError MonitorUtil::RawMonitorEnterNoSuspend(jvmtiEnv* env ATTRIBUTE_UNUSED, jrawMonitorID id) {
+ if (id == nullptr) {
+ return ERR(INVALID_MONITOR);
+ }
+
+ JvmtiMonitor* monitor = DecodeMonitor(id);
+ art::Thread* self = art::Thread::Current();
+
+ monitor->MonitorEnter(self, /*suspend*/false);
+
+ return ERR(NONE);
+}
+
jvmtiError MonitorUtil::RawMonitorEnter(jvmtiEnv* env ATTRIBUTE_UNUSED, jrawMonitorID id) {
if (id == nullptr) {
return ERR(INVALID_MONITOR);
@@ -257,7 +273,7 @@ jvmtiError MonitorUtil::RawMonitorEnter(jvmtiEnv* env ATTRIBUTE_UNUSED, jrawMoni
JvmtiMonitor* monitor = DecodeMonitor(id);
art::Thread* self = art::Thread::Current();
- monitor->MonitorEnter(self);
+ monitor->MonitorEnter(self, /*suspend*/true);
return ERR(NONE);
}
diff --git a/openjdkjvmti/ti_monitor.h b/openjdkjvmti/ti_monitor.h
index e0a865b9fa..5c361b43d0 100644
--- a/openjdkjvmti/ti_monitor.h
+++ b/openjdkjvmti/ti_monitor.h
@@ -43,6 +43,8 @@ class MonitorUtil {
static jvmtiError DestroyRawMonitor(jvmtiEnv* env, jrawMonitorID monitor);
+ static jvmtiError RawMonitorEnterNoSuspend(jvmtiEnv* env, jrawMonitorID monitor);
+
static jvmtiError RawMonitorEnter(jvmtiEnv* env, jrawMonitorID monitor);
static jvmtiError RawMonitorExit(jvmtiEnv* env, jrawMonitorID monitor);
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 50d8dfeb70..1476880f45 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -45,6 +45,7 @@
#include "class_root.h"
#include "debugger.h"
#include "dex/art_dex_file_loader.h"
+#include "dex/class_accessor-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_file_types.h"
@@ -569,9 +570,10 @@ void DoAllocateObsoleteMethodsCallback(art::Thread* t, void* vdata) NO_THREAD_SA
// This creates any ArtMethod* structures needed for obsolete methods and ensures that the stack is
// updated so they will be run.
// TODO Rewrite so we can do this only once regardless of how many redefinitions there are.
-void Redefiner::ClassRedefinition::FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass) {
+void Redefiner::ClassRedefinition::FindAndAllocateObsoleteMethods(
+ art::ObjPtr<art::mirror::Class> art_klass) {
art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking");
- art::mirror::ClassExt* ext = art_klass->GetExtData();
+ art::ObjPtr<art::mirror::ClassExt> ext = art_klass->GetExtData();
CHECK(ext->GetObsoleteMethods() != nullptr);
art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
// This holds pointers to the obsolete methods map fields which are updated as needed.
@@ -619,11 +621,9 @@ bool Redefiner::ClassRedefinition::CheckSameMethods() {
art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
- art::ClassDataItemIterator new_iter(*dex_file_,
- dex_file_->GetClassData(dex_file_->GetClassDef(0)));
-
// Make sure we have the same number of methods.
- uint32_t num_new_method = new_iter.NumVirtualMethods() + new_iter.NumDirectMethods();
+ art::ClassAccessor accessor(*dex_file_, dex_file_->GetClassDef(0));
+ uint32_t num_new_method = accessor.NumMethods();
uint32_t num_old_method = h_klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size();
if (num_new_method != num_old_method) {
bool bigger = num_new_method > num_old_method;
@@ -635,13 +635,12 @@ bool Redefiner::ClassRedefinition::CheckSameMethods() {
}
// Skip all of the fields. We should have already checked this.
- new_iter.SkipAllFields();
// Check each of the methods. NB we don't need to specifically check for removals since the 2 dex
// files have the same number of methods, which means there must be an equal amount of additions
- // and removals.
- for (; new_iter.HasNextMethod(); new_iter.Next()) {
+ // and removals. We should have already checked the fields.
+ for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
// Get the data on the method we are searching for
- const art::DexFile::MethodId& new_method_id = dex_file_->GetMethodId(new_iter.GetMemberIndex());
+ const art::DexFile::MethodId& new_method_id = dex_file_->GetMethodId(method.GetIndex());
const char* new_method_name = dex_file_->GetMethodName(new_method_id);
art::Signature new_method_signature = dex_file_->GetMethodSignature(new_method_id);
art::ArtMethod* old_method = FindMethod(h_klass, new_method_name, new_method_signature);
@@ -658,7 +657,7 @@ bool Redefiner::ClassRedefinition::CheckSameMethods() {
// Since direct methods have different flags than virtual ones (specifically direct methods must
// have kAccPrivate or kAccStatic or kAccConstructor flags) we can tell if a method changes from
// virtual to direct.
- uint32_t new_flags = new_iter.GetMethodAccessFlags();
+ uint32_t new_flags = method.GetAccessFlags();
if (new_flags != (old_method->GetAccessFlags() & art::kAccValidMethodFlags)) {
RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED),
StringPrintf("method '%s' (sig: %s) had different access flags",
@@ -674,20 +673,21 @@ bool Redefiner::ClassRedefinition::CheckSameFields() {
art::StackHandleScope<1> hs(driver_->self_);
art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
- art::ClassDataItemIterator new_iter(*dex_file_,
- dex_file_->GetClassData(dex_file_->GetClassDef(0)));
+ art::ClassAccessor new_accessor(*dex_file_, dex_file_->GetClassDef(0));
+
const art::DexFile& old_dex_file = h_klass->GetDexFile();
- art::ClassDataItemIterator old_iter(old_dex_file,
- old_dex_file.GetClassData(*h_klass->GetClassDef()));
+ art::ClassAccessor old_accessor(old_dex_file, *h_klass->GetClassDef());
// Instance and static fields can be differentiated by their flags so no need to check them
// separately.
- while (new_iter.HasNextInstanceField() || new_iter.HasNextStaticField()) {
+ auto old_fields = old_accessor.GetFields();
+ auto old_iter = old_fields.begin();
+ for (const art::ClassAccessor::Field& new_field : new_accessor.GetFields()) {
// Get the data on the method we are searching for
- const art::DexFile::FieldId& new_field_id = dex_file_->GetFieldId(new_iter.GetMemberIndex());
+ const art::DexFile::FieldId& new_field_id = dex_file_->GetFieldId(new_field.GetIndex());
const char* new_field_name = dex_file_->GetFieldName(new_field_id);
const char* new_field_type = dex_file_->GetFieldTypeDescriptor(new_field_id);
- if (!(old_iter.HasNextInstanceField() || old_iter.HasNextStaticField())) {
+ if (old_iter == old_fields.end()) {
// We are missing the old version of this method!
RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
StringPrintf("Unknown field '%s' (type: %s) added!",
@@ -696,7 +696,7 @@ bool Redefiner::ClassRedefinition::CheckSameFields() {
return false;
}
- const art::DexFile::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter.GetMemberIndex());
+ const art::DexFile::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter->GetIndex());
const char* old_field_name = old_dex_file.GetFieldName(old_field_id);
const char* old_field_type = old_dex_file.GetFieldTypeDescriptor(old_field_id);
@@ -714,7 +714,7 @@ bool Redefiner::ClassRedefinition::CheckSameFields() {
// Since static fields have different flags than instance ones (specifically static fields must
// have the kAccStatic flag) we can tell if a field changes from static to instance.
- if (new_iter.GetFieldAccessFlags() != old_iter.GetFieldAccessFlags()) {
+ if (new_field.GetAccessFlags() != old_iter->GetAccessFlags()) {
RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
StringPrintf("Field '%s' (sig: %s) had different access flags",
new_field_name,
@@ -722,16 +722,15 @@ bool Redefiner::ClassRedefinition::CheckSameFields() {
return false;
}
- new_iter.Next();
- old_iter.Next();
+ ++old_iter;
}
- if (old_iter.HasNextInstanceField() || old_iter.HasNextStaticField()) {
+ if (old_iter != old_fields.end()) {
RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
StringPrintf("field '%s' (sig: %s) is missing!",
old_dex_file.GetFieldName(old_dex_file.GetFieldId(
- old_iter.GetMemberIndex())),
+ old_iter->GetIndex())),
old_dex_file.GetFieldTypeDescriptor(old_dex_file.GetFieldId(
- old_iter.GetMemberIndex()))));
+ old_iter->GetIndex()))));
return false;
}
return true;
@@ -873,73 +872,76 @@ class RedefinitionDataHolder {
return arr_.IsNull();
}
- art::mirror::ClassLoader* GetSourceClassLoader(jint klass_index) const
+ art::ObjPtr<art::mirror::ClassLoader> GetSourceClassLoader(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return art::down_cast<art::mirror::ClassLoader*>(GetSlot(klass_index, kSlotSourceClassLoader));
+ return art::ObjPtr<art::mirror::ClassLoader>::DownCast(
+ GetSlot(klass_index, kSlotSourceClassLoader));
}
- art::mirror::Object* GetJavaDexFile(jint klass_index) const
+ art::ObjPtr<art::mirror::Object> GetJavaDexFile(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return GetSlot(klass_index, kSlotJavaDexFile);
}
- art::mirror::LongArray* GetNewDexFileCookie(jint klass_index) const
+ art::ObjPtr<art::mirror::LongArray> GetNewDexFileCookie(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return art::down_cast<art::mirror::LongArray*>(GetSlot(klass_index, kSlotNewDexFileCookie));
+ return art::ObjPtr<art::mirror::LongArray>::DownCast(
+ GetSlot(klass_index, kSlotNewDexFileCookie));
}
- art::mirror::DexCache* GetNewDexCache(jint klass_index) const
+ art::ObjPtr<art::mirror::DexCache> GetNewDexCache(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return art::down_cast<art::mirror::DexCache*>(GetSlot(klass_index, kSlotNewDexCache));
+ return art::ObjPtr<art::mirror::DexCache>::DownCast(GetSlot(klass_index, kSlotNewDexCache));
}
- art::mirror::Class* GetMirrorClass(jint klass_index) const
+ art::ObjPtr<art::mirror::Class> GetMirrorClass(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return art::down_cast<art::mirror::Class*>(GetSlot(klass_index, kSlotMirrorClass));
+ return art::ObjPtr<art::mirror::Class>::DownCast(GetSlot(klass_index, kSlotMirrorClass));
}
- art::mirror::Object* GetOriginalDexFile(jint klass_index) const
+ art::ObjPtr<art::mirror::Object> GetOriginalDexFile(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return art::down_cast<art::mirror::Object*>(GetSlot(klass_index, kSlotOrigDexFile));
+ return art::ObjPtr<art::mirror::Object>::DownCast(GetSlot(klass_index, kSlotOrigDexFile));
}
- art::mirror::PointerArray* GetOldObsoleteMethods(jint klass_index) const
+ art::ObjPtr<art::mirror::PointerArray> GetOldObsoleteMethods(jint klass_index) const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return art::down_cast<art::mirror::PointerArray*>(
+ return art::ObjPtr<art::mirror::PointerArray>::DownCast(
GetSlot(klass_index, kSlotOldObsoleteMethods));
}
- art::mirror::ObjectArray<art::mirror::DexCache>* GetOldDexCaches(jint klass_index) const
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
- return art::down_cast<art::mirror::ObjectArray<art::mirror::DexCache>*>(
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::DexCache>> GetOldDexCaches(
+ jint klass_index) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ return art::ObjPtr<art::mirror::ObjectArray<art::mirror::DexCache>>::DownCast(
GetSlot(klass_index, kSlotOldDexCaches));
}
- void SetSourceClassLoader(jint klass_index, art::mirror::ClassLoader* loader)
+ void SetSourceClassLoader(jint klass_index, art::ObjPtr<art::mirror::ClassLoader> loader)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotSourceClassLoader, loader);
}
- void SetJavaDexFile(jint klass_index, art::mirror::Object* dexfile)
+ void SetJavaDexFile(jint klass_index, art::ObjPtr<art::mirror::Object> dexfile)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotJavaDexFile, dexfile);
}
- void SetNewDexFileCookie(jint klass_index, art::mirror::LongArray* cookie)
+ void SetNewDexFileCookie(jint klass_index, art::ObjPtr<art::mirror::LongArray> cookie)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotNewDexFileCookie, cookie);
}
- void SetNewDexCache(jint klass_index, art::mirror::DexCache* cache)
+ void SetNewDexCache(jint klass_index, art::ObjPtr<art::mirror::DexCache> cache)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotNewDexCache, cache);
}
- void SetMirrorClass(jint klass_index, art::mirror::Class* klass)
+ void SetMirrorClass(jint klass_index, art::ObjPtr<art::mirror::Class> klass)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotMirrorClass, klass);
}
- void SetOriginalDexFile(jint klass_index, art::mirror::Object* bytes)
+ void SetOriginalDexFile(jint klass_index, art::ObjPtr<art::mirror::Object> bytes)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotOrigDexFile, bytes);
}
- void SetOldObsoleteMethods(jint klass_index, art::mirror::PointerArray* methods)
+ void SetOldObsoleteMethods(jint klass_index, art::ObjPtr<art::mirror::PointerArray> methods)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotOldObsoleteMethods, methods);
}
- void SetOldDexCaches(jint klass_index, art::mirror::ObjectArray<art::mirror::DexCache>* caches)
+ void SetOldDexCaches(jint klass_index,
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::DexCache>> caches)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
SetSlot(klass_index, kSlotOldDexCaches, caches);
}
@@ -970,8 +972,8 @@ class RedefinitionDataHolder {
mutable art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
std::vector<Redefiner::ClassRedefinition>* redefinitions_;
- art::mirror::Object* GetSlot(jint klass_index,
- DataSlot slot) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ObjPtr<art::mirror::Object> GetSlot(jint klass_index, DataSlot slot) const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
DCHECK_LT(klass_index, Length());
return arr_->Get((kNumSlots * klass_index) + slot);
}
@@ -1036,31 +1038,35 @@ class RedefinitionDataIter {
return holder_;
}
- art::mirror::ClassLoader* GetSourceClassLoader() const
+ art::ObjPtr<art::mirror::ClassLoader> GetSourceClassLoader() const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return holder_.GetSourceClassLoader(idx_);
}
- art::mirror::Object* GetJavaDexFile() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ObjPtr<art::mirror::Object> GetJavaDexFile() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
return holder_.GetJavaDexFile(idx_);
}
- art::mirror::LongArray* GetNewDexFileCookie() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ObjPtr<art::mirror::LongArray> GetNewDexFileCookie() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
return holder_.GetNewDexFileCookie(idx_);
}
- art::mirror::DexCache* GetNewDexCache() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ObjPtr<art::mirror::DexCache> GetNewDexCache() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
return holder_.GetNewDexCache(idx_);
}
- art::mirror::Class* GetMirrorClass() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ObjPtr<art::mirror::Class> GetMirrorClass() const
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
return holder_.GetMirrorClass(idx_);
}
- art::mirror::Object* GetOriginalDexFile() const
+ art::ObjPtr<art::mirror::Object> GetOriginalDexFile() const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return holder_.GetOriginalDexFile(idx_);
}
- art::mirror::PointerArray* GetOldObsoleteMethods() const
+ art::ObjPtr<art::mirror::PointerArray> GetOldObsoleteMethods() const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return holder_.GetOldObsoleteMethods(idx_);
}
- art::mirror::ObjectArray<art::mirror::DexCache>* GetOldDexCaches() const
+ art::ObjPtr<art::mirror::ObjectArray<art::mirror::DexCache>> GetOldDexCaches() const
REQUIRES_SHARED(art::Locks::mutator_lock_) {
return holder_.GetOldDexCaches(idx_);
}
@@ -1073,28 +1079,31 @@ class RedefinitionDataIter {
REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetSourceClassLoader(idx_, loader);
}
- void SetJavaDexFile(art::mirror::Object* dexfile) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void SetJavaDexFile(art::ObjPtr<art::mirror::Object> dexfile)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetJavaDexFile(idx_, dexfile);
}
- void SetNewDexFileCookie(art::mirror::LongArray* cookie)
+ void SetNewDexFileCookie(art::ObjPtr<art::mirror::LongArray> cookie)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetNewDexFileCookie(idx_, cookie);
}
- void SetNewDexCache(art::mirror::DexCache* cache) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void SetNewDexCache(art::ObjPtr<art::mirror::DexCache> cache)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetNewDexCache(idx_, cache);
}
- void SetMirrorClass(art::mirror::Class* klass) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ void SetMirrorClass(art::ObjPtr<art::mirror::Class> klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetMirrorClass(idx_, klass);
}
- void SetOriginalDexFile(art::mirror::Object* bytes)
+ void SetOriginalDexFile(art::ObjPtr<art::mirror::Object> bytes)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetOriginalDexFile(idx_, bytes);
}
- void SetOldObsoleteMethods(art::mirror::PointerArray* methods)
+ void SetOldObsoleteMethods(art::ObjPtr<art::mirror::PointerArray> methods)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetOldObsoleteMethods(idx_, methods);
}
- void SetOldDexCaches(art::mirror::ObjectArray<art::mirror::DexCache>* caches)
+ void SetOldDexCaches(art::ObjPtr<art::mirror::ObjectArray<art::mirror::DexCache>> caches)
REQUIRES_SHARED(art::Locks::mutator_lock_) {
holder_.SetOldDexCaches(idx_, caches);
}
@@ -1378,7 +1387,7 @@ jvmtiError Redefiner::Run() {
if (data.GetSourceClassLoader() != nullptr) {
ClassLoaderHelper::UpdateJavaDexFile(data.GetJavaDexFile(), data.GetNewDexFileCookie());
}
- art::mirror::Class* klass = data.GetMirrorClass();
+ art::ObjPtr<art::mirror::Class> klass = data.GetMirrorClass();
// TODO Rewrite so we don't do a stack walk for each and every class.
redef.FindAndAllocateObsoleteMethods(klass);
redef.UpdateClass(klass, data.GetNewDexCache(), data.GetOriginalDexFile());
@@ -1492,10 +1501,10 @@ void Redefiner::ClassRedefinition::UpdateClass(
// obsolete methods).
void Redefiner::ClassRedefinition::RestoreObsoleteMethodMapsIfUnneeded(
const RedefinitionDataIter* cur_data) {
- art::mirror::Class* klass = GetMirrorClass();
- art::mirror::ClassExt* ext = klass->GetExtData();
- art::mirror::PointerArray* methods = ext->GetObsoleteMethods();
- art::mirror::PointerArray* old_methods = cur_data->GetOldObsoleteMethods();
+ art::ObjPtr<art::mirror::Class> klass = GetMirrorClass();
+ art::ObjPtr<art::mirror::ClassExt> ext = klass->GetExtData();
+ art::ObjPtr<art::mirror::PointerArray> methods = ext->GetObsoleteMethods();
+ art::ObjPtr<art::mirror::PointerArray> old_methods = cur_data->GetOldObsoleteMethods();
int32_t old_length = old_methods == nullptr ? 0 : old_methods->GetLength();
int32_t expected_length =
old_length + klass->NumDirectMethods() + klass->NumDeclaredVirtualMethods();
diff --git a/openjdkjvmti/ti_redefine.h b/openjdkjvmti/ti_redefine.h
index e337491ae3..6d8f6bf0db 100644
--- a/openjdkjvmti/ti_redefine.h
+++ b/openjdkjvmti/ti_redefine.h
@@ -136,7 +136,7 @@ class Redefiner {
/*out*/RedefinitionDataIter* cur_data)
REQUIRES_SHARED(art::Locks::mutator_lock_);
- void FindAndAllocateObsoleteMethods(art::mirror::Class* art_klass)
+ void FindAndAllocateObsoleteMethods(art::ObjPtr<art::mirror::Class> art_klass)
REQUIRES(art::Locks::mutator_lock_);
// Checks that the dex file contains only the single expected class and that the top-level class
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index cabf9e8b09..949b566860 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -60,6 +60,8 @@
namespace openjdkjvmti {
+static const char* kJvmtiTlsKey = "JvmtiTlsKey";
+
art::ArtField* ThreadUtil::context_class_loader_ = nullptr;
struct ThreadCallback : public art::ThreadLifecycleCallback {
@@ -624,14 +626,15 @@ jvmtiError ThreadUtil::GetAllThreads(jvmtiEnv* env,
// The struct that we store in the art::Thread::custom_tls_ that maps the jvmtiEnvs to the data
// stored with that thread. This is needed since different jvmtiEnvs are not supposed to share TLS
// data but we only have a single slot in Thread objects to store data.
-struct JvmtiGlobalTLSData {
+struct JvmtiGlobalTLSData : public art::TLSData {
std::unordered_map<jvmtiEnv*, const void*> data GUARDED_BY(art::Locks::thread_list_lock_);
};
static void RemoveTLSData(art::Thread* target, void* ctx) REQUIRES(art::Locks::thread_list_lock_) {
jvmtiEnv* env = reinterpret_cast<jvmtiEnv*>(ctx);
art::Locks::thread_list_lock_->AssertHeld(art::Thread::Current());
- JvmtiGlobalTLSData* global_tls = reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS());
+ JvmtiGlobalTLSData* global_tls =
+ reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
if (global_tls != nullptr) {
global_tls->data.erase(env);
}
@@ -654,10 +657,12 @@ jvmtiError ThreadUtil::SetThreadLocalStorage(jvmtiEnv* env, jthread thread, cons
return err;
}
- JvmtiGlobalTLSData* global_tls = reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS());
+ JvmtiGlobalTLSData* global_tls =
+ reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
if (global_tls == nullptr) {
- target->SetCustomTLS(new JvmtiGlobalTLSData);
- global_tls = reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS());
+ // Synchronized using thread_list_lock_ to prevent racing sets.
+ target->SetCustomTLS(kJvmtiTlsKey, new JvmtiGlobalTLSData);
+ global_tls = reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
}
global_tls->data[env] = data;
@@ -681,7 +686,8 @@ jvmtiError ThreadUtil::GetThreadLocalStorage(jvmtiEnv* env,
return err;
}
- JvmtiGlobalTLSData* global_tls = reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS());
+ JvmtiGlobalTLSData* global_tls =
+ reinterpret_cast<JvmtiGlobalTLSData*>(target->GetCustomTLS(kJvmtiTlsKey));
if (global_tls == nullptr) {
*data_ptr = nullptr;
return OK;
@@ -812,6 +818,37 @@ jvmtiError ThreadUtil::RunAgentThread(jvmtiEnv* jvmti_env,
return ERR(NONE);
}
+class ScopedSuspendByPeer {
+ public:
+ explicit ScopedSuspendByPeer(jthread jtarget)
+ : thread_list_(art::Runtime::Current()->GetThreadList()),
+ timeout_(false),
+ target_(thread_list_->SuspendThreadByPeer(jtarget,
+ /* suspend_thread */ true,
+ art::SuspendReason::kInternal,
+ &timeout_)) { }
+ ~ScopedSuspendByPeer() {
+ if (target_ != nullptr) {
+ if (!thread_list_->Resume(target_, art::SuspendReason::kInternal)) {
+ LOG(ERROR) << "Failed to resume " << target_ << "!";
+ }
+ }
+ }
+
+ art::Thread* GetTargetThread() const {
+ return target_;
+ }
+
+ bool TimedOut() const {
+ return timeout_;
+ }
+
+ private:
+ art::ThreadList* thread_list_;
+ bool timeout_;
+ art::Thread* target_;
+};
+
jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
jthread target_jthread) {
// Loop since we need to bail out and try again if we would end up getting suspended while holding
@@ -839,29 +876,27 @@ jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
if (!GetAliveNativeThread(target_jthread, soa, &target, &err)) {
return err;
}
- art::ThreadState state = target->GetState();
- if (state == art::ThreadState::kStarting || target->IsStillStarting()) {
- return ERR(THREAD_NOT_ALIVE);
- } else {
- art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
- if (target->GetUserCodeSuspendCount() != 0) {
- return ERR(THREAD_SUSPENDED);
- }
- }
}
- bool timeout = true;
- art::Thread* ret_target = art::Runtime::Current()->GetThreadList()->SuspendThreadByPeer(
- target_jthread,
- /* request_suspension */ true,
- art::SuspendReason::kForUserCode,
- &timeout);
- if (ret_target == nullptr && !timeout) {
+ // Get the actual thread in a suspended state so we can change the user-code suspend count.
+ ScopedSuspendByPeer ssbp(target_jthread);
+ if (ssbp.GetTargetThread() == nullptr && !ssbp.TimedOut()) {
// TODO It would be good to get more information about why exactly the thread failed to
// suspend.
return ERR(INTERNAL);
- } else if (!timeout) {
- // we didn't time out and got a result.
- return OK;
+ } else if (!ssbp.TimedOut()) {
+ art::ThreadState state = ssbp.GetTargetThread()->GetState();
+ if (state == art::ThreadState::kStarting || ssbp.GetTargetThread()->IsStillStarting()) {
+ return ERR(THREAD_NOT_ALIVE);
+ }
+ // We didn't time out and got a result. Suspend the thread by usercode and return. It's
+ // already suspended internally so we don't need to do anything but increment the count.
+ art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
+ if (ssbp.GetTargetThread()->GetUserCodeSuspendCount() != 0) {
+ return ERR(THREAD_SUSPENDED);
+ }
+ bool res = ssbp.GetTargetThread()->ModifySuspendCount(
+ self, +1, nullptr, art::SuspendReason::kForUserCode);
+ return res ? OK : ERR(INTERNAL);
}
// We timed out. Just go around and try again.
} while (true);
@@ -870,6 +905,17 @@ jvmtiError ThreadUtil::SuspendOther(art::Thread* self,
jvmtiError ThreadUtil::SuspendSelf(art::Thread* self) {
CHECK(self == art::Thread::Current());
+ if (!self->CanBeSuspendedByUserCode()) {
+ // TODO This is really undesirable. As far as I can tell this can only come about because of
+ // class-loads in the jit-threads (through either VMObjectAlloc or the ClassLoad/ClassPrepare
+ // events that we send). It's unlikely that anyone would be suspending themselves there since
+ // it's almost guaranteed to cause a deadlock but it is technically allowed. Ideally we'd want
+ // to put a CHECK here (or in the event-dispatch code) that we are only in this situation when
+ // sending the GC callbacks but the fact that the jit causes events means we cannot do this.
+ LOG(WARNING) << "Attempt to self-suspend on a thread without suspension enabled. Thread is "
+ << *self;
+ return ERR(INTERNAL);
+ }
{
art::MutexLock mu(self, *art::Locks::user_code_suspension_lock_);
art::MutexLock thread_list_mu(self, *art::Locks::thread_suspend_count_lock_);
@@ -917,7 +963,6 @@ jvmtiError ThreadUtil::ResumeThread(jvmtiEnv* env ATTRIBUTE_UNUSED,
return ERR(NULL_POINTER);
}
art::Thread* self = art::Thread::Current();
- art::Thread* target;
// Retry until we know we won't get suspended by user code while resuming something.
do {
SuspendCheck(self);
@@ -928,36 +973,37 @@ jvmtiError ThreadUtil::ResumeThread(jvmtiEnv* env ATTRIBUTE_UNUSED,
continue;
}
// From now on we know we cannot get suspended by user-code.
- {
- // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
- // have the 'suspend_lock' locked here.
- art::ScopedObjectAccess soa(self);
- art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
- jvmtiError err = ERR(INTERNAL);
- if (!GetAliveNativeThread(thread, soa, &target, &err)) {
- return err;
- } else if (target == self) {
- // We would have paused until we aren't suspended anymore due to the ScopedObjectAccess so
- // we can just return THREAD_NOT_SUSPENDED. Unfortunately we cannot do any real DCHECKs
- // about current state since it's all concurrent.
- return ERR(THREAD_NOT_SUSPENDED);
- }
- // The JVMTI spec requires us to return THREAD_NOT_SUSPENDED if it is alive but we really
- // cannot tell why resume failed.
- {
- art::MutexLock thread_suspend_count_mu(self, *art::Locks::thread_suspend_count_lock_);
- if (target->GetUserCodeSuspendCount() == 0) {
- return ERR(THREAD_NOT_SUSPENDED);
- }
- }
+ // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
+ // have the 'suspend_lock' locked here.
+ art::ScopedObjectAccess soa(self);
+ if (thread == nullptr) {
+ // The thread is the current thread.
+ return ERR(THREAD_NOT_SUSPENDED);
+ } else if (!soa.Env()->IsInstanceOf(thread, art::WellKnownClasses::java_lang_Thread)) {
+ // Not a thread object.
+ return ERR(INVALID_THREAD);
+ } else if (self->GetPeer() == soa.Decode<art::mirror::Object>(thread)) {
+ // The thread is the current thread.
+ return ERR(THREAD_NOT_SUSPENDED);
+ }
+ ScopedSuspendByPeer ssbp(thread);
+ if (ssbp.TimedOut()) {
+ // Unknown error. Couldn't suspend thread!
+ return ERR(INTERNAL);
+ } else if (ssbp.GetTargetThread() == nullptr) {
+ // Thread must not be alive.
+ return ERR(THREAD_NOT_ALIVE);
}
- // It is okay that we don't have a thread_list_lock here since we know that the thread cannot
- // die since it is currently held suspended by a SuspendReason::kForUserCode suspend.
- DCHECK(target != self);
- if (!art::Runtime::Current()->GetThreadList()->Resume(target,
- art::SuspendReason::kForUserCode)) {
+ // We didn't time out and got a result. Check the thread is suspended by usercode, unsuspend it
+ // and return. It's already suspended internally so we don't need to do anything but decrement the
+ // count.
+ art::MutexLock thread_list_mu(self, *art::Locks::thread_suspend_count_lock_);
+ if (ssbp.GetTargetThread()->GetUserCodeSuspendCount() == 0) {
+ return ERR(THREAD_NOT_SUSPENDED);
+ } else if (!ssbp.GetTargetThread()->ModifySuspendCount(
+ self, -1, nullptr, art::SuspendReason::kForUserCode)) {
// TODO Give a better error.
- // This is most likely THREAD_NOT_SUSPENDED but we cannot really be sure.
+ // This should not really be possible and is probably some race.
return ERR(INTERNAL);
} else {
return OK;
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 97b315e85c..a15f7b88d8 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -31,11 +31,13 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/bit_memory_region.h"
#include "base/dumpable.h"
#include "base/file_utils.h"
#include "base/leb128.h"
#include "base/logging.h" // For InitLogging.
#include "base/mutex.h"
+#include "base/memory_region.h"
#include "base/memory_tool.h"
#include "base/os.h"
#include "base/scoped_flock.h"
@@ -187,10 +189,6 @@ bool PatchOat::GeneratePatch(
"Original and relocated image sizes differ: %zu vs %zu", original_size, relocated_size);
return false;
}
- if ((original_size % 4) != 0) {
- *error_msg = StringPrintf("Image size not multiple of 4: %zu", original_size);
- return false;
- }
if (original_size > UINT32_MAX) {
*error_msg = StringPrintf("Image too large: %zu" , original_size);
return false;
@@ -206,20 +204,58 @@ bool PatchOat::GeneratePatch(
return false;
}
+ const ImageHeader* image_header = reinterpret_cast<const ImageHeader*>(original.Begin());
+ if (image_header->GetStorageMode() != ImageHeader::kStorageModeUncompressed) {
+ *error_msg = "Unexpected compressed image.";
+ return false;
+ }
+ if (image_header->IsAppImage()) {
+ *error_msg = "Unexpected app image.";
+ return false;
+ }
+ if (image_header->GetPointerSize() != PointerSize::k32 &&
+ image_header->GetPointerSize() != PointerSize::k64) {
+ *error_msg = "Unexpected pointer size.";
+ return false;
+ }
+ static_assert(sizeof(GcRoot<mirror::Object>) == sizeof(mirror::HeapReference<mirror::Object>),
+ "Expecting heap GC roots and references to have the same size.");
+ DCHECK_LE(sizeof(GcRoot<mirror::Object>), static_cast<size_t>(image_header->GetPointerSize()));
+
+ const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
+ kPageSize);
+ const size_t end_of_bitmap = image_bitmap_offset + image_header->GetImageBitmapSection().Size();
+ const ImageSection& relocation_section = image_header->GetImageRelocationsSection();
+ MemoryRegion relocations_data(original.Begin() + end_of_bitmap, relocation_section.Size());
+ size_t image_end = image_header->GetClassTableSection().End();
+ if (!IsAligned<sizeof(GcRoot<mirror::Object>)>(image_end)) {
+ *error_msg = StringPrintf("Unaligned image end: %zu", image_end);
+ return false;
+ }
+ size_t num_indexes = image_end / sizeof(GcRoot<mirror::Object>);
+ if (relocation_section.Size() != BitsToBytesRoundUp(num_indexes)) {
+ *error_msg = StringPrintf("Unexpected size of relocation section: %zu expected: %zu",
+ static_cast<size_t>(relocation_section.Size()),
+ BitsToBytesRoundUp(num_indexes));
+ return false;
+ }
+ BitMemoryRegion relocation_bitmap(relocations_data, /* bit_offset */ 0u, num_indexes);
+
// Output the SHA-256 digest of the original
output->resize(SHA256_DIGEST_LENGTH);
const uint8_t* original_bytes = original.Begin();
SHA256(original_bytes, original_size, output->data());
- // Output the list of offsets at which the original and patched images differ
- size_t last_diff_offset = 0;
+ // Check the list of offsets at which the original and patched images differ.
size_t diff_offset_count = 0;
const uint8_t* relocated_bytes = relocated.Begin();
- for (size_t offset = 0; offset < original_size; offset += 4) {
+ for (size_t index = 0; index != num_indexes; ++index) {
+ size_t offset = index * sizeof(GcRoot<mirror::Object>);
uint32_t original_value = *reinterpret_cast<const uint32_t*>(original_bytes + offset);
uint32_t relocated_value = *reinterpret_cast<const uint32_t*>(relocated_bytes + offset);
off_t diff = relocated_value - original_value;
if (diff == 0) {
+ CHECK(!relocation_bitmap.LoadBit(index));
continue;
} else if (diff != expected_diff) {
*error_msg =
@@ -230,13 +266,11 @@ bool PatchOat::GeneratePatch(
(intmax_t) diff);
return false;
}
-
- uint32_t offset_diff = offset - last_diff_offset;
- last_diff_offset = offset;
+ CHECK(relocation_bitmap.LoadBit(index));
diff_offset_count++;
-
- EncodeUnsignedLeb128(output, offset_diff);
}
+ size_t tail_bytes = original_size - image_end;
+ CHECK_EQ(memcmp(original_bytes + image_end, relocated_bytes + image_end, tail_bytes), 0);
if (diff_offset_count == 0) {
*error_msg = "Original and patched images are identical";
@@ -290,6 +324,14 @@ static bool CheckImageIdenticalToOriginalExceptForRelocation(
rel_filename.c_str());
return false;
}
+ if (rel_size != SHA256_DIGEST_LENGTH) {
+ *error_msg = StringPrintf("Unexpected size of image relocation file %s: %" PRId64
+ ", expected %zu",
+ rel_filename.c_str(),
+ rel_size,
+ static_cast<size_t>(SHA256_DIGEST_LENGTH));
+ return false;
+ }
std::unique_ptr<uint8_t[]> rel(new uint8_t[rel_size]);
if (!rel_file->ReadFully(rel.get(), rel_size)) {
*error_msg = StringPrintf("Failed to read image relocation file %s", rel_filename.c_str());
@@ -309,10 +351,10 @@ static bool CheckImageIdenticalToOriginalExceptForRelocation(
relocated_filename.c_str());
return false;
}
- if ((image_size % 4) != 0) {
+ if (static_cast<uint64_t>(image_size) < sizeof(ImageHeader)) {
*error_msg =
StringPrintf(
- "Relocated image file %s size not multiple of 4: %" PRId64,
+ "Relocated image file %s too small: %" PRId64,
relocated_filename.c_str(), image_size);
return false;
}
@@ -329,16 +371,39 @@ static bool CheckImageIdenticalToOriginalExceptForRelocation(
return false;
}
- const uint8_t* original_image_digest = rel.get();
- if (rel_size < SHA256_DIGEST_LENGTH) {
- *error_msg = StringPrintf("Malformed image relocation file %s: too short",
- rel_filename.c_str());
+ const ImageHeader& image_header = *reinterpret_cast<const ImageHeader*>(image.get());
+ if (image_header.GetStorageMode() != ImageHeader::kStorageModeUncompressed) {
+    *error_msg = StringPrintf("Unsupported compressed image file %s",
+ relocated_filename.c_str());
+ return false;
+ }
+ size_t image_end = image_header.GetClassTableSection().End();
+ if (image_end > static_cast<uint64_t>(image_size) || !IsAligned<4u>(image_end)) {
+ *error_msg = StringPrintf("Heap size too big or unaligned in image file %s: %zu",
+ relocated_filename.c_str(),
+ image_end);
+ return false;
+ }
+ size_t number_of_relocation_locations = image_end / 4u;
+ const ImageSection& relocation_section = image_header.GetImageRelocationsSection();
+ if (relocation_section.Size() != BitsToBytesRoundUp(number_of_relocation_locations)) {
+ *error_msg = StringPrintf("Unexpected size of relocation section in image file %s: %zu"
+ " expected: %zu",
+ relocated_filename.c_str(),
+ static_cast<size_t>(relocation_section.Size()),
+ BitsToBytesRoundUp(number_of_relocation_locations));
+ return false;
+ }
+ if (relocation_section.End() != image_size) {
+ *error_msg = StringPrintf("Relocation section does not end at file end in image file %s: %zu"
+ " expected: %" PRId64,
+ relocated_filename.c_str(),
+ static_cast<size_t>(relocation_section.End()),
+ image_size);
return false;
}
- const ImageHeader& image_header = *reinterpret_cast<const ImageHeader*>(image.get());
off_t expected_diff = image_header.GetPatchDelta();
-
if (expected_diff == 0) {
*error_msg = StringPrintf("Unsuported patch delta of zero in %s",
relocated_filename.c_str());
@@ -347,35 +412,14 @@ static bool CheckImageIdenticalToOriginalExceptForRelocation(
// Relocated image is expected to differ from the original due to relocation.
// Unrelocate the image in memory to compensate.
- uint8_t* image_start = image.get();
- const uint8_t* rel_end = &rel[rel_size];
- const uint8_t* rel_ptr = &rel[SHA256_DIGEST_LENGTH];
- // The remaining .rel file consists of offsets at which relocation should've occurred.
- // For each offset, we "unrelocate" the image by subtracting the expected relocation
- // diff value (as specified in the image header).
- //
- // Each offset is encoded as a delta/diff relative to the previous offset. With the
- // very first offset being encoded relative to offset 0.
- // Deltas are encoded using little-endian 7 bits per byte encoding, with all bytes except
- // the last one having the highest bit set.
- uint32_t offset = 0;
- while (rel_ptr != rel_end) {
- uint32_t offset_delta = 0;
- if (DecodeUnsignedLeb128Checked(&rel_ptr, rel_end, &offset_delta)) {
- offset += offset_delta;
- if (static_cast<int64_t>(offset) + static_cast<int64_t>(sizeof(uint32_t)) > image_size) {
- *error_msg = StringPrintf("Relocation out of bounds in %s", relocated_filename.c_str());
- return false;
- }
- uint32_t *image_value = reinterpret_cast<uint32_t*>(image_start + offset);
+ MemoryRegion relocations(image.get() + relocation_section.Offset(), relocation_section.Size());
+ BitMemoryRegion relocation_bitmask(relocations,
+ /* bit_offset */ 0u,
+ number_of_relocation_locations);
+ for (size_t index = 0; index != number_of_relocation_locations; ++index) {
+ if (relocation_bitmask.LoadBit(index)) {
+ uint32_t* image_value = reinterpret_cast<uint32_t*>(image.get() + index * 4u);
*image_value -= expected_diff;
- } else {
- *error_msg =
- StringPrintf(
- "Malformed image relocation file %s: "
- "last byte has it's most significant bit set",
- rel_filename.c_str());
- return false;
}
}
@@ -384,7 +428,7 @@ static bool CheckImageIdenticalToOriginalExceptForRelocation(
// digest from relocation file.
uint8_t image_digest[SHA256_DIGEST_LENGTH];
SHA256(image.get(), image_size, image_digest);
- if (memcmp(image_digest, original_image_digest, SHA256_DIGEST_LENGTH) != 0) {
+ if (memcmp(image_digest, rel.get(), SHA256_DIGEST_LENGTH) != 0) {
*error_msg =
StringPrintf(
"Relocated image %s does not match the original %s after unrelocation",
@@ -886,7 +930,7 @@ class PatchOat::RelocatedPointerVisitor {
explicit RelocatedPointerVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
template <typename T>
- T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED = 0) const {
+ T* operator()(T* ptr, void** dest_addr ATTRIBUTE_UNUSED = nullptr) const {
return patch_oat_->RelocatedAddressOfPointer(ptr);
}
@@ -1072,7 +1116,7 @@ void PatchOat::FixupMethod(ArtMethod* object, ArtMethod* copy) {
copy->CopyFrom(object, pointer_size);
// Just update the entry points if it looks like we should.
// TODO: sanity check all the pointers' values
- copy->SetDeclaringClass(RelocatedAddressOfPointer(object->GetDeclaringClass()));
+ copy->SetDeclaringClass(RelocatedAddressOfPointer(object->GetDeclaringClass().Ptr()));
copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer(
object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size);
// No special handling for IMT conflict table since all pointers are moved by the same offset.
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 8411982b30..6ec626591a 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -31,6 +31,7 @@ libart_cc_defaults {
"aot_class_linker.cc",
"art_field.cc",
"art_method.cc",
+ "backtrace_helper.cc",
"barrier.cc",
"base/mem_map_arena_pool.cc",
"base/mutex.cc",
diff --git a/runtime/arch/instruction_set_features_test.cc b/runtime/arch/instruction_set_features_test.cc
index 1e3275cc00..3a39a2a523 100644
--- a/runtime/arch/instruction_set_features_test.cc
+++ b/runtime/arch/instruction_set_features_test.cc
@@ -27,9 +27,10 @@
namespace art {
+#ifdef ART_TARGET_ANDROID
+
using android::base::StringPrintf;
-#ifdef ART_TARGET_ANDROID
#if defined(__aarch64__)
TEST(InstructionSetFeaturesTest, DISABLED_FeaturesFromSystemPropertyVariant) {
LOG(WARNING) << "Test disabled due to no CPP define for A53 erratum 835769";
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index 18595cf17a..ac22f07a34 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -49,23 +49,17 @@
namespace art {
template <ReadBarrierOption kReadBarrierOption>
-inline mirror::Class* ArtMethod::GetDeclaringClassUnchecked() {
+inline ObjPtr<mirror::Class> ArtMethod::GetDeclaringClassUnchecked() {
GcRootSource gc_root_source(this);
return declaring_class_.Read<kReadBarrierOption>(&gc_root_source);
}
template <ReadBarrierOption kReadBarrierOption>
-inline mirror::Class* ArtMethod::GetDeclaringClass() {
- mirror::Class* result = GetDeclaringClassUnchecked<kReadBarrierOption>();
+inline ObjPtr<mirror::Class> ArtMethod::GetDeclaringClass() {
+ ObjPtr<mirror::Class> result = GetDeclaringClassUnchecked<kReadBarrierOption>();
if (kIsDebugBuild) {
if (!IsRuntimeMethod()) {
CHECK(result != nullptr) << this;
- if (kCheckDeclaringClassState) {
- if (!(result->IsIdxLoaded() || result->IsErroneous())) {
- LOG(FATAL_WITHOUT_ABORT) << "Class status: " << result->GetStatus();
- LOG(FATAL) << result->PrettyClass();
- }
- }
} else {
CHECK(result == nullptr) << this;
}
@@ -77,8 +71,8 @@ inline void ArtMethod::SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_cla
declaring_class_ = GcRoot<mirror::Class>(new_declaring_class);
}
-inline bool ArtMethod::CASDeclaringClass(mirror::Class* expected_class,
- mirror::Class* desired_class) {
+inline bool ArtMethod::CASDeclaringClass(ObjPtr<mirror::Class> expected_class,
+ ObjPtr<mirror::Class> desired_class) {
GcRoot<mirror::Class> expected_root(expected_class);
GcRoot<mirror::Class> desired_root(desired_class);
auto atomic_root_class = reinterpret_cast<Atomic<GcRoot<mirror::Class>>*>(&declaring_class_);
@@ -94,16 +88,6 @@ inline uint16_t ArtMethod::GetMethodIndexDuringLinking() {
return method_index_;
}
-template <ReadBarrierOption kReadBarrierOption>
-inline uint32_t ArtMethod::GetDexMethodIndex() {
- if (kCheckDeclaringClassState) {
- CHECK(IsRuntimeMethod() ||
- GetDeclaringClass<kReadBarrierOption>()->IsIdxLoaded() ||
- GetDeclaringClass<kReadBarrierOption>()->IsErroneous());
- }
- return GetDexMethodIndexUnchecked();
-}
-
inline ObjPtr<mirror::Class> ArtMethod::LookupResolvedClassFromTypeIndex(dex::TypeIndex type_idx) {
ScopedAssertNoThreadSuspension ants(__FUNCTION__);
ObjPtr<mirror::Class> type =
@@ -127,14 +111,14 @@ inline bool ArtMethod::CheckIncompatibleClassChange(InvokeType type) {
case kVirtual: {
// We have an error if we are direct or a non-copied (i.e. not part of a real class) interface
// method.
- mirror::Class* methods_class = GetDeclaringClass();
+ ObjPtr<mirror::Class> methods_class = GetDeclaringClass();
return IsDirect() || (methods_class->IsInterface() && !IsCopied());
}
case kSuper:
// Constructors and static methods are called with invoke-direct.
return IsConstructor() || IsStatic();
case kInterface: {
- mirror::Class* methods_class = GetDeclaringClass();
+ ObjPtr<mirror::Class> methods_class = GetDeclaringClass();
return IsDirect() || !(methods_class->IsInterface() || methods_class->IsObjectClass());
}
default:
@@ -196,14 +180,7 @@ inline const char* ArtMethod::GetShorty() {
inline const char* ArtMethod::GetShorty(uint32_t* out_length) {
DCHECK(!IsProxyMethod());
const DexFile* dex_file = GetDexFile();
- // Don't do a read barrier in the DCHECK() inside GetDexMethodIndex() as GetShorty()
- // can be called when the declaring class is about to be unloaded and cannot be added
- // to the mark stack (subsequent GC assertion would fail).
- // It is safe to avoid the read barrier as the ArtMethod is constructed with a declaring
- // Class already satisfying the DCHECK() inside GetDexMethodIndex(), so even if that copy
- // of declaring class becomes a from-space object, it shall satisfy the DCHECK().
- return dex_file->GetMethodShorty(dex_file->GetMethodId(GetDexMethodIndex<kWithoutReadBarrier>()),
- out_length);
+ return dex_file->GetMethodShorty(dex_file->GetMethodId(GetDexMethodIndex()), out_length);
}
inline const Signature ArtMethod::GetSignature() {
@@ -329,7 +306,7 @@ inline mirror::ClassLoader* ArtMethod::GetClassLoader() {
template <ReadBarrierOption kReadBarrierOption>
inline mirror::DexCache* ArtMethod::GetDexCache() {
- if (LIKELY(!IsObsolete<kReadBarrierOption>())) {
+ if (LIKELY(!IsObsolete())) {
ObjPtr<mirror::Class> klass = GetDeclaringClass<kReadBarrierOption>();
return klass->GetDexCache<kDefaultVerifyFlags, kReadBarrierOption>();
} else {
@@ -382,12 +359,12 @@ inline ObjPtr<mirror::Class> ArtMethod::ResolveReturnType() {
template <ReadBarrierOption kReadBarrierOption>
inline bool ArtMethod::HasSingleImplementation() {
- if (IsFinal<kReadBarrierOption>() || GetDeclaringClass<kReadBarrierOption>()->IsFinal()) {
+ if (IsFinal() || GetDeclaringClass<kReadBarrierOption>()->IsFinal()) {
// We don't set kAccSingleImplementation for these cases since intrinsic
// can use the flag also.
return true;
}
- return (GetAccessFlags<kReadBarrierOption>() & kAccSingleImplementation) != 0;
+ return (GetAccessFlags() & kAccSingleImplementation) != 0;
}
inline HiddenApiAccessFlags::ApiList ArtMethod::GetHiddenApiAccessFlags()
@@ -509,7 +486,7 @@ template<ReadBarrierOption kReadBarrierOption, typename RootVisitorType>
void ArtMethod::VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) {
if (LIKELY(!declaring_class_.IsNull())) {
visitor.VisitRoot(declaring_class_.AddressWithoutBarrier());
- mirror::Class* klass = declaring_class_.Read<kReadBarrierOption>();
+ ObjPtr<mirror::Class> klass = declaring_class_.Read<kReadBarrierOption>();
if (UNLIKELY(klass->IsProxyClass())) {
// For normal methods, dex cache shortcuts will be visited through the declaring class.
// However, for proxies we need to keep the interface method alive, so we visit its roots.
@@ -522,16 +499,16 @@ void ArtMethod::VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) {
template <typename Visitor>
inline void ArtMethod::UpdateObjectsForImageRelocation(const Visitor& visitor) {
- mirror::Class* old_class = GetDeclaringClassUnchecked<kWithoutReadBarrier>();
- mirror::Class* new_class = visitor(old_class);
+ ObjPtr<mirror::Class> old_class = GetDeclaringClassUnchecked<kWithoutReadBarrier>();
+ ObjPtr<mirror::Class> new_class = visitor(old_class.Ptr());
if (old_class != new_class) {
SetDeclaringClass(new_class);
}
}
-template <ReadBarrierOption kReadBarrierOption, typename Visitor>
+template <typename Visitor>
inline void ArtMethod::UpdateEntrypoints(const Visitor& visitor, PointerSize pointer_size) {
- if (IsNative<kReadBarrierOption>()) {
+ if (IsNative()) {
const void* old_native_code = GetEntryPointFromJniPtrSize(pointer_size);
const void* new_native_code = visitor(old_native_code);
if (old_native_code != new_native_code) {
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 80b6921c8a..68ccfee089 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -59,8 +59,6 @@ extern "C" void art_quick_invoke_stub(ArtMethod*, uint32_t*, uint32_t, Thread*,
extern "C" void art_quick_invoke_static_stub(ArtMethod*, uint32_t*, uint32_t, Thread*, JValue*,
const char*);
-DEFINE_RUNTIME_DEBUG_FLAG(ArtMethod, kCheckDeclaringClassState);
-
// Enforce that we he have the right index for runtime methods.
static_assert(ArtMethod::kRuntimeMethodDexMethodIndex == dex::kDexNoIndex,
"Wrong runtime-method dex method index");
@@ -90,18 +88,13 @@ ArtMethod* ArtMethod::GetNonObsoleteMethod() {
}
}
-template <ReadBarrierOption kReadBarrierOption>
ArtMethod* ArtMethod::GetSingleImplementation(PointerSize pointer_size) {
- if (!IsAbstract<kReadBarrierOption>()) {
+ if (!IsAbstract()) {
// A non-abstract's single implementation is itself.
return this;
}
return reinterpret_cast<ArtMethod*>(GetDataPtrSize(pointer_size));
}
-template ArtMethod* ArtMethod::GetSingleImplementation<ReadBarrierOption::kWithReadBarrier>(
- PointerSize pointer_size);
-template ArtMethod* ArtMethod::GetSingleImplementation<ReadBarrierOption::kWithoutReadBarrier>(
- PointerSize pointer_size);
ArtMethod* ArtMethod::FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method) {
@@ -801,25 +794,4 @@ ALWAYS_INLINE static inline void DoGetAccessFlagsHelper(ArtMethod* method)
method->GetDeclaringClass<kReadBarrierOption>()->IsErroneous());
}
-template <ReadBarrierOption kReadBarrierOption> void ArtMethod::GetAccessFlagsDCheck() {
- if (kCheckDeclaringClassState) {
- Thread* self = Thread::Current();
- if (!Locks::mutator_lock_->IsSharedHeld(self)) {
- if (self->IsThreadSuspensionAllowable()) {
- ScopedObjectAccess soa(self);
- CHECK(IsRuntimeMethod() ||
- GetDeclaringClass<kReadBarrierOption>()->IsIdxLoaded() ||
- GetDeclaringClass<kReadBarrierOption>()->IsErroneous());
- }
- } else {
- // We cannot use SOA in this case. We might be holding the lock, but may not be in the
- // runnable state (e.g., during GC).
- Locks::mutator_lock_->AssertSharedHeld(self);
- DoGetAccessFlagsHelper<kReadBarrierOption>(this);
- }
- }
-}
-template void ArtMethod::GetAccessFlagsDCheck<ReadBarrierOption::kWithReadBarrier>();
-template void ArtMethod::GetAccessFlagsDCheck<ReadBarrierOption::kWithoutReadBarrier>();
-
} // namespace art
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 09debb0c50..ce08cb0bea 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -87,10 +87,10 @@ class ArtMethod FINAL {
REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ ALWAYS_INLINE ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked()
+ ALWAYS_INLINE ObjPtr<mirror::Class> GetDeclaringClassUnchecked()
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::CompressedReference<mirror::Object>* GetDeclaringClassAddressWithoutBarrier() {
@@ -100,20 +100,14 @@ class ArtMethod FINAL {
void SetDeclaringClass(ObjPtr<mirror::Class> new_declaring_class)
REQUIRES_SHARED(Locks::mutator_lock_);
- bool CASDeclaringClass(mirror::Class* expected_class, mirror::Class* desired_class)
+ bool CASDeclaringClass(ObjPtr<mirror::Class> expected_class, ObjPtr<mirror::Class> desired_class)
REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DeclaringClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
}
- // Note: GetAccessFlags acquires the mutator lock in debug mode to check that it is not called for
- // a proxy method.
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
uint32_t GetAccessFlags() {
- if (kCheckDeclaringClassState) {
- GetAccessFlagsDCheck<kReadBarrierOption>();
- }
return access_flags_.load(std::memory_order_relaxed);
}
@@ -172,14 +166,12 @@ class ArtMethod FINAL {
return (GetAccessFlags() & synchonized) != 0;
}
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsFinal() {
- return (GetAccessFlags<kReadBarrierOption>() & kAccFinal) != 0;
+ return (GetAccessFlags() & kAccFinal) != 0;
}
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsIntrinsic() {
- return (GetAccessFlags<kReadBarrierOption>() & kAccIntrinsic) != 0;
+ return (GetAccessFlags() & kAccIntrinsic) != 0;
}
ALWAYS_INLINE void SetIntrinsic(uint32_t intrinsic) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -241,25 +233,22 @@ class ArtMethod FINAL {
}
// This is set by the class linker.
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsDefault() {
static_assert((kAccDefault & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
"kAccDefault conflicts with intrinsic modifier");
- return (GetAccessFlags<kReadBarrierOption>() & kAccDefault) != 0;
+ return (GetAccessFlags() & kAccDefault) != 0;
}
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsObsolete() {
- return (GetAccessFlags<kReadBarrierOption>() & kAccObsoleteMethod) != 0;
+ return (GetAccessFlags() & kAccObsoleteMethod) != 0;
}
void SetIsObsolete() {
AddAccessFlags(kAccObsoleteMethod);
}
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsNative() {
- return (GetAccessFlags<kReadBarrierOption>() & kAccNative) != 0;
+ return (GetAccessFlags() & kAccNative) != 0;
}
// Checks to see if the method was annotated with @dalvik.annotation.optimization.FastNative.
@@ -280,9 +269,8 @@ class ArtMethod FINAL {
return (GetAccessFlags() & mask) == mask;
}
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
bool IsAbstract() {
- return (GetAccessFlags<kReadBarrierOption>() & kAccAbstract) != 0;
+ return (GetAccessFlags() & kAccAbstract) != 0;
}
bool IsSynthetic() {
@@ -305,7 +293,7 @@ class ArtMethod FINAL {
void SetSkipAccessChecks() {
// SkipAccessChecks() is applicable only to non-native methods.
- DCHECK(!IsNative<kWithoutReadBarrier>());
+ DCHECK(!IsNative());
AddAccessFlags(kAccSkipAccessChecks);
}
@@ -317,13 +305,12 @@ class ArtMethod FINAL {
return (GetAccessFlags() & kAccPreviouslyWarm) != 0;
}
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
void SetPreviouslyWarm() {
- if (IsIntrinsic<kReadBarrierOption>()) {
+ if (IsIntrinsic()) {
// kAccPreviouslyWarm overlaps with kAccIntrinsicBits.
return;
}
- AddAccessFlags<kReadBarrierOption>(kAccPreviouslyWarm);
+ AddAccessFlags(kAccPreviouslyWarm);
}
// Should this method be run in the interpreter and count locks (e.g., failed structured-
@@ -384,21 +371,19 @@ class ArtMethod FINAL {
// Number of 32bit registers that would be required to hold all the arguments
static size_t NumArgRegisters(const StringPiece& shorty);
- ALWAYS_INLINE uint32_t GetDexMethodIndexUnchecked() {
+ ALWAYS_INLINE uint32_t GetDexMethodIndex() {
return dex_method_index_;
}
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE uint32_t GetDexMethodIndex() REQUIRES_SHARED(Locks::mutator_lock_);
void SetDexMethodIndex(uint32_t new_idx) {
// Not called within a transaction.
dex_method_index_ = new_idx;
}
- // Lookup the Class* from the type index into this method's dex cache.
+ // Lookup the Class from the type index into this method's dex cache.
ObjPtr<mirror::Class> LookupResolvedClassFromTypeIndex(dex::TypeIndex type_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Resolve the Class* from the type index into this method's dex cache.
+ // Resolve the Class from the type index into this method's dex cache.
ObjPtr<mirror::Class> ResolveClassFromTypeIndex(dex::TypeIndex type_idx)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -472,11 +457,7 @@ class ArtMethod FINAL {
}
ProfilingInfo* GetProfilingInfo(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_) {
- // Don't do a read barrier in the DCHECK() inside GetAccessFlags() called by IsNative(),
- // as GetProfilingInfo is called in places where the declaring class is treated as a weak
- // reference (accessing it with a read barrier would either prevent unloading the class,
- // or crash the runtime if the GC wants to unload it).
- if (UNLIKELY(IsNative<kWithoutReadBarrier>()) || UNLIKELY(IsProxyMethod())) {
+ if (UNLIKELY(IsNative()) || UNLIKELY(IsProxyMethod())) {
return nullptr;
}
return reinterpret_cast<ProfilingInfo*>(GetDataPtrSize(pointer_size));
@@ -513,15 +494,12 @@ class ArtMethod FINAL {
ArtMethod* GetCanonicalMethod(PointerSize pointer_size = kRuntimePointerSize)
REQUIRES_SHARED(Locks::mutator_lock_);
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ArtMethod* GetSingleImplementation(PointerSize pointer_size)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ ArtMethod* GetSingleImplementation(PointerSize pointer_size);
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE void SetSingleImplementation(ArtMethod* method, PointerSize pointer_size) {
- DCHECK(!IsNative<kReadBarrierOption>());
+ DCHECK(!IsNative());
// Non-abstract method's single implementation is just itself.
- DCHECK(IsAbstract<kReadBarrierOption>());
+ DCHECK(IsAbstract());
SetDataPtrSize(method, pointer_size);
}
@@ -713,7 +691,7 @@ class ArtMethod FINAL {
REQUIRES_SHARED(Locks::mutator_lock_);
// Update entry points by passing them through the visitor.
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
+ template <typename Visitor>
ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor, PointerSize pointer_size);
// Visit the individual members of an ArtMethod. Used by imgdiag.
@@ -833,8 +811,6 @@ class ArtMethod FINAL {
}
}
- template <ReadBarrierOption kReadBarrierOption> void GetAccessFlagsDCheck();
-
static inline bool IsValidIntrinsicUpdate(uint32_t modifier) {
return (((modifier & kAccIntrinsic) == kAccIntrinsic) &&
(((modifier & ~(kAccIntrinsic | kAccIntrinsicBits)) == 0)));
@@ -845,9 +821,8 @@ class ArtMethod FINAL {
}
// This setter guarantees atomicity.
- template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
void AddAccessFlags(uint32_t flag) {
- DCHECK(!IsIntrinsic<kReadBarrierOption>() ||
+ DCHECK(!IsIntrinsic() ||
!OverlapsIntrinsicBits(flag) ||
IsValidIntrinsicUpdate(flag));
uint32_t old_access_flags;
diff --git a/runtime/backtrace_helper.cc b/runtime/backtrace_helper.cc
new file mode 100644
index 0000000000..21a0568033
--- /dev/null
+++ b/runtime/backtrace_helper.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "backtrace_helper.h"
+
+#if defined(__linux__)
+
+#include <backtrace/Backtrace.h>
+#include <backtrace/BacktraceMap.h>
+
+#include <unistd.h>
+#include <sys/types.h>
+
+#include "thread-inl.h"
+
+#else
+
+// For UNUSED
+#include "base/macros.h"
+
+#endif
+
+namespace art {
+
+// We only really support libbacktrace on linux which is unfortunate but since this is only for
+// gcstress this isn't a huge deal.
+#if defined(__linux__)
+
+static const char* kBacktraceCollectorTlsKey = "BacktraceCollectorTlsKey";
+
+struct BacktraceMapHolder : public TLSData {
+ BacktraceMapHolder() : map_(BacktraceMap::Create(getpid())) {}
+
+ std::unique_ptr<BacktraceMap> map_;
+};
+
+static BacktraceMap* GetMap(Thread* self) {
+ BacktraceMapHolder* map_holder =
+ reinterpret_cast<BacktraceMapHolder*>(self->GetCustomTLS(kBacktraceCollectorTlsKey));
+ if (map_holder == nullptr) {
+ map_holder = new BacktraceMapHolder;
+ // We don't care about the function names. Turning this off makes everything significantly
+ // faster.
+ map_holder->map_->SetResolveNames(false);
+ // Only created and queried on Thread::Current so no sync needed.
+ self->SetCustomTLS(kBacktraceCollectorTlsKey, map_holder);
+ }
+
+ return map_holder->map_.get();
+}
+
+void BacktraceCollector::Collect() {
+ std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
+ BACKTRACE_CURRENT_THREAD,
+ GetMap(Thread::Current())));
+ backtrace->SetSkipFrames(true);
+ if (!backtrace->Unwind(skip_count_, nullptr)) {
+ return;
+ }
+ for (Backtrace::const_iterator it = backtrace->begin();
+ max_depth_ > num_frames_ && it != backtrace->end();
+ ++it) {
+ out_frames_[num_frames_++] = static_cast<uintptr_t>(it->pc);
+ }
+}
+
+#else
+
+#pragma clang diagnostic push
+#pragma clang diagnostic warning "-W#warnings"
+#warning "Backtrace collector is not implemented. GCStress cannot be used."
+#pragma clang diagnostic pop
+
+// We only have an implementation for linux. On other platforms just return nothing. This is not
+// really correct but we only use this for hashing and gcstress so it's not too big a deal.
+void BacktraceCollector::Collect() {
+ UNUSED(skip_count_);
+ UNUSED(out_frames_);
+ UNUSED(max_depth_);
+ num_frames_ = 0;
+}
+
+#endif
+
+} // namespace art
diff --git a/runtime/backtrace_helper.h b/runtime/backtrace_helper.h
index ace118c50b..8eda3fa0a1 100644
--- a/runtime/backtrace_helper.h
+++ b/runtime/backtrace_helper.h
@@ -17,11 +17,12 @@
#ifndef ART_RUNTIME_BACKTRACE_HELPER_H_
#define ART_RUNTIME_BACKTRACE_HELPER_H_
-#include <unwind.h>
+#include <stddef.h>
+#include <stdint.h>
namespace art {
-// Based on debug malloc logic from libc/bionic/debug_stacktrace.cpp.
+// Using libbacktrace
class BacktraceCollector {
public:
BacktraceCollector(uintptr_t* out_frames, size_t max_depth, size_t skip_count)
@@ -32,25 +33,9 @@ class BacktraceCollector {
}
// Collect the backtrace, do not call more than once.
- void Collect() {
- _Unwind_Backtrace(&Callback, this);
- }
+ void Collect();
private:
- static _Unwind_Reason_Code Callback(_Unwind_Context* context, void* arg) {
- auto* const state = reinterpret_cast<BacktraceCollector*>(arg);
- const uintptr_t ip = _Unwind_GetIP(context);
- // The first stack frame is get_backtrace itself. Skip it.
- if (ip != 0 && state->skip_count_ > 0) {
- --state->skip_count_;
- return _URC_NO_REASON;
- }
- // ip may be off for ARM but it shouldn't matter since we only use it for hashing.
- state->out_frames_[state->num_frames_] = ip;
- state->num_frames_++;
- return state->num_frames_ >= state->max_depth_ ? _URC_END_OF_STACK : _URC_NO_REASON;
- }
-
uintptr_t* const out_frames_ = nullptr;
size_t num_frames_ = 0u;
const size_t max_depth_ = 0u;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index da286d7e41..dd58d75a32 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -42,6 +42,7 @@ Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
+Mutex* Locks::custom_tls_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
@@ -1057,6 +1058,7 @@ void Locks::Init() {
DCHECK(allocated_thread_ids_lock_ != nullptr);
DCHECK(breakpoint_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
+ DCHECK(custom_tls_lock_ != nullptr);
DCHECK(deoptimization_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
DCHECK(oat_file_manager_lock_ != nullptr);
@@ -1220,6 +1222,10 @@ void Locks::Init() {
DCHECK(jni_function_table_lock_ == nullptr);
jni_function_table_lock_ = new Mutex("JNI function table lock", current_lock_level);
+ UPDATE_CURRENT_LOCK_LEVEL(kCustomTlsLock);
+ DCHECK(custom_tls_lock_ == nullptr);
+ custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
DCHECK(native_debug_interface_lock_ == nullptr);
native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index ced0cb1d91..af2e7b2763 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -78,6 +78,7 @@ enum LockLevel : uint8_t {
kRosAllocBulkFreeLock,
kTaggingLockLevel,
kTransactionLogLock,
+ kCustomTlsLock,
kJniFunctionTableLock,
kJniWeakGlobalsLock,
kJniGlobalsLock,
@@ -738,14 +739,20 @@ class Locks {
// Guard accesses to the JNI function table override.
static Mutex* jni_function_table_lock_ ACQUIRED_AFTER(jni_weak_globals_lock_);
+ // Guard accesses to the Thread::custom_tls_. We use this to allow the TLS of other threads to be
+ // read (the reader must hold the ThreadListLock or have some other way of ensuring the thread
+ // will not die in that case though). This is useful for (eg) the implementation of
+ // GetThreadLocalStorage.
+ static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
+
// When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
// actually only encodes the mutex being below jni_function_table_lock_ although having
// kGenericBottomLock level is lower than this.
- #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::jni_function_table_lock_)
+ #define BOTTOM_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(art::Locks::custom_tls_lock_)
// Have an exclusive aborting thread.
- static Mutex* abort_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
+ static Mutex* abort_lock_ ACQUIRED_AFTER(custom_tls_lock_);
// Allow mutual exclusion when manipulating Thread::suspend_count_.
// TODO: Does the trade-off of a per-thread lock make sense?
diff --git a/runtime/cha.cc b/runtime/cha.cc
index f2e6a7314e..ccbe066ed6 100644
--- a/runtime/cha.cc
+++ b/runtime/cha.cc
@@ -142,12 +142,12 @@ void ClassHierarchyAnalysis::ResetSingleImplementationInHierarchy(ObjPtr<mirror:
ArtMethod* super_method = super_it->
GetVTableEntry<kDefaultVerifyFlags, kWithoutReadBarrier>(vtbl_index, pointer_size);
- if (super_method->IsAbstract<kWithoutReadBarrier>() &&
+ if (super_method->IsAbstract() &&
super_method->HasSingleImplementation<kWithoutReadBarrier>() &&
- super_method->GetSingleImplementation<kWithoutReadBarrier>(pointer_size) == method) {
+ super_method->GetSingleImplementation(pointer_size) == method) {
// Do like there was no single implementation defined previously
// for this method of the superclass.
- super_method->SetSingleImplementation<kWithoutReadBarrier>(nullptr, pointer_size);
+ super_method->SetSingleImplementation(nullptr, pointer_size);
} else {
// No related SingleImplementations could possibly be found any further.
DCHECK(!super_method->HasSingleImplementation<kWithoutReadBarrier>());
@@ -168,11 +168,10 @@ void ClassHierarchyAnalysis::ResetSingleImplementationInHierarchy(ObjPtr<mirror:
++j) {
ArtMethod* method = interface->GetVirtualMethod(j, pointer_size);
if (method->HasSingleImplementation<kWithoutReadBarrier>() &&
- alloc->ContainsUnsafe(
- method->GetSingleImplementation<kWithoutReadBarrier>(pointer_size)) &&
- !method->IsDefault<kWithoutReadBarrier>()) {
+ alloc->ContainsUnsafe(method->GetSingleImplementation(pointer_size)) &&
+ !method->IsDefault()) {
// Do like there was no single implementation defined previously for this method.
- method->SetSingleImplementation<kWithoutReadBarrier>(nullptr, pointer_size);
+ method->SetSingleImplementation(nullptr, pointer_size);
}
}
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 966d636f62..f80d34ca2f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -7150,7 +7150,7 @@ ObjPtr<mirror::PointerArray> ClassLinker::LinkInterfaceMethodsHelper::UpdateVtab
vtable->SetElementPtrSize(i, translated_method, pointer_size);
}
}
- klass_->SetVTable(vtable.Ptr());
+ klass_->SetVTable(vtable);
return vtable;
}
@@ -8760,7 +8760,7 @@ void ClassLinker::VisitClassLoaders(ClassLoaderVisitor* visitor) const {
ObjPtr<mirror::ClassLoader> class_loader = ObjPtr<mirror::ClassLoader>::DownCast(
self->DecodeJObject(data.weak_root));
if (class_loader != nullptr) {
- visitor->Visit(class_loader.Ptr());
+ visitor->Visit(class_loader);
}
}
}
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index d21973ec7e..234b66a862 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -191,12 +191,30 @@ class CheckJniAbortCatcher {
DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
};
+#define TEST_DISABLED_FOR_ARM() \
+ if (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kThumb2) { \
+ printf("WARNING: TEST DISABLED FOR ARM\n"); \
+ return; \
+ }
+
+#define TEST_DISABLED_FOR_ARM64() \
+ if (kRuntimeISA == InstructionSet::kArm64) { \
+ printf("WARNING: TEST DISABLED FOR ARM64\n"); \
+ return; \
+ }
+
#define TEST_DISABLED_FOR_MIPS() \
if (kRuntimeISA == InstructionSet::kMips) { \
printf("WARNING: TEST DISABLED FOR MIPS\n"); \
return; \
}
+#define TEST_DISABLED_FOR_MIPS64() \
+ if (kRuntimeISA == InstructionSet::kMips64) { \
+ printf("WARNING: TEST DISABLED FOR MIPS64\n"); \
+ return; \
+ }
+
#define TEST_DISABLED_FOR_X86() \
if (kRuntimeISA == InstructionSet::kX86) { \
printf("WARNING: TEST DISABLED FOR X86\n"); \
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index f75f47c075..e607b31e68 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1484,7 +1484,7 @@ void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t d
if (m == nullptr) {
memset(location, 0, sizeof(*location));
} else {
- mirror::Class* c = m->GetDeclaringClass();
+ ObjPtr<mirror::Class> c = m->GetDeclaringClass();
location->type_tag = GetTypeTag(c);
location->class_id = gRegistry->AddRefType(c);
// The RI Seems to return 0 for all obsolete methods. For compatibility we shall do the same.
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index 9358cbe5a9..b87bf8db1f 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -728,7 +728,7 @@ ObjPtr<mirror::Object> CreateAnnotationMember(const ClassData& klass,
ObjPtr<mirror::Class> annotation_member_class =
WellKnownClasses::ToClass(WellKnownClasses::libcore_reflect_AnnotationMember);
Handle<mirror::Object> new_member(hs.NewHandle(annotation_member_class->AllocObject(self)));
- mirror::Method* method_obj_ptr;
+ ObjPtr<mirror::Method> method_obj_ptr;
DCHECK(!Runtime::Current()->IsActiveTransaction());
if (pointer_size == PointerSize::k64) {
method_obj_ptr = mirror::Method::CreateFromArtMethod<PointerSize::k64, false>(
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 40ef10f904..0ed26d37c0 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -255,7 +255,7 @@ inline mirror::Class* CheckArrayAlloc(dex::TypeIndex type_idx,
CHECK(klass->IsArrayClass()) << klass->PrettyClass();
}
if (kAccessCheck) {
- mirror::Class* referrer = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> referrer = method->GetDeclaringClass();
if (UNLIKELY(!referrer->CanAccess(klass))) {
ThrowIllegalAccessErrorClass(referrer, klass);
*slow_path = true;
@@ -366,7 +366,7 @@ inline ArtField* FindFieldFromCode(uint32_t field_idx,
ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, referrer);
return nullptr;
}
- mirror::Class* referring_class = referrer->GetDeclaringClass();
+ ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class,
resolved_field,
referrer->GetDexCache(),
@@ -721,7 +721,7 @@ inline ObjPtr<mirror::Class> ResolveVerifyAndClinit(dex::TypeIndex type_idx,
return nullptr; // Failure - Indicate to caller to deliver exception
}
// Perform access check if necessary.
- mirror::Class* referring_class = referrer->GetDeclaringClass();
+ ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
if (verify_access && UNLIKELY(!referring_class->CanAccess(klass))) {
ThrowIllegalAccessErrorClass(referring_class, klass);
return nullptr; // Failure - Indicate to caller to deliver exception
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 0c61965908..a5ebce5f5b 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -201,7 +201,7 @@ static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method,
DCHECK(current_code != nullptr);
DCHECK(current_code->IsOptimized());
uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
- CodeInfo code_info(current_code);
+ CodeInfo code_info(current_code, CodeInfo::DecodeFlags::InlineInfoOnly);
MethodInfo method_info = current_code->GetOptimizedMethodInfo();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
DCHECK(stack_map.IsValid());
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 505e183ced..be4e4e613c 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -343,7 +343,7 @@ class QuickArgumentVisitor {
uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
if (current_code->IsOptimized()) {
- CodeInfo code_info(current_code);
+ CodeInfo code_info(current_code, CodeInfo::DecodeFlags::InlineInfoOnly);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
DCHECK(stack_map.IsValid());
BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
@@ -2146,7 +2146,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
sm_.AdvancePointer(self->GetJniEnv());
if (is_static) {
- sm_.AdvanceHandleScope((**sp)->GetDeclaringClass());
+ sm_.AdvanceHandleScope((**sp)->GetDeclaringClass().Ptr());
} // else "this" reference is already handled by QuickArgumentVisitor.
}
}
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 671079b128..1ab0b0e652 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -70,7 +70,7 @@ static mirror::Class* SafeGetDeclaringClass(ArtMethod* method)
CHECK_NE(-1, rc);
if (kVerifySafeImpls) {
- mirror::Class* actual_class = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
+ ObjPtr<mirror::Class> actual_class = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
CHECK_EQ(actual_class, cls.AsMirrorPtr());
}
@@ -115,7 +115,7 @@ static bool SafeVerifyClassClass(mirror::Class* cls) REQUIRES_SHARED(Locks::muta
static mirror::Class* SafeGetDeclaringClass(ArtMethod* method_obj)
REQUIRES_SHARED(Locks::mutator_lock_) {
- return method_obj->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
+ return method_obj->GetDeclaringClassUnchecked<kWithoutReadBarrier>().Ptr();
}
static bool SafeVerifyClassClass(mirror::Class* cls) REQUIRES_SHARED(Locks::mutator_lock_) {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index cbce940337..826f382f72 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -618,10 +618,21 @@ class ImageSpaceLoader {
const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
kPageSize);
const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
- if (end_of_bitmap != image_file_size) {
+ const ImageSection& relocations_section = image_header->GetImageRelocationsSection();
+ if (relocations_section.Offset() != bitmap_section.Offset() + bitmap_section.Size()) {
*error_msg = StringPrintf(
- "Image file size does not equal end of bitmap: size=%" PRIu64 " vs. %zu.", image_file_size,
- end_of_bitmap);
+ "Relocations do not start immediately after bitmap: %u vs. %u + %u.",
+ relocations_section.Offset(),
+ bitmap_section.Offset(),
+ bitmap_section.Size());
+ return nullptr;
+ }
+ const size_t end_of_relocations = end_of_bitmap + relocations_section.Size();
+ if (end_of_relocations != image_file_size) {
+ *error_msg = StringPrintf(
+ "Image file size does not equal end of relocations: size=%" PRIu64 " vs. %zu.",
+ image_file_size,
+ end_of_relocations);
return nullptr;
}
@@ -1163,7 +1174,7 @@ class ImageSpaceLoader {
if (fixup_heap_objects_) {
method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this));
}
- method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this), pointer_size_);
+ method->UpdateEntrypoints(ForwardCodeAdapter(this), pointer_size_);
}
}
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index c9e8426340..dc42cfa4fe 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -718,7 +718,7 @@ class Hprof : public SingleRootVisitor {
source_file = "";
}
__ AddStringId(LookupStringId(source_file));
- auto class_result = classes_.find(method->GetDeclaringClass());
+ auto class_result = classes_.find(method->GetDeclaringClass().Ptr());
CHECK(class_result != classes_.end());
__ AddU4(class_result->second);
__ AddU4(frame->ComputeLineNumber());
diff --git a/runtime/image.cc b/runtime/image.cc
index 7819c0bc00..028c515c91 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -26,7 +26,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '2', '\0' }; // Boot image live objects.
+const uint8_t ImageHeader::kImageVersion[] = { '0', '6', '3', '\0' }; // Image relocations.
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
@@ -135,11 +135,6 @@ ArtMethod* ImageHeader::GetImageMethod(ImageMethod index) const {
return reinterpret_cast<ArtMethod*>(image_methods_[index]);
}
-void ImageHeader::SetImageMethod(ImageMethod index, ArtMethod* method) {
- CHECK_LT(static_cast<size_t>(index), kImageMethodsCount);
- image_methods_[index] = reinterpret_cast<uint64_t>(method);
-}
-
std::ostream& operator<<(std::ostream& os, const ImageSection& section) {
return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End();
}
diff --git a/runtime/image.h b/runtime/image.h
index c1cde0a74a..af092ad3fe 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -230,6 +230,7 @@ class PACKED(4) ImageHeader {
kSectionInternedStrings,
kSectionClassTable,
kSectionImageBitmap,
+ kSectionImageRelocations,
kSectionCount, // Number of elements in enum.
};
@@ -240,7 +241,6 @@ class PACKED(4) ImageHeader {
}
ArtMethod* GetImageMethod(ImageMethod index) const;
- void SetImageMethod(ImageMethod index, ArtMethod* method);
const ImageSection& GetImageSection(ImageSections index) const {
DCHECK_LT(static_cast<size_t>(index), kSectionCount);
@@ -287,6 +287,10 @@ class PACKED(4) ImageHeader {
return GetImageSection(kSectionImageBitmap);
}
+ const ImageSection& GetImageRelocationsSection() const {
+ return GetImageSection(kSectionImageRelocations);
+ }
+
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ObjPtr<mirror::Object> GetImageRoot(ImageRoot image_root) const
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index c9127d6987..5ba3e189ba 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -39,7 +39,7 @@ class ImageSpace;
enum VisitRootFlags : uint8_t;
namespace linker {
-class OatWriter;
+class ImageWriter;
} // namespace linker
namespace mirror {
@@ -227,6 +227,7 @@ class InternTable {
// modifying the zygote intern table. The back of table is modified when strings are interned.
std::vector<UnorderedSet> tables_;
+ friend class linker::ImageWriter;
ART_FRIEND_TEST(InternTableTest, CrossHash);
};
@@ -286,6 +287,7 @@ class InternTable {
// Weak root state, used for concurrent system weak processing and more.
gc::WeakRootState weak_root_state_ GUARDED_BY(Locks::intern_table_lock_);
+ friend class linker::ImageWriter;
friend class Transaction;
ART_FRIEND_TEST(InternTableTest, CrossHash);
DISALLOW_COPY_AND_ASSIGN(InternTable);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index eeb35156b5..9aa05561f0 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -413,9 +413,6 @@ uint8_t* JitCodeCache::CommitCode(Thread* self,
uint8_t* stack_map,
uint8_t* method_info,
uint8_t* roots_data,
- size_t frame_size_in_bytes,
- size_t core_spill_mask,
- size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
size_t data_size,
@@ -428,9 +425,6 @@ uint8_t* JitCodeCache::CommitCode(Thread* self,
stack_map,
method_info,
roots_data,
- frame_size_in_bytes,
- core_spill_mask,
- fp_spill_mask,
code,
code_size,
data_size,
@@ -446,9 +440,6 @@ uint8_t* JitCodeCache::CommitCode(Thread* self,
stack_map,
method_info,
roots_data,
- frame_size_in_bytes,
- core_spill_mask,
- fp_spill_mask,
code,
code_size,
data_size,
@@ -744,11 +735,7 @@ void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic,
static void ClearMethodCounter(ArtMethod* method, bool was_warm) {
if (was_warm) {
- // Don't do any read barrier, as the declaring class of `method` may
- // be in the process of being GC'ed (reading the declaring class is done
- // when DCHECKing the declaring class is resolved, which we know it is
- // at this point).
- method->SetPreviouslyWarm<kWithoutReadBarrier>();
+ method->SetPreviouslyWarm();
}
// We reset the counter to 1 so that the profile knows that the method was executed at least once.
// This is required for layout purposes.
@@ -763,9 +750,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
uint8_t* stack_map,
uint8_t* method_info,
uint8_t* roots_data,
- size_t frame_size_in_bytes,
- size_t core_spill_mask,
- size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
size_t data_size,
@@ -800,9 +784,6 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
new (method_header) OatQuickMethodHeader(
(stack_map != nullptr) ? code_ptr - stack_map : 0u,
(method_info != nullptr) ? code_ptr - method_info : 0u,
- frame_size_in_bytes,
- core_spill_mask,
- fp_spill_mask,
code_size);
// Flush caches before we remove write permission because some ARMv8 Qualcomm kernels may
// trigger a segfault if a page fault occurs when requesting a cache maintenance operation.
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 49a19a18f1..d17fb261b6 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -138,9 +138,6 @@ class JitCodeCache {
uint8_t* stack_map,
uint8_t* method_info,
uint8_t* roots_data,
- size_t frame_size_in_bytes,
- size_t core_spill_mask,
- size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
size_t data_size,
@@ -300,9 +297,6 @@ class JitCodeCache {
uint8_t* stack_map,
uint8_t* method_info,
uint8_t* roots_data,
- size_t frame_size_in_bytes,
- size_t core_spill_mask,
- size_t fp_spill_mask,
const uint8_t* code,
size_t code_size,
size_t data_size,
diff --git a/runtime/jni/check_jni.cc b/runtime/jni/check_jni.cc
index 7919c32737..66bd74b504 100644
--- a/runtime/jni/check_jni.cc
+++ b/runtime/jni/check_jni.cc
@@ -1286,7 +1286,7 @@ class ScopedCheck {
}
ArtMethod* m = jni::DecodeArtMethod(mid);
// TODO: Better check here.
- if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m->GetDeclaringClass())) {
+ if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m->GetDeclaringClass().Ptr())) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(ERROR));
AbortF("invalid jmethodID: %p", mid);
return nullptr;
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index 44679a5afa..fdf0feec14 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -1078,7 +1078,7 @@ static void* FindCodeForNativeMethodInAgents(ArtMethod* m) REQUIRES_SHARED(Locks
void* JavaVMExt::FindCodeForNativeMethod(ArtMethod* m) {
CHECK(m->IsNative());
- mirror::Class* c = m->GetDeclaringClass();
+ ObjPtr<mirror::Class> c = m->GetDeclaringClass();
// If this is a static method, it could be called before the class has been initialized.
CHECK(c->IsInitializing()) << c->GetStatus() << " " << m->PrettyMethod();
std::string detail;
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index a02e76ae54..5200607e9b 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -501,7 +501,7 @@ class JNI {
CHECK_NON_NULL_ARGUMENT(mid);
ScopedObjectAccess soa(env);
ArtMethod* m = jni::DecodeArtMethod(mid);
- mirror::Executable* method;
+ ObjPtr<mirror::Executable> method;
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
if (m->IsConstructor()) {
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index fffd7f3062..bc72517a06 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -331,11 +331,11 @@ inline void Class::SetEmbeddedVTableLength(int32_t len) {
}
inline ImTable* Class::GetImt(PointerSize pointer_size) {
- return GetFieldPtrWithSize<ImTable*>(MemberOffset(ImtPtrOffset(pointer_size)), pointer_size);
+ return GetFieldPtrWithSize<ImTable*>(ImtPtrOffset(pointer_size), pointer_size);
}
inline void Class::SetImt(ImTable* imt, PointerSize pointer_size) {
- return SetFieldPtrWithSize<false>(MemberOffset(ImtPtrOffset(pointer_size)), imt, pointer_size);
+ return SetFieldPtrWithSize<false>(ImtPtrOffset(pointer_size), imt, pointer_size);
}
inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size) {
@@ -1070,20 +1070,26 @@ template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption,
inline void Class::FixupNativePointers(Class* dest,
PointerSize pointer_size,
const Visitor& visitor) {
+ auto dest_address_fn = [dest](MemberOffset offset) {
+ return reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(dest) + offset.Uint32Value());
+ };
// Update the field arrays.
LengthPrefixedArray<ArtField>* const sfields = GetSFieldsPtr();
- LengthPrefixedArray<ArtField>* const new_sfields = visitor(sfields);
+ void** sfields_dest_address = dest_address_fn(OFFSET_OF_OBJECT_MEMBER(Class, sfields_));
+ LengthPrefixedArray<ArtField>* const new_sfields = visitor(sfields, sfields_dest_address);
if (sfields != new_sfields) {
dest->SetSFieldsPtrUnchecked(new_sfields);
}
LengthPrefixedArray<ArtField>* const ifields = GetIFieldsPtr();
- LengthPrefixedArray<ArtField>* const new_ifields = visitor(ifields);
+ void** ifields_dest_address = dest_address_fn(OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
+ LengthPrefixedArray<ArtField>* const new_ifields = visitor(ifields, ifields_dest_address);
if (ifields != new_ifields) {
dest->SetIFieldsPtrUnchecked(new_ifields);
}
// Update method array.
LengthPrefixedArray<ArtMethod>* methods = GetMethodsPtr();
- LengthPrefixedArray<ArtMethod>* new_methods = visitor(methods);
+ void** methods_dest_address = dest_address_fn(OFFSET_OF_OBJECT_MEMBER(Class, methods_));
+ LengthPrefixedArray<ArtMethod>* new_methods = visitor(methods, methods_dest_address);
if (methods != new_methods) {
dest->SetMethodsPtrInternal(new_methods);
}
@@ -1091,16 +1097,18 @@ inline void Class::FixupNativePointers(Class* dest,
if (!IsTemp() && ShouldHaveEmbeddedVTable<kVerifyNone, kReadBarrierOption>()) {
for (int32_t i = 0, count = GetEmbeddedVTableLength(); i < count; ++i) {
ArtMethod* method = GetEmbeddedVTableEntry(i, pointer_size);
- void** dest_addr = reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(dest) +
- EmbeddedVTableEntryOffset(i, pointer_size).Uint32Value());
- ArtMethod* new_method = visitor(method, dest_addr);
+ void** method_dest_addr = dest_address_fn(EmbeddedVTableEntryOffset(i, pointer_size));
+ ArtMethod* new_method = visitor(method, method_dest_addr);
if (method != new_method) {
dest->SetEmbeddedVTableEntryUnchecked(i, new_method, pointer_size);
}
}
}
if (!IsTemp() && ShouldHaveImt<kVerifyNone, kReadBarrierOption>()) {
- dest->SetImt(visitor(GetImt(pointer_size)), pointer_size);
+ ImTable* imt = GetImt(pointer_size);
+ void** imt_dest_addr = dest_address_fn(ImtPtrOffset(pointer_size));
+ ImTable* new_imt = visitor(imt, imt_dest_addr);
+ dest->SetImt(new_imt, pointer_size);
}
}
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index faec6e6bf8..bbe15ac1bb 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -257,7 +257,7 @@ NativeDexCachePair<T> DexCache::GetNativePairPtrSize(std::atomic<NativeDexCacheP
} else {
auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
ConversionPair32 value = array[idx].load(std::memory_order_relaxed);
- return NativeDexCachePair<T>(reinterpret_cast<T*>(value.first), value.second);
+ return NativeDexCachePair<T>(reinterpret_cast32<T*>(value.first), value.second);
}
}
@@ -272,9 +272,8 @@ void DexCache::SetNativePairPtrSize(std::atomic<NativeDexCachePair<T>>* pair_arr
AtomicStoreRelease16B(&array[idx], v);
} else {
auto* array = reinterpret_cast<std::atomic<ConversionPair32>*>(pair_array);
- ConversionPair32 v(
- dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(pair.object)),
- dchecked_integral_cast<uint32_t>(pair.index));
+ ConversionPair32 v(reinterpret_cast32<uint32_t>(pair.object),
+ dchecked_integral_cast<uint32_t>(pair.index));
array[idx].store(v, std::memory_order_release);
}
}
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 941248edf7..ab5fb85dc0 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -27,11 +27,14 @@
namespace art {
+namespace linker {
+class ImageWriter;
+} // namespace linker
+
class ArtField;
class ArtMethod;
struct DexCacheOffsets;
class DexFile;
-class ImageWriter;
union JValue;
class LinearAlloc;
class Thread;
@@ -539,6 +542,7 @@ class MANAGED DexCache FINAL : public Object {
uint32_t num_strings_; // Number of elements in the strings_ array.
friend struct art::DexCacheOffsets; // for verifying offset information
+ friend class linker::ImageWriter;
friend class Object; // For VisitReferences
DISALLOW_IMPLICIT_CONSTRUCTORS(DexCache);
};
diff --git a/runtime/mirror/executable.h b/runtime/mirror/executable.h
index 23dd787c80..bf66d7952a 100644
--- a/runtime/mirror/executable.h
+++ b/runtime/mirror/executable.h
@@ -42,6 +42,10 @@ class MANAGED Executable : public AccessibleObject {
void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static MemberOffset ArtMethodOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(Executable, art_method_));
+ }
+
private:
uint16_t has_real_parameter_data_;
HeapReference<mirror::Class> declaring_class_;
@@ -51,9 +55,6 @@ class MANAGED Executable : public AccessibleObject {
uint32_t access_flags_;
uint32_t dex_method_index_;
- static MemberOffset ArtMethodOffset() {
- return MemberOffset(OFFSETOF_MEMBER(Executable, art_method_));
- }
static MemberOffset DeclaringClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(Executable, declaring_class_));
}
diff --git a/runtime/mirror/method.cc b/runtime/mirror/method.cc
index cf03b95d5e..910a1fc821 100644
--- a/runtime/mirror/method.cc
+++ b/runtime/mirror/method.cc
@@ -20,32 +20,33 @@
#include "class_root.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
namespace art {
namespace mirror {
template <PointerSize kPointerSize, bool kTransactionActive>
-Method* Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
+ObjPtr<Method> Method::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(!method->IsConstructor()) << method->PrettyMethod();
ObjPtr<Method> ret = ObjPtr<Method>::DownCast(GetClassRoot<Method>()->AllocObject(self));
if (LIKELY(ret != nullptr)) {
ObjPtr<Executable>(ret)->
CreateFromArtMethod<kPointerSize, kTransactionActive>(method);
}
- return ret.Ptr();
+ return ret;
}
-template Method* Method::CreateFromArtMethod<PointerSize::k32, false>(Thread* self,
- ArtMethod* method);
-template Method* Method::CreateFromArtMethod<PointerSize::k32, true>(Thread* self,
- ArtMethod* method);
-template Method* Method::CreateFromArtMethod<PointerSize::k64, false>(Thread* self,
- ArtMethod* method);
-template Method* Method::CreateFromArtMethod<PointerSize::k64, true>(Thread* self,
- ArtMethod* method);
+template ObjPtr<Method> Method::CreateFromArtMethod<PointerSize::k32, false>(
+ Thread* self, ArtMethod* method);
+template ObjPtr<Method> Method::CreateFromArtMethod<PointerSize::k32, true>(
+ Thread* self, ArtMethod* method);
+template ObjPtr<Method> Method::CreateFromArtMethod<PointerSize::k64, false>(
+ Thread* self, ArtMethod* method);
+template ObjPtr<Method> Method::CreateFromArtMethod<PointerSize::k64, true>(
+ Thread* self, ArtMethod* method);
template <PointerSize kPointerSize, bool kTransactionActive>
-Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) {
+ObjPtr<Constructor> Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) {
DCHECK(method->IsConstructor()) << method->PrettyMethod();
ObjPtr<Constructor> ret =
ObjPtr<Constructor>::DownCast(GetClassRoot<Constructor>()->AllocObject(self));
@@ -53,16 +54,16 @@ Constructor* Constructor::CreateFromArtMethod(Thread* self, ArtMethod* method) {
ObjPtr<Executable>(ret)->
CreateFromArtMethod<kPointerSize, kTransactionActive>(method);
}
- return ret.Ptr();
+ return ret;
}
-template Constructor* Constructor::CreateFromArtMethod<PointerSize::k32, false>(
+template ObjPtr<Constructor> Constructor::CreateFromArtMethod<PointerSize::k32, false>(
Thread* self, ArtMethod* method);
-template Constructor* Constructor::CreateFromArtMethod<PointerSize::k32, true>(
+template ObjPtr<Constructor> Constructor::CreateFromArtMethod<PointerSize::k32, true>(
Thread* self, ArtMethod* method);
-template Constructor* Constructor::CreateFromArtMethod<PointerSize::k64, false>(
+template ObjPtr<Constructor> Constructor::CreateFromArtMethod<PointerSize::k64, false>(
Thread* self, ArtMethod* method);
-template Constructor* Constructor::CreateFromArtMethod<PointerSize::k64, true>(
+template ObjPtr<Constructor> Constructor::CreateFromArtMethod<PointerSize::k64, true>(
Thread* self, ArtMethod* method);
} // namespace mirror
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index aea15a7748..a73cd45ca4 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -20,6 +20,9 @@
#include "executable.h"
namespace art {
+
+template<class MirrorType> class ObjPtr;
+
namespace mirror {
class Class;
@@ -28,7 +31,7 @@ class Class;
class MANAGED Method : public Executable {
public:
template <PointerSize kPointerSize, bool kTransactionActive>
- static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
+ static ObjPtr<Method> CreateFromArtMethod(Thread* self, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
private:
@@ -39,7 +42,7 @@ class MANAGED Method : public Executable {
class MANAGED Constructor: public Executable {
public:
template <PointerSize kPointerSize, bool kTransactionActive>
- static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
+ static ObjPtr<Constructor> CreateFromArtMethod(Thread* self, ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
private:
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index c7cffed69b..2801928240 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -542,7 +542,7 @@ class MANAGED LOCKABLE Object {
void SetFieldPtr64(MemberOffset field_offset, T new_value)
REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, new_value, 8u);
+ field_offset, new_value, PointerSize::k64);
}
template<bool kTransactionActive,
@@ -554,10 +554,8 @@ class MANAGED LOCKABLE Object {
PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (pointer_size == PointerSize::k32) {
- uintptr_t ptr = reinterpret_cast<uintptr_t>(new_value);
- DCHECK_EQ(static_cast<uint32_t>(ptr), ptr); // Check that we dont lose any non 0 bits.
SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, static_cast<int32_t>(static_cast<uint32_t>(ptr)));
+ field_offset, reinterpret_cast32<int32_t>(new_value));
} else {
SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
field_offset, reinterpret_cast64<int64_t>(new_value));
@@ -658,8 +656,8 @@ class MANAGED LOCKABLE Object {
ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, PointerSize pointer_size)
REQUIRES_SHARED(Locks::mutator_lock_) {
if (pointer_size == PointerSize::k32) {
- uint64_t address = static_cast<uint32_t>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
- return reinterpret_cast<T>(static_cast<uintptr_t>(address));
+ int32_t v = GetField32<kVerifyFlags, kIsVolatile>(field_offset);
+ return reinterpret_cast32<T>(v);
} else {
int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
return reinterpret_cast64<T>(v);
diff --git a/runtime/mirror/var_handle.h b/runtime/mirror/var_handle.h
index 48c9d74e30..0cfa51c042 100644
--- a/runtime/mirror/var_handle.h
+++ b/runtime/mirror/var_handle.h
@@ -197,11 +197,6 @@ class MANAGED FieldVarHandle : public VarHandle {
ArtField* GetField() REQUIRES_SHARED(Locks::mutator_lock_);
- static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
-
private:
static MemberOffset ArtFieldOffset() {
return MemberOffset(OFFSETOF_MEMBER(FieldVarHandle, art_field_));
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 82e54e2f4c..5a5fb16d0c 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -544,8 +544,8 @@ static jobjectArray Class_getDeclaredConstructorsInternal(
if (MethodMatchesConstructor(&m, public_only, enforce_hidden_api)) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
- auto* constructor = mirror::Constructor::CreateFromArtMethod<kRuntimePointerSize, false>(
- soa.Self(), &m);
+ ObjPtr<mirror::Constructor> constructor =
+ mirror::Constructor::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), &m);
if (UNLIKELY(constructor == nullptr)) {
soa.Self()->AssertPendingOOMException();
return nullptr;
@@ -605,7 +605,7 @@ static jobjectArray Class_getDeclaredMethodsUnchecked(JNIEnv* env, jobject javaT
IsDiscoverable(public_only, enforce_hidden_api, &m)) {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
DCHECK(!Runtime::Current()->IsActiveTransaction());
- auto* method =
+ ObjPtr<mirror::Method> method =
mirror::Method::CreateFromArtMethod<kRuntimePointerSize, false>(soa.Self(), &m);
if (method == nullptr) {
soa.Self()->AssertPendingException();
@@ -838,7 +838,7 @@ static jobject Class_newInstance(JNIEnv* env, jobject javaThis) {
return nullptr;
}
// Verify that we can access the constructor.
- auto* declaring_class = constructor->GetDeclaringClass();
+ ObjPtr<mirror::Class> declaring_class = constructor->GetDeclaringClass();
if (!constructor->IsPublic()) {
if (caller == nullptr) {
caller.Assign(GetCallingClass(soa.Self(), 1));
diff --git a/runtime/oat.h b/runtime/oat.h
index f8ec665683..3939eec1ac 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: Remove InvokeInfo from stack maps.
- static constexpr uint8_t kOatVersion[] = { '1', '5', '4', '\0' };
+ // Last oat version changed reason: Remove frame info from OatQuickMethodHeader.
+ static constexpr uint8_t kOatVersion[] = { '1', '5', '6', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 0b239c1919..3ed2a91be6 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -24,17 +24,6 @@
namespace art {
-OatQuickMethodHeader::OatQuickMethodHeader(uint32_t vmap_table_offset,
- uint32_t method_info_offset,
- uint32_t frame_size_in_bytes,
- uint32_t core_spill_mask,
- uint32_t fp_spill_mask,
- uint32_t code_size)
- : vmap_table_offset_(vmap_table_offset),
- method_info_offset_(method_info_offset),
- frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask),
- code_size_(code_size) {}
-
uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
const uintptr_t pc,
bool abort_on_failure) const {
@@ -44,7 +33,7 @@ uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
return dex::kDexNoIndex;
} else {
DCHECK(IsOptimized());
- CodeInfo code_info(this);
+ CodeInfo code_info(this, CodeInfo::DecodeFlags::InlineInfoOnly);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset);
if (stack_map.IsValid()) {
return stack_map.GetDexPc();
@@ -69,7 +58,7 @@ uintptr_t OatQuickMethodHeader::ToNativeQuickPc(ArtMethod* method,
DCHECK(!method->IsNative());
DCHECK(IsOptimized());
// Search for the dex-to-pc mapping in stack maps.
- CodeInfo code_info(this);
+ CodeInfo code_info(this, CodeInfo::DecodeFlags::InlineInfoOnly);
// All stack maps are stored in the same CodeItem section, safepoint stack
// maps first, then catch stack maps. We use `is_for_catch_handler` to select
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 1e4ca3e450..3b9f466220 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -32,12 +32,13 @@ class ArtMethod;
class PACKED(4) OatQuickMethodHeader {
public:
OatQuickMethodHeader() = default;
- explicit OatQuickMethodHeader(uint32_t vmap_table_offset,
- uint32_t method_info_offset,
- uint32_t frame_size_in_bytes,
- uint32_t core_spill_mask,
- uint32_t fp_spill_mask,
- uint32_t code_size);
+ OatQuickMethodHeader(uint32_t vmap_table_offset,
+ uint32_t method_info_offset,
+ uint32_t code_size)
+ : vmap_table_offset_(vmap_table_offset),
+ method_info_offset_(method_info_offset),
+ code_size_(code_size) {
+ }
static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
@@ -151,7 +152,7 @@ class PACKED(4) OatQuickMethodHeader {
template <bool kCheckFrameSize = true>
uint32_t GetFrameSizeInBytes() const {
- uint32_t result = frame_info_.FrameSizeInBytes();
+ uint32_t result = GetFrameInfo().FrameSizeInBytes();
if (kCheckFrameSize) {
DCHECK_ALIGNED(result, kStackAlignment);
}
@@ -160,11 +161,7 @@ class PACKED(4) OatQuickMethodHeader {
QuickMethodFrameInfo GetFrameInfo() const {
DCHECK(IsOptimized());
- QuickMethodFrameInfo frame_info = CodeInfo::DecodeFrameInfo(GetOptimizedCodeInfoPtr());
- DCHECK_EQ(frame_info.FrameSizeInBytes(), frame_info_.FrameSizeInBytes());
- DCHECK_EQ(frame_info.CoreSpillMask(), frame_info_.CoreSpillMask());
- DCHECK_EQ(frame_info.FpSpillMask(), frame_info_.FpSpillMask());
- return frame_info;
+ return CodeInfo::DecodeFrameInfo(GetOptimizedCodeInfoPtr());
}
uintptr_t ToNativeQuickPc(ArtMethod* method,
@@ -194,8 +191,6 @@ class PACKED(4) OatQuickMethodHeader {
// would be lost from doing so. The method info memory region contains method indices since they
// are hard to dedupe.
uint32_t method_info_offset_ = 0u;
- // The stack frame information.
- QuickMethodFrameInfo frame_info_;
// The code size in bytes. The highest bit is used to signify if the compiled
// code with the method header has should_deoptimize flag.
uint32_t code_size_ = 0u;
diff --git a/runtime/stack.cc b/runtime/stack.cc
index f58fc3b564..85b1ea0524 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -597,7 +597,7 @@ static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
void StackVisitor::SanityCheckFrame() const {
if (kIsDebugBuild) {
ArtMethod* method = GetMethod();
- mirror::Class* declaring_class = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
// Runtime methods have null declaring class.
if (!method->IsRuntimeMethod()) {
CHECK(declaring_class != nullptr);
@@ -613,7 +613,7 @@ void StackVisitor::SanityCheckFrame() const {
// We get the canonical method as copied methods may have their declaring
// class from another class loader.
ArtMethod* canonical = method->GetCanonicalMethod();
- mirror::Class* klass = canonical->GetDeclaringClass();
+ ObjPtr<mirror::Class> klass = canonical->GetDeclaringClass();
LinearAlloc* const class_linear_alloc = (klass != nullptr)
? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
: linear_alloc;
@@ -795,7 +795,7 @@ void StackVisitor::WalkStack(bool include_transitions) {
// JNI methods cannot have any inlined frames.
&& !method->IsNative()) {
DCHECK_NE(cur_quick_frame_pc_, 0u);
- CodeInfo code_info(cur_oat_quick_method_header_);
+ CodeInfo code_info(cur_oat_quick_method_header_, CodeInfo::DecodeFlags::InlineInfoOnly);
uint32_t native_pc_offset =
cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 9fa9d84993..62b9f35341 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -27,11 +27,11 @@
namespace art {
-CodeInfo::CodeInfo(const OatQuickMethodHeader* header)
- : CodeInfo(header->GetOptimizedCodeInfoPtr()) {
+CodeInfo::CodeInfo(const OatQuickMethodHeader* header, DecodeFlags flags)
+ : CodeInfo(header->GetOptimizedCodeInfoPtr(), flags) {
}
-void CodeInfo::Decode(const uint8_t* data) {
+void CodeInfo::Decode(const uint8_t* data, DecodeFlags flags) {
const uint8_t* begin = data;
frame_size_in_bytes_ = DecodeUnsignedLeb128(&data);
core_spill_mask_ = DecodeUnsignedLeb128(&data);
@@ -39,9 +39,12 @@ void CodeInfo::Decode(const uint8_t* data) {
number_of_dex_registers_ = DecodeUnsignedLeb128(&data);
BitMemoryReader reader(data, /* bit_offset */ 0);
stack_maps_.Decode(reader);
+ inline_infos_.Decode(reader);
+ if (flags & DecodeFlags::InlineInfoOnly) {
+ return;
+ }
register_masks_.Decode(reader);
stack_masks_.Decode(reader);
- inline_infos_.Decode(reader);
dex_register_masks_.Decode(reader);
dex_register_maps_.Decode(reader);
dex_register_catalog_.Decode(reader);
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 26b95b0c2b..928f0f24d6 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -268,15 +268,22 @@ class RegisterMask : public BitTableAccessor<2> {
*/
class CodeInfo {
public:
- explicit CodeInfo(const void* data) {
- Decode(reinterpret_cast<const uint8_t*>(data));
+ enum DecodeFlags {
+ Default = 0,
+ // Limits the decoding only to the main stack map table and inline info table.
+ // This is sufficient for many use cases and makes the header decoding faster.
+ InlineInfoOnly = 1,
+ };
+
+ explicit CodeInfo(const uint8_t* data, DecodeFlags flags = DecodeFlags::Default) {
+ Decode(reinterpret_cast<const uint8_t*>(data), flags);
}
explicit CodeInfo(MemoryRegion region) : CodeInfo(region.begin()) {
DCHECK_EQ(Size(), region.size());
}
- explicit CodeInfo(const OatQuickMethodHeader* header);
+ explicit CodeInfo(const OatQuickMethodHeader* header, DecodeFlags flags = DecodeFlags::Default);
size_t Size() const {
return BitsToBytesRoundUp(size_in_bits_);
@@ -421,20 +428,20 @@ class CodeInfo {
uint32_t first_dex_register,
/*out*/ DexRegisterMap* map) const;
- void Decode(const uint8_t* data);
+ void Decode(const uint8_t* data, DecodeFlags flags);
uint32_t frame_size_in_bytes_;
uint32_t core_spill_mask_;
uint32_t fp_spill_mask_;
uint32_t number_of_dex_registers_;
BitTable<StackMap> stack_maps_;
+ BitTable<InlineInfo> inline_infos_;
BitTable<RegisterMask> register_masks_;
BitTable<MaskInfo> stack_masks_;
- BitTable<InlineInfo> inline_infos_;
BitTable<MaskInfo> dex_register_masks_;
BitTable<DexRegisterMapInfo> dex_register_maps_;
BitTable<DexRegisterInfo> dex_register_catalog_;
- uint32_t size_in_bits_;
+ uint32_t size_in_bits_ = 0;
};
#undef ELEMENT_BYTE_OFFSET_AFTER
diff --git a/runtime/suspend_reason.h b/runtime/suspend_reason.h
index 289a1a4fb3..4e75a4feec 100644
--- a/runtime/suspend_reason.h
+++ b/runtime/suspend_reason.h
@@ -22,6 +22,8 @@
namespace art {
// The various reasons that we might be suspending a thread.
+// TODO Once kForDebugger is removed (by removing the old debugger) we should make kForUserCode
+// just a basic count for bookkeeping instead of linking it directly with internal suspends.
enum class SuspendReason {
// Suspending for internal reasons (e.g. GC, stack trace, etc.).
// TODO Split this into more descriptive sections.
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 99a8829e62..18dc0e8c45 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -42,6 +42,7 @@
#include "base/file_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
+#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/timing_logger.h"
#include "base/to_str.h"
@@ -393,6 +394,22 @@ ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id,
return shadow_frame;
}
+TLSData* Thread::GetCustomTLS(const char* key) {
+ MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_);
+ auto it = custom_tls_.find(key);
+ return (it != custom_tls_.end()) ? it->second.get() : nullptr;
+}
+
+void Thread::SetCustomTLS(const char* key, TLSData* data) {
+ // We will swap the old data (which might be nullptr) with this and then delete it outside of the
+ // custom_tls_lock_.
+ std::unique_ptr<TLSData> old_data(data);
+ {
+ MutexLock mu(Thread::Current(), *Locks::custom_tls_lock_);
+ custom_tls_.GetOrCreate(key, []() { return std::unique_ptr<TLSData>(); }).swap(old_data);
+ }
+}
+
void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) {
FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame;
if (head->GetFrameId() == frame_id) {
@@ -1211,6 +1228,34 @@ static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREA
LOG(FATAL) << ss.str();
}
+void Thread::SetCanBeSuspendedByUserCode(bool can_be_suspended_by_user_code) {
+ CHECK_EQ(this, Thread::Current()) << "This function may only be called on the current thread. "
+ << *Thread::Current() << " tried to modify the suspendability "
+ << "of " << *this;
+ // NB This checks the new value! This ensures that we can only set can_be_suspended_by_user_code
+ // to false if !CanCallIntoJava().
+ DCHECK(!CanCallIntoJava() || can_be_suspended_by_user_code)
+ << "Threads able to call into java may not be marked as unsuspendable!";
+ if (can_be_suspended_by_user_code == CanBeSuspendedByUserCode()) {
+ // Don't need to do anything if nothing is changing.
+ return;
+ }
+ art::MutexLock mu(this, *Locks::user_code_suspension_lock_);
+ art::MutexLock thread_list_mu(this, *Locks::thread_suspend_count_lock_);
+
+ // We want to add the user-code suspend count if we are newly allowing user-code suspends and
+ // remove them if we are disabling them.
+ int adj = can_be_suspended_by_user_code ? GetUserCodeSuspendCount() : -GetUserCodeSuspendCount();
+ // Adjust the global suspend count appropriately. Use kInternal to not change the ForUserCode
+ // count.
+ if (adj != 0) {
+ bool suspend = ModifySuspendCountInternal(this, adj, nullptr, SuspendReason::kInternal);
+ CHECK(suspend) << this << " was unable to modify it's own suspend count!";
+ }
+ // Mark thread as accepting user-code suspensions.
+ can_be_suspended_by_user_code_ = can_be_suspended_by_user_code;
+}
+
bool Thread::ModifySuspendCountInternal(Thread* self,
int delta,
AtomicInteger* suspend_barrier,
@@ -1232,6 +1277,17 @@ bool Thread::ModifySuspendCountInternal(Thread* self,
LOG(ERROR) << "attempting to modify suspend count in an illegal way.";
return false;
}
+ DCHECK(this == self || this->IsSuspended())
+ << "Only self kForUserCode suspension on an unsuspended thread is allowed: " << this;
+ if (UNLIKELY(!CanBeSuspendedByUserCode())) {
+ VLOG(threads) << this << " is being requested to suspend for user code but that is disabled "
+ << "the thread will not actually go to sleep.";
+ // Having the user_code_suspend_count still be around is useful but we don't need to actually
+ // do anything since we aren't going to 'really' suspend. Just adjust the
+ // user_code_suspend_count and return.
+ tls32_.user_code_suspend_count += delta;
+ return true;
+ }
}
if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
UnsafeLogFatalForSuspendCount(self, this);
@@ -2092,8 +2148,8 @@ void Thread::NotifyThreadGroup(ScopedObjectAccessAlreadyRunnable& soa, jobject t
Thread::Thread(bool daemon)
: tls32_(daemon),
wait_monitor_(nullptr),
- custom_tls_(nullptr),
- can_call_into_java_(true) {
+ can_call_into_java_(true),
+ can_be_suspended_by_user_code_(true) {
wait_mutex_ = new Mutex("a thread wait mutex");
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
diff --git a/runtime/thread.h b/runtime/thread.h
index c8a4b61792..d169a62198 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -33,6 +33,7 @@
#include "base/globals.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/safe_map.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "handle_scope.h"
@@ -97,6 +98,14 @@ class Thread;
class ThreadList;
enum VisitRootFlags : uint8_t;
+// A piece of data that can be held in the custom TLS map. The destructor will be called during
+// thread shutdown. The thread the destructor is called on is not necessarily the same thread it
+// was stored on.
+class TLSData {
+ public:
+ virtual ~TLSData() {}
+};
+
// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
@@ -980,6 +989,17 @@ class Thread {
--tls32_.disable_thread_flip_count;
}
+  // Returns true if the thread is subject to user-code suspension.
+  bool CanBeSuspendedByUserCode() const {
+ return can_be_suspended_by_user_code_;
+ }
+
+  // Sets CanBeSuspendedByUserCode and adjusts the suspend-count as needed. This may only be called
+ // when running on the current thread. It is **absolutely required** that this be called only on
+ // the Thread::Current() thread.
+ void SetCanBeSuspendedByUserCode(bool can_be_suspended_by_user_code)
+ REQUIRES(!Locks::thread_suspend_count_lock_, !Locks::user_code_suspension_lock_);
+
// Returns true if the thread is allowed to call into java.
bool CanCallIntoJava() const {
return can_call_into_java_;
@@ -1248,13 +1268,14 @@ class Thread {
return debug_disallow_read_barrier_;
}
- void* GetCustomTLS() const REQUIRES(Locks::thread_list_lock_) {
- return custom_tls_;
- }
+ // Gets the current TLSData associated with the key or nullptr if there isn't any. Note that users
+  // do not gain ownership of TLSData and must synchronize with SetCustomTLS themselves to prevent
+ // it from being deleted.
+ TLSData* GetCustomTLS(const char* key) REQUIRES(!Locks::custom_tls_lock_);
- void SetCustomTLS(void* data) REQUIRES(Locks::thread_list_lock_) {
- custom_tls_ = data;
- }
+ // Sets the tls entry at 'key' to data. The thread takes ownership of the TLSData. The destructor
+ // will be run when the thread exits or when SetCustomTLS is called again with the same key.
+ void SetCustomTLS(const char* key, TLSData* data) REQUIRES(!Locks::custom_tls_lock_);
// Returns true if the current thread is the jit sensitive thread.
bool IsJitSensitiveThread() const {
@@ -1542,8 +1563,9 @@ class Thread {
// critical section enter.
uint32_t disable_thread_flip_count;
- // How much of 'suspend_count_' is by request of user code, used to distinguish threads
- // suspended by the runtime from those suspended by user code.
+ // If CanBeSuspendedByUserCode, how much of 'suspend_count_' is by request of user code, used to
+ // distinguish threads suspended by the runtime from those suspended by user code. Otherwise
+ // this is just a count of how many user-code suspends have been attempted (but were ignored).
// This should have GUARDED_BY(Locks::user_code_suspension_lock_) but auto analysis cannot be
// told that AssertHeld should be good enough.
int user_code_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
@@ -1754,14 +1776,18 @@ class Thread {
// Pending extra checkpoints if checkpoint_function_ is already used.
std::list<Closure*> checkpoint_overflow_ GUARDED_BY(Locks::thread_suspend_count_lock_);
- // Custom TLS field that can be used by plugins.
- // TODO: Generalize once we have more plugins.
- void* custom_tls_;
+ // Custom TLS field that can be used by plugins or the runtime. Should not be accessed directly by
+ // compiled code or entrypoints.
+ SafeMap<std::string, std::unique_ptr<TLSData>> custom_tls_ GUARDED_BY(Locks::custom_tls_lock_);
// True if the thread is allowed to call back into java (for e.g. during class resolution).
// By default this is true.
bool can_call_into_java_;
+ // True if the thread is subject to user-code suspension. By default this is true. This can only
+ // be false for threads where '!can_call_into_java_'.
+ bool can_be_suspended_by_user_code_;
+
friend class Dbg; // For SetStateUnsafe.
friend class gc::collector::SemiSpace; // For getting stack traces.
friend class Runtime; // For CreatePeer.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index b2be549996..ba333f6dd9 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -902,6 +902,8 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer,
bool request_suspension,
SuspendReason reason,
bool* timed_out) {
+ CHECK_NE(reason, SuspendReason::kForUserCode) << "Cannot suspend for user-code by peer. Must be "
+ << "done directly on the thread.";
const uint64_t start_time = NanoTime();
useconds_t sleep_us = kThreadSuspendInitialSleepUs;
*timed_out = false;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index bec1150807..26ca19054d 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -100,8 +100,13 @@ void* ThreadPoolWorker::Callback(void* arg) {
worker->thread_ = Thread::Current();
// Thread pool workers cannot call into java.
worker->thread_->SetCanCallIntoJava(false);
+ // Thread pool workers should not be getting paused by user-code.
+ worker->thread_->SetCanBeSuspendedByUserCode(false);
// Do work until its time to shut down.
worker->Run();
+ // Thread pool worker is finished. We want to allow suspension during shutdown.
+ worker->thread_->SetCanBeSuspendedByUserCode(true);
+ // Thread shuts down.
runtime->DetachCurrentThread();
return nullptr;
}
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index b7f28f0d03..866a57e7d2 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -281,8 +281,8 @@ class VdexFile {
// In-place unquicken the given `dex_files` based on `quickening_info`.
// `decompile_return_instruction` controls if RETURN_VOID_BARRIER instructions are
- // decompiled to RETURN_VOID instructions using the slower ClassDataItemIterator
- // instead of the faster QuickeningInfoIterator.
+ // decompiled to RETURN_VOID instructions using the slower ClassAccessor instead of the faster
+ // QuickeningInfoIterator.
// Always unquickens using the vdex dex files as the source for quicken tables.
void Unquicken(const std::vector<const DexFile*>& target_dex_files,
bool decompile_return_instruction) const;
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 47877bd195..01b6bf8f15 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -157,7 +157,7 @@ FailureKind MethodVerifier::VerifyClass(Thread* self,
std::string failure_message;
const DexFile& dex_file = klass->GetDexFile();
const DexFile::ClassDef* class_def = klass->GetClassDef();
- mirror::Class* super = klass->GetSuperClass();
+ ObjPtr<mirror::Class> super = klass->GetSuperClass();
std::string temp;
if (super == nullptr && strcmp("Ljava/lang/Object;", klass->GetDescriptor(&temp)) != 0) {
early_failure = true;
@@ -2254,6 +2254,45 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
}
break;
+ // Catch a case of register aliasing when two registers are linked to the same
+ // java.lang.Class object via two consequent const-class instructions immediately
+ // preceding monitor-enter called on one of those registers.
+ case Instruction::CONST_CLASS: {
+ // Get the second previous instruction.
+ if (prev_idx == 0 || GetInstructionFlags(prev_idx).IsBranchTarget()) {
+ break;
+ }
+ prev_idx--;
+ while (0 != prev_idx && !GetInstructionFlags(prev_idx).IsOpcode()) {
+ prev_idx--;
+ }
+ const Instruction& prev2_inst = code_item_accessor_.InstructionAt(prev_idx);
+
+ // Match the pattern "const-class; const-class; monitor-enter;"
+ if (prev2_inst.Opcode() != Instruction::CONST_CLASS) {
+ break;
+ }
+
+ // Ensure both const-classes are called for the same type_idx.
+ if (prev_inst.VRegB_21c() != prev2_inst.VRegB_21c()) {
+ break;
+ }
+
+ // Update the lock status for the aliased register.
+ if (prev_inst.VRegA() == inst->VRegA_11x()) {
+ work_line_->CopyRegister1(this,
+ prev2_inst.VRegA(),
+ inst->VRegA_11x(),
+ kTypeCategoryRef);
+ } else if (prev2_inst.VRegA() == inst->VRegA_11x()) {
+ work_line_->CopyRegister1(this,
+ prev_inst.VRegA(),
+ inst->VRegA_11x(),
+ kTypeCategoryRef);
+ }
+ break;
+ }
+
default: // Other instruction types ignored.
break;
}
@@ -2955,7 +2994,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
bool is_range = (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
ArtMethod* abs_method = VerifyInvocationArgs(inst, METHOD_INTERFACE, is_range);
if (abs_method != nullptr) {
- mirror::Class* called_interface = abs_method->GetDeclaringClass();
+ ObjPtr<mirror::Class> called_interface = abs_method->GetDeclaringClass();
if (!called_interface->IsInterface() && !called_interface->IsObjectClass()) {
Fail(VERIFY_ERROR_CLASS_CHANGE) << "expected interface class in invoke-interface '"
<< abs_method->PrettyMethod() << "'";
@@ -3286,7 +3325,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
// Special instructions.
case Instruction::RETURN_VOID_NO_BARRIER:
if (IsConstructor() && !IsStatic()) {
- auto& declaring_class = GetDeclaringClass();
+ const RegType& declaring_class = GetDeclaringClass();
if (declaring_class.IsUnresolvedReference()) {
// We must iterate over the fields, even if we cannot use mirror classes to do so. Do it
// manually over the underlying dex file.
@@ -3892,7 +3931,7 @@ ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(
// class. It would be wrong to use this for the type check (interface type checks are
// postponed to runtime).
if (res_method != nullptr && !res_method->IsMiranda()) {
- mirror::Class* klass = res_method->GetDeclaringClass();
+ ObjPtr<mirror::Class> klass = res_method->GetDeclaringClass();
std::string temp;
res_method_class = &FromClass(klass->GetDescriptor(&temp), klass,
klass->CannotBeAssignedFromOtherTypes());
@@ -4154,7 +4193,7 @@ ArtMethod* MethodVerifier::VerifyInvocationArgs(
}
bool MethodVerifier::CheckSignaturePolymorphicMethod(ArtMethod* method) {
- mirror::Class* klass = method->GetDeclaringClass();
+ ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
const char* method_name = method->GetName();
const char* expected_return_descriptor;
diff --git a/test/163-app-image-methods/src/Main.java b/test/163-app-image-methods/src/Main.java
index c513470b7b..33590fc450 100644
--- a/test/163-app-image-methods/src/Main.java
+++ b/test/163-app-image-methods/src/Main.java
@@ -23,7 +23,7 @@ public class Main {
String aaaDerivedName = "AAA.Derived";
System.out.println("Eating all memory.");
// Resolve VMClassLoader before eating all the memory since we can not fail
- // initializtaion of boot classpath classes.
+ // initialization of boot classpath classes.
Class.forName("java.lang.VMClassLoader");
Object memory = eatAllMemory();
diff --git a/test/1951-monitor-enter-no-suspend/expected.txt b/test/1951-monitor-enter-no-suspend/expected.txt
new file mode 100644
index 0000000000..35821117c8
--- /dev/null
+++ b/test/1951-monitor-enter-no-suspend/expected.txt
@@ -0,0 +1 @@
+Success
diff --git a/test/1951-monitor-enter-no-suspend/info.txt b/test/1951-monitor-enter-no-suspend/info.txt
new file mode 100644
index 0000000000..a608834793
--- /dev/null
+++ b/test/1951-monitor-enter-no-suspend/info.txt
@@ -0,0 +1 @@
+Tests the jvmti-extension to lock a monitor without regards to suspension.
diff --git a/test/1951-monitor-enter-no-suspend/raw_monitor.cc b/test/1951-monitor-enter-no-suspend/raw_monitor.cc
new file mode 100644
index 0000000000..0425e350fd
--- /dev/null
+++ b/test/1951-monitor-enter-no-suspend/raw_monitor.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "scoped_local_ref.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test1951MonitorEnterNoSuspend {
+
+typedef jvmtiError (*RawMonitorEnterNoSuspend)(jvmtiEnv* env, jrawMonitorID mon);
+
+template <typename T>
+static void Dealloc(T* t) {
+ jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(t));
+}
+
+template <typename T, typename ...Rest>
+static void Dealloc(T* t, Rest... rs) {
+ Dealloc(t);
+ Dealloc(rs...);
+}
+static void DeallocParams(jvmtiParamInfo* params, jint n_params) {
+ for (jint i = 0; i < n_params; i++) {
+ Dealloc(params[i].name);
+ }
+}
+
+RawMonitorEnterNoSuspend GetNoSuspendFunction(JNIEnv* env) {
+ // Get the extensions.
+ jint n_ext = 0;
+ jvmtiExtensionFunctionInfo* infos = nullptr;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetExtensionFunctions(&n_ext, &infos))) {
+ return nullptr;
+ }
+ RawMonitorEnterNoSuspend result = nullptr;
+ for (jint i = 0; i < n_ext; i++) {
+ jvmtiExtensionFunctionInfo* cur_info = &infos[i];
+ if (strcmp("com.android.art.concurrent.raw_monitor_enter_no_suspend", cur_info->id) == 0) {
+ result = reinterpret_cast<RawMonitorEnterNoSuspend>(cur_info->func);
+ }
+ // Cleanup the cur_info
+ DeallocParams(cur_info->params, cur_info->param_count);
+ Dealloc(cur_info->id, cur_info->short_description, cur_info->params, cur_info->errors);
+ }
+ // Cleanup the array.
+ Dealloc(infos);
+ return result;
+}
+
+static std::atomic<bool> started(false);
+static std::atomic<bool> resumed(false);
+static std::atomic<bool> progress(false);
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test1951_otherThreadStart(JNIEnv* env, jclass) {
+ jrawMonitorID mon;
+ if (JvmtiErrorToException(env, jvmti_env, jvmti_env->CreateRawMonitor("test 1951", &mon))) {
+ return;
+ }
+ RawMonitorEnterNoSuspend enter_func = GetNoSuspendFunction(env);
+ if (enter_func == nullptr) {
+ return;
+ }
+ started = true;
+ while (!resumed) {}
+ jvmtiError err = enter_func(jvmti_env, mon);
+ CHECK_EQ(err, JVMTI_ERROR_NONE);
+ progress = true;
+ err = jvmti_env->RawMonitorExit(mon);
+ CHECK_EQ(err, JVMTI_ERROR_NONE);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test1951_waitForStart(JNIEnv*, jclass) {
+ while (!started) {}
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test1951_otherThreadResume(JNIEnv*, jclass) {
+ resumed = true;
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_art_Test1951_otherThreadProgressed(JNIEnv*, jclass) {
+ return progress;
+}
+
+} // namespace Test1951MonitorEnterNoSuspend
+} // namespace art
diff --git a/test/1951-monitor-enter-no-suspend/run b/test/1951-monitor-enter-no-suspend/run
new file mode 100755
index 0000000000..c6e62ae6cd
--- /dev/null
+++ b/test/1951-monitor-enter-no-suspend/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1951-monitor-enter-no-suspend/src/Main.java b/test/1951-monitor-enter-no-suspend/src/Main.java
new file mode 100644
index 0000000000..3c5e1a2403
--- /dev/null
+++ b/test/1951-monitor-enter-no-suspend/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ art.Test1951.run();
+ }
+}
diff --git a/test/1951-monitor-enter-no-suspend/src/art/Main.java b/test/1951-monitor-enter-no-suspend/src/art/Main.java
new file mode 100644
index 0000000000..aa5498bd62
--- /dev/null
+++ b/test/1951-monitor-enter-no-suspend/src/art/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+// Binder class so the agent's C code has something that can be bound and exposed to tests.
+// In a package to separate cleanly and work around CTS reference issues (though this class
+// should be replaced in the CTS version).
+public class Main {
+ // Load the given class with the given classloader, and bind all native methods to corresponding
+ // C methods in the agent. Will abort if any of the steps fail.
+ public static native void bindAgentJNI(String className, ClassLoader classLoader);
+ // Same as above, giving the class directly.
+ public static native void bindAgentJNIForClass(Class<?> klass);
+
+ // Common infrastructure.
+ public static native void setTag(Object o, long tag);
+ public static native long getTag(Object o);
+}
diff --git a/test/1951-monitor-enter-no-suspend/src/art/Suspension.java b/test/1951-monitor-enter-no-suspend/src/art/Suspension.java
new file mode 100644
index 0000000000..16e62ccac9
--- /dev/null
+++ b/test/1951-monitor-enter-no-suspend/src/art/Suspension.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class Suspension {
+ // Suspends a thread using jvmti.
+ public native static void suspend(Thread thr);
+
+ // Resumes a thread using jvmti.
+ public native static void resume(Thread thr);
+
+ public native static boolean isSuspended(Thread thr);
+
+ public native static int[] suspendList(Thread... threads);
+ public native static int[] resumeList(Thread... threads);
+}
diff --git a/test/1951-monitor-enter-no-suspend/src/art/Test1951.java b/test/1951-monitor-enter-no-suspend/src/art/Test1951.java
new file mode 100644
index 0000000000..dc7ed9eb5a
--- /dev/null
+++ b/test/1951-monitor-enter-no-suspend/src/art/Test1951.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
+
+public class Test1951 {
+
+ // Wait up to 1 minute for the other thread to make progress.
+ public static final long WAIT_TIME_MILLIS = 1000 * 60;
+ public static void run() throws Exception {
+ Thread t = new Thread(Test1951::otherThreadStart);
+ t.setDaemon(true);
+ t.start();
+ waitForStart();
+ Suspension.suspend(t);
+ otherThreadResume();
+ long endTime = System.currentTimeMillis() + WAIT_TIME_MILLIS;
+ boolean otherProgressed = false;
+ while (true) {
+ if (otherThreadProgressed()) {
+ otherProgressed = true;
+ break;
+ } else if (System.currentTimeMillis() > endTime) {
+ break;
+ } else {
+ Thread.yield();
+ }
+ }
+ Suspension.resume(t);
+ if (otherProgressed) {
+ t.join(1000);
+ }
+ if (otherProgressed) {
+ System.out.println("Success");
+ } else {
+ System.out.println(
+ "Failure: other thread did not make progress in " + WAIT_TIME_MILLIS + " ms");
+ }
+ return;
+ }
+
+ public static native void otherThreadStart();
+ public static native void waitForStart();
+ public static native void otherThreadResume();
+ public static native boolean otherThreadProgressed();
+}
diff --git a/test/411-optimizing-arith/src/RemTest.java b/test/411-optimizing-arith/src/RemTest.java
index 1b31f63569..287f5d8799 100644
--- a/test/411-optimizing-arith/src/RemTest.java
+++ b/test/411-optimizing-arith/src/RemTest.java
@@ -89,6 +89,34 @@ public class RemTest {
expectDivisionByZero(5L);
expectDivisionByZero(Long.MAX_VALUE);
expectDivisionByZero(Long.MIN_VALUE);
+
+ expectEquals(0, $noinline$RemLoaded1(0));
+ expectEquals(0, $noinline$RemLoaded1(1));
+ expectEquals(0, $noinline$RemLoaded1(-1));
+ expectEquals(0, $noinline$RemLoaded1(12345));
+ expectEquals(0, $noinline$RemLoaded1(Integer.MAX_VALUE));
+ expectEquals(0, $noinline$RemLoaded1(Integer.MIN_VALUE));
+
+ expectEquals(0, $noinline$RemLoadedN1(0));
+ expectEquals(0, $noinline$RemLoadedN1(1));
+ expectEquals(0, $noinline$RemLoadedN1(-1));
+ expectEquals(0, $noinline$RemLoadedN1(12345));
+ expectEquals(0, $noinline$RemLoadedN1(Integer.MAX_VALUE));
+ expectEquals(0, $noinline$RemLoadedN1(Integer.MIN_VALUE));
+
+ expectEquals(0L, $noinline$RemLoaded1(0L));
+ expectEquals(0L, $noinline$RemLoaded1(1L));
+ expectEquals(0L, $noinline$RemLoaded1(-1L));
+ expectEquals(0L, $noinline$RemLoaded1(12345L));
+ expectEquals(0L, $noinline$RemLoaded1(Long.MAX_VALUE));
+ expectEquals(0L, $noinline$RemLoaded1(Long.MIN_VALUE));
+
+ expectEquals(0L, $noinline$RemLoadedN1(0L));
+ expectEquals(0L, $noinline$RemLoadedN1(1L));
+ expectEquals(0L, $noinline$RemLoadedN1(-1L));
+ expectEquals(0L, $noinline$RemLoadedN1(12345L));
+ expectEquals(0L, $noinline$RemLoadedN1(Long.MAX_VALUE));
+ expectEquals(0L, $noinline$RemLoadedN1(Long.MIN_VALUE));
}
static int $opt$Rem(int a, int b) {
@@ -99,6 +127,26 @@ public class RemTest {
return a % 0;
}
+ static int $noinline$RemLoaded1(int a) {
+ int[] v = {25, 1};
+ return a % v[1];
+ }
+
+ static int $noinline$RemLoadedN1(int a) {
+ int [] v = {25, -1};
+ return a % v[1];
+ }
+
+ static long $noinline$RemLoaded1(long a) {
+ long[] v = {25, 1};
+ return a % v[1];
+ }
+
+ static long $noinline$RemLoadedN1(long a) {
+ long [] v = {25, -1};
+ return a % v[1];
+ }
+
// Modulo by literals != 0 should not generate checks.
static int $opt$RemConst(int a) {
return a % 4;
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index fcc3c1a852..3d9294304d 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -1150,6 +1150,41 @@ public class Main {
return arg % 1;
}
+ /// CHECK-START: int Main.RemN1(int) constant_folding (before)
+ /// CHECK-DAG: <<Arg:i\d+>> ParameterValue
+ /// CHECK-DAG: <<ConstN1:i\d+>> IntConstant -1
+ /// CHECK-DAG: <<Rem:i\d+>> Rem [<<Arg>>,<<ConstN1>>]
+ /// CHECK-DAG: Return [<<Rem>>]
+
+ /// CHECK-START: int Main.RemN1(int) constant_folding (after)
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+ /// CHECK-DAG: Return [<<Const0>>]
+
+ /// CHECK-START: int Main.RemN1(int) constant_folding (after)
+ /// CHECK-NOT: Rem
+
+ public static int RemN1(int arg) {
+ return arg % -1;
+ }
+
+ /// CHECK-START: long Main.Rem1(long) constant_folding (before)
+ /// CHECK-DAG: <<Arg:j\d+>> ParameterValue
+ /// CHECK-DAG: <<Const1:j\d+>> LongConstant 1
+ /// CHECK-DAG: <<DivZeroCheck:j\d+>> DivZeroCheck [<<Const1>>]
+ /// CHECK-DAG: <<Rem:j\d+>> Rem [<<Arg>>,<<DivZeroCheck>>]
+ /// CHECK-DAG: Return [<<Rem>>]
+
+ /// CHECK-START: long Main.Rem1(long) constant_folding (after)
+ /// CHECK-DAG: <<Const0:j\d+>> LongConstant 0
+ /// CHECK-DAG: Return [<<Const0>>]
+
+ /// CHECK-START: long Main.Rem1(long) constant_folding (after)
+ /// CHECK-NOT: Rem
+
+ public static long Rem1(long arg) {
+ return arg % 1;
+ }
+
/// CHECK-START: long Main.RemN1(long) constant_folding (before)
/// CHECK-DAG: <<Arg:j\d+>> ParameterValue
/// CHECK-DAG: <<ConstN1:j\d+>> LongConstant -1
@@ -1597,7 +1632,26 @@ public class Main {
assertIntEquals(-1, OrAllOnes(arbitrary));
assertLongEquals(0, Rem0(arbitrary));
assertIntEquals(0, Rem1(arbitrary));
+ assertIntEquals(0, Rem1(0));
+ assertIntEquals(0, Rem1(-1));
+ assertIntEquals(0, Rem1(Integer.MAX_VALUE));
+ assertIntEquals(0, Rem1(Integer.MIN_VALUE));
+ assertIntEquals(0, RemN1(arbitrary));
+ assertIntEquals(0, RemN1(0));
+ assertIntEquals(0, RemN1(-1));
+ assertIntEquals(0, RemN1(Integer.MAX_VALUE));
+ assertIntEquals(0, RemN1(Integer.MIN_VALUE));
+ assertIntEquals(0, RemN1(arbitrary));
+ assertLongEquals(0, Rem1((long)arbitrary));
+ assertLongEquals(0, Rem1(0L));
+ assertLongEquals(0, Rem1(-1L));
+ assertLongEquals(0, Rem1(Long.MAX_VALUE));
+ assertLongEquals(0, Rem1(Long.MIN_VALUE));
assertLongEquals(0, RemN1(arbitrary));
+ assertLongEquals(0, RemN1(0L));
+ assertLongEquals(0, RemN1(-1L));
+ assertLongEquals(0, RemN1(Long.MAX_VALUE));
+ assertLongEquals(0, RemN1(Long.MIN_VALUE));
assertIntEquals(0, Shl0(arbitrary));
assertLongEquals(0, ShlLong0WithInt(arbitrary));
assertLongEquals(0, Shr0(arbitrary));
diff --git a/test/478-checker-clinit-check-pruning/expected.txt b/test/478-checker-clinit-check-pruning/expected.txt
index 6f73b656ed..1f6e9d9337 100644
--- a/test/478-checker-clinit-check-pruning/expected.txt
+++ b/test/478-checker-clinit-check-pruning/expected.txt
@@ -1,9 +1,13 @@
Main$ClassWithClinit1's static initializer
Main$ClassWithClinit2's static initializer
-Main$ClassWithClinit3's static initializer
-Main$ClassWithClinit4's static initializer
+Main$ClassWithClinit3Static's static initializer
+Main$ClassWithClinit3Instance's static initializer
+Main$ClassWithClinit4Static's static initializer
+Main$ClassWithClinit4Instance's static initializer
Main$ClassWithClinit5's static initializer
+Main$SubClassOfClassWithoutClinit5's static initializer
Main$ClassWithClinit6's static initializer
+Main$SubClassOfClassWithoutClinit6's static initializer
Main$ClassWithClinit7's static initializer
Main$ClassWithClinit8's static initializer
Main$ClassWithClinit9's static initializer
diff --git a/test/478-checker-clinit-check-pruning/src/Main.java b/test/478-checker-clinit-check-pruning/src/Main.java
index ca92e7a86a..e16fa69c1c 100644
--- a/test/478-checker-clinit-check-pruning/src/Main.java
+++ b/test/478-checker-clinit-check-pruning/src/Main.java
@@ -16,8 +16,6 @@
public class Main {
- static boolean doThrow = false;
-
/*
* Ensure an inlined static invoke explicitly triggers the
* initialization check of the called method's declaring class, and
@@ -100,43 +98,73 @@ public class Main {
System.out.println("Main$ClassWithClinit2's static initializer");
}
- static boolean doThrow = false;
+ static boolean staticField = false;
static void $noinline$staticMethod() {
- // Try defeating inlining.
- if (doThrow) { throw new Error(); }
}
}
/*
- * Ensure an inlined call to a static method whose declaring class
- * is statically known to have been initialized does not require an
- * explicit clinit check.
+ * Ensure an inlined call from a static method to a static method
+ * of the same class does not require an explicit clinit check
+ * (already initialized or initializing in the same thread).
*/
- /// CHECK-START: void Main$ClassWithClinit3.invokeStaticInlined() builder (after)
+ /// CHECK-START: void Main$ClassWithClinit3Static.invokeStaticInlined() builder (after)
/// CHECK-DAG: InvokeStaticOrDirect
- /// CHECK-START: void Main$ClassWithClinit3.invokeStaticInlined() builder (after)
+ /// CHECK-START: void Main$ClassWithClinit3Static.invokeStaticInlined() builder (after)
/// CHECK-NOT: LoadClass
/// CHECK-NOT: ClinitCheck
- /// CHECK-START: void Main$ClassWithClinit3.invokeStaticInlined() inliner (after)
+ /// CHECK-START: void Main$ClassWithClinit3Static.invokeStaticInlined() inliner (after)
/// CHECK-NOT: LoadClass
/// CHECK-NOT: ClinitCheck
/// CHECK-NOT: InvokeStaticOrDirect
- static class ClassWithClinit3 {
+ static class ClassWithClinit3Static {
static void invokeStaticInlined() {
- // The invocation of invokeStaticInlined triggers the
- // initialization of ClassWithClinit3, meaning that the
- // hereinbelow call to $opt$inline$StaticMethod does not need a
- // clinit check.
+ // The invocation of invokeStaticInlined happens only after a clinit check
+ // of ClassWithClinit3Static, meaning that the hereinbelow call to
+ // $opt$inline$StaticMethod does not need another clinit check.
+ $opt$inline$StaticMethod();
+ }
+
+ static {
+ System.out.println("Main$ClassWithClinit3Static's static initializer");
+ }
+
+ static void $opt$inline$StaticMethod() {
+ }
+ }
+
+ /*
+ * Ensure an inlined call from an instance method to a static method
+ * of the same class actually requires an explicit clinit check when
+ * the class has a non-trivial initialization as we could be executing
+ * the instance method on an escaped object of an erroneous class. b/62478025
+ */
+
+ /// CHECK-START: void Main$ClassWithClinit3Instance.invokeStaticInlined() builder (after)
+ /// CHECK-DAG: LoadClass
+ /// CHECK-DAG: ClinitCheck
+ /// CHECK-DAG: InvokeStaticOrDirect
+
+ /// CHECK-START: void Main$ClassWithClinit3Instance.invokeStaticInlined() inliner (after)
+ /// CHECK-DAG: LoadClass
+ /// CHECK-DAG: ClinitCheck
+
+ /// CHECK-START: void Main$ClassWithClinit3Instance.invokeStaticInlined() inliner (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ static class ClassWithClinit3Instance {
+ void invokeStaticInlined() {
+ // ClinitCheck required.
$opt$inline$StaticMethod();
}
static {
- System.out.println("Main$ClassWithClinit3's static initializer");
+ System.out.println("Main$ClassWithClinit3Instance's static initializer");
}
static void $opt$inline$StaticMethod() {
@@ -144,61 +172,87 @@ public class Main {
}
/*
- * Ensure an non-inlined call to a static method whose declaring
- * class is statically known to have been initialized does not
- * require an explicit clinit check.
+ * Ensure a non-inlined call from a static method to a static method
+ * of the same class does not require an explicit clinit check
+ * (already initialized or initializing in the same thread).
*/
- /// CHECK-START: void Main$ClassWithClinit4.invokeStaticNotInlined() builder (after)
+ /// CHECK-START: void Main$ClassWithClinit4Static.invokeStaticNotInlined() builder (after)
/// CHECK-DAG: InvokeStaticOrDirect
- /// CHECK-START: void Main$ClassWithClinit4.invokeStaticNotInlined() builder (after)
+ /// CHECK-START: void Main$ClassWithClinit4Static.invokeStaticNotInlined() builder (after)
/// CHECK-NOT: LoadClass
/// CHECK-NOT: ClinitCheck
- /// CHECK-START: void Main$ClassWithClinit4.invokeStaticNotInlined() inliner (after)
+ /// CHECK-START: void Main$ClassWithClinit4Static.invokeStaticNotInlined() inliner (after)
/// CHECK-DAG: InvokeStaticOrDirect
- /// CHECK-START: void Main$ClassWithClinit4.invokeStaticNotInlined() inliner (after)
+ /// CHECK-START: void Main$ClassWithClinit4Static.invokeStaticNotInlined() inliner (after)
/// CHECK-NOT: LoadClass
/// CHECK-NOT: ClinitCheck
- static class ClassWithClinit4 {
+ static class ClassWithClinit4Static {
static void invokeStaticNotInlined() {
// The invocation of invokeStaticNotInlined triggers the
- // initialization of ClassWithClinit4, meaning that the
+ // initialization of ClassWithClinit4Static, meaning that the
// call to staticMethod below does not need a clinit
// check.
$noinline$staticMethod();
}
static {
- System.out.println("Main$ClassWithClinit4's static initializer");
+ System.out.println("Main$ClassWithClinit4Static's static initializer");
}
- static boolean doThrow = false;
+ static void $noinline$staticMethod() {
+ }
+ }
+
+ /*
+ * Ensure a non-inlined call from an instance method to a static method
+ * of the same class actually requires an explicit clinit check when
+ * the class has a non-trivial initialization as we could be executing
+ * the instance method on an escaped object of an erroneous class. b/62478025
+ */
+
+ /// CHECK-START: void Main$ClassWithClinit4Instance.invokeStaticNotInlined() builder (after)
+ /// CHECK-DAG: LoadClass
+ /// CHECK-DAG: ClinitCheck
+ /// CHECK-DAG: InvokeStaticOrDirect
+
+ /// CHECK-START: void Main$ClassWithClinit4Instance.invokeStaticNotInlined() inliner (after)
+ /// CHECK-DAG: LoadClass
+ /// CHECK-DAG: ClinitCheck
+ /// CHECK-DAG: InvokeStaticOrDirect
+
+ static class ClassWithClinit4Instance {
+ void invokeStaticNotInlined() {
+ // ClinitCheck required.
+ $noinline$staticMethod();
+ }
+
+ static {
+ System.out.println("Main$ClassWithClinit4Instance's static initializer");
+ }
static void $noinline$staticMethod() {
- // Try defeating inlining.
- if (doThrow) { throw new Error(); }
}
}
/*
* We used to remove clinit check for calls to static methods in a superclass. However, this
- * is not a valid optimization when instances of erroneous classes can escape. b/62478025
+ * is not a valid optimization when instances of erroneous classes can escape, therefore
+ * we avoid this optimization for classes with non-trivial initialization. b/62478025
*/
/// CHECK-START: void Main$SubClassOfClassWithClinit5.invokeStaticInlined() builder (after)
+ /// CHECK-DAG: LoadClass
+ /// CHECK-DAG: ClinitCheck
/// CHECK-DAG: InvokeStaticOrDirect
- /// CHECK-START: void Main$SubClassOfClassWithClinit5.invokeStaticInlined() builder (after)
- /// CHECK: LoadClass
- /// CHECK: ClinitCheck
-
/// CHECK-START: void Main$SubClassOfClassWithClinit5.invokeStaticInlined() inliner (after)
- /// CHECK: LoadClass
- /// CHECK: ClinitCheck
+ /// CHECK-DAG: LoadClass
+ /// CHECK-DAG: ClinitCheck
/// CHECK-NOT: InvokeStaticOrDirect
static class ClassWithClinit5 {
@@ -217,16 +271,50 @@ public class Main {
}
/*
+ * Ensure an inlined call to a static method whose declaring class is a super class
+ * of the caller's class does not require an explicit clinit check if the declaring
+ * class has a trivial initialization. b/62478025
+ */
+
+ /// CHECK-START: void Main$SubClassOfClassWithoutClinit5.invokeStaticInlined() builder (after)
+ /// CHECK-DAG: InvokeStaticOrDirect
+
+ /// CHECK-START: void Main$SubClassOfClassWithoutClinit5.invokeStaticInlined() builder (after)
+ /// CHECK-NOT: LoadClass
+ /// CHECK-NOT: ClinitCheck
+
+ /// CHECK-START: void Main$SubClassOfClassWithoutClinit5.invokeStaticInlined() inliner (after)
+ /// CHECK-NOT: LoadClass
+ /// CHECK-NOT: ClinitCheck
+ /// CHECK-NOT: InvokeStaticOrDirect
+
+ static class ClassWithoutClinit5 { // Mimicks ClassWithClinit5 but without the <clinit>.
+ static void $opt$inline$StaticMethod() {
+ }
+ }
+
+ static class SubClassOfClassWithoutClinit5 extends ClassWithoutClinit5 {
+ static {
+ System.out.println("Main$SubClassOfClassWithoutClinit5's static initializer");
+ }
+
+ static void invokeStaticInlined() {
+ ClassWithoutClinit5.$opt$inline$StaticMethod();
+ }
+ }
+
+ /*
* We used to remove clinit check for calls to static methods in a superclass. However, this
- * is not a valid optimization when instances of erroneous classes can escape. b/62478025
+ * is not a valid optimization when instances of erroneous classes can escape, therefore
+ * we avoid this optimization for classes with non-trivial initialization. b/62478025
*/
/// CHECK-START: void Main$SubClassOfClassWithClinit6.invokeStaticNotInlined() builder (after)
/// CHECK-DAG: InvokeStaticOrDirect
/// CHECK-START: void Main$SubClassOfClassWithClinit6.invokeStaticNotInlined() builder (after)
- /// CHECK: LoadClass
- /// CHECK: ClinitCheck
+ /// CHECK-DAG: LoadClass
+ /// CHECK-DAG: ClinitCheck
/// CHECK-START: void Main$SubClassOfClassWithClinit6.invokeStaticNotInlined() inliner (after)
/// CHECK-DAG: LoadClass
@@ -234,11 +322,7 @@ public class Main {
/// CHECK-DAG: InvokeStaticOrDirect
static class ClassWithClinit6 {
- static boolean doThrow = false;
-
static void $noinline$staticMethod() {
- // Try defeating inlining.
- if (doThrow) { throw new Error(); }
}
static {
@@ -252,6 +336,40 @@ public class Main {
}
}
+ /*
+ * Ensure a non-inlined call to a static method whose declaring class is a super class
+ * of the caller's class does not require an explicit clinit check if the declaring
+ * class has a trivial initialization. b/62478025
+ */
+
+ /// CHECK-START: void Main$SubClassOfClassWithoutClinit6.invokeStaticNotInlined() builder (after)
+ /// CHECK-DAG: InvokeStaticOrDirect
+
+ /// CHECK-START: void Main$SubClassOfClassWithoutClinit6.invokeStaticNotInlined() builder (after)
+ /// CHECK-NOT: LoadClass
+ /// CHECK-NOT: ClinitCheck
+
+ /// CHECK-START: void Main$SubClassOfClassWithoutClinit6.invokeStaticNotInlined() inliner (after)
+ /// CHECK-DAG: InvokeStaticOrDirect
+
+ /// CHECK-START: void Main$SubClassOfClassWithoutClinit6.invokeStaticNotInlined() inliner (after)
+ /// CHECK-NOT: LoadClass
+ /// CHECK-NOT: ClinitCheck
+
+ static class ClassWithoutClinit6 { // Mimicks ClassWithClinit6 but without the <clinit>.
+ static void $noinline$staticMethod() {
+ }
+ }
+
+ static class SubClassOfClassWithoutClinit6 extends ClassWithoutClinit6 {
+ static {
+ System.out.println("Main$SubClassOfClassWithoutClinit6's static initializer");
+ }
+
+ static void invokeStaticNotInlined() {
+ ClassWithoutClinit6.$noinline$staticMethod();
+ }
+ }
/*
* Verify that if we have a static call immediately after the load class
@@ -269,7 +387,7 @@ public class Main {
static void noClinitBecauseOfInvokeStatic() {
ClassWithClinit2.$noinline$staticMethod();
- ClassWithClinit2.doThrow = false;
+ ClassWithClinit2.staticField = false;
}
/*
@@ -286,7 +404,7 @@ public class Main {
/// CHECK-START: void Main.clinitBecauseOfFieldAccess() liveness (before)
/// CHECK-NOT: ClinitCheck
static void clinitBecauseOfFieldAccess() {
- ClassWithClinit2.doThrow = false;
+ ClassWithClinit2.staticField = false;
ClassWithClinit2.$noinline$staticMethod();
}
@@ -317,8 +435,6 @@ public class Main {
static void $noinline$someStaticMethod(Iterable<?> it) {
it.iterator();
- // We're not inlining throw at the moment.
- if (doThrow) { throw new Error(""); }
}
}
@@ -349,8 +465,6 @@ public class Main {
static void $noinline$someStaticMethod(Iterable<?> it) {
it.iterator();
- // We're not inlining throw at the moment.
- if (doThrow) { throw new Error(""); }
}
}
@@ -378,8 +492,6 @@ public class Main {
static void $noinline$someStaticMethod(Iterable<?> it) {
it.iterator();
- // We're not inlining throw at the moment.
- if (doThrow) { throw new Error(""); }
}
}
@@ -511,14 +623,11 @@ public class Main {
public static void $noinline$getIterator(Iterable<?> it) {
it.iterator();
- // We're not inlining throw at the moment.
- if (doThrow) { throw new Error(""); }
}
}
// TODO: Write checker statements.
static Object $noinline$testInliningAndNewInstance(Iterable<?> it) {
- if (doThrow) { throw new Error(); }
ClassWithClinit13.$inline$forwardToGetIterator(it);
return new ClassWithClinit13();
}
@@ -531,10 +640,14 @@ public class Main {
public static void main(String[] args) {
invokeStaticInlined();
invokeStaticNotInlined();
- ClassWithClinit3.invokeStaticInlined();
- ClassWithClinit4.invokeStaticNotInlined();
+ ClassWithClinit3Static.invokeStaticInlined();
+ new ClassWithClinit3Instance().invokeStaticInlined();
+ ClassWithClinit4Static.invokeStaticNotInlined();
+ new ClassWithClinit4Instance().invokeStaticNotInlined();
SubClassOfClassWithClinit5.invokeStaticInlined();
+ SubClassOfClassWithoutClinit5.invokeStaticInlined();
SubClassOfClassWithClinit6.invokeStaticNotInlined();
+ SubClassOfClassWithoutClinit6.invokeStaticNotInlined();
Iterable it = new Iterable() { public java.util.Iterator iterator() { return null; } };
constClassAndInvokeStatic(it);
sgetAndInvokeStatic(it);
diff --git a/test/497-inlining-and-class-loader/clear_dex_cache.cc b/test/497-inlining-and-class-loader/clear_dex_cache.cc
index c6fd56f20d..730e0741b8 100644
--- a/test/497-inlining-and-class-loader/clear_dex_cache.cc
+++ b/test/497-inlining-and-class-loader/clear_dex_cache.cc
@@ -54,7 +54,7 @@ extern "C" JNIEXPORT jobject JNICALL Java_Main_cloneResolvedMethods(JNIEnv* env,
if (sizeof(void*) == 4) {
ObjPtr<mirror::IntArray> int_array = ObjPtr<mirror::IntArray>::DownCast(decoded_array);
int_array->Set(2u * i, index);
- int_array->Set(2u * i + 1u, static_cast<jint>(reinterpret_cast<uintptr_t>(method)));
+ int_array->Set(2u * i + 1u, reinterpret_cast32<jint>(method));
} else {
ObjPtr<mirror::LongArray> long_array = ObjPtr<mirror::LongArray>::DownCast(decoded_array);
long_array->Set(2u * i, index);
@@ -81,7 +81,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_restoreResolvedMethods(
if (sizeof(void*) == 4) {
ObjPtr<mirror::IntArray> int_array = down_cast<mirror::IntArray*>(old.Ptr());
index = static_cast<uint32_t>(int_array->Get(2u * i));
- method = reinterpret_cast<ArtMethod*>(static_cast<uint32_t>(int_array->Get(2u * i + 1u)));
+ method = reinterpret_cast32<ArtMethod*>(int_array->Get(2u * i + 1u));
} else {
ObjPtr<mirror::LongArray> long_array = down_cast<mirror::LongArray*>(old.Ptr());
index = dchecked_integral_cast<uint32_t>(long_array->Get(2u * i));
diff --git a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java
index a5caa7bce0..935b37858d 100644
--- a/test/527-checker-array-access-split/src/Main.java
+++ b/test/527-checker-array-access-split/src/Main.java
@@ -400,7 +400,7 @@ public class Main {
/// CHECK: ArraySet [<<Address>>,<<Index>>,<<Div>>]
public static int canMergeAfterBCE1() {
- int[] array = {0, 7, 14, 21};
+ int[] array = {0, 7, 14, 21, 28, 35, 42};
for (int i = 0; i < array.length; i++) {
array[i] = array[i] / 7;
}
@@ -513,7 +513,7 @@ public class Main {
/// CHECK-NOT: IntermediateAddress
public static int canMergeAfterBCE2() {
- int[] array = {64, 8, 4, 2 };
+ int[] array = {128, 64, 32, 8, 4, 2 };
for (int i = 0; i < array.length - 1; i++) {
array[i + 1] = array[i] << array[i + 1];
}
@@ -571,8 +571,8 @@ public class Main {
accrossGC(array, 0);
assertIntEquals(125, array[0]);
- assertIntEquals(3, canMergeAfterBCE1());
- assertIntEquals(1048576, canMergeAfterBCE2());
+ assertIntEquals(6, canMergeAfterBCE1());
+ assertIntEquals(2097152, canMergeAfterBCE2());
assertIntEquals(18, checkLongFloatDouble());
}
diff --git a/test/530-checker-peel-unroll/src/Main.java b/test/530-checker-peel-unroll/src/Main.java
index 11c29649ff..4d814407a3 100644
--- a/test/530-checker-peel-unroll/src/Main.java
+++ b/test/530-checker-peel-unroll/src/Main.java
@@ -1067,6 +1067,46 @@ public class Main {
}
}
+ /// CHECK-START: void Main.unrollingFull(int[]) loop_optimization (before)
+ /// CHECK-DAG: <<Param:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Limit:i\d+>> IntConstant 2 loop:none
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [<<Const0>>,{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: ArrayGet
+ /// CHECK-NOT: ArraySet
+
+ /// CHECK-START: void Main.unrollingFull(int[]) loop_optimization (after)
+ /// CHECK-DAG: <<Param:l\d+>> ParameterValue loop:none
+ /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 loop:none
+ /// CHECK-DAG: <<Const1:i\d+>> IntConstant 1 loop:none
+ /// CHECK-DAG: <<Limit:i\d+>> IntConstant 2 loop:none
+ // Two peeled iterations
+ /// CHECK-DAG: ArrayGet loop:none
+ /// CHECK-DAG: ArrayGet loop:none
+ /// CHECK-DAG: ArraySet loop:none
+ /// CHECK-DAG: ArrayGet loop:none
+ /// CHECK-DAG: ArrayGet loop:none
+ /// CHECK-DAG: ArraySet loop:none
+ // Loop
+ /// CHECK-DAG: <<Phi:i\d+>> Phi [{{i\d+}},{{i\d+}}] loop:<<Loop:B\d+>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArrayGet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: ArraySet loop:<<Loop>> outer_loop:none
+ /// CHECK-DAG: If [<<Const1>>] loop:<<Loop>> outer_loop:none
+ //
+ /// CHECK-NOT: ArrayGet
+ /// CHECK-NOT: ArraySet
+ private static final void unrollingFull(int[] a) {
+ for (int i = 0; i < 2; i++) {
+ a[i] += a[i + 1];
+ }
+ }
+
private static void expectEquals(int expected, int result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
diff --git a/test/551-checker-clinit/src/Main.java b/test/551-checker-clinit/src/Main.java
index 86fca80292..ab92cd03fd 100644
--- a/test/551-checker-clinit/src/Main.java
+++ b/test/551-checker-clinit/src/Main.java
@@ -19,6 +19,16 @@ public class Main {
public static void main(String[] args) {}
public static int foo = 42;
+ // Primitive array initialization is trivial for purposes of the ClinitCheck. It cannot
+ // leak instances of erroneous classes or initialize subclasses of erroneous classes.
+ public static int[] array1 = new int[] { 1, 2, 3 };
+ public static int[] array2;
+ static {
+ int[] a = new int[4];
+ a[0] = 42;
+ array2 = a;
+ }
+
/// CHECK-START: void Main.inlinedMethod() builder (after)
/// CHECK: ClinitCheck
@@ -33,15 +43,17 @@ public class Main {
class Sub extends Main {
/// CHECK-START: void Sub.invokeSuperClass() builder (after)
- /// CHECK: ClinitCheck
+ /// CHECK-NOT: ClinitCheck
public void invokeSuperClass() {
- int a = Main.foo; // Class initialization check must be preserved. b/62478025
+ // No Class initialization check as Main.<clinit> is trivial. b/62478025
+ int a = Main.foo;
}
/// CHECK-START: void Sub.invokeItself() builder (after)
- /// CHECK: ClinitCheck
+ /// CHECK-NOT: ClinitCheck
public void invokeItself() {
- int a = foo; // Class initialization check must be preserved. b/62478025
+ // No Class initialization check as Sub.<clinit> and Main.<clinit> are trivial. b/62478025
+ int a = foo;
}
/// CHECK-START: void Sub.invokeSubClass() builder (after)
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index 17707e1278..746887ff1a 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -140,8 +140,7 @@ public class Main {
}
/// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$getBootImageString() builder (after)
- // Note: load kind depends on PIC/non-PIC
- /// CHECK: LoadString load_kind:{{BootImageAddress|BootImageRelRo}}
+ /// CHECK: LoadString load_kind:BootImageRelRo
public static String $noinline$getBootImageString() {
// Prevent inlining to avoid the string comparison being optimized away.
@@ -168,8 +167,7 @@ public class Main {
}
/// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.Class Main.$noinline$getStringClass() builder (after)
- // Note: load kind depends on PIC/non-PIC
- /// CHECK: LoadClass load_kind:{{BootImageAddress|BootImageRelRo}} class_name:java.lang.String
+ /// CHECK: LoadClass load_kind:BootImageRelRo class_name:java.lang.String
public static Class<?> $noinline$getStringClass() {
// Prevent inlining to avoid the string comparison being optimized away.
@@ -199,8 +197,7 @@ public class Main {
/// CHECK: InvokeStaticOrDirect method_load_kind:RuntimeCall
/// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexString(int) sharpening (after)
- // Note: load kind depends on PIC/non-PIC
- /// CHECK: InvokeStaticOrDirect method_load_kind:{{BootImageRelRo|DirectAddress}}
+ /// CHECK: InvokeStaticOrDirect method_load_kind:BootImageRelRo
public static String $noinline$toHexString(int value) {
return Integer.toString(value, 16);
}
diff --git a/test/706-checker-scheduler/src/Main.java b/test/706-checker-scheduler/src/Main.java
index 3a5fe335ab..9f4caecccd 100644
--- a/test/706-checker-scheduler/src/Main.java
+++ b/test/706-checker-scheduler/src/Main.java
@@ -409,11 +409,11 @@ public class Main {
/// CHECK-DAG: StaticFieldSet
/// CHECK-DAG: StaticFieldSet
public void accessFields() {
- static_variable = 0; // Force ClinitCheck outside the loop. b/62478025
my_obj = new ExampleObj(1, 2);
for (int i = 0; i < 10; i++) {
my_obj.n1++;
my_obj.n2++;
+ // Note: ClinitCheck(Main) is eliminated because Main initialization is trivial. b/62478025
number1++;
number2++;
}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index b8324e54e3..f3c3f03a9a 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -1,3 +1,4 @@
+JNI_OnLoad called
PackedSwitch
PackedSwitch key INT_MAX
PackedSwitch key overflow
@@ -71,4 +72,5 @@ b/29778499 (1)
b/29778499 (2)
b/30458218
b/31313170
+ConstClassAliasing
Done!
diff --git a/test/800-smali/jni.cc b/test/800-smali/jni.cc
new file mode 100644
index 0000000000..bf9e88ab45
--- /dev/null
+++ b/test/800-smali/jni.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+
+#include "class_linker-inl.h"
+#include "dex/dex_file-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/dex_cache-inl.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+
+namespace art {
+namespace {
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isAotVerified(JNIEnv* env, jclass, jclass cls) {
+ ScopedObjectAccess soa(env);
+ Runtime* rt = Runtime::Current();
+
+ ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(cls);
+ const DexFile& dex_file = *klass->GetDexCache()->GetDexFile();
+ ClassStatus oat_file_class_status(ClassStatus::kNotReady);
+ bool ret = rt->GetClassLinker()->VerifyClassUsingOatFile(dex_file, klass, oat_file_class_status);
+ return ret;
+}
+
+} // namespace
+} // namespace art
diff --git a/test/800-smali/smali/ConstClassAliasing.smali b/test/800-smali/smali/ConstClassAliasing.smali
new file mode 100644
index 0000000000..a65d9a7a5d
--- /dev/null
+++ b/test/800-smali/smali/ConstClassAliasing.smali
@@ -0,0 +1,12 @@
+.class public LConstClassAliasing;
+
+.super Ljava/lang/Object;
+
+.method public static run()V
+ .registers 2
+ const-class v0, Ljava/lang/Object;
+ const-class v1, Ljava/lang/Object;
+ monitor-enter v0
+ monitor-exit v1
+ return-void
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index 8d39f0971f..9b06e9edda 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -27,13 +27,21 @@ public class Main {
private static class TestCase {
public TestCase(String testName, String testClass, String testMethodName, Object[] values,
- Throwable expectedException, Object expectedReturn) {
+ Throwable expectedException, Object expectedReturn,
+ boolean checkCompiled) {
this.testName = testName;
this.testClass = testClass;
this.testMethodName = testMethodName;
this.values = values;
this.expectedException = expectedException;
this.expectedReturn = expectedReturn;
+ this.checkCompiled = checkCompiled;
+ }
+
+ public TestCase(String testName, String testClass, String testMethodName, Object[] values,
+ Throwable expectedException, Object expectedReturn) {
+ this(testName, testClass, testMethodName, values, expectedException,
+ expectedReturn, false);
}
String testName;
@@ -42,6 +50,7 @@ public class Main {
Object[] values;
Throwable expectedException;
Object expectedReturn;
+ boolean checkCompiled;
}
private List<TestCase> testCases;
@@ -182,6 +191,8 @@ public class Main {
new IncompatibleClassChangeError(), null));
testCases.add(new TestCase("b/30458218", "B30458218", "run", null, null, null));
testCases.add(new TestCase("b/31313170", "B31313170", "run", null, null, 0));
+ testCases.add(new TestCase("ConstClassAliasing", "ConstClassAliasing", "run", null, null,
+ null, true));
}
public void runTests() {
@@ -235,6 +246,10 @@ public class Main {
errorReturn = new IllegalStateException("Expected return " +
tc.expectedReturn +
", but got " + retValue);
+ } else if (tc.checkCompiled && compiledWithOptimizing() && !isAotVerified(c)) {
+ errorReturn = new IllegalStateException("Expected method " + method.getName() +
+ " of class " + c.getName() +
+ " to be verified at compile time in test " + tc.testName);
} else {
// Expected result, do nothing.
}
@@ -260,10 +275,15 @@ public class Main {
}
public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+
Main main = new Main();
main.runTests();
System.out.println("Done!");
}
+
+ private native static boolean isAotVerified(Class<?> cls);
+ private native static boolean compiledWithOptimizing();
}
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index 995701dee1..b0a400ab75 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -79,7 +79,7 @@ From top
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -99,7 +99,7 @@ From top
---------
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -120,13 +120,13 @@ From top
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
---------
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -153,7 +153,7 @@ From bottom
###########################
From top
---------
- printOrWait (IILart/ControlData;)V 43 54
+ printOrWait (IILart/ControlData;)V 45 54
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -188,7 +188,7 @@ From top
foo (IIILart/ControlData;)I 0 21
run ()V 4 61
---------
- printOrWait (IILart/ControlData;)V 43 54
+ printOrWait (IILart/ControlData;)V 45 54
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -274,7 +274,7 @@ AllTraces Thread 0
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -284,7 +284,7 @@ AllTraces Thread 1
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -294,7 +294,7 @@ AllTraces Thread 2
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -304,7 +304,7 @@ AllTraces Thread 3
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -314,7 +314,7 @@ AllTraces Thread 4
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -324,7 +324,7 @@ AllTraces Thread 5
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -334,7 +334,7 @@ AllTraces Thread 6
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -344,7 +344,7 @@ AllTraces Thread 7
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -354,7 +354,7 @@ AllTraces Thread 8
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -364,7 +364,7 @@ AllTraces Thread 9
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -399,7 +399,7 @@ AllTraces Thread 0
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -422,7 +422,7 @@ AllTraces Thread 1
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -445,7 +445,7 @@ AllTraces Thread 2
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -468,7 +468,7 @@ AllTraces Thread 3
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -491,7 +491,7 @@ AllTraces Thread 4
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -514,7 +514,7 @@ AllTraces Thread 5
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -537,7 +537,7 @@ AllTraces Thread 6
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -560,7 +560,7 @@ AllTraces Thread 7
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -583,7 +583,7 @@ AllTraces Thread 8
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -606,7 +606,7 @@ AllTraces Thread 9
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -683,7 +683,7 @@ ThreadListTraces Thread 0
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -693,7 +693,7 @@ ThreadListTraces Thread 2
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -703,7 +703,7 @@ ThreadListTraces Thread 4
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -713,7 +713,7 @@ ThreadListTraces Thread 6
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -723,7 +723,7 @@ ThreadListTraces Thread 8
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -740,7 +740,7 @@ ThreadListTraces Thread 0
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -763,7 +763,7 @@ ThreadListTraces Thread 2
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -786,7 +786,7 @@ ThreadListTraces Thread 4
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -809,7 +809,7 @@ ThreadListTraces Thread 6
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -832,7 +832,7 @@ ThreadListTraces Thread 8
wait (JI)V -1 -2
wait (J)V 1 442
wait ()V 2 568
- printOrWait (IILart/ControlData;)V 22 47
+ printOrWait (IILart/ControlData;)V 24 47
baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
bar (IIILart/ControlData;)J 0 26
foo (IIILart/ControlData;)I 0 21
@@ -870,7 +870,7 @@ JVMTI_ERROR_ILLEGAL_ARGUMENT
[public final native void java.lang.Object.wait(long,int) throws java.lang.InterruptedException, ffffffff]
[public final void java.lang.Object.wait(long) throws java.lang.InterruptedException, 1]
[public final void java.lang.Object.wait() throws java.lang.InterruptedException, 2]
-[private static void art.Recurse.printOrWait(int,int,art.ControlData), 16]
+[private static void art.Recurse.printOrWait(int,int,art.ControlData), 18]
[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 2]
[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
@@ -894,7 +894,7 @@ JVMTI_ERROR_NO_MORE_FRAMES
###########################
17
JVMTI_ERROR_ILLEGAL_ARGUMENT
-[private static void art.Recurse.printOrWait(int,int,art.ControlData), 2b]
+[private static void art.Recurse.printOrWait(int,int,art.ControlData), 2d]
[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 2]
[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
diff --git a/test/983-source-transform-verify/source_transform_art.cc b/test/983-source-transform-verify/source_transform_art.cc
index 5353370ac6..fbf25b849e 100644
--- a/test/983-source-transform-verify/source_transform_art.cc
+++ b/test/983-source-transform-verify/source_transform_art.cc
@@ -24,6 +24,7 @@
#include "dex/code_item_accessors-inl.h"
#include "dex/art_dex_file_loader.h"
+#include "dex/class_accessor-inl.h"
#include "dex/dex_file.h"
#include "dex/dex_file_loader.h"
#include "dex/dex_instruction.h"
@@ -51,23 +52,15 @@ void VerifyClassData(jint class_data_len, const unsigned char* class_data) {
/*verify_checksum*/ true,
&error));
CHECK(dex.get() != nullptr) << "Failed to verify dex: " << error;
- for (uint32_t i = 0; i < dex->NumClassDefs(); i++) {
- const DexFile::ClassDef& def = dex->GetClassDef(i);
- const uint8_t* data_item = dex->GetClassData(def);
- if (data_item == nullptr) {
- continue;
- }
- for (ClassDataItemIterator it(*dex, data_item); it.HasNext(); it.Next()) {
- if (!it.IsAtMethod() || it.GetMethodCodeItem() == nullptr) {
- continue;
- }
- for (const DexInstructionPcPair& pair :
- art::CodeItemInstructionAccessor(*dex, it.GetMethodCodeItem())) {
+
+ for (ClassAccessor accessor : dex->GetClasses()) {
+ for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+ for (const DexInstructionPcPair& pair : method.GetInstructions()) {
const Instruction& inst = pair.Inst();
int forbidden_flags = (Instruction::kVerifyError | Instruction::kVerifyRuntimeOnly);
if (inst.Opcode() == Instruction::RETURN_VOID_NO_BARRIER ||
(inst.GetVerifyExtraFlags() & forbidden_flags) != 0) {
- LOG(FATAL) << "Unexpected instruction found in " << dex->PrettyMethod(it.GetMemberIndex())
+ LOG(FATAL) << "Unexpected instruction found in " << dex->PrettyMethod(method.GetIndex())
<< " [Dex PC: 0x" << std::hex << pair.DexPc() << std::dec << "] : "
<< inst.DumpString(dex.get()) << std::endl;
}
diff --git a/test/Android.bp b/test/Android.bp
index e205b7583a..a3de382059 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -289,6 +289,7 @@ art_cc_defaults {
"1943-suspend-raw-monitor-wait/native_suspend_monitor.cc",
"1946-list-descriptors/descriptors.cc",
"1950-unprepared-transform/unprepared_transform.cc",
+ "1951-monitor-enter-no-suspend/raw_monitor.cc",
],
// Use NDK-compatible headers for ctstiagent.
header_libs: [
@@ -488,6 +489,7 @@ cc_defaults {
"667-jit-jni-stub/jit_jni_stub_test.cc",
"674-hiddenapi/hiddenapi.cc",
"708-jit-cache-churn/jit.cc",
+ "800-smali/jni.cc",
"909-attach-agent/disallow_debugging.cc",
"1947-breakpoint-redefine-deopt/check_deopt.cc",
"common/runtime_state.cc",
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index bd63389da3..9344b24b5d 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -132,9 +132,13 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_compiledWithOptimizing(JNIEnv* e
constexpr const char* kInterpretOnly = "interpret-only";
constexpr const char* kVerifyNone = "verify-none";
constexpr const char* kVerifyAtRuntime = "verify-at-runtime";
+ constexpr const char* kQuicken = "quicken";
+ constexpr const char* kExtract = "extract";
if (strncmp(filter, kInterpretOnly, strlen(kInterpretOnly)) == 0 ||
strncmp(filter, kVerifyNone, strlen(kVerifyNone)) == 0 ||
- strncmp(filter, kVerifyAtRuntime, strlen(kVerifyAtRuntime)) == 0) {
+ strncmp(filter, kVerifyAtRuntime, strlen(kVerifyAtRuntime)) == 0 ||
+ strncmp(filter, kExtract, strlen(kExtract)) == 0 ||
+ strncmp(filter, kQuicken, strlen(kQuicken)) == 0) {
return JNI_FALSE;
}
}
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 2b7a9b064f..ce4ebd76a5 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -355,14 +355,6 @@
"env_vars": {"ART_READ_BARRIER_TYPE": "TABLELOOKUP"}
},
{
- "tests": ["476-clinit-inline-static-invoke",
- "496-checker-inlining-class-loader",
- "508-referrer-method",
- "637-checker-throw-inline"],
- "bug": "http://b/36365552",
- "variant": "no-image & jit"
- },
- {
"tests": ["530-checker-lse",
"530-checker-lse2",
"030-bad-finalizer",
@@ -976,7 +968,8 @@
"677-fsi2",
"678-quickening",
"679-locks",
- "999-redefine-hiddenapi"],
+ "999-redefine-hiddenapi",
+ "1951-monitor-enter-no-suspend"],
"variant": "jvm",
"description": ["Doesn't run on RI."]
},
@@ -1010,6 +1003,30 @@
"description": ["Test throws exception before or during OOME."]
},
{
+ "tests": ["151-OpenFileLimit"],
+ "variant": "gcstress",
+ "bug": "b/111544552",
+ "description" : ["Gcstress requires the ability to open at least one file which means this test fails when it runs out."]
+ },
+ {
+ "tests": ["530-checker-lse2", "141-class-unload", "071-dexfile"],
+ "variant": "gcstress",
+ "bug": "b/111543628",
+ "description" : ["Test seems to timeout when run with gcstress due to slower unwinding by libbacktrace"]
+ },
+ {
+ "tests": ["712-varhandle-invocations"],
+ "variant": "interpreter & gcstress",
+ "bug": "b/111630237",
+ "description": ["Test timing out under gcstress possibly due to slower unwinding by libbacktrace"]
+ },
+ {
+ "tests": ["712-varhandle-invocations", "624-checker-stringops"],
+ "variant": "optimizing & gcstress | speed-profile & gcstress",
+ "bug": "b/111545159",
+ "description": ["These tests seem to expose some error with our gc when run in these configurations"]
+ },
+ {
"tests": ["021-string2"],
"variant": "jit & debuggable",
"bug": "b/109791792",
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index cdfeba45cb..12d3aa8eec 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -13,6 +13,9 @@ Usage:
Diff the heap dump against the given baseline heap dump FILE.
--baseline-proguard-map FILE
Use the proguard map FILE to deobfuscate the baseline heap dump.
+ --retained [strong | soft | finalizer | weak | phantom | unreachable]
+ The weakest reachability of instances to treat as retained.
+ Defaults to soft
TODO:
* Add a user guide.
@@ -34,10 +37,6 @@ TODO:
* [low priority] by site allocations won't line up if the stack has been
truncated. Is there any way to manually line them up in that case?
- * [low priority] Have a switch to choose whether unreachable objects are
- ignored or not? Is there any interest in what's unreachable, or is it only
- reachable objects that people care about?
-
Things to Test:
* That we can open a hprof without an 'app' heap and show a tabulation of
objects normally sorted by 'app' heap by default.
@@ -53,7 +52,17 @@ Reported Issues:
* Request to be able to sort tables by size.
Release History:
- 1.6 Pending
+ 1.7 Pending
+
+ 1.6 July 24, 2018
+ Distinguish between soft/weak/phantom/etc references.
+ Annotate $classOverhead byte[] arrays with their class.
+ Show progress of heap dump processing.
+ Add --retained command line option to ahat.
+ Support heap dumps generated with HotSpotDiagnosticMXBean.
+ Updated public APIs for dominators computation, reachability and parser.
+ AhatInstance no longer implements DominatorsComputation.Node.
+ Bug fixes.
1.5 December 05, 2017
Distinguish between weakly reachable and unreachable instances.
diff --git a/tools/ahat/etc/ahat.mf b/tools/ahat/etc/ahat.mf
index df964838bd..8ce9863006 100644
--- a/tools/ahat/etc/ahat.mf
+++ b/tools/ahat/etc/ahat.mf
@@ -1,4 +1,4 @@
Name: ahat/
Implementation-Title: ahat
-Implementation-Version: 1.5
+Implementation-Version: 1.6
Main-Class: com.android.ahat.Main
diff --git a/tools/ahat/etc/ahat_api.txt b/tools/ahat/etc/ahat_api.txt
index f60c1a84fa..5426f7b866 100644
--- a/tools/ahat/etc/ahat_api.txt
+++ b/tools/ahat/etc/ahat_api.txt
@@ -8,7 +8,20 @@ package com.android.ahat {
package com.android.ahat.dominators {
- public class DominatorsComputation {
+ public class Dominators<Node> {
+ ctor public Dominators(com.android.ahat.dominators.Dominators.Graph);
+ method public void computeDominators(Node);
+ method public com.android.ahat.dominators.Dominators progress(com.android.ahat.progress.Progress, long);
+ }
+
+ public static abstract interface Dominators.Graph<Node> {
+ method public abstract java.lang.Object getDominatorsComputationState(Node);
+ method public abstract java.lang.Iterable<? extends Node> getReferencesForDominators(Node);
+ method public abstract void setDominator(Node, Node);
+ method public abstract void setDominatorsComputationState(Node, java.lang.Object);
+ }
+
+ public deprecated class DominatorsComputation {
method public static void computeDominators(com.android.ahat.dominators.DominatorsComputation.Node);
method public static void computeDominators(com.android.ahat.dominators.DominatorsComputation.Node, com.android.ahat.progress.Progress, long);
}
@@ -50,7 +63,7 @@ package com.android.ahat.heapdump {
method public boolean isPlaceHolder();
}
- public abstract class AhatInstance implements com.android.ahat.heapdump.Diffable com.android.ahat.dominators.DominatorsComputation.Node {
+ public abstract class AhatInstance implements com.android.ahat.heapdump.Diffable {
method public com.android.ahat.heapdump.AhatArrayInstance asArrayInstance();
method public java.awt.image.BufferedImage asBitmap();
method public com.android.ahat.heapdump.AhatClassInstance asClassInstance();
@@ -64,7 +77,6 @@ package com.android.ahat.heapdump {
method public com.android.ahat.heapdump.AhatClassObj getClassObj();
method public java.lang.String getDexCacheLocation(int);
method public java.util.List<com.android.ahat.heapdump.AhatInstance> getDominated();
- method public java.lang.Object getDominatorsComputationState();
method public com.android.ahat.heapdump.Value getField(java.lang.String);
method public deprecated java.util.List<com.android.ahat.heapdump.AhatInstance> getHardReverseReferences();
method public com.android.ahat.heapdump.AhatHeap getHeap();
@@ -73,7 +85,6 @@ package com.android.ahat.heapdump {
method public java.util.List<com.android.ahat.heapdump.PathElement> getPathFromGcRoot();
method public com.android.ahat.heapdump.Reachability getReachability();
method public com.android.ahat.heapdump.AhatInstance getRefField(java.lang.String);
- method public java.lang.Iterable<? extends com.android.ahat.dominators.DominatorsComputation.Node> getReferencesForDominators();
method public com.android.ahat.heapdump.AhatInstance getReferent();
method public com.android.ahat.heapdump.Size getRetainedSize(com.android.ahat.heapdump.AhatHeap);
method public java.util.List<com.android.ahat.heapdump.AhatInstance> getReverseReferences();
@@ -90,8 +101,6 @@ package com.android.ahat.heapdump {
method public boolean isStronglyReachable();
method public boolean isUnreachable();
method public deprecated boolean isWeaklyReachable();
- method public void setDominator(com.android.ahat.dominators.DominatorsComputation.Node);
- method public void setDominatorsComputationState(java.lang.Object);
method public abstract java.lang.String toString();
}
@@ -165,6 +174,7 @@ package com.android.ahat.heapdump {
method public static com.android.ahat.heapdump.AhatSnapshot parseHeapDump(java.io.File, com.android.ahat.proguard.ProguardMap) throws com.android.ahat.heapdump.HprofFormatException, java.io.IOException;
method public static com.android.ahat.heapdump.AhatSnapshot parseHeapDump(java.nio.ByteBuffer, com.android.ahat.proguard.ProguardMap) throws com.android.ahat.heapdump.HprofFormatException, java.io.IOException;
method public com.android.ahat.heapdump.Parser progress(com.android.ahat.progress.Progress);
+ method public com.android.ahat.heapdump.Parser retained(com.android.ahat.heapdump.Reachability);
}
public class PathElement implements com.android.ahat.heapdump.Diffable {
@@ -177,6 +187,7 @@ package com.android.ahat.heapdump {
}
public final class Reachability extends java.lang.Enum {
+ method public boolean notWeakerThan(com.android.ahat.heapdump.Reachability);
method public static com.android.ahat.heapdump.Reachability valueOf(java.lang.String);
method public static final com.android.ahat.heapdump.Reachability[] values();
enum_constant public static final com.android.ahat.heapdump.Reachability FINALIZER;
diff --git a/tools/ahat/src/main/com/android/ahat/Main.java b/tools/ahat/src/main/com/android/ahat/Main.java
index d3cfcf9e94..0c18b10424 100644
--- a/tools/ahat/src/main/com/android/ahat/Main.java
+++ b/tools/ahat/src/main/com/android/ahat/Main.java
@@ -20,6 +20,7 @@ import com.android.ahat.heapdump.AhatSnapshot;
import com.android.ahat.heapdump.Diff;
import com.android.ahat.heapdump.HprofFormatException;
import com.android.ahat.heapdump.Parser;
+import com.android.ahat.heapdump.Reachability;
import com.android.ahat.progress.Progress;
import com.android.ahat.proguard.ProguardMap;
import com.sun.net.httpserver.HttpServer;
@@ -51,6 +52,9 @@ public class Main {
out.println(" Diff the heap dump against the given baseline heap dump FILE.");
out.println(" --baseline-proguard-map FILE");
out.println(" Use the proguard map FILE to deobfuscate the baseline heap dump.");
+ out.println(" --retained [strong | soft | finalizer | weak | phantom | unreachable]");
+ out.println(" The weakest reachability of instances to treat as retained.");
+ out.println(" Defaults to soft");
out.println("");
}
@@ -59,10 +63,11 @@ public class Main {
* Prints an error message and exits the application on failure to load the
* heap dump.
*/
- private static AhatSnapshot loadHeapDump(File hprof, ProguardMap map, Progress progress) {
+ private static AhatSnapshot loadHeapDump(File hprof,
+ ProguardMap map, Progress progress, Reachability retained) {
System.out.println("Processing '" + hprof + "' ...");
try {
- return new Parser(hprof).map(map).progress(progress).parse();
+ return new Parser(hprof).map(map).progress(progress).retained(retained).parse();
} catch (IOException e) {
System.err.println("Unable to load '" + hprof + "':");
e.printStackTrace();
@@ -95,6 +100,7 @@ public class Main {
File hprofbase = null;
ProguardMap map = new ProguardMap();
ProguardMap mapbase = new ProguardMap();
+ Reachability retained = Reachability.SOFT;
for (int i = 0; i < args.length; i++) {
if ("-p".equals(args[i]) && i + 1 < args.length) {
i++;
@@ -123,6 +129,20 @@ public class Main {
return;
}
hprofbase = new File(args[i]);
+ } else if ("--retained".equals(args[i]) && i + 1 < args.length) {
+ i++;
+ switch (args[i]) {
+ case "strong": retained = Reachability.STRONG; break;
+ case "soft": retained = Reachability.SOFT; break;
+ case "finalizer": retained = Reachability.FINALIZER; break;
+ case "weak": retained = Reachability.WEAK; break;
+ case "phantom": retained = Reachability.PHANTOM; break;
+ case "unreachable": retained = Reachability.UNREACHABLE; break;
+ default:
+ System.err.println("Invalid retained reference type: " + args[i]);
+ help(System.err);
+ return;
+ }
} else {
if (hprof != null) {
System.err.println("multiple input files.");
@@ -153,15 +173,16 @@ public class Main {
System.exit(1);
}
- AhatSnapshot ahat = loadHeapDump(hprof, map, new AsciiProgress());
+ AhatSnapshot ahat = loadHeapDump(hprof, map, new AsciiProgress(), retained);
if (hprofbase != null) {
- AhatSnapshot base = loadHeapDump(hprofbase, mapbase, new AsciiProgress());
+ AhatSnapshot base = loadHeapDump(hprofbase, mapbase, new AsciiProgress(), retained);
System.out.println("Diffing heap dumps ...");
Diff.snapshots(ahat, base);
}
- server.createContext("/", new AhatHttpHandler(new OverviewHandler(ahat, hprof, hprofbase)));
+ server.createContext("/",
+ new AhatHttpHandler(new OverviewHandler(ahat, hprof, hprofbase, retained)));
server.createContext("/rooted", new AhatHttpHandler(new RootedHandler(ahat)));
server.createContext("/object", new AhatHttpHandler(new ObjectHandler(ahat)));
server.createContext("/objects", new AhatHttpHandler(new ObjectsHandler(ahat)));
diff --git a/tools/ahat/src/main/com/android/ahat/OverviewHandler.java b/tools/ahat/src/main/com/android/ahat/OverviewHandler.java
index c9f84259a9..5f0b473d1d 100644
--- a/tools/ahat/src/main/com/android/ahat/OverviewHandler.java
+++ b/tools/ahat/src/main/com/android/ahat/OverviewHandler.java
@@ -18,6 +18,7 @@ package com.android.ahat;
import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatSnapshot;
+import com.android.ahat.heapdump.Reachability;
import com.android.ahat.heapdump.Size;
import java.io.File;
import java.io.IOException;
@@ -27,11 +28,13 @@ class OverviewHandler implements AhatHandler {
private AhatSnapshot mSnapshot;
private File mHprof;
private File mBaseHprof;
+ private Reachability mRetained;
- public OverviewHandler(AhatSnapshot snapshot, File hprof, File basehprof) {
+ public OverviewHandler(AhatSnapshot snapshot, File hprof, File basehprof, Reachability retained) {
mSnapshot = snapshot;
mHprof = hprof;
mBaseHprof = basehprof;
+ mRetained = retained;
}
@Override
@@ -43,6 +46,9 @@ class OverviewHandler implements AhatHandler {
doc.description(
DocString.text("ahat version"),
DocString.format("ahat-%s", OverviewHandler.class.getPackage().getImplementationVersion()));
+ doc.description(
+ DocString.text("--retained"),
+ DocString.text(mRetained.toString()));
doc.description(DocString.text("hprof file"), DocString.text(mHprof.toString()));
if (mBaseHprof != null) {
doc.description(DocString.text("baseline hprof file"), DocString.text(mBaseHprof.toString()));
diff --git a/tools/ahat/src/main/com/android/ahat/dominators/Dominators.java b/tools/ahat/src/main/com/android/ahat/dominators/Dominators.java
new file mode 100644
index 0000000000..dda0e830bd
--- /dev/null
+++ b/tools/ahat/src/main/com/android/ahat/dominators/Dominators.java
@@ -0,0 +1,476 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.ahat.dominators;
+
+import com.android.ahat.progress.NullProgress;
+import com.android.ahat.progress.Progress;
+import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.Deque;
+import java.util.Queue;
+
+/**
+ * Computes the immediate dominators of a directed graph. It can be used with
+ * any directed graph data structure that implements the
+ * {@link Dominators.Graph} interface and has some root node with no incoming
+ * edges.
+ */
+public class Dominators<Node> {
+ private final Graph<Node> graph;
+
+ private Progress progress = new NullProgress();
+ private long numNodes = 0;
+
+ /**
+ * Interface for a directed graph to perform immediate dominators
+ * computation on.
+ * The dominators computation can be used with directed graph data
+ * structures that implement this <code>Graph</code> interface. To use the
+ * dominators computation on your graph, you must make the following
+ * functionality available to the dominators computation:
+ * <ul>
+ * <li>Efficiently mapping from node to associated internal dominators
+ * computation state using the
+ * {@link #setDominatorsComputationState setDominatorsComputationState} and
+ * {@link #getDominatorsComputationState getDominatorsComputationState} methods.
+ * <li>Iterating over all outgoing edges of a node using the
+ * {@link #getReferencesForDominators getReferencesForDominators} method.
+ * <li>Setting the computed dominator for a node using the
+ * {@link #setDominator setDominator} method.
+ * </ul>
+ */
+ public interface Graph<Node> {
+ /**
+ * Associates the given dominator state with the given node. Subsequent
+ * calls to
+ * {@link #getDominatorsComputationState getDominatorsComputationState} on
+ * this node should return the state given here. At the conclusion of the
+ * dominators computation, this method will be called for
+ * each node with <code>state</code> set to null.
+ *
+ * @param node the node to associate dominator state
+ * @param state the dominator state to associate with the node
+ */
+ void setDominatorsComputationState(Node node, Object state);
+
+ /**
+ * Returns the dominator state most recently associated with the given node
+ * by a call to {@link #setDominatorsComputationState setDominatorsComputationState}.
+ * If <code>setDominatorsComputationState</code> has not yet been called
+ * on this node for this dominators computation, this method should return
+ * null.
+ *
+ * @param node the node to get the dominator state for
+ * @return the associated dominator state
+ */
+ Object getDominatorsComputationState(Node node);
+
+ /**
+ * Returns a collection of nodes referenced from the given node, for the
+ * purposes of computing dominators. This method will be called at most
+ * once for each node reachable from the root node of the dominators
+ * computation.
+ *
+ * @param node the node to get the references for
+ * @return an iterable collection of the nodes with an incoming edge from
+ * the node.
+ */
+ Iterable<? extends Node> getReferencesForDominators(Node node);
+
+ /**
+ * Sets the dominator for the given node based on the results of the
+ * dominators computation.
+ *
+ * @param node the node to set the dominator for
+ * @param dominator the computed immediate dominator of the node
+ */
+ void setDominator(Node node, Node dominator);
+ }
+
+ /**
+ * Construct an object to do dominators computation on the given graph.
+ *
+ * @param graph the graph to compute the dominators of
+ */
+ public Dominators(Graph graph) {
+ this.graph = graph;
+ }
+
+ /**
+ * Sets up a progress tracker for the dominators computation.
+ *
+ * @param progress the progress tracker to use
+ * @param numNodes an upper bound on the number of nodes in the graph
+ * @return this Dominators object
+ */
+ public Dominators progress(Progress progress, long numNodes) {
+ this.progress = progress;
+ this.numNodes = numNodes;
+ return this;
+ }
+
+ // NodeS is information associated with a particular node for the
+ // purposes of computing dominators.
+ // By convention we use the suffix 'S' to name instances of NodeS.
+ private static class NodeS {
+ // The node that this NodeS is associated with.
+ public Object node;
+
+ // Unique identifier for this node, in increasing order based on the order
+ // this node was visited in a depth first search from the root. In
+ // particular, given nodes A and B, if A.id > B.id, then A cannot be a
+ // dominator of B.
+ public long id;
+
+ // The largest id of all nodes reachable from this node.
+ // If foo.id > this.maxReachableId, then foo is not reachable from this
+ // node.
+ public long maxReachableId;
+
+ // The set of ids of nodes that have references to this node.
+ public IdSet inRefIds = new IdSet();
+
+ // The current candidate dominator for this node.
+ // The true immediate dominator of this node must have id <= domS.id.
+ public NodeS domS;
+
+ // The previous candidate dominator for this node.
+ // Invariant:
+ // * There are no nodes xS reachable from this node on a path of nodes
+ // with increasing ids (not counting xS.id) for which
+ // this.id > xS.domS.id > this.oldDomS.id.
+ // This ensures that when all nodes xS satisfy xS.domS == xS.oldDomS, we
+ // have found the true immediate dominator of each node.
+ //
+ // Note: We only use this field to tell if this node is scheduled to be
+ // revisited. We could replace it with a boolean to save space, but it
+ // probably doesn't save that much space and it's easier to explain the
+ // algorithm if we can refer to this field.
+ public NodeS oldDomS;
+
+ // The set of nodes that this node is the candidate immediate dominator
+ // of. More precisely, the set of nodes xS such that xS.domS == this.
+ public NodeSet dominated = new NodeSet();
+
+ // The set of nodes that this node is the old candidate immediate
+ // dominator of that need to be revisited. Specifically, the set of nodes
+ // xS such that:
+ // xS.oldDomS == this && xS.oldDomS != xS.domS.
+ //
+ // The empty set is represented as null instead of an empty NodeSet to
+ // save memory.
+ // Invariant:
+ // If revisit != null, this node is on the global list of nodes to be
+ // revisited.
+ public NodeSet revisit = null;
+
+ // Distance from the root to this node. Used for purposes of tracking
+ // progress only.
+ public long depth;
+ }
+
+ // A collection of node ids.
+ private static class IdSet {
+ private int size = 0;
+ private long[] ids = new long[4];
+
+ // Adds an id to the set.
+ public void add(long id) {
+ if (size == ids.length) {
+ ids = Arrays.copyOf(ids, size * 2);
+ }
+ ids[size++] = id;
+ }
+
+ // Returns the most recent id added to the set. Behavior is undefined if
+ // the set is empty.
+ public long last() {
+ assert size != 0;
+ return ids[size - 1];
+ }
+
+ // Returns true if the set contains an id in the range [low, high]
+ // inclusive, false otherwise.
+ public boolean hasIdInRange(long low, long high) {
+ for (int i = 0; i < size; ++i) {
+ if (low <= ids[i] && ids[i] <= high) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
+
+ // An unordered set of nodes data structure supporting efficient iteration
+ // over elements. The bulk of the time spent in the dominators algorithm is
+ // iterating over these sets. Using an array to store the set provides
+ // noticeable performance improvements over ArrayList or a linked list.
+ private static class NodeSet {
+ public int size = 0;
+ public NodeS[] nodes = new NodeS[4];
+
+ public void add(NodeS nodeS) {
+ if (size == nodes.length) {
+ nodes = Arrays.copyOf(nodes, size * 2);
+ }
+ nodes[size++] = nodeS;
+ }
+
+ public void remove(NodeS nodeS) {
+ for (int i = 0; i < size; ++i) {
+ if (nodes[i] == nodeS) {
+ remove(i);
+ break;
+ }
+ }
+ }
+
+ public void remove(int index) {
+ nodes[index] = nodes[--size];
+ nodes[size] = null;
+ }
+ }
+
+ // A reference from a source node to a destination node to be processed
+ // during the initial depth-first traversal of nodes.
+ //
+ // Also used as a marker to indicate when the depth-first traversal has been
+ // completed for a node. In that case, srcS is the node depth-first
+ // traversal has been completed for, and dst will be set to null.
+ private static class Link<Node> {
+ public final NodeS srcS;
+ public final Node dst;
+
+ // Constructor for a reference from srcS to dst.
+ public Link(NodeS srcS, Node dst) {
+ this.srcS = srcS;
+ this.dst = dst;
+ }
+
+ // Constructor for a marker indicating depth-first traversal has been
+ // completed for srcS.
+ public Link(NodeS srcS) {
+ this.srcS = srcS;
+ this.dst = null;
+ }
+ }
+
+ /**
+ * Computes the immediate dominators of all nodes reachable from the <code>root</code> node.
+ * There must not be any incoming references to the <code>root</code> node.
+ * <p>
+ * The result of this function is to call the {@link Graph#setDominator}
+ * function on every node reachable from the root node.
+ *
+ * @param root the root node of the dominators computation
+ */
+ public void computeDominators(Node root) {
+ long id = 0;
+
+ // The set of nodes xS such that xS.revisit != null.
+ // Use a Queue instead of a Set because performance will be better. We
+ // avoid adding nodes already on the queue by checking
+ // xS.revisit == null before adding the node to the queue.
+ Queue<NodeS> revisit = new ArrayDeque<NodeS>();
+
+ // Set up the root node specially.
+ NodeS rootS = new NodeS();
+ rootS.node = root;
+ rootS.id = id++;
+ rootS.depth = 0;
+ graph.setDominatorsComputationState(root, rootS);
+
+ Deque<Link<Node>> dfs = new ArrayDeque<Link<Node>>();
+ dfs.push(new Link(rootS));
+ for (Node child : graph.getReferencesForDominators(root)) {
+ dfs.push(new Link(rootS, child));
+ }
+
+ // workBound is an upper bound on the amount of work required in the
+ // second phase of dominators computation, used solely for the purposes of
+ // tracking progress.
+ long workBound = 0;
+
+ // 1. Do a depth first search of the nodes, label them with ids and come
+ // up with initial candidate dominators for them.
+ progress.start("Initializing dominators", numNodes);
+ while (!dfs.isEmpty()) {
+ Link<Node> link = dfs.pop();
+
+ if (link.dst == null) {
+ // This is the marker link indicating we have now visited all
+ // nodes reachable from link.srcS.
+ link.srcS.maxReachableId = id - 1;
+ progress.advance();
+ } else {
+ NodeS dstS = (NodeS)graph.getDominatorsComputationState(link.dst);
+ if (dstS == null) {
+ // We are seeing the destination node for the first time.
+ // The candidate dominator is the source node.
+ dstS = new NodeS();
+ graph.setDominatorsComputationState(link.dst, dstS);
+
+ dstS.node = link.dst;
+ dstS.id = id++;
+ dstS.inRefIds.add(link.srcS.id);
+ dstS.domS = link.srcS;
+ dstS.domS.dominated.add(dstS);
+ dstS.oldDomS = link.srcS;
+ dstS.depth = link.srcS.depth + 1;
+
+ dfs.push(new Link<>(dstS));
+ for (Node child : graph.getReferencesForDominators(link.dst)) {
+ dfs.push(new Link<>(dstS, child));
+ }
+ } else {
+ // We have seen the destination node before. Update the state based
+ // on the new potential dominator.
+ if (dstS.inRefIds.size == 1) {
+ workBound += dstS.oldDomS.depth;
+ }
+
+ long seenid = dstS.inRefIds.last();
+ dstS.inRefIds.add(link.srcS.id);
+
+ // Go up the dominator chain until we reach a node we haven't already
+ // seen with a path to dstS.
+ NodeS xS = link.srcS;
+ while (xS.id > seenid) {
+ xS = xS.domS;
+ }
+
+ // The new dominator for dstS must have an id less than the node we
+ // just reached. Pull the dominator for dstS up its dominator
+ // chain until we find a suitable new dominator for dstS.
+ long domid = xS.id;
+ if (dstS.domS.id > domid) {
+ // Mark the node as needing to be revisited.
+ if (dstS.domS == dstS.oldDomS) {
+ if (dstS.oldDomS.revisit == null) {
+ dstS.oldDomS.revisit = new NodeSet();
+ revisit.add(dstS.oldDomS);
+ }
+ dstS.oldDomS.revisit.add(dstS);
+ }
+
+ // Update the node's candidate dominator.
+ dstS.domS.dominated.remove(dstS);
+ do {
+ dstS.domS = dstS.domS.domS;
+ } while (dstS.domS.id > domid);
+ dstS.domS.dominated.add(dstS);
+ }
+ }
+ }
+ }
+ progress.done();
+
+ // 2. Continue revisiting nodes until every node satisfies the requirement
+ // that domS.id == oldDomS.id.
+ progress.start("Resolving dominators", workBound);
+ while (!revisit.isEmpty()) {
+ NodeS oldDomS = revisit.poll();
+ assert oldDomS.revisit != null;
+
+ NodeSet nodes = oldDomS.revisit;
+ oldDomS.revisit = null;
+
+ // Search for pairs of nodes nodeS, xS for which
+ // nodeS.id > xS.domS.id > nodeS.oldDomS.id
+ // and there is a path of nodes with increasing ids from nodeS to xS.
+ // In that case, xS.domS must be wrong, because there is a path to xS
+ // from the root that does not go through xS.domS:
+ // * There is a path from the root to nodeS.oldDomS that doesn't go
+ // through xS.domS. Otherwise xS.domS would be a dominator of
+ // nodeS.oldDomS, but it can't be because xS.domS.id > nodeS.oldDomS.id.
+ // * There is a path from nodeS.oldDomS to nodeS that doesn't go through
+ // xS.domS, because xS.domS is not a dominator of nodeS.
+ // * There is a path from nodeS to xS that doesn't go through xS.domS,
+ // because we have a path of increasing ids from nodeS to xS, none of
+ // which can have an id smaller than nodeS as xS.domS does.
+ for (int i = 0; i < oldDomS.dominated.size; ++i) {
+ NodeS xS = oldDomS.dominated.nodes[i];
+ for (int j = 0; j < nodes.size; ++j) {
+ NodeS nodeS = nodes.nodes[j];
+ assert nodeS.oldDomS == oldDomS;
+ if (isReachableAscending(nodeS, xS)) {
+ // Update the dominator for xS.
+ if (xS.domS == xS.oldDomS) {
+ if (xS.oldDomS.revisit == null) {
+ xS.oldDomS.revisit = new NodeSet();
+ revisit.add(xS.oldDomS);
+ }
+ xS.oldDomS.revisit.add(xS);
+ }
+ oldDomS.dominated.remove(i--);
+ xS.domS = nodeS.domS;
+ xS.domS.dominated.add(xS);
+ break;
+ }
+ }
+ }
+
+ // We can now safely update oldDomS for each of the nodes nodeS while
+ // preserving the oldDomS invariant.
+ for (int i = 0; i < nodes.size; ++i) {
+ NodeS nodeS = nodes.nodes[i];
+ nodeS.oldDomS = oldDomS.oldDomS;
+ if (nodeS.oldDomS != nodeS.domS) {
+ if (nodeS.oldDomS.revisit == null) {
+ nodeS.oldDomS.revisit = new NodeSet();
+ revisit.add(nodeS.oldDomS);
+ }
+ nodeS.oldDomS.revisit.add(nodeS);
+ }
+ }
+ progress.advance((oldDomS.depth - oldDomS.oldDomS.depth) * nodes.size);
+ }
+ progress.done();
+
+
+ // 3. We have figured out the correct dominator for each node. Notify the
+ // user of the results by doing one last traversal of the nodes.
+ assert revisit.isEmpty();
+ revisit.add(rootS);
+ while (!revisit.isEmpty()) {
+ NodeS nodeS = revisit.poll();
+ assert nodeS.domS == nodeS.oldDomS;
+ assert nodeS.revisit == null;
+ graph.setDominatorsComputationState((Node)nodeS.node, null);
+ for (int i = 0; i < nodeS.dominated.size; ++i) {
+ NodeS xS = nodeS.dominated.nodes[i];
+ graph.setDominator((Node)xS.node, (Node)nodeS.node);
+ revisit.add(xS);
+ }
+ }
+ }
+
+ // Returns true if there is a path from srcS to dstS of nodes with ascending
+ // ids (not including dstS.id).
+ private static boolean isReachableAscending(NodeS srcS, NodeS dstS) {
+ if (dstS.id < srcS.id) {
+ // The first time we saw dstS was before we saw srcS. See if srcS is on
+ // the source chain for any nodes with direct references to dstS.
+ return dstS.inRefIds.hasIdInRange(srcS.id, srcS.maxReachableId);
+ }
+
+ // Otherwise dstS is only reachable from srcS on a node with ascending ids
+ // if it was visited for the first time while performing the depth-first
+ // traversal of srcS.
+ return dstS.id <= srcS.maxReachableId;
+ }
+}
diff --git a/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java b/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java
index 903211eb50..7ab52cb604 100644
--- a/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java
+++ b/tools/ahat/src/main/com/android/ahat/dominators/DominatorsComputation.java
@@ -18,18 +18,16 @@ package com.android.ahat.dominators;
import com.android.ahat.progress.NullProgress;
import com.android.ahat.progress.Progress;
-import java.util.ArrayDeque;
-import java.util.Arrays;
-import java.util.Deque;
-import java.util.Queue;
/**
* Provides a static method for computing the immediate dominators of a
* directed graph. It can be used with any directed graph data structure
* that implements the {@link DominatorsComputation.Node} interface and has
* some root node with no incoming edges.
+ *
+ * @deprecated Use {@link Dominators} class instead, which has a nicer interface.
*/
-public class DominatorsComputation {
+@Deprecated public class DominatorsComputation {
private DominatorsComputation() {
}
@@ -94,152 +92,6 @@ public class DominatorsComputation {
void setDominator(Node dominator);
}
- // NodeS is information associated with a particular node for the
- // purposes of computing dominators.
- // By convention we use the suffix 'S' to name instances of NodeS.
- private static class NodeS {
- // The node that this NodeS is associated with.
- public Node node;
-
- // Unique identifier for this node, in increasing order based on the order
- // this node was visited in a depth first search from the root. In
- // particular, given nodes A and B, if A.id > B.id, then A cannot be a
- // dominator of B.
- public long id;
-
- // The largest id of all nodes reachable from this node.
- // If foo.id > this.maxReachableId, then foo is not reachable from this
- // node.
- public long maxReachableId;
-
- // The set of ids of nodes that have references to this node.
- public IdSet inRefIds = new IdSet();
-
- // The current candidate dominator for this node.
- // The true immediate dominator of this node must have id <= domS.id.
- public NodeS domS;
-
- // The previous candidate dominator for this node.
- // Invariant:
- // * There are no nodes xS reachable from this node on a path of nodes
- // with increasing ids (not counting xS.id) for which
- // this.id > xS.domS.id > this.oldDomS.id.
- // This ensures that when all nodes xS satisfy xS.domS == xS.oldDomS, we
- // have found the true immediate dominator of each node.
- //
- // Note: We only use this field to tell if this node is scheduled to be
- // revisited. We could replace it with a boolean to save space, but it
- // probably doesn't save that much space and it's easier to explain the
- // algorithm if we can refer to this field.
- public NodeS oldDomS;
-
- // The set of nodes that this node is the candidate immediate dominator
- // of. More precisely, the set of nodes xS such that xS.domS == this.
- public NodeSet dominated = new NodeSet();
-
- // The set of nodes that this node is the old candidate immediate
- // dominator of that need to be revisited. Specifically, the set of nodes
- // xS such that:
- // xS.oldDomS == this && xS.oldDomS != xS.domS.
- //
- // The empty set is represented as null instead of an empty NodeSet to
- // save memory.
- // Invariant:
- // If revisit != null, this node is on the global list of nodes to be
- // revisited.
- public NodeSet revisit = null;
-
- // Distance from the root to this node. Used for purposes of tracking
- // progress only.
- public long depth;
- }
-
- // A collection of node ids.
- private static class IdSet {
- private int size = 0;
- private long[] ids = new long[4];
-
- // Adds an id to the set.
- public void add(long id) {
- if (size == ids.length) {
- ids = Arrays.copyOf(ids, size * 2);
- }
- ids[size++] = id;
- }
-
- // Returns the most recent id added to the set. Behavior is undefined if
- // the set is empty.
- public long last() {
- assert size != 0;
- return ids[size - 1];
- }
-
- // Returns true if the set contains an id in the range [low, high]
- // inclusive, false otherwise.
- public boolean hasIdInRange(long low, long high) {
- for (int i = 0; i < size; ++i) {
- if (low <= ids[i] && ids[i] <= high) {
- return true;
- }
- }
- return false;
- }
- }
-
- // An unordered set of nodes data structure supporting efficient iteration
- // over elements. The bulk of the time spent in the dominators algorithm is
- // iterating over these sets. Using an array to store the set provides
- // noticable performance improvements over ArrayList or a linked list.
- private static class NodeSet {
- public int size = 0;
- public NodeS[] nodes = new NodeS[4];
-
- public void add(NodeS nodeS) {
- if (size == nodes.length) {
- nodes = Arrays.copyOf(nodes, size * 2);
- }
- nodes[size++] = nodeS;
- }
-
- public void remove(NodeS nodeS) {
- for (int i = 0; i < size; ++i) {
- if (nodes[i] == nodeS) {
- remove(i);
- break;
- }
- }
- }
-
- public void remove(int index) {
- nodes[index] = nodes[--size];
- nodes[size] = null;
- }
- }
-
- // A reference from a source node to a destination node to be processed
- // during the initial depth-first traversal of nodes.
- //
- // Also used as a marker to indicate when the depth-first traversal has been
- // completed for a node. In that case, srcS is the node depth-first
- // traversal has been completed for, and dst will be set to null.
- private static class Link {
- public final NodeS srcS;
- public final Node dst;
-
- // Constructor for a reference from srcS to dst.
- public Link(NodeS srcS, Node dst) {
- this.srcS = srcS;
- this.dst = dst;
- }
-
- // Constructor for a marker indicating depth-first traversal has been
- // completed for srcS.
- public Link(NodeS srcS) {
- this.srcS = srcS;
- this.dst = null;
- }
- }
-
/**
* Computes the immediate dominators of all nodes reachable from the <code>root</code> node.
* There must not be any incoming references to the <code>root</code> node.
@@ -268,198 +120,28 @@ public class DominatorsComputation {
* @see Node
*/
public static void computeDominators(Node root, Progress progress, long numNodes) {
- long id = 0;
-
- // The set of nodes xS such that xS.revisit != null.
- // Use a Queue instead of a Set because performance will be better. We
- // avoid adding nodes already on the queue by checking
- // xS == null before adding the node to the queue.
- Queue<NodeS> revisit = new ArrayDeque<NodeS>();
-
- // Set up the root node specially.
- NodeS rootS = new NodeS();
- rootS.node = root;
- rootS.id = id++;
- rootS.depth = 0;
- root.setDominatorsComputationState(rootS);
-
- Deque<Link> dfs = new ArrayDeque<Link>();
- dfs.push(new Link(rootS));
- for (Node child : root.getReferencesForDominators()) {
- dfs.push(new Link(rootS, child));
- }
-
- // workBound is an upper bound on the amount of work required in the
- // second phase of dominators computation, used solely for the purposes of
- // tracking progress.
- long workBound = 0;
-
- // 1. Do a depth first search of the nodes, label them with ids and come
- // up with initial candidate dominators for them.
- progress.start("Initializing dominators", numNodes);
- while (!dfs.isEmpty()) {
- Link link = dfs.pop();
-
- if (link.dst == null) {
- // This is the marker link indicating we have now visited all
- // nodes reachable from link.srcS.
- link.srcS.maxReachableId = id - 1;
- progress.advance();
- } else {
- NodeS dstS = (NodeS)link.dst.getDominatorsComputationState();
- if (dstS == null) {
- // We are seeing the destination node for the first time.
- // The candidate dominator is the source node.
- dstS = new NodeS();
- link.dst.setDominatorsComputationState(dstS);
-
- dstS.node = link.dst;
- dstS.id = id++;
- dstS.inRefIds.add(link.srcS.id);
- dstS.domS = link.srcS;
- dstS.domS.dominated.add(dstS);
- dstS.oldDomS = link.srcS;
- dstS.depth = link.srcS.depth + 1;
-
- dfs.push(new Link(dstS));
- for (Node child : link.dst.getReferencesForDominators()) {
- dfs.push(new Link(dstS, child));
- }
- } else {
- // We have seen the destination node before. Update the state based
- // on the new potential dominator.
- if (dstS.inRefIds.size == 1) {
- workBound += dstS.oldDomS.depth;
- }
-
- long seenid = dstS.inRefIds.last();
- dstS.inRefIds.add(link.srcS.id);
-
- // Go up the dominator chain until we reach a node we haven't already
- // seen with a path to dstS.
- NodeS xS = link.srcS;
- while (xS.id > seenid) {
- xS = xS.domS;
- }
-
- // The new dominator for dstS must have an id less than the node we
- // just reached. Pull the dominator for dstS up its dominator
- // chain until we find a suitable new dominator for dstS.
- long domid = xS.id;
- if (dstS.domS.id > domid) {
- // Mark the node as needing to be revisited.
- if (dstS.domS == dstS.oldDomS) {
- if (dstS.oldDomS.revisit == null) {
- dstS.oldDomS.revisit = new NodeSet();
- revisit.add(dstS.oldDomS);
- }
- dstS.oldDomS.revisit.add(dstS);
- }
-
- // Update the node's candidate dominator.
- dstS.domS.dominated.remove(dstS);
- do {
- dstS.domS = dstS.domS.domS;
- } while (dstS.domS.id > domid);
- dstS.domS.dominated.add(dstS);
- }
- }
+ Dominators.Graph<Node> graph = new Dominators.Graph<Node>() {
+ @Override
+ public void setDominatorsComputationState(Node node, Object state) {
+ node.setDominatorsComputationState(state);
}
- }
- progress.done();
- // 2. Continue revisiting nodes until every node satisfies the requirement
- // that domS.id == oldDomS.id.
- progress.start("Resolving dominators", workBound);
- while (!revisit.isEmpty()) {
- NodeS oldDomS = revisit.poll();
- assert oldDomS.revisit != null;
-
- NodeSet nodes = oldDomS.revisit;
- oldDomS.revisit = null;
-
- // Search for pairs of nodes nodeS, xS for which
- // nodeS.id > xS.domS.id > nodeS.oldDomS.id
- // and there is a path of nodes with increasing ids from nodeS to xS.
- // In that case, xS.domS must be wrong, because there is a path to xS
- // from the root that does not go through xS.domS:
- // * There is a path from the root to nodeS.oldDomS that doesn't go
- // through xS.domS. Otherwise xS.domS would be a dominator of
- // nodeS.oldDomS, but it can't be because xS.domS.id > nodeS.oldDomS.id.
- // * There is a path from nodeS.oldDomS to nodeS that doesn't go through
- // xS.domS, because xS.domS is not a dominator of nodeS.
- // * There is a path from nodeS to xS that doesn't go through xS.domS,
- // because we have a path of increasing ids from nodeS to xS, none of
- // which can have an id smaller than nodeS as xS.domS does.
- for (int i = 0; i < oldDomS.dominated.size; ++i) {
- NodeS xS = oldDomS.dominated.nodes[i];
- for (int j = 0; j < nodes.size; ++j) {
- NodeS nodeS = nodes.nodes[j];
- assert nodeS.oldDomS == oldDomS;
- if (isReachableAscending(nodeS, xS)) {
- // Update the dominator for xS.
- if (xS.domS == xS.oldDomS) {
- if (xS.oldDomS.revisit == null) {
- xS.oldDomS.revisit = new NodeSet();
- revisit.add(xS.oldDomS);
- }
- xS.oldDomS.revisit.add(xS);
- }
- oldDomS.dominated.remove(i--);
- xS.domS = nodeS.domS;
- xS.domS.dominated.add(xS);
- break;
- }
- }
+ @Override
+ public Object getDominatorsComputationState(Node node) {
+ return node.getDominatorsComputationState();
}
- // We can now safely update oldDomS for each of the nodes nodeS while
- // preserving the oldDomS invariant.
- for (int i = 0; i < nodes.size; ++i) {
- NodeS nodeS = nodes.nodes[i];
- nodeS.oldDomS = oldDomS.oldDomS;
- if (nodeS.oldDomS != nodeS.domS) {
- if (nodeS.oldDomS.revisit == null) {
- nodeS.oldDomS.revisit = new NodeSet();
- revisit.add(nodeS.oldDomS);
- }
- nodeS.oldDomS.revisit.add(nodeS);
- }
+ @Override
+ public Iterable<? extends Node> getReferencesForDominators(Node node) {
+ return node.getReferencesForDominators();
}
- progress.advance((oldDomS.depth - oldDomS.oldDomS.depth) * nodes.size);
- }
- progress.done();
-
- // 3. We have figured out the correct dominator for each node. Notify the
- // user of the results by doing one last traversal of the nodes.
- assert revisit.isEmpty();
- revisit.add(rootS);
- while (!revisit.isEmpty()) {
- NodeS nodeS = revisit.poll();
- assert nodeS.domS == nodeS.oldDomS;
- assert nodeS.revisit == null;
- nodeS.node.setDominatorsComputationState(null);
- for (int i = 0; i < nodeS.dominated.size; ++i) {
- NodeS xS = nodeS.dominated.nodes[i];
- xS.node.setDominator(nodeS.node);
- revisit.add(xS);
+ @Override
+ public void setDominator(Node node, Node dominator) {
+ node.setDominator(dominator);
}
- }
- }
-
- // Returns true if there is a path from srcS to dstS of nodes with ascending
- // ids (not including dstS.id).
- private static boolean isReachableAscending(NodeS srcS, NodeS dstS) {
- if (dstS.id < srcS.id) {
- // The first time we saw dstS was before we saw srcS. See if srcS is on
- // the source chain for any nodes with direct references to dstS.
- return dstS.inRefIds.hasIdInRange(srcS.id, srcS.maxReachableId);
- }
+ };
- // Otherwise dstS is only reachable from srcS on a node with ascending ids
- // if it was visited for the first time while performing the depth-first
- // traversal of srcS.
- return dstS.id <= srcS.maxReachableId;
+ new Dominators(graph).progress(progress, numNodes).computeDominators(root);
}
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
index a321ec0785..3d691c7b22 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
@@ -16,7 +16,6 @@
package com.android.ahat.heapdump;
-import com.android.ahat.dominators.DominatorsComputation;
import com.android.ahat.progress.Progress;
import java.awt.image.BufferedImage;
import java.util.ArrayDeque;
@@ -33,8 +32,7 @@ import java.util.Queue;
* kinds of Java instances, including normal Java objects, class objects, and
* arrays.
*/
-public abstract class AhatInstance implements Diffable<AhatInstance>,
- DominatorsComputation.Node {
+public abstract class AhatInstance implements Diffable<AhatInstance> {
// The id of this instance from the heap dump.
private final long mId;
@@ -681,7 +679,7 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
ref.ref.mReverseReferences = new ArrayList<AhatInstance>();
for (Reference childRef : ref.ref.getReferences()) {
- if (childRef.reachability.ordinal() <= reachability.ordinal()) {
+ if (childRef.reachability.notWeakerThan(reachability)) {
queue.add(childRef);
} else {
queues.get(childRef.reachability).add(childRef);
@@ -739,24 +737,12 @@ public abstract class AhatInstance implements Diffable<AhatInstance>,
}
}
- @Override
- public void setDominatorsComputationState(Object state) {
- setTemporaryUserData(state);
+ Iterable<AhatInstance> getReferencesForDominators(Reachability retained) {
+ return new DominatorReferenceIterator(retained, getReferences());
}
- @Override
- public Object getDominatorsComputationState() {
- return getTemporaryUserData();
- }
-
- @Override
- public Iterable<? extends DominatorsComputation.Node> getReferencesForDominators() {
- return new DominatorReferenceIterator(getReferences());
- }
-
- @Override
- public void setDominator(DominatorsComputation.Node dominator) {
- mImmediateDominator = (AhatInstance)dominator;
+ void setDominator(AhatInstance dominator) {
+ mImmediateDominator = dominator;
mImmediateDominator.mDominated.add(this);
}
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
index 12d3755784..3634a1ae3c 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatSnapshot.java
@@ -16,7 +16,7 @@
package com.android.ahat.heapdump;
-import com.android.ahat.dominators.DominatorsComputation;
+import com.android.ahat.dominators.Dominators;
import com.android.ahat.progress.Progress;
import java.util.List;
@@ -41,7 +41,8 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
Instances<AhatInstance> instances,
List<AhatHeap> heaps,
Site rootSite,
- Progress progress) {
+ Progress progress,
+ Reachability retained) {
mSuperRoot = root;
mInstances = instances;
mHeaps = heaps;
@@ -58,16 +59,42 @@ public class AhatSnapshot implements Diffable<AhatSnapshot> {
if (nra != null) {
nra.referent.addRegisteredNativeSize(nra.size);
}
+
+ if (retained == Reachability.UNREACHABLE && inst.isUnreachable()) {
+ mSuperRoot.addRoot(inst);
+ }
}
- DominatorsComputation.computeDominators(mSuperRoot, progress, mInstances.size());
+ Dominators.Graph<AhatInstance> graph = new Dominators.Graph<AhatInstance>() {
+ @Override
+ public void setDominatorsComputationState(AhatInstance node, Object state) {
+ node.setTemporaryUserData(state);
+ }
+
+ @Override
+ public Object getDominatorsComputationState(AhatInstance node) {
+ return node.getTemporaryUserData();
+ }
+
+ @Override
+ public Iterable<AhatInstance> getReferencesForDominators(AhatInstance node) {
+ return node.getReferencesForDominators(retained);
+ }
+
+ @Override
+ public void setDominator(AhatInstance node, AhatInstance dominator) {
+ node.setDominator(dominator);
+ }
+ };
+ new Dominators(graph).progress(progress, mInstances.size()).computeDominators(mSuperRoot);
+
AhatInstance.computeRetainedSize(mSuperRoot, mHeaps.size());
for (AhatHeap heap : mHeaps) {
heap.addToSize(mSuperRoot.getRetainedSize(heap));
}
- mRootSite.prepareForUse(0, mHeaps.size());
+ mRootSite.prepareForUse(0, mHeaps.size(), retained);
}
/**
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java b/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java
index 8c8de2383b..2e819b42ad 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/DominatorReferenceIterator.java
@@ -21,14 +21,16 @@ import java.util.NoSuchElementException;
/**
* Reference iterator used for the dominators computation.
- * This visits only strong references.
+ * This visits only retained references.
*/
class DominatorReferenceIterator implements Iterator<AhatInstance>,
Iterable<AhatInstance> {
+ private final Reachability mRetained;
private Iterator<Reference> mIter;
private AhatInstance mNext;
- public DominatorReferenceIterator(Iterable<Reference> iter) {
+ public DominatorReferenceIterator(Reachability retained, Iterable<Reference> iter) {
+ mRetained = retained;
mIter = iter.iterator();
mNext = null;
}
@@ -37,7 +39,7 @@ class DominatorReferenceIterator implements Iterator<AhatInstance>,
public boolean hasNext() {
while (mNext == null && mIter.hasNext()) {
Reference ref = mIter.next();
- if (ref.reachability == Reachability.STRONG) {
+ if (ref.reachability.notWeakerThan(mRetained)) {
mNext = ref.ref;
}
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
index c18d8b120c..4e7cd43591 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
@@ -56,6 +56,7 @@ public class Parser {
private HprofBuffer hprof = null;
private ProguardMap map = new ProguardMap();
private Progress progress = new NullProgress();
+ private Reachability retained = Reachability.SOFT;
/**
* Creates an hprof Parser that parses a heap dump from a byte buffer.
@@ -105,6 +106,17 @@ public class Parser {
}
/**
+ * Specify the weakest reachability of instances to treat as retained.
+ *
+ * @param retained the weakest reachability of instances to treat as retained.
+ * @return this Parser instance.
+ */
+ public Parser retained(Reachability retained) {
+ this.retained = retained;
+ return this;
+ }
+
+ /**
* Parse the heap dump.
*
* @throws IOException if the heap dump could not be read
@@ -262,13 +274,15 @@ public class Parser {
break;
}
+ case 0x0C: // HEAP DUMP
case 0x1C: { // HEAP DUMP SEGMENT
+ int endOfRecord = hprof.tell() + recordLength;
if (classById == null) {
classById = new Instances<AhatClassObj>(classes);
}
- int subtag;
- while (!isEndOfHeapDumpSegment(subtag = hprof.getU1())) {
+ while (hprof.tell() < endOfRecord) {
progress.update(hprof.tell());
+ int subtag = hprof.getU1();
switch (subtag) {
case 0x01: { // ROOT JNI GLOBAL
long objectId = hprof.getId();
@@ -549,10 +563,6 @@ public class Parser {
String.format("Unsupported heap dump sub tag 0x%02x", subtag));
}
}
-
- // Reset the file pointer back because we read the first byte into
- // the next record.
- hprof.skip(-1);
break;
}
@@ -660,11 +670,7 @@ public class Parser {
hprof = null;
roots = null;
- return new AhatSnapshot(superRoot, mInstances, heaps.heaps, rootSite, progress);
- }
-
- private static boolean isEndOfHeapDumpSegment(int subtag) {
- return subtag == 0x1C || subtag == 0x2C;
+ return new AhatSnapshot(superRoot, mInstances, heaps.heaps, rootSite, progress, retained);
}
private static class RootData {
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Reachability.java b/tools/ahat/src/main/com/android/ahat/heapdump/Reachability.java
index 8df6c8ca23..5d610ddff3 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Reachability.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Reachability.java
@@ -67,4 +67,15 @@ public enum Reachability {
public String toString() {
return name;
}
+
+ /**
+ * Returns true if this reachability is the same or stronger than the
+ * <code>other</code> reachability.
+ *
+ * @param other the other reachability to compare this against
+ * @return true if this reachability is not weaker than <code>other</code>
+ */
+ public boolean notWeakerThan(Reachability other) {
+ return ordinal() <= other.ordinal();
+ }
}
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Site.java b/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
index 72c0a4a750..46a17296b7 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Site.java
@@ -66,9 +66,9 @@ public class Site implements Diffable<Site> {
private Site mBaseline;
/**
- * Summary information about instances allocated at a particular allocation
- * site that are instances of a particular class and allocated on a
- * particular heap.
+ * Summary information about retained instances allocated at a particular
+ * allocation site that are instances of a particular class and allocated on
+ * a particular heap.
*/
public static class ObjectsInfo implements Diffable<ObjectsInfo> {
/**
@@ -82,7 +82,7 @@ public class Site implements Diffable<Site> {
public AhatClassObj classObj; // May be null. Not sure why.
/**
- * The number of instances included in the summary.
+ * The number of retained instances included in the summary.
*/
public long numInstances;
@@ -199,10 +199,11 @@ public class Site implements Diffable<Site> {
* @param id - The smallest id that is allowed to be used for this site or
* any of its children.
* @param numHeaps - The number of heaps in the heap dump.
+ * @param retained the weakest reachability of instances to treat as retained.
* @return An id larger than the largest id used for this site or any of its
* children.
*/
- long prepareForUse(long id, int numHeaps) {
+ long prepareForUse(long id, int numHeaps, Reachability retained) {
mId = id++;
// Count up the total sizes by heap.
@@ -211,9 +212,9 @@ public class Site implements Diffable<Site> {
mSizesByHeap[i] = Size.ZERO;
}
- // Add all reachable objects allocated at this site.
+ // Add all retained objects allocated at this site.
for (AhatInstance inst : mObjects) {
- if (inst.isStronglyReachable()) {
+ if (inst.getReachability().notWeakerThan(retained)) {
AhatHeap heap = inst.getHeap();
Size size = inst.getSize();
ObjectsInfo info = getObjectsInfo(heap, inst.getClassObj());
@@ -225,7 +226,7 @@ public class Site implements Diffable<Site> {
// Add objects allocated in child sites.
for (Site child : mChildren) {
- id = child.prepareForUse(id, numHeaps);
+ id = child.prepareForUse(id, numHeaps, retained);
for (ObjectsInfo childInfo : child.mObjectsInfos) {
ObjectsInfo info = getObjectsInfo(childInfo.heap, childInfo.classObj);
info.numInstances += childInfo.numInstances;
@@ -303,7 +304,7 @@ public class Site implements Diffable<Site> {
* {@link ObjectsInfo}. This method returns all the groups for this
* allocation site.
*
- * @return all ObjectInfo summaries for instances allocated at this site
+ * @return all ObjectInfo summaries for retained instances allocated at this site
*/
public List<ObjectsInfo> getObjectsInfos() {
return mObjectsInfos;
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
index d06df900fb..a871c7e075 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/SuperRoot.java
@@ -16,12 +16,11 @@
package com.android.ahat.heapdump;
-import com.android.ahat.dominators.DominatorsComputation;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.List;
-class SuperRoot extends AhatInstance implements DominatorsComputation.Node {
+class SuperRoot extends AhatInstance {
private List<AhatInstance> mRoots = new ArrayList<AhatInstance>();
private Object mDominatorsComputationState;
diff --git a/tools/ahat/src/test/com/android/ahat/DiffTest.java b/tools/ahat/src/test/com/android/ahat/DiffTest.java
index b1952b28b0..9e927659f3 100644
--- a/tools/ahat/src/test/com/android/ahat/DiffTest.java
+++ b/tools/ahat/src/test/com/android/ahat/DiffTest.java
@@ -18,6 +18,7 @@ package com.android.ahat;
import com.android.ahat.heapdump.AhatHeap;
import com.android.ahat.heapdump.AhatInstance;
+import com.android.ahat.heapdump.Reachability;
import com.android.ahat.heapdump.Value;
import java.io.IOException;
import org.junit.Test;
@@ -79,7 +80,7 @@ public class DiffTest {
@Test
public void diffClassRemoved() throws IOException {
- TestDump dump = TestDump.getTestDump("O.hprof", "L.hprof", null);
+ TestDump dump = TestDump.getTestDump("O.hprof", "L.hprof", null, Reachability.STRONG);
AhatHandler handler = new ObjectsHandler(dump.getAhatSnapshot());
TestHandler.testNoCrash(handler, "http://localhost:7100/objects?class=java.lang.Class");
}
diff --git a/tools/ahat/src/test/com/android/ahat/DominatorsTest.java b/tools/ahat/src/test/com/android/ahat/DominatorsTest.java
index d9af363659..955b59fb4e 100644
--- a/tools/ahat/src/test/com/android/ahat/DominatorsTest.java
+++ b/tools/ahat/src/test/com/android/ahat/DominatorsTest.java
@@ -16,51 +16,55 @@
package com.android.ahat;
+import com.android.ahat.dominators.Dominators;
import com.android.ahat.dominators.DominatorsComputation;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class DominatorsTest {
- private static class Node implements DominatorsComputation.Node {
- public String name;
- public List<Node> depends = new ArrayList<Node>();
- public Node dominator;
- private Object dominatorsComputationState;
- public Node(String name) {
- this.name = name;
- }
+ private static class Graph implements Dominators.Graph<String> {
+ private Map<String, Object> states = new HashMap<>();
+ private Map<String, Collection<String>> depends = new HashMap<>();
+ private Map<String, String> dominators = new HashMap<>();
- public void computeDominators() {
- DominatorsComputation.computeDominators(this);
+ @Override
+ public void setDominatorsComputationState(String node, Object state) {
+ states.put(node, state);
}
- public String toString() {
- return name;
+ @Override public Object getDominatorsComputationState(String node) {
+ return states.get(node);
}
@Override
- public void setDominatorsComputationState(Object state) {
- dominatorsComputationState = state;
+ public Collection<String> getReferencesForDominators(String node) {
+ return depends.get(node);
}
@Override
- public Object getDominatorsComputationState() {
- return dominatorsComputationState;
+ public void setDominator(String node, String dominator) {
+ dominators.put(node, dominator);
}
- @Override
- public Collection<Node> getReferencesForDominators() {
- return depends;
+ /**
+ * Define a node in the graph, including all its outgoing edges.
+ */
+ public void node(String src, String... dsts) {
+ depends.put(src, Arrays.asList(dsts));
}
- @Override
- public void setDominator(DominatorsComputation.Node dominator) {
- this.dominator = (Node)dominator;
+ /**
+ * Get the computed dominator for a given node.
+ */
+ public String dom(String node) {
+ return dominators.get(node);
}
}
@@ -68,20 +72,21 @@ public class DominatorsTest {
public void singleNode() {
// --> n
// Trivial case.
- Node n = new Node("n");
- n.computeDominators();
+ Graph graph = new Graph();
+ graph.node("n");
+ new Dominators(graph).computeDominators("n");
}
@Test
public void parentWithChild() {
// --> parent --> child
// The child node is dominated by the parent.
- Node parent = new Node("parent");
- Node child = new Node("child");
- parent.depends = Arrays.asList(child);
+ Graph graph = new Graph();
+ graph.node("parent", "child");
+ graph.node("child");
+ new Dominators(graph).computeDominators("parent");
- parent.computeDominators();
- assertEquals(parent, child.dominator);
+ assertEquals("parent", graph.dom("child"));
}
@Test
@@ -90,18 +95,16 @@ public class DominatorsTest {
// --> parent child
// \-> left --->/
// The child node can be reached either by right or by left.
- Node parent = new Node("parent");
- Node right = new Node("right");
- Node left = new Node("left");
- Node child = new Node("child");
- parent.depends = Arrays.asList(left, right);
- right.depends = Arrays.asList(child);
- left.depends = Arrays.asList(child);
-
- parent.computeDominators();
- assertEquals(parent, left.dominator);
- assertEquals(parent, right.dominator);
- assertEquals(parent, child.dominator);
+ Graph graph = new Graph();
+ graph.node("parent", "left", "right");
+ graph.node("right", "child");
+ graph.node("left", "child");
+ graph.node("child");
+ new Dominators(graph).computeDominators("parent");
+
+ assertEquals("parent", graph.dom("left"));
+ assertEquals("parent", graph.dom("right"));
+ assertEquals("parent", graph.dom("child"));
}
@Test
@@ -109,30 +112,28 @@ public class DominatorsTest {
// /-> right -->\
// --> parent -----------> child
// The child node can be reached either by right or parent.
- Node parent = new Node("parent");
- Node right = new Node("right");
- Node child = new Node("child");
- parent.depends = Arrays.asList(right, child);
- right.depends = Arrays.asList(child);
-
- parent.computeDominators();
- assertEquals(parent, child.dominator);
- assertEquals(parent, right.dominator);
+ Graph graph = new Graph();
+ graph.node("parent", "right", "child");
+ graph.node("right", "child");
+ graph.node("child");
+ new Dominators(graph).computeDominators("parent");
+
+ assertEquals("parent", graph.dom("child"));
+ assertEquals("parent", graph.dom("right"));
}
@Test
public void subDominator() {
// --> parent --> middle --> child
// The child is dominated by an internal node.
- Node parent = new Node("parent");
- Node middle = new Node("middle");
- Node child = new Node("child");
- parent.depends = Arrays.asList(middle);
- middle.depends = Arrays.asList(child);
-
- parent.computeDominators();
- assertEquals(parent, middle.dominator);
- assertEquals(middle, child.dominator);
+ Graph graph = new Graph();
+ graph.node("parent", "middle");
+ graph.node("middle", "child");
+ graph.node("child");
+ new Dominators(graph).computeDominators("parent");
+
+ assertEquals("parent", graph.dom("middle"));
+ assertEquals("middle", graph.dom("child"));
}
@Test
@@ -140,13 +141,12 @@ public class DominatorsTest {
// --> parent --> child -\
// \<---/
// The child points back to itself.
- Node parent = new Node("parent");
- Node child = new Node("child");
- parent.depends = Arrays.asList(child);
- child.depends = Arrays.asList(child);
+ Graph graph = new Graph();
+ graph.node("parent", "child");
+ graph.node("child", "child");
+ new Dominators(graph).computeDominators("parent");
- parent.computeDominators();
- assertEquals(parent, child.dominator);
+ assertEquals("parent", graph.dom("child"));
}
@Test
@@ -154,19 +154,16 @@ public class DominatorsTest {
// --> parent --> a --> b --> c -\
// \<------------/
// There is a loop in the graph, with only one way into the loop.
- Node parent = new Node("parent");
- Node a = new Node("a");
- Node b = new Node("b");
- Node c = new Node("c");
- parent.depends = Arrays.asList(a);
- a.depends = Arrays.asList(b);
- b.depends = Arrays.asList(c);
- c.depends = Arrays.asList(a);
-
- parent.computeDominators();
- assertEquals(parent, a.dominator);
- assertEquals(a, b.dominator);
- assertEquals(b, c.dominator);
+ Graph graph = new Graph();
+ graph.node("parent", "a");
+ graph.node("a", "b");
+ graph.node("b", "c");
+ graph.node("c", "a");
+ new Dominators(graph).computeDominators("parent");
+
+ assertEquals("parent", graph.dom("a"));
+ assertEquals("a", graph.dom("b"));
+ assertEquals("b", graph.dom("c"));
}
@Test
@@ -176,25 +173,20 @@ public class DominatorsTest {
// \--> left --->--------/
// There is a loop in the graph, with two different ways to enter the
// loop.
- Node parent = new Node("parent");
- Node left = new Node("left");
- Node right = new Node("right");
- Node a = new Node("a");
- Node b = new Node("b");
- Node c = new Node("c");
- parent.depends = Arrays.asList(left, right);
- right.depends = Arrays.asList(a);
- left.depends = Arrays.asList(c);
- a.depends = Arrays.asList(b);
- b.depends = Arrays.asList(c);
- c.depends = Arrays.asList(a);
-
- parent.computeDominators();
- assertEquals(parent, right.dominator);
- assertEquals(parent, left.dominator);
- assertEquals(parent, a.dominator);
- assertEquals(parent, c.dominator);
- assertEquals(a, b.dominator);
+ Graph graph = new Graph();
+ graph.node("parent", "left", "right");
+ graph.node("left", "c");
+ graph.node("right", "a");
+ graph.node("a", "b");
+ graph.node("b", "c");
+ graph.node("c", "a");
+ new Dominators(graph).computeDominators("parent");
+
+ assertEquals("parent", graph.dom("right"));
+ assertEquals("parent", graph.dom("left"));
+ assertEquals("parent", graph.dom("a"));
+ assertEquals("parent", graph.dom("c"));
+ assertEquals("a", graph.dom("b"));
}
@Test
@@ -206,33 +198,33 @@ public class DominatorsTest {
// dominator getting improperly overwritten. The relevant features of this
// case are: 'child' is visited after 'right', 'child' is dominated by
// 'parent', and 'parent' revisits 'right' after visiting 'child'.
- Node parent = new Node("parent");
- Node right = new Node("right");
- Node left = new Node("left");
- Node child = new Node("child");
- parent.depends = Arrays.asList(left, child, right);
- left.depends = Arrays.asList(right);
- right.depends = Arrays.asList(child);
-
- parent.computeDominators();
- assertEquals(parent, left.dominator);
- assertEquals(parent, child.dominator);
- assertEquals(parent, right.dominator);
+ Graph graph = new Graph();
+ graph.node("parent", "left", "child", "right");
+ graph.node("right", "child");
+ graph.node("left", "right");
+ graph.node("child");
+ new Dominators(graph).computeDominators("parent");
+
+ assertEquals("parent", graph.dom("left"));
+ assertEquals("parent", graph.dom("child"));
+ assertEquals("parent", graph.dom("right"));
}
@Test
public void stackOverflow() {
// --> a --> b --> ... --> N
// Verify we don't smash the stack for deep chains.
- Node root = new Node("root");
- Node curr = root;
+ Graph graph = new Graph();
+ String root = "end";
+ graph.node(root);
+
for (int i = 0; i < 10000; ++i) {
- Node node = new Node("n" + i);
- curr.depends.add(node);
- curr = node;
+ String child = root;
+ root = "n" + i;
+ graph.node(root, child);
}
- root.computeDominators();
+ new Dominators(graph).computeDominators(root);
}
@Test
@@ -245,24 +237,20 @@ public class DominatorsTest {
// all reachable children's dominators to be updated too. In particular,
// c's dominator should be updated, even though b's dominator is
// unchanged.
- Node parent = new Node("parent");
- Node right = new Node("right");
- Node left = new Node("left");
- Node a = new Node("a");
- Node b = new Node("b");
- Node c = new Node("c");
- parent.depends = Arrays.asList(right, left);
- left.depends = Arrays.asList(a, c);
- right.depends = Arrays.asList(a);
- a.depends = Arrays.asList(b);
- b.depends = Arrays.asList(c);
-
- parent.computeDominators();
- assertEquals(parent, left.dominator);
- assertEquals(parent, right.dominator);
- assertEquals(parent, a.dominator);
- assertEquals(parent, c.dominator);
- assertEquals(a, b.dominator);
+ Graph graph = new Graph();
+ graph.node("parent", "right", "left");
+ graph.node("right", "a");
+ graph.node("left", "a", "c");
+ graph.node("a", "b");
+ graph.node("b", "c");
+ graph.node("c");
+ new Dominators(graph).computeDominators("parent");
+
+ assertEquals("parent", graph.dom("left"));
+ assertEquals("parent", graph.dom("right"));
+ assertEquals("parent", graph.dom("a"));
+ assertEquals("parent", graph.dom("c"));
+ assertEquals("a", graph.dom("b"));
}
@Test
@@ -276,24 +264,20 @@ public class DominatorsTest {
// to be reachable from p. Make sure that causes e's dominator to be
// refined again from a to p. The extra nodes are there to ensure the
// necessary scheduling to expose the bug we had.
- Node p = new Node("p");
- Node a = new Node("a");
- Node b = new Node("b");
- Node c = new Node("c");
- Node d = new Node("d");
- Node e = new Node("e");
- p.depends = Arrays.asList(d, a);
- a.depends = Arrays.asList(e, b);
- b.depends = Arrays.asList(d, c);
- c.depends = Arrays.asList(d);
- d.depends = Arrays.asList(e);
-
- p.computeDominators();
- assertEquals(p, a.dominator);
- assertEquals(a, b.dominator);
- assertEquals(b, c.dominator);
- assertEquals(p, d.dominator);
- assertEquals(p, e.dominator);
+ Graph graph = new Graph();
+ graph.node("p", "d", "a");
+ graph.node("a", "e", "b");
+ graph.node("b", "d", "c");
+ graph.node("c", "d");
+ graph.node("d", "e");
+ graph.node("e");
+ new Dominators(graph).computeDominators("p");
+
+ assertEquals("p", graph.dom("a"));
+ assertEquals("a", graph.dom("b"));
+ assertEquals("b", graph.dom("c"));
+ assertEquals("p", graph.dom("d"));
+ assertEquals("p", graph.dom("e"));
}
@Test
@@ -307,6 +291,70 @@ public class DominatorsTest {
// up to b. c needs to be revisited again after the dominator for f is
// pulled up to a, and that revisit of c is necessary to ensure the
// dominator for d is pulled up to a.
+ Graph graph = new Graph();
+ graph.node("a", "f", "b");
+ graph.node("b", "f", "d", "x");
+ graph.node("x", "c");
+ graph.node("c", "d");
+ graph.node("d");
+ graph.node("f", "c");
+ new Dominators(graph).computeDominators("a");
+
+ assertEquals("a", graph.dom("b"));
+ assertEquals("b", graph.dom("x"));
+ assertEquals("a", graph.dom("c"));
+ assertEquals("a", graph.dom("d"));
+ assertEquals("a", graph.dom("f"));
+ }
+
+ // Test the old dominators API.
+ private static class Node implements DominatorsComputation.Node {
+ public String name;
+ public List<Node> depends = new ArrayList<Node>();
+ public Node dominator;
+ private Object dominatorsComputationState;
+
+ public Node(String name) {
+ this.name = name;
+ }
+
+ public void computeDominators() {
+ DominatorsComputation.computeDominators(this);
+ }
+
+ public String toString() {
+ return name;
+ }
+
+ @Override
+ public void setDominatorsComputationState(Object state) {
+ dominatorsComputationState = state;
+ }
+
+ @Override
+ public Object getDominatorsComputationState() {
+ return dominatorsComputationState;
+ }
+
+ @Override
+ public Collection<Node> getReferencesForDominators() {
+ return depends;
+ }
+
+ @Override
+ public void setDominator(DominatorsComputation.Node dominator) {
+ this.dominator = (Node)dominator;
+ }
+ }
+
+ @Test
+ public void twiceRevisitOldApi() {
+ // /---->---\
+ // / /--> f -->-\
+ // --> a --> b -->--x---> c --> d
+ // \----------->----/
+ // Run the twiceRevisit test using the user api version of computing
+ // dominators.
Node a = new Node("a");
Node b = new Node("b");
Node x = new Node("x");
diff --git a/tools/ahat/src/test/com/android/ahat/InstanceTest.java b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
index f886e9df5f..196eb1e231 100644
--- a/tools/ahat/src/test/com/android/ahat/InstanceTest.java
+++ b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
@@ -333,6 +333,28 @@ public class InstanceTest {
}
@Test
+ public void retainedSizeByRetained() throws IOException {
+ // The test dump program should never be under enough GC pressure for the
+ // soft reference to be cleared. The referent should be included in
+ // retained size if --retained is soft, but not if --retained is strong.
+ TestDump dumpStrong = TestDump.getTestDump("test-dump.hprof",
+ "test-dump-base.hprof",
+ "test-dump.map",
+ Reachability.STRONG);
+ AhatInstance refStrong = dumpStrong.getDumpedAhatInstance("aSoftReference");
+ long sizeStrong = refStrong.getTotalRetainedSize().getSize();
+
+ TestDump dumpSoft = TestDump.getTestDump("test-dump.hprof",
+ "test-dump-base.hprof",
+ "test-dump.map",
+ Reachability.SOFT);
+ AhatInstance refSoft = dumpSoft.getDumpedAhatInstance("aSoftReference");
+ long sizeSoft = refSoft.getTotalRetainedSize().getSize();
+
+ assertTrue(sizeStrong < sizeSoft);
+ }
+
+ @Test
public void objectNotABitmap() throws IOException {
TestDump dump = TestDump.getTestDump();
AhatInstance obj = dump.getDumpedAhatInstance("anObject");
@@ -456,7 +478,7 @@ public class InstanceTest {
// On Android L, image strings were backed by a single big char array.
// Verify we show just the relative part of the string, not the entire
// char array.
- TestDump dump = TestDump.getTestDump("L.hprof", null, null);
+ TestDump dump = TestDump.getTestDump("L.hprof", null, null, Reachability.STRONG);
AhatSnapshot snapshot = dump.getAhatSnapshot();
// java.lang.String@0x6fe17050 is an image string "char" backed by a
@@ -467,7 +489,7 @@ public class InstanceTest {
@Test
public void nonDefaultHeapRoot() throws IOException {
- TestDump dump = TestDump.getTestDump("O.hprof", null, null);
+ TestDump dump = TestDump.getTestDump("O.hprof", null, null, Reachability.STRONG);
AhatSnapshot snapshot = dump.getAhatSnapshot();
// java.util.HashMap@6004fdb8 is marked as a VM INTERNAL root.
@@ -480,7 +502,7 @@ public class InstanceTest {
@Test
public void threadRoot() throws IOException {
- TestDump dump = TestDump.getTestDump("O.hprof", null, null);
+ TestDump dump = TestDump.getTestDump("O.hprof", null, null, Reachability.STRONG);
AhatSnapshot snapshot = dump.getAhatSnapshot();
// java.lang.Thread@12c03470 is marked as a thread root.
@@ -503,7 +525,7 @@ public class InstanceTest {
@Test
public void nullValueString() throws IOException {
- TestDump dump = TestDump.getTestDump("RI.hprof", null, null);
+ TestDump dump = TestDump.getTestDump("RI.hprof", null, null, Reachability.STRONG);
AhatSnapshot snapshot = dump.getAhatSnapshot();
// java.lang.String@500001a8 has a null 'value' field, which should not
@@ -515,7 +537,7 @@ public class InstanceTest {
@Test
public void classOverhead() throws IOException {
- TestDump dump = TestDump.getTestDump("O.hprof", null, null);
+ TestDump dump = TestDump.getTestDump("O.hprof", null, null, Reachability.STRONG);
AhatSnapshot snapshot = dump.getAhatSnapshot();
// class libore.io.IoTracker has byte[124]@12c028d1 as its class overhead.
diff --git a/tools/ahat/src/test/com/android/ahat/OverviewHandlerTest.java b/tools/ahat/src/test/com/android/ahat/OverviewHandlerTest.java
index c2f773b64b..d437d9b715 100644
--- a/tools/ahat/src/test/com/android/ahat/OverviewHandlerTest.java
+++ b/tools/ahat/src/test/com/android/ahat/OverviewHandlerTest.java
@@ -17,6 +17,7 @@
package com.android.ahat;
import com.android.ahat.heapdump.AhatSnapshot;
+import com.android.ahat.heapdump.Reachability;
import java.io.File;
import java.io.IOException;
import org.junit.Test;
@@ -28,7 +29,8 @@ public class OverviewHandlerTest {
AhatSnapshot snapshot = TestDump.getTestDump().getAhatSnapshot();
AhatHandler handler = new OverviewHandler(snapshot,
new File("my.hprof.file"),
- new File("my.base.hprof.file"));
+ new File("my.base.hprof.file"),
+ Reachability.SOFT);
TestHandler.testNoCrash(handler, "http://localhost:7100");
}
}
diff --git a/tools/ahat/src/test/com/android/ahat/RiTest.java b/tools/ahat/src/test/com/android/ahat/RiTest.java
index d46cafc1b4..98ab669537 100644
--- a/tools/ahat/src/test/com/android/ahat/RiTest.java
+++ b/tools/ahat/src/test/com/android/ahat/RiTest.java
@@ -16,6 +16,8 @@
package com.android.ahat;
+import com.android.ahat.heapdump.Reachability;
+
import java.io.IOException;
import org.junit.Test;
@@ -23,7 +25,7 @@ public class RiTest {
@Test
public void loadRi() throws IOException {
// Verify we can load a heap dump generated from the RI.
- TestDump.getTestDump("ri-test-dump.hprof", null, null);
+ TestDump.getTestDump("ri-test-dump.hprof", null, null, Reachability.STRONG);
}
}
diff --git a/tools/ahat/src/test/com/android/ahat/SiteTest.java b/tools/ahat/src/test/com/android/ahat/SiteTest.java
index 0443d7f264..78ef9b3c60 100644
--- a/tools/ahat/src/test/com/android/ahat/SiteTest.java
+++ b/tools/ahat/src/test/com/android/ahat/SiteTest.java
@@ -18,6 +18,7 @@ package com.android.ahat;
import com.android.ahat.heapdump.AhatInstance;
import com.android.ahat.heapdump.AhatSnapshot;
+import com.android.ahat.heapdump.Reachability;
import com.android.ahat.heapdump.Site;
import java.io.IOException;
import org.junit.Test;
@@ -82,4 +83,53 @@ public class SiteTest {
assertEquals(41, sOverriddenSite.getLineNumber());
assertSame(sOverriddenSite, snapshot.getSite(sOverriddenSite.getId()));
}
+
+ @Test
+ public void objectsInfos() throws IOException {
+ // Verify that objectsInfos only include counts for --retained instances.
+ // We do this by counting the number of 'Reference' instances allocated at
+ // the Site where reachabilityReferenceChain is allocated in DumpedStuff:
+ //
+ // reachabilityReferenceChain = new Reference(
+ // new SoftReference(
+ // new Reference(
+ // new WeakReference(
+ // new SoftReference(
+ // new PhantomReference(new Object(), referenceQueue))))));
+ //
+ // The first instance of 'Reference' is strongly reachable, the second is
+ // softly reachable. So if --retained is 'strong', we should see just the
+ // one reference, but if --retained is 'soft', we should see both of them.
+
+ TestDump dumpStrong = TestDump.getTestDump("test-dump.hprof",
+ "test-dump-base.hprof",
+ "test-dump.map",
+ Reachability.STRONG);
+
+ AhatInstance refStrong = dumpStrong.getDumpedAhatInstance("reachabilityReferenceChain");
+ Site siteStrong = refStrong.getSite();
+ long numReferenceStrong = 0;
+ for (Site.ObjectsInfo info : siteStrong.getObjectsInfos()) {
+ if (info.heap == refStrong.getHeap() && info.classObj == refStrong.getClassObj()) {
+ numReferenceStrong = info.numInstances;
+ break;
+ }
+ }
+ assertEquals(1, numReferenceStrong);
+
+ TestDump dumpSoft = TestDump.getTestDump("test-dump.hprof",
+ "test-dump-base.hprof",
+ "test-dump.map",
+ Reachability.SOFT);
+ AhatInstance refSoft = dumpSoft.getDumpedAhatInstance("reachabilityReferenceChain");
+ Site siteSoft = refSoft.getSite();
+ long numReferenceSoft = 0;
+ for (Site.ObjectsInfo info : siteSoft.getObjectsInfos()) {
+ if (info.heap == refSoft.getHeap() && info.classObj == refSoft.getClassObj()) {
+ numReferenceSoft = info.numInstances;
+ break;
+ }
+ }
+ assertEquals(2, numReferenceSoft);
+ }
}
diff --git a/tools/ahat/src/test/com/android/ahat/TestDump.java b/tools/ahat/src/test/com/android/ahat/TestDump.java
index a0d1021ef1..e94d1a9769 100644
--- a/tools/ahat/src/test/com/android/ahat/TestDump.java
+++ b/tools/ahat/src/test/com/android/ahat/TestDump.java
@@ -23,6 +23,7 @@ import com.android.ahat.heapdump.Diff;
import com.android.ahat.heapdump.FieldValue;
import com.android.ahat.heapdump.HprofFormatException;
import com.android.ahat.heapdump.Parser;
+import com.android.ahat.heapdump.Reachability;
import com.android.ahat.heapdump.Site;
import com.android.ahat.heapdump.Value;
import com.android.ahat.proguard.ProguardMap;
@@ -54,6 +55,7 @@ public class TestDump {
private String mHprofResource;
private String mHprofBaseResource;
private String mMapResource;
+ private Reachability mRetained;
// If the test dump fails to load the first time, it will likely fail every
// other test we try. Rather than having to wait a potentially very long
@@ -94,10 +96,14 @@ public class TestDump {
* The map resource may be null to indicate no proguard map will be used.
*
*/
- private TestDump(String hprofResource, String hprofBaseResource, String mapResource) {
+ private TestDump(String hprofResource,
+ String hprofBaseResource,
+ String mapResource,
+ Reachability retained) {
mHprofResource = hprofResource;
mHprofBaseResource = hprofBaseResource;
mMapResource = mapResource;
+ mRetained = retained;
}
/**
@@ -119,7 +125,7 @@ public class TestDump {
try {
ByteBuffer hprof = dataBufferFromResource(mHprofResource);
- mSnapshot = Parser.parseHeapDump(hprof, map);
+ mSnapshot = new Parser(hprof).map(map).retained(mRetained).parse();
mMain = findClass(mSnapshot, "Main");
assert(mMain != null);
} catch (HprofFormatException e) {
@@ -129,7 +135,7 @@ public class TestDump {
if (mHprofBaseResource != null) {
try {
ByteBuffer hprofBase = dataBufferFromResource(mHprofBaseResource);
- mBaseline = Parser.parseHeapDump(hprofBase, map);
+ mBaseline = new Parser(hprofBase).map(map).retained(mRetained).parse();
mBaselineMain = findClass(mBaseline, "Main");
assert(mBaselineMain != null);
} catch (HprofFormatException e) {
@@ -238,7 +244,10 @@ public class TestDump {
* when possible.
*/
public static synchronized TestDump getTestDump() throws IOException {
- return getTestDump("test-dump.hprof", "test-dump-base.hprof", "test-dump.map");
+ return getTestDump("test-dump.hprof",
+ "test-dump-base.hprof",
+ "test-dump.map",
+ Reachability.STRONG);
}
/**
@@ -246,17 +255,22 @@ public class TestDump {
* @param hprof - The string resouce name of the hprof file.
* @param base - The string resouce name of the baseline hprof, may be null.
* @param map - The string resouce name of the proguard map, may be null.
+ * @param retained the weakest reachability of instances to treat as retained.
* An IOException is thrown if there is an error reading the test dump hprof
* file.
* To improve performance, this returns a cached instance of the TestDump
* when possible.
*/
- public static synchronized TestDump getTestDump(String hprof, String base, String map)
+ public static synchronized TestDump getTestDump(String hprof,
+ String base,
+ String map,
+ Reachability retained)
throws IOException {
for (TestDump loaded : mCachedTestDumps) {
if (Objects.equals(loaded.mHprofResource, hprof)
&& Objects.equals(loaded.mHprofBaseResource, base)
- && Objects.equals(loaded.mMapResource, map)) {
+ && Objects.equals(loaded.mMapResource, map)
+ && Objects.equals(loaded.mRetained, retained)) {
if (loaded.mTestDumpFailed) {
throw new IOException("Test dump failed before, assuming it will again");
}
@@ -264,7 +278,7 @@ public class TestDump {
}
}
- TestDump dump = new TestDump(hprof, base, map);
+ TestDump dump = new TestDump(hprof, base, map, retained);
mCachedTestDumps.add(dump);
dump.load();
return dump;
diff --git a/tools/class2greylist/Android.bp b/tools/class2greylist/Android.bp
new file mode 100644
index 0000000000..7b1233bb85
--- /dev/null
+++ b/tools/class2greylist/Android.bp
@@ -0,0 +1,33 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+java_library_host {
+ name: "class2greylistlib",
+ srcs: ["src/**/*.java"],
+ static_libs: [
+ "commons-cli-1.2",
+ "apache-bcel",
+ ],
+}
+
+java_binary_host {
+ name: "class2greylist",
+ manifest: "src/class2greylist.mf",
+ static_libs: [
+ "class2greylistlib",
+ ],
+}
+
diff --git a/tools/class2greylist/src/class2greylist.mf b/tools/class2greylist/src/class2greylist.mf
new file mode 100644
index 0000000000..ea3a3d9153
--- /dev/null
+++ b/tools/class2greylist/src/class2greylist.mf
@@ -0,0 +1 @@
+Main-Class: com.android.class2greylist.Class2Greylist
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
new file mode 100644
index 0000000000..66857525aa
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationVisitor.java
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import org.apache.bcel.Const;
+import org.apache.bcel.classfile.AnnotationEntry;
+import org.apache.bcel.classfile.DescendingVisitor;
+import org.apache.bcel.classfile.ElementValuePair;
+import org.apache.bcel.classfile.EmptyVisitor;
+import org.apache.bcel.classfile.Field;
+import org.apache.bcel.classfile.FieldOrMethod;
+import org.apache.bcel.classfile.JavaClass;
+import org.apache.bcel.classfile.Method;
+
+import java.util.Locale;
+
+/**
+ * Visits a JavaClass instance and pulls out all members annotated with a
+ * specific annotation. The signatures of such members are passed to {@link
+ * Status#greylistEntry(String)}. Any errors result in a call to {@link
+ * Status#error(String)}.
+ *
+ * If the annotation has a property "expectedSignature" the generated signature
+ * will be verified against the one specified there. If it differs, an error
+ * will be generated.
+ */
+public class AnnotationVisitor extends EmptyVisitor {
+
+ private static final String EXPECTED_SIGNATURE = "expectedSignature";
+
+ private final JavaClass mClass;
+ private final String mAnnotationType;
+ private final Status mStatus;
+ private final DescendingVisitor mDescendingVisitor;
+
+ public AnnotationVisitor(JavaClass clazz, String annotation, Status d) {
+ mClass = clazz;
+ mAnnotationType = annotation;
+ mStatus = d;
+ mDescendingVisitor = new DescendingVisitor(clazz, this);
+ }
+
+ public void visit() {
+ mStatus.debug("Visit class %s", mClass.getClassName());
+ mDescendingVisitor.visit();
+ }
+
+ private static String getClassDescriptor(JavaClass clazz) {
+ // JavaClass.getName() returns the Java-style name (with . not /), so we must fetch
+ // the original class name from the constant pool.
+ return clazz.getConstantPool().getConstantString(
+ clazz.getClassNameIndex(), Const.CONSTANT_Class);
+ }
+
+ @Override
+ public void visitMethod(Method method) {
+ visitMember(method, "L%s;->%s%s");
+ }
+
+ @Override
+ public void visitField(Field field) {
+ visitMember(field, "L%s;->%s:%s");
+ }
+
+ private void visitMember(FieldOrMethod member, String signatureFormatString) {
+ JavaClass definingClass = (JavaClass) mDescendingVisitor.predecessor();
+ mStatus.debug("Visit member %s : %s", member.getName(), member.getSignature());
+ for (AnnotationEntry a : member.getAnnotationEntries()) {
+ if (mAnnotationType.equals(a.getAnnotationType())) {
+ mStatus.debug("Method has annotation %s", mAnnotationType);
+ String signature = String.format(Locale.US, signatureFormatString,
+ getClassDescriptor(definingClass), member.getName(), member.getSignature());
+ for (ElementValuePair property : a.getElementValuePairs()) {
+ switch (property.getNameString()) {
+ case EXPECTED_SIGNATURE:
+ String expected = property.getValue().stringifyValue();
+ if (!signature.equals(expected)) {
+ error(definingClass, member,
+ "Expected signature does not match generated:\n"
+ + "Expected: %s\n"
+ + "Generated: %s", expected, signature);
+ }
+ break;
+ }
+ }
+ mStatus.greylistEntry(signature);
+ }
+ }
+ }
+
+ private void error(JavaClass clazz, FieldOrMethod member, String message, Object... args) {
+ StringBuilder error = new StringBuilder();
+ error.append(clazz.getSourceFileName())
+ .append(": ")
+ .append(clazz.getClassName())
+ .append(".")
+ .append(member.getName())
+ .append(": ")
+ .append(String.format(Locale.US, message, args));
+
+ mStatus.error(error.toString());
+ }
+
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
new file mode 100644
index 0000000000..3e9e320b5b
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PatternOptionBuilder;
+
+import java.io.IOException;
+
+/**
+ * Build time tool for extracting a list of members from jar files that have the @UsedByApps
+ * annotation, for building the greylist.
+ */
+public class Class2Greylist {
+
+ private static final String ANNOTATION_TYPE = "Landroid/annotation/UnsupportedAppUsage;";
+
+ public static void main(String[] args) {
+ Options options = new Options();
+ options.addOption(OptionBuilder
+ .withLongOpt("debug")
+ .hasArgs(0)
+ .withDescription("Enable debug")
+ .create("d"));
+ options.addOption(OptionBuilder
+ .withLongOpt("help")
+ .hasArgs(0)
+ .withDescription("Show this help")
+ .create("h"));
+
+ CommandLineParser parser = new GnuParser();
+ CommandLine cmd;
+
+ try {
+ cmd = parser.parse(options, args);
+ } catch (ParseException e) {
+ System.err.println(e.getMessage());
+ help(options);
+ return;
+ }
+ if (cmd.hasOption('h')) {
+ help(options);
+ }
+
+ String[] jarFiles = cmd.getArgs();
+ if (jarFiles.length == 0) {
+ System.err.println("Error: no jar files specified.");
+ help(options);
+ }
+
+ Status status = new Status(cmd.hasOption('d'));
+
+ for (String jarFile : jarFiles) {
+ status.debug("Processing jar file %s", jarFile);
+ try {
+ JarReader reader = new JarReader(status, jarFile);
+ reader.stream().forEach(clazz -> new AnnotationVisitor(
+ clazz, ANNOTATION_TYPE, status).visit());
+ reader.close();
+ } catch (IOException e) {
+ status.error(e);
+ }
+ }
+ if (status.ok()) {
+ System.exit(0);
+ } else {
+ System.exit(1);
+ }
+
+ }
+
+ private static void help(Options options) {
+ new HelpFormatter().printHelp(
+ "class2greylist path/to/classes.jar [classes2.jar ...]",
+ "Extracts greylist entries from classes jar files given",
+ options, null, true);
+ System.exit(1);
+ }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/JarReader.java b/tools/class2greylist/src/com/android/class2greylist/JarReader.java
new file mode 100644
index 0000000000..f3a9d0b92e
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/JarReader.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import org.apache.bcel.classfile.ClassParser;
+import org.apache.bcel.classfile.JavaClass;
+
+import java.io.IOException;
+import java.util.Objects;
+import java.util.stream.Stream;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
+
+/**
+ * Reads {@link JavaClass} members from a zip/jar file, providing a stream of them for processing.
+ * Any errors are reported via {@link Status#error(Throwable)}.
+ */
+public class JarReader {
+
+ private final Status mStatus;
+ private final String mFileName;
+ private final ZipFile mZipFile;
+
+ public JarReader(Status s, String filename) throws IOException {
+ mStatus = s;
+ mFileName = filename;
+ mZipFile = new ZipFile(mFileName);
+ }
+
+ private JavaClass openZipEntry(ZipEntry e) {
+ try {
+ mStatus.debug("Reading %s from %s", e.getName(), mFileName);
+ return new ClassParser(mZipFile.getInputStream(e), e.getName()).parse();
+ } catch (IOException ioe) {
+ mStatus.error(ioe);
+ return null;
+ }
+ }
+
+
+ public Stream<JavaClass> stream() {
+ return mZipFile.stream()
+ .filter(zipEntry -> zipEntry.getName().endsWith(".class"))
+ .map(zipEntry -> openZipEntry(zipEntry))
+ .filter(Objects::nonNull);
+ }
+
+ public void close() throws IOException {
+ mZipFile.close();
+ }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/Status.java b/tools/class2greylist/src/com/android/class2greylist/Status.java
new file mode 100644
index 0000000000..d7078986d9
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/Status.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.class2greylist;
+
+import java.util.Locale;
+
+public class Status {
+
+ // Highlight "Error:" in red.
+ private static final String ERROR = "\u001B[31mError: \u001B[0m";
+
+ private final boolean mDebug;
+ private boolean mHasErrors;
+
+ public Status(boolean debug) {
+ mDebug = debug;
+ }
+
+ public void debug(String msg, Object... args) {
+ if (mDebug) {
+ System.err.println(String.format(Locale.US, msg, args));
+ }
+ }
+
+ public void error(Throwable t) {
+ System.err.print(ERROR);
+ t.printStackTrace(System.err);
+ mHasErrors = true;
+ }
+
+ public void error(String message) {
+ System.err.print(ERROR);
+ System.err.println(message);
+ mHasErrors = true;
+ }
+
+ public void greylistEntry(String signature) {
+ System.out.println(signature);
+ }
+
+ public boolean ok() {
+ return !mHasErrors;
+ }
+}
diff --git a/tools/class2greylist/test/Android.mk b/tools/class2greylist/test/Android.mk
new file mode 100644
index 0000000000..23f4156f6d
--- /dev/null
+++ b/tools/class2greylist/test/Android.mk
@@ -0,0 +1,32 @@
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+# Only compile source java files in this apk.
+LOCAL_SRC_FILES := $(call all-java-files-under, src)
+
+LOCAL_MODULE := class2greylisttest
+
+LOCAL_STATIC_JAVA_LIBRARIES := class2greylistlib truth-host-prebuilt mockito-host junit-host
+
+# tag this module as a cts test artifact
+LOCAL_COMPATIBILITY_SUITE := general-tests
+
+include $(BUILD_HOST_JAVA_LIBRARY)
+
+# Build the test APKs using their own makefiles
+include $(call all-makefiles-under,$(LOCAL_PATH)) \ No newline at end of file
diff --git a/tools/class2greylist/test/AndroidTest.xml b/tools/class2greylist/test/AndroidTest.xml
new file mode 100644
index 0000000000..66bb63446f
--- /dev/null
+++ b/tools/class2greylist/test/AndroidTest.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="class2greylist tests">
+ <test class="com.android.compatibility.common.tradefed.testtype.JarHostTest" >
+ <option name="jar" value="class2greylisttest.jar" />
+ <option name="runtime-hint" value="1m" />
+ </test>
+</configuration> \ No newline at end of file
diff --git a/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java b/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java
new file mode 100644
index 0000000000..2d9721803c
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/javac/AnnotationVisitorTest.java
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.javac;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.MockitoAnnotations.initMocks;
+
+import com.android.class2greylist.Status;
+import com.android.class2greylist.AnnotationVisitor;
+
+import com.google.common.base.Joiner;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+
+import java.io.IOException;
+
+public class AnnotationVisitorTest {
+
+ private static final String ANNOTATION = "Lannotation/Anno;";
+
+ private Javac mJavac;
+ @Mock
+ private Status mStatus;
+
+ @Before
+ public void setup() throws IOException {
+ initMocks(this);
+ mJavac = new Javac();
+ mJavac.addSource("annotation.Anno", Joiner.on('\n').join(
+ "package annotation;",
+ "import static java.lang.annotation.RetentionPolicy.CLASS;",
+ "import java.lang.annotation.Retention;",
+ "import java.lang.annotation.Target;",
+ "@Retention(CLASS)",
+ "public @interface Anno {",
+ " String expectedSignature() default \"\";",
+ "}"));
+ }
+
+ private void assertNoErrors() {
+ verify(mStatus, never()).error(any(Throwable.class));
+ verify(mStatus, never()).error(any(String.class));
+ }
+
+ @Test
+ public void testGreylistMethod() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno",
+ " public void method() {}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ .visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
+ }
+
+ @Test
+ public void testGreylistConstructor() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno",
+ " public Class() {}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ .visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ assertThat(greylist.getValue()).isEqualTo("La/b/Class;-><init>()V");
+ }
+
+ @Test
+ public void testGreylistField() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno",
+ " public int i;",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ .visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ assertThat(greylist.getValue()).isEqualTo("La/b/Class;->i:I");
+ }
+
+ @Test
+ public void testGreylistMethodExpectedSignature() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno(expectedSignature=\"La/b/Class;->method()V\")",
+ " public void method() {}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ .visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ assertThat(greylist.getValue()).isEqualTo("La/b/Class;->method()V");
+ }
+
+ @Test
+ public void testGreylistMethodExpectedSignatureWrong() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " @Anno(expectedSignature=\"La/b/Class;->nomethod()V\")",
+ " public void method() {}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ .visit();
+
+ verify(mStatus, times(1)).error(any(String.class));
+ }
+
+ @Test
+ public void testGreylistInnerClassMethod() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "import annotation.Anno;",
+ "public class Class {",
+ " public class Inner {",
+ " @Anno",
+ " public void method() {}",
+ " }",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class$Inner"), ANNOTATION,
+ mStatus).visit();
+
+ assertNoErrors();
+ ArgumentCaptor<String> greylist = ArgumentCaptor.forClass(String.class);
+ verify(mStatus, times(1)).greylistEntry(greylist.capture());
+ assertThat(greylist.getValue()).isEqualTo("La/b/Class$Inner;->method()V");
+ }
+
+ @Test
+ public void testMethodNotGreylisted() throws IOException {
+ mJavac.addSource("a.b.Class", Joiner.on('\n').join(
+ "package a.b;",
+ "public class Class {",
+ " public void method() {}",
+ "}"));
+ assertThat(mJavac.compile()).isTrue();
+
+ new AnnotationVisitor(mJavac.getCompiledClass("a.b.Class"), ANNOTATION, mStatus)
+ .visit();
+
+ assertNoErrors();
+ verify(mStatus, never()).greylistEntry(any(String.class));
+ }
+
+}
diff --git a/tools/class2greylist/test/src/com/android/javac/Javac.java b/tools/class2greylist/test/src/com/android/javac/Javac.java
new file mode 100644
index 0000000000..202f4121fc
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/javac/Javac.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.javac;
+
+import com.google.common.io.Files;
+
+import org.apache.bcel.classfile.ClassParser;
+import org.apache.bcel.classfile.JavaClass;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+
+import javax.tools.DiagnosticCollector;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.StandardLocation;
+import javax.tools.ToolProvider;
+
+/**
+ * Helper class for compiling snippets of Java source and providing access to the resulting class
+ * files.
+ */
+public class Javac {
+
+ private final JavaCompiler mJavac;
+ private final StandardJavaFileManager mFileMan;
+ private final List<JavaFileObject> mCompilationUnits;
+ private final File mClassOutDir;
+
+ public Javac() throws IOException {
+ mJavac = ToolProvider.getSystemJavaCompiler();
+ mFileMan = mJavac.getStandardFileManager(null, Locale.US, null);
+ mClassOutDir = Files.createTempDir();
+ mFileMan.setLocation(StandardLocation.CLASS_OUTPUT, Arrays.asList(mClassOutDir));
+ mFileMan.setLocation(StandardLocation.CLASS_PATH, Arrays.asList(mClassOutDir));
+ mCompilationUnits = new ArrayList<>();
+ }
+
+ private String classToFileName(String classname) {
+ return classname.replace('.', '/');
+ }
+
+ public Javac addSource(String classname, String contents) {
+ JavaFileObject java = new SimpleJavaFileObject(URI.create(
+ String.format("string:///%s.java", classToFileName(classname))),
+ JavaFileObject.Kind.SOURCE
+ ){
+ @Override
+ public CharSequence getCharContent(boolean ignoreEncodingErrors) throws IOException {
+ return contents;
+ }
+ };
+ mCompilationUnits.add(java);
+ return this;
+ }
+
+ public boolean compile() {
+ JavaCompiler.CompilationTask task = mJavac.getTask(
+ null,
+ mFileMan,
+ null,
+ null,
+ null,
+ mCompilationUnits);
+ return task.call();
+ }
+
+ public InputStream getClassFile(String classname) throws IOException {
+ Iterable<? extends JavaFileObject> objs = mFileMan.getJavaFileObjects(
+ new File(mClassOutDir, String.format("%s.class", classToFileName(classname))));
+ if (!objs.iterator().hasNext()) {
+ return null;
+ }
+ return objs.iterator().next().openInputStream();
+ }
+
+ public JavaClass getCompiledClass(String classname) throws IOException {
+ return new ClassParser(getClassFile(classname),
+ String.format("%s.class", classToFileName(classname))).parse();
+ }
+}
diff --git a/tools/dexanalyze/dexanalyze.cc b/tools/dexanalyze/dexanalyze.cc
index c90bb9cf6a..841719b821 100644
--- a/tools/dexanalyze/dexanalyze.cc
+++ b/tools/dexanalyze/dexanalyze.cc
@@ -58,7 +58,7 @@ class DexAnalyze {
<< " -a (Run all experiments)\n"
<< " -n <int> (run experiment with 1 .. n as argument)\n"
<< " -d (Dump on per Dex basis)\n"
- << " -v (Verbose dumping)\n";
+ << " -v (quiet(0) to everything(2))\n";
return kExitCodeUsageError;
}
@@ -71,7 +71,17 @@ class DexAnalyze {
verify_checksum_ = false;
run_dex_file_verifier_ = false;
} else if (arg == "-v") {
- verbose_ = true;
+ if (i + 1 >= argc) {
+ return Usage(argv);
+ }
+ std::istringstream iss(argv[i + 1]);
+      size_t verbose_level = 0u;
+      if (!(iss >> verbose_level) ||
+          verbose_level > static_cast<size_t>(VerboseLevel::kEverything)) {
+ return Usage(argv);
+ }
+ ++i;
+ verbose_level_ = static_cast<VerboseLevel>(verbose_level);
} else if (arg == "-a") {
run_all_experiments_ = true;
} else if (arg == "-n") {
@@ -104,7 +114,7 @@ class DexAnalyze {
return 0;
}
- bool verbose_ = false;
+ VerboseLevel verbose_level_ = VerboseLevel::kNormal;
bool verify_checksum_ = true;
bool run_dex_file_verifier_ = true;
bool dump_per_input_dex_ = false;
@@ -147,7 +157,7 @@ class DexAnalyze {
}
}
for (const std::unique_ptr<Experiment>& experiment : experiments_) {
- experiment->dump_ = options->verbose_;
+ experiment->verbose_level_ = options->verbose_level_;
}
}
@@ -185,6 +195,7 @@ class DexAnalyze {
return result;
}
+ DexFileLoaderErrorCode error_code;
std::string error_msg;
Analysis cumulative(&options);
for (const std::string& filename : options.filenames_) {
@@ -201,6 +212,7 @@ class DexAnalyze {
filename.c_str(),
options.run_dex_file_verifier_,
options.verify_checksum_,
+ &error_code,
&error_msg,
&dex_files)) {
LOG(ERROR) << "OpenAll failed for " + filename << " with " << error_msg << std::endl;
diff --git a/tools/dexanalyze/dexanalyze_bytecode.cc b/tools/dexanalyze/dexanalyze_bytecode.cc
index d18b0dfdbd..1c5a5d548b 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.cc
+++ b/tools/dexanalyze/dexanalyze_bytecode.cc
@@ -88,7 +88,7 @@ void NewRegisterInstructions::ProcessDexFiles(
if (method.GetCodeItem() == nullptr || !visited.insert(method.GetCodeItem()).second) {
continue;
}
- if (dump_) {
+ if (verbose_level_ >= VerboseLevel::kEverything) {
std::cout << std::endl
<< "Processing " << dex_file->PrettyMethod(method.GetIndex(), true);
}
@@ -122,8 +122,6 @@ void NewRegisterInstructions::Dump(std::ostream& os, uint64_t total_size) const
os << "Total Dex code bytes: " << Percent(dex_code_bytes_, total_size) << "\n";
os << "Total output code bytes: " << Percent(output_size_, total_size) << "\n";
os << "Total deduped code bytes: " << Percent(deduped_size_, total_size) << "\n";
- os << "Missing field idx count: " << missing_field_idx_count_ << "\n";
- os << "Missing method idx count: " << missing_method_idx_count_ << "\n";
std::vector<std::pair<size_t, std::vector<uint8_t>>> pairs;
for (auto&& pair : instruction_freq_) {
if (pair.second > 0 && !pair.first.empty()) {
@@ -133,11 +131,14 @@ void NewRegisterInstructions::Dump(std::ostream& os, uint64_t total_size) const
}
}
std::sort(pairs.rbegin(), pairs.rend());
- os << "Top instruction bytecode sizes and hex dump" << "\n";
+ static constexpr size_t kMaxMacros = 128;
uint64_t top_instructions_savings = 0u;
- for (size_t i = 0; i < 128 && i < pairs.size(); ++i) {
+ for (size_t i = 0; i < kMaxMacros && i < pairs.size(); ++i) {
top_instructions_savings += pairs[i].first;
- if (dump_ || (true)) {
+ }
+ if (verbose_level_ >= VerboseLevel::kNormal) {
+ os << "Top " << kMaxMacros << " instruction bytecode sizes and hex dump" << "\n";
+ for (size_t i = 0; i < kMaxMacros && i < pairs.size(); ++i) {
auto bytes = pairs[i].second;
// Remove opcode bytes.
bytes.erase(bytes.begin());
@@ -145,6 +146,12 @@ void NewRegisterInstructions::Dump(std::ostream& os, uint64_t total_size) const
<< Instruction::Name(static_cast<Instruction::Code>(pairs[i].second[0]))
<< "(" << bytes << ")\n";
}
+ os << "Move result register distribution" << "\n";
+ const size_t move_result_total =
+ std::accumulate(move_result_reg_.begin(), move_result_reg_.end(), 0u);
+ for (size_t i = 0; i < move_result_reg_.size(); ++i) {
+ os << i << ": " << Percent(move_result_reg_[i], move_result_total) << "\n";
+ }
}
os << "Top instructions 1b macro savings "
<< Percent(top_instructions_savings, total_size) << "\n";
@@ -167,7 +174,7 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
if (inst == code_item.end()) {
break;
}
- if (dump_) {
+ if (verbose_level_ >= VerboseLevel::kEverything) {
std::cout << std::endl;
std::cout << inst->DumpString(nullptr);
if (skip_next) {
@@ -323,6 +330,7 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
next->Opcode() == Instruction::MOVE_RESULT_OBJECT;
if (next_move_result) {
dest_reg = next->VRegA_11x();
+ ++move_result_reg_[dest_reg];
}
}
@@ -406,9 +414,9 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
++current_type.types_.FindOrAdd(type_idx)->second;
} else {
bool next_is_init = false;
- if (opcode == Instruction::NEW_INSTANCE && inst != code_item.end()) {
+ if (opcode == Instruction::NEW_INSTANCE) {
auto next = std::next(inst);
- if (next->Opcode() == Instruction::INVOKE_DIRECT) {
+ if (next != code_item.end() && next->Opcode() == Instruction::INVOKE_DIRECT) {
uint32_t args[6] = {};
uint32_t arg_count = next->GetVarArgs(args);
uint32_t method_idx = DexMethodIndex(next.Inst());
@@ -449,7 +457,7 @@ void NewRegisterInstructions::ProcessCodeItem(const DexFile& dex_file,
Add(new_opcode, inst.Inst());
}
}
- if (dump_) {
+ if (verbose_level_ >= VerboseLevel::kEverything) {
std::cout << std::endl
<< "Bytecode size " << code_item.InsnsSizeInBytes() << " -> " << buffer_.size();
std::cout << std::endl;
@@ -504,7 +512,7 @@ bool NewRegisterInstructions::InstNibblesAndIndex(uint8_t opcode,
}
bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint32_t>& args) {
- if (dump_) {
+ if (verbose_level_ >= VerboseLevel::kEverything) {
std::cout << " ==> " << Instruction::Name(static_cast<Instruction::Code>(opcode)) << " ";
for (int v : args) {
std::cout << v << ", ";
@@ -512,7 +520,7 @@ bool NewRegisterInstructions::InstNibbles(uint8_t opcode, const std::vector<uint
}
for (int v : args) {
if (v >= 16) {
- if (dump_) {
+ if (verbose_level_ >= VerboseLevel::kEverything) {
std::cout << "(OUT_OF_RANGE)";
}
return false;
diff --git a/tools/dexanalyze/dexanalyze_bytecode.h b/tools/dexanalyze/dexanalyze_bytecode.h
index 9ea819bec2..ed40ba7d9b 100644
--- a/tools/dexanalyze/dexanalyze_bytecode.h
+++ b/tools/dexanalyze/dexanalyze_bytecode.h
@@ -17,6 +17,7 @@
#ifndef ART_TOOLS_DEXANALYZE_DEXANALYZE_BYTECODE_H_
#define ART_TOOLS_DEXANALYZE_DEXANALYZE_BYTECODE_H_
+#include <array>
#include <vector>
#include <map>
@@ -75,9 +76,8 @@ class NewRegisterInstructions : public Experiment {
uint64_t output_size_ = 0u;
uint64_t deduped_size_ = 0u;
uint64_t dex_code_bytes_ = 0u;
- uint64_t missing_field_idx_count_ = 0u;
- uint64_t missing_method_idx_count_ = 0u;
uint64_t experiments_ = std::numeric_limits<uint64_t>::max();
+  std::array<size_t, 256> move_result_reg_ = {};  // Zero-init: incremented before any assignment.
std::map<std::vector<uint8_t>, size_t> instruction_freq_;
// Output instruction buffer.
std::vector<uint8_t> buffer_;
diff --git a/tools/dexanalyze/dexanalyze_experiments.cc b/tools/dexanalyze/dexanalyze_experiments.cc
index b9a2ede97e..1f6fe4694e 100644
--- a/tools/dexanalyze/dexanalyze_experiments.cc
+++ b/tools/dexanalyze/dexanalyze_experiments.cc
@@ -208,37 +208,40 @@ void AnalyzeDebugInfo::Dump(std::ostream& os, uint64_t total_size) const {
<< Percent(total_unique_non_header_bytes_, total_size) << "\n";
}
-void AnalyzeStrings::ProcessDexFile(const DexFile& dex_file) {
- std::vector<std::string> strings;
- for (size_t i = 0; i < dex_file.NumStringIds(); ++i) {
- uint32_t length = 0;
- const char* data = dex_file.StringDataAndUtf16LengthByIdx(dex::StringIndex(i), &length);
- // Analyze if the string has any UTF16 chars.
- bool have_wide_char = false;
- const char* ptr = data;
- for (size_t j = 0; j < length; ++j) {
- have_wide_char = have_wide_char || GetUtf16FromUtf8(&ptr) >= 0x100;
- }
- if (have_wide_char) {
- wide_string_bytes_ += 2 * length;
- } else {
- ascii_string_bytes_ += length;
+void AnalyzeStrings::ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) {
+ std::set<std::string> unique_strings;
+ for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ for (size_t i = 0; i < dex_file->NumStringIds(); ++i) {
+ uint32_t length = 0;
+ const char* data = dex_file->StringDataAndUtf16LengthByIdx(dex::StringIndex(i), &length);
+ // Analyze if the string has any UTF16 chars.
+ bool have_wide_char = false;
+ const char* ptr = data;
+ for (size_t j = 0; j < length; ++j) {
+ have_wide_char = have_wide_char || GetUtf16FromUtf8(&ptr) >= 0x100;
+ }
+ if (have_wide_char) {
+ wide_string_bytes_ += 2 * length;
+ } else {
+ ascii_string_bytes_ += length;
+ }
+ string_data_bytes_ += ptr - data;
+ unique_strings.insert(data);
}
- string_data_bytes_ += ptr - data;
-
- strings.push_back(data);
}
- // Note that the strings are probably already sorted.
- std::sort(strings.begin(), strings.end());
+ // Unique strings only since we want to exclude savings from multidex duplication.
+ std::vector<std::string> strings(unique_strings.begin(), unique_strings.end());
+ unique_strings.clear();
// Tunable parameters.
- static const size_t kMinPrefixLen = 3;
- static const size_t kPrefixConstantCost = 5;
+ static const size_t kMinPrefixLen = 1;
+ static const size_t kMaxPrefixLen = 255;
+ static const size_t kPrefixConstantCost = 4;
static const size_t kPrefixIndexCost = 2;
// Calculate total shared prefix.
std::vector<size_t> shared_len;
- std::set<std::string> prefixes;
+ prefixes_.clear();
for (size_t i = 0; i < strings.size(); ++i) {
size_t best_len = 0;
if (i > 0) {
@@ -247,19 +250,65 @@ void AnalyzeStrings::ProcessDexFile(const DexFile& dex_file) {
if (i < strings.size() - 1) {
best_len = std::max(best_len, PrefixLen(strings[i], strings[i + 1]));
}
+ best_len = std::min(best_len, kMaxPrefixLen);
std::string prefix;
if (best_len >= kMinPrefixLen) {
prefix = strings[i].substr(0, best_len);
- prefixes.insert(prefix);
- total_prefix_savings_ += prefix.length();
+ ++prefixes_[prefix];
}
total_prefix_index_cost_ += kPrefixIndexCost;
}
- total_num_prefixes_ += prefixes.size();
- for (const std::string& s : prefixes) {
+ // Optimize the result by moving long prefixes to shorter ones if it causes savings.
+ while (true) {
+ bool have_savings = false;
+ auto it = prefixes_.begin();
+ std::vector<std::string> longest;
+ for (const auto& pair : prefixes_) {
+ longest.push_back(pair.first);
+ }
+ std::sort(longest.begin(), longest.end(), [](const std::string& a, const std::string& b) {
+ return a.length() > b.length();
+ });
+ // Do longest first since this provides the best results.
+ for (const std::string& s : longest) {
+ it = prefixes_.find(s);
+ CHECK(it != prefixes_.end());
+ const std::string& prefix = it->first;
+ int64_t best_savings = 0u;
+ int64_t best_len = -1;
+ for (int64_t len = prefix.length() - 1; len >= 0; --len) {
+ auto found = prefixes_.find(prefix.substr(0, len));
+ if (len != 0 && found == prefixes_.end()) {
+ continue;
+ }
+ // Calculate savings from downgrading the prefix.
+ int64_t savings = kPrefixConstantCost + prefix.length() -
+ (prefix.length() - len) * it->second;
+ if (savings > best_savings) {
+ best_savings = savings;
+ best_len = len;
+ break;
+ }
+ }
+ if (best_len != -1) {
+        const std::pair<std::string, size_t> downgraded(prefix.substr(0, best_len), it->second);
+        prefixes_.erase(it); prefixes_[downgraded.first] += downgraded.second;
+ optimization_savings_ += best_savings;
+ have_savings = true;
+ } else {
+ ++it;
+ }
+ }
+ if (!have_savings) {
+ break;
+ }
+ }
+ total_num_prefixes_ += prefixes_.size();
+ for (const auto& pair : prefixes_) {
// 4 bytes for an offset, one for length.
- total_prefix_dict_ += s.length();
+ total_prefix_dict_ += pair.first.length();
total_prefix_table_ += kPrefixConstantCost;
+ total_prefix_savings_ += pair.first.length() * pair.second;
}
}
@@ -277,8 +326,17 @@ void AnalyzeStrings::Dump(std::ostream& os, uint64_t total_size) const {
net_savings -= total_prefix_dict_;
net_savings -= total_prefix_table_;
net_savings -= total_prefix_index_cost_;
- os << "Prefix net savings " << Percent(net_savings, total_size) << "\n";
os << "Prefix dictionary elements " << total_num_prefixes_ << "\n";
+ os << "Optimization savings " << Percent(optimization_savings_, total_size) << "\n";
+ os << "Prefix net savings " << Percent(net_savings, total_size) << "\n";
+ if (verbose_level_ >= VerboseLevel::kEverything) {
+ std::vector<std::pair<std::string, size_t>> pairs(prefixes_.begin(), prefixes_.end());
+ // Sort lexicographically.
+ std::sort(pairs.begin(), pairs.end());
+ for (const auto& pair : pairs) {
+ os << pair.first << " : " << pair.second << "\n";
+ }
+ }
}
void CountDexIndices::ProcessDexFiles(
diff --git a/tools/dexanalyze/dexanalyze_experiments.h b/tools/dexanalyze/dexanalyze_experiments.h
index 468b74bc00..4e66b3cf3b 100644
--- a/tools/dexanalyze/dexanalyze_experiments.h
+++ b/tools/dexanalyze/dexanalyze_experiments.h
@@ -21,6 +21,7 @@
#include <iosfwd>
#include <memory>
#include <set>
+#include <unordered_map>
#include <vector>
#include "base/macros.h"
@@ -32,6 +33,12 @@ class DexFile;
namespace dexanalyze {
+enum class VerboseLevel : size_t {
+ kQuiet,
+ kNormal,
+ kEverything,
+};
+
bool IsRange(Instruction::Code code);
uint16_t NumberOfArgs(const Instruction& inst);
@@ -52,13 +59,13 @@ class Experiment {
virtual void ProcessDexFile(const DexFile&) {}
virtual void Dump(std::ostream& os, uint64_t total_size) const = 0;
- bool dump_ = false;
+ VerboseLevel verbose_level_ = VerboseLevel::kNormal;
};
// Analyze string data and strings accessed from code.
class AnalyzeStrings : public Experiment {
public:
- void ProcessDexFile(const DexFile& dex_file) OVERRIDE;
+ void ProcessDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files) OVERRIDE;
void Dump(std::ostream& os, uint64_t total_size) const OVERRIDE;
private:
@@ -70,6 +77,8 @@ class AnalyzeStrings : public Experiment {
int64_t total_prefix_table_ = 0u;
int64_t total_prefix_index_cost_ = 0u;
int64_t total_num_prefixes_ = 0u;
+ int64_t optimization_savings_ = 0u;
+ std::unordered_map<std::string, size_t> prefixes_;
};
// Analyze debug info sizes.
diff --git a/tools/dexfuzz/Android.mk b/tools/dexfuzz/Android.mk
index 473f6de3e5..06d3f62a9a 100644
--- a/tools/dexfuzz/Android.mk
+++ b/tools/dexfuzz/Android.mk
@@ -33,4 +33,5 @@ LOCAL_SRC_FILES := dexfuzz
include $(BUILD_PREBUILT)
# --- dexfuzz script with core image dependencies ----------------
+.PHONY: fuzzer
fuzzer: $(LOCAL_BUILT_MODULE) $(HOST_CORE_IMG_OUTS)
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index eb33da2267..a435f2e03e 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -68,6 +68,7 @@ test="org.apache.harmony.jpda.tests.share.AllTests"
mode="target"
# Use JIT compiling by default.
use_jit=true
+instant_jit=false
variant_cmdline_parameter="--variant=X32"
dump_command="/bin/true"
# Timeout of JDWP test in ms.
@@ -129,6 +130,11 @@ while true; do
elif [[ $1 == -Ximage:* ]]; then
image="$1"
shift
+ elif [[ "$1" == "--instant-jit" ]]; then
+ instant_jit=true
+ # Remove the --instant-jit from the arguments.
+ args=${args/$1}
+ shift
elif [[ "$1" == "--no-jit" ]]; then
use_jit=false
# Remove the --no-jit from the arguments.
@@ -310,6 +316,10 @@ if $use_jit; then
debuggee_args="$debuggee_args -Xcompiler-option --compiler-filter=quicken"
fi
+if $instant_jit; then
+ debuggee_args="$debuggee_args -Xjitthreshold:0"
+fi
+
if [[ $mode != "ri" ]]; then
vm_args="$vm_args --vm-arg -Xusejit:$use_jit"
debuggee_args="$debuggee_args -Xusejit:$use_jit"
diff --git a/tools/run-libjdwp-tests.sh b/tools/run-libjdwp-tests.sh
index e116facd98..bd052517fd 100755
--- a/tools/run-libjdwp-tests.sh
+++ b/tools/run-libjdwp-tests.sh
@@ -24,13 +24,18 @@ if [[ `uname` != 'Linux' ]]; then
exit 2
fi
-args=("$@")
+declare -a args=("$@")
debug="no"
has_variant="no"
has_mode="no"
mode="target"
has_timeout="no"
+has_verbose="no"
+# The bitmap of log messages in libjdwp. See list in the help message for more
+# info on what these are. The default is 'errors | callbacks'
+verbose_level=0xC0
+arg_idx=0
while true; do
if [[ $1 == "--debug" ]]; then
debug="yes"
@@ -38,6 +43,7 @@ while true; do
elif [[ $1 == --test-timeout-ms ]]; then
has_timeout="yes"
shift
+ arg_idx=$((arg_idx + 1))
shift
elif [[ "$1" == "--mode=jvm" ]]; then
has_mode="yes"
@@ -47,6 +53,22 @@ while true; do
has_mode="yes"
mode="host"
shift
+ elif [[ $1 == --verbose-all ]]; then
+ has_verbose="yes"
+ verbose_level=0xFFF
+ unset args[arg_idx]
+ shift
+ elif [[ $1 == --verbose ]]; then
+ has_verbose="yes"
+ shift
+ elif [[ $1 == --verbose-level ]]; then
+ shift
+ verbose_level=$1
+ # remove both the --verbose-level and the argument.
+ unset args[arg_idx]
+ arg_idx=$((arg_idx + 1))
+ unset args[arg_idx]
+ shift
elif [[ $1 == --variant=* ]]; then
has_variant="yes"
shift
@@ -55,6 +77,7 @@ while true; do
else
shift
fi
+ arg_idx=$((arg_idx + 1))
done
if [[ "$has_mode" = "no" ]]; then
@@ -68,7 +91,17 @@ fi
if [[ "$has_timeout" = "no" ]]; then
# Double the timeout to 20 seconds
args+=(--test-timeout-ms)
- args+=(20000)
+ if [[ "$has_verbose" = "no" ]]; then
+ args+=(20000)
+ else
+ # Even more time if verbose is set since those can be quite heavy.
+ args+=(200000)
+ fi
+fi
+
+if [[ "$has_verbose" = "yes" ]]; then
+ args+=(--vm-arg)
+ args+=(-Djpda.settings.debuggeeAgentExtraOptions=directlog=y,logfile=/proc/self/fd/2,logflags=$verbose_level)
fi
# We don't use full paths since it is difficult to determine them for device
diff --git a/tools/veridex/veridex.cc b/tools/veridex/veridex.cc
index bcd4815a38..1d3a4fbee9 100644
--- a/tools/veridex/veridex.cc
+++ b/tools/veridex/veridex.cc
@@ -269,6 +269,7 @@ class Veridex {
}
const DexFileLoader dex_file_loader;
+ DexFileLoaderErrorCode error_code;
static constexpr bool kVerifyChecksum = true;
static constexpr bool kRunDexFileVerifier = true;
if (!dex_file_loader.OpenAll(reinterpret_cast<const uint8_t*>(content.data()),
@@ -276,8 +277,13 @@ class Veridex {
filename.c_str(),
kRunDexFileVerifier,
kVerifyChecksum,
+ &error_code,
error_msg,
dex_files)) {
+ if (error_code == DexFileLoaderErrorCode::kEntryNotFound) {
+ LOG(INFO) << "No .dex found, skipping analysis.";
+ return true;
+ }
return false;
}